{"code": "def in_flight_request_count(self, node_id=None):\n \n if node_id is not None:\n conn = self._conns.get(node_id)\n if conn is None:\n return 0\n return len(conn.in_flight_requests)\n else:\n return sum([len(conn.in_flight_requests)\n for conn in list(self._conns.values())])", "docstring": "Get the number of in-flight requests for a node or all nodes.\n\nArguments:\nnode_id (int, optional): a specific node to check. If unspecified,\nreturn the total for all nodes\n\nReturns:\nint: pending in-flight requests for the node, or all nodes if None", "source": "juraj-google-style"} {"code": "def set_size(self, w, h):\n self.attributes['width'] = str(w)\n self.attributes['height'] = str(h)", "docstring": "Sets the rectangle size.\n\nArgs:\nw (int): width of the rectangle\nh (int): height of the rectangle", "source": "codesearchnet"} {"code": "def validate_stats(stats_path, schema_path, anomalies_path):\n print('Validating schema against the computed statistics.')\n schema = taxi.read_schema(schema_path)\n stats = tfdv.load_statistics(stats_path)\n anomalies = tfdv.validate_statistics(stats, schema)\n print('Detected following anomalies:')\n print(text_format.MessageToString(anomalies))\n print('Writing anomalies to anomalies path.')\n file_io.write_string_to_file(anomalies_path, text_format.MessageToString(anomalies))", "docstring": "Validates the statistics against the schema and materializes anomalies.\n\nArgs:\nstats_path: Location of the stats used to infer the schema.\nschema_path: Location of the schema to be used for validation.\nanomalies_path: Location where the detected anomalies are materialized.", "source": "github-repos"} {"code": "def failed_rows_with_errors(self) -> PCollection[Tuple[str, dict, list]]:\n self.validate([WriteToBigQuery.Method.STREAMING_INSERTS, WriteToBigQuery.Method.STORAGE_WRITE_API], 'FAILED_ROWS_WITH_ERRORS')\n return self._failed_rows_with_errors", "docstring": "A ``[STREAMING_INSERTS, STORAGE_WRITE_API]`` method attribute\n\nReturns:\nA PCollection of rows that failed when inserting to BigQuery,\nalong with their errors.\n\nRaises:\nAttributeError: if accessed with a write method\nbesides ``[STREAMING_INSERTS, STORAGE_WRITE_API]``.", "source": "github-repos"} {"code": "def run(self, dag):\n \n \n for node in dag.op_nodes():\n basic_insts = ['measure', 'reset', 'barrier', 'snapshot']\n if node.name in basic_insts:\n \n \n \n continue\n if node.name in self.basis: \n continue\n\n \n rule = node.op.definition\n if not rule:\n raise QiskitError(\"Cannot unroll the circuit to the given basis, %s. \"\n \"No rule to expand instruction %s.\" %\n (str(self.basis), node.op.name))\n\n \n \n decomposition = DAGCircuit()\n decomposition.add_qreg(rule[0][1][0][0])\n for inst in rule:\n decomposition.apply_operation_back(*inst)\n\n unrolled_dag = self.run(decomposition) \n dag.substitute_node_with_dag(node, unrolled_dag)\n return dag", "docstring": "Expand all op nodes to the given basis.\n\nArgs:\ndag(DAGCircuit): input dag\n\nRaises:\nQiskitError: if unable to unroll given the basis due to undefined\ndecomposition rules (such as a bad basis) or excessive recursion.\n\nReturns:\nDAGCircuit: output unrolled dag", "source": "juraj-google-style"} {"code": "def _make_static_axis_non_negative_list(axis, ndims):\n axis = distribution_util.make_non_negative_axis(axis, ndims)\n axis_const = tf.get_static_value(axis)\n if (axis_const is None):\n raise ValueError(('Expected argument `axis` to be statically available. 
Found: %s' % axis))\n axis = (axis_const + np.zeros([1], dtype=axis_const.dtype))\n return list((int(dim) for dim in axis))", "docstring": "Convert possibly negatively indexed axis to non-negative list of ints.\n\nArgs:\naxis: Integer Tensor.\nndims: Number of dimensions into which axis indexes.\n\nReturns:\nA list of non-negative Python integers.\n\nRaises:\nValueError: If `axis` is not statically defined.", "source": "codesearchnet"} {"code": "def _dump(self):\n return {'topic': self.topic, 'headers': self._headers, 'id': self.id, 'body': self.body, 'queue': self.queue}", "docstring": "Dump message attributes.\n\nReturns:\ndict: A dictionary of message attributes.", "source": "codesearchnet"} {"code": "def quad_genz_keister_18(order):\n order = sorted(GENZ_KEISTER_18.keys())[order]\n (abscissas, weights) = GENZ_KEISTER_18[order]\n abscissas = numpy.array(abscissas)\n weights = numpy.array(weights)\n weights /= numpy.sum(weights)\n abscissas *= numpy.sqrt(2)\n return (abscissas, weights)", "docstring": "Hermite Genz-Keister 18 rule.\n\nArgs:\norder (int):\nThe quadrature order. Must be in the interval (0, 8).\n\nReturns:\n(:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):\nAbscissas and weights\n\nExamples:\n>>> abscissas, weights = quad_genz_keister_18(1)\n>>> print(numpy.around(abscissas, 4))\n[-1.7321 0. 1.7321]\n>>> print(numpy.around(weights, 4))\n[0.1667 0.6667 0.1667]", "source": "codesearchnet"} {"code": "def _buckets_nearly_equal(a_dist, b_dist):\n \n a_type, a_buckets = _detect_bucket_option(a_dist)\n b_type, b_buckets = _detect_bucket_option(b_dist)\n if a_type != b_type:\n return False\n elif a_type == u'linearBuckets':\n return _linear_buckets_nearly_equal(a_buckets, b_buckets)\n elif a_type == u'exponentialBuckets':\n return _exponential_buckets_nearly_equal(a_buckets, b_buckets)\n elif a_type == u'explicitBuckets':\n return _explicit_buckets_nearly_equal(a_buckets, b_buckets)\n else:\n return False", "docstring": "Determines whether two `Distributions` are nearly equal.\n\nArgs:\na_dist (:class:`Distribution`): an instance\nb_dist (:class:`Distribution`): another instance\n\nReturn:\nboolean: `True` if the two instances are approximately equal, otherwise\nFalse", "source": "juraj-google-style"} {"code": "def __init__(self, dist_cls_a, dist_cls_b):\n self._key = (dist_cls_a, dist_cls_b)", "docstring": "Initialize the KL registrar.\n\nArgs:\ndist_cls_a: the class of the first argument of the KL divergence.\ndist_cls_b: the class of the second argument of the KL divergence.", "source": "github-repos"} {"code": "def add_values_to_bundle_safe(connection, bundle, values):\n \n for value in values:\n try:\n connection.addValueToBundle(bundle, value)\n except YouTrackException as e:\n if e.response.status == 409:\n print(\"Value with name [ %s ] already exists in bundle [ %s ]\" %\n (utf8encode(value.name), utf8encode(bundle.name)))\n else:\n raise e", "docstring": "Adds values to specified bundle. Checks, whether each value already contains in bundle. 
If yes, it is not added.\n\nArgs:\nconnection: An opened Connection instance.\nbundle: Bundle instance to add values in.\nvalues: Values that should be added to the bundle.\n\nRaises:\nYouTrackException: if something is wrong with queries.", "source": "juraj-google-style"} {"code": "def plot_script(self, script):\n \n\n script.plot([self.matplotlibwidget_1.figure, self.matplotlibwidget_2.figure])\n self.matplotlibwidget_1.draw()\n self.matplotlibwidget_2.draw()", "docstring": "Calls the plot function of the script, and redraws both plots\nArgs:\nscript: script to be plotted", "source": "juraj-google-style"} {"code": "def set_servo_position(self, goalposition, goaltime, led):\n \n goalposition_msb = int(goalposition) >> 8\n goalposition_lsb = int(goalposition) & 0xff\n\n data = []\n data.append(0x0C)\n data.append(self.servoid)\n data.append(I_JOG_REQ)\n data.append(goalposition_lsb)\n data.append(goalposition_msb)\n data.append(led)\n data.append(self.servoid)\n data.append(goaltime)\n send_data(data)", "docstring": "Set the position of Herkulex\n\nEnable torque using torque_on function before calling this\n\nArgs:\n\ngoalposition (int): The desired position, min-0 & max-1023\ngoaltime (int): the time taken to move from present\nposition to goalposition\nled (int): the LED color\n0x00 LED off\n0x04 GREEN\n0x08 BLUE\n0x10 RED", "source": "juraj-google-style"} {"code": "def __init__(self, counter_name, delta=1):\n \n self.counter_name = counter_name\n self.delta = delta", "docstring": "Constructor.\n\nArgs:\ncounter_name: name of the counter as string\ndelta: increment delta as int.", "source": "juraj-google-style"} {"code": "def tent_transform(value: types.FloatTensor) -> types.FloatTensor:\n return tf.where(value < 0.5, 2 * value, 2 * (1 - value))", "docstring": "Returns the tent transform of a given `Tensor`.\n\n#### Examples\n\n```python\nimport tensorflow as tf\nimport tf_quant_finance as tff\n\n# Example: Computing the tent transform of a given vector.\n\ntff.math.qmc.utils.tent_transform(tf.constant([0, .2, .4, .6, .8, 1]))\n# ==> tf.Tensor([0, .4, .8, .8, .4, 0.], shape=(4,), dtype=float32)\n```\n\nArgs:\nvalue: Scalar `Tensor` of real values in the `[0, 1)` range.\n\nReturns:\n`Tensor` with the same `shape` as `value` equal to `2 * value` if `value`\nis less than `0.5` or `2 * (1 - value)` otherwise.", "source": "github-repos"} {"code": "def list_media_services(access_token, subscription_id):\n \n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/providers/microsoft.media/mediaservices?api-version=', MEDIA_API])\n return do_get(endpoint, access_token)", "docstring": "List the media services in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. 
JSON body.", "source": "juraj-google-style"} {"code": "def icon_description(self, **kwargs):\n params = {'language': util.language_code(kwargs.get('lang'))}\n result = self.make_request('icon_description', {}, **params)\n if (not util.check_result(result)):\n return (False, result.get('message', 'UNKNOWN ERROR'))\n values = util.response_list(result, 'Data')\n return (True, [emtype.IconDescription(**a) for a in values])", "docstring": "Obtain a list of elements that have an associated icon.\n\nArgs:\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[IconDescription]), or\nmessage string in case of error.", "source": "codesearchnet"} {"code": "def has_succeed(self):\n status_code = self._response.status_code\n if (status_code in [HTTP_CODE_ZERO, HTTP_CODE_SUCCESS, HTTP_CODE_CREATED, HTTP_CODE_EMPTY, HTTP_CODE_MULTIPLE_CHOICES]):\n return True\n if (status_code in [HTTP_CODE_BAD_REQUEST, HTTP_CODE_UNAUTHORIZED, HTTP_CODE_PERMISSION_DENIED, HTTP_CODE_NOT_FOUND, HTTP_CODE_METHOD_NOT_ALLOWED, HTTP_CODE_CONNECTION_TIMEOUT, HTTP_CODE_CONFLICT, HTTP_CODE_PRECONDITION_FAILED, HTTP_CODE_INTERNAL_SERVER_ERROR, HTTP_CODE_SERVICE_UNAVAILABLE]):\n return False\n raise Exception('Unknown status code %s.', status_code)", "docstring": "Check if the connection has succeeded\n\nReturns:\nReturns True if the connection has succeeded,\nFalse otherwise.", "source": "codesearchnet"} {"code": "def train_on_batch(model, inputs, targets, sample_weights=None, output_loss_metrics=None):\n inputs = training_utils_v1.cast_to_model_input_dtypes(inputs, model)\n outs, total_loss, output_losses, masks = _process_single_batch(model, inputs, targets, sample_weights=sample_weights, training=True, output_loss_metrics=output_loss_metrics)\n if not isinstance(outs, list):\n outs = [outs]\n metrics_results = _eager_metrics_fn(model, outs, targets, sample_weights=sample_weights, masks=masks)\n total_loss = nest.flatten(total_loss)\n return {'total_loss': total_loss, 'output_losses': output_losses, 'metrics': metrics_results}", "docstring": "Calculates the loss and gradient updates for one input batch.\n\nArgs:\nmodel: Model whose loss has to be calculated.\ninputs: Input batch data.\ntargets: Target batch data.\nsample_weights: Sample weight batch data.\noutput_loss_metrics: List of metrics that are used to aggregate output\nloss values.\n\nReturns:\nDict with three items:\n'total_loss': list with a single tensor for overall loss,\n'output_losses': list of tensors for loss corresponding to each of the\nmodel outputs. 
Could be a empty list when model has only one output.\n'metrics': list of tensors for metric specified.", "source": "github-repos"} {"code": "def loads(s, single=False):\n corpus = etree.fromstring(s)\n if single:\n ds = _deserialize_dmrs(next(iter(corpus)))\n else:\n ds = (_deserialize_dmrs(dmrs_elem) for dmrs_elem in corpus)\n return ds", "docstring": "Deserialize DMRX string representations\n\nArgs:\ns (str): a DMRX string\nsingle (bool): if `True`, only return the first Xmrs object\nReturns:\na generator of Xmrs objects (unless *single* is `True`)", "source": "codesearchnet"} {"code": "def verify_reset_restored_iterator(self, ds_fn, num_outputs, break_point=None, sparse_tensors=False, verify_exhausted=True, assert_items_equal=False):\n if context.executing_eagerly():\n self.skipTest('Eager mode iteration do not support re-initialization.')\n break_point = num_outputs \n expected = self.gen_outputs(ds_fn, [], num_outputs, sparse_tensors=sparse_tensors, verify_exhausted=verify_exhausted)\n self.gen_outputs(ds_fn, [], break_point, sparse_tensors=sparse_tensors, verify_exhausted=False)\n actual = []\n with ops.Graph().as_default() as g:\n saver = self._import_meta_graph()\n init_op, get_next_op = self._get_iterator_ops_from_collection(ds_fn, sparse_tensors=sparse_tensors)\n get_next_op = remove_variants(get_next_op)\n with self.session(graph=g) as sess:\n self._initialize(init_op, sess)\n self._restore(saver, sess)\n self._initialize(init_op, sess)\n for _ in range(num_outputs):\n actual.append(sess.run(get_next_op))\n if verify_exhausted:\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next_op)\n self.match(expected, actual, assert_items_equal=assert_items_equal)", "docstring": "Attempts to re-initialize a restored iterator.\n\nThis is useful when restoring a training checkpoint during validation.\n\nArgs:\nds_fn: 0-argument function that returns a Dataset.\nnum_outputs: Total number of outputs expected from this Dataset.\nbreak_point: Break point. Optional. Defaults to num_outputs/2.\nsparse_tensors: Whether dataset is built from SparseTensor(s).\nverify_exhausted: Whether to verify that the iterator has been exhausted\nafter producing `num_outputs` elements.\nassert_items_equal: Tests the output has the expected elements regardless\nof order.\n\nRaises:\nAssertionError if any test fails.", "source": "github-repos"} {"code": "class RandomInvert(BaseImagePreprocessingLayer):\n _USE_BASE_FACTOR = False\n _FACTOR_BOUNDS = (0, 1)\n\n def __init__(self, factor=1.0, value_range=(0, 255), seed=None, data_format=None, **kwargs):\n super().__init__(data_format=data_format, **kwargs)\n self._set_factor(factor)\n self.value_range = value_range\n self.seed = seed\n self.generator = self.backend.random.SeedGenerator(seed)\n\n def get_random_transformation(self, data, training=True, seed=None):\n if not training:\n return None\n if isinstance(data, dict):\n images = data['images']\n else:\n images = data\n seed = seed or self._get_seed_generator(self.backend._backend)\n images_shape = self.backend.shape(images)\n rank = len(images_shape)\n if rank == 3:\n batch_size = 1\n elif rank == 4:\n batch_size = images_shape[0]\n else:\n raise ValueError(f'Expected the input image to be rank 3 or 4. 
Received inputs.shape={images_shape}')\n invert_probability = self.backend.random.uniform(shape=(batch_size,), minval=self.factor[0], maxval=self.factor[1], seed=seed)\n random_threshold = self.backend.random.uniform(shape=(batch_size,), minval=0, maxval=1, seed=seed)\n apply_inversion = random_threshold < invert_probability\n return {'apply_inversion': apply_inversion}\n\n def transform_images(self, images, transformation, training=True):\n if training:\n images = self.backend.cast(images, self.compute_dtype)\n apply_inversion = transformation['apply_inversion']\n return self.backend.numpy.where(apply_inversion[:, None, None, None], self.value_range[1] - images, images)\n return images\n\n def transform_labels(self, labels, transformation, training=True):\n return labels\n\n def transform_bounding_boxes(self, bounding_boxes, transformation, training=True):\n return bounding_boxes\n\n def transform_segmentation_masks(self, segmentation_masks, transformation, training=True):\n return segmentation_masks\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {'factor': self.factor, 'value_range': self.value_range, 'seed': self.seed}\n base_config = super().get_config()\n return {**base_config, **config}", "docstring": "Preprocessing layer for random inversion of image colors.\n\nThis layer randomly inverts the colors of input images with a specified\nprobability range. When applied, each image has a chance of having its\ncolors inverted, where the pixel values are transformed to their\ncomplementary values. Images that are not selected for inversion\nremain unchanged.\n\nArgs:\nfactor: A single float or a tuple of two floats.\n`factor` controls the probability of inverting the image colors.\nIf a tuple is provided, the value is sampled between the two values\nfor each image, where `factor[0]` is the minimum and `factor[1]` is\nthe maximum probability. If a single float is provided, a value\nbetween `0.0` and the provided float is sampled.\nDefaults to `(0, 1)`.\nvalue_range: a tuple or a list of two elements. The first value\nrepresents the lower bound for values in passed images, the second\nrepresents the upper bound. Images passed to the layer should have\nvalues within `value_range`. Defaults to `(0, 255)`.\nseed: Integer. 
Used to create a random seed.", "source": "github-repos"} {"code": "def scroll(self, x, y):\n assert isinstance(x, _INTTYPES), ('x must be an integer, got %s' % repr(x))\n assert isinstance(y, _INTTYPES), ('y must be an integer, got %s' % repr(x))\n\n def getSlide(x, length):\n 'get the parameters needed to scroll the console in the given\\n direction with x\\n returns (x, length, srcx)\\n '\n if (x > 0):\n srcx = 0\n length -= x\n elif (x < 0):\n srcx = abs(x)\n x = 0\n length -= srcx\n else:\n srcx = 0\n return (x, length, srcx)\n\n def getCover(x, length):\n 'return the (x, width) ranges of what is covered and uncovered'\n cover = (0, length)\n uncover = None\n if (x > 0):\n cover = (x, (length - x))\n uncover = (0, x)\n elif (x < 0):\n x = abs(x)\n cover = (0, (length - x))\n uncover = ((length - x), x)\n return (cover, uncover)\n (width, height) = self.get_size()\n if ((abs(x) >= width) or (abs(y) >= height)):\n return self.clear()\n (coverX, uncoverX) = getCover(x, width)\n (coverY, uncoverY) = getCover(y, height)\n (x, width, srcx) = getSlide(x, width)\n (y, height, srcy) = getSlide(y, height)\n self.blit(self, x, y, width, height, srcx, srcy)\n if uncoverX:\n self.draw_rect(uncoverX[0], coverY[0], uncoverX[1], coverY[1], 32, self._fg, self._bg)\n if uncoverY:\n self.draw_rect(coverX[0], uncoverY[0], coverX[1], uncoverY[1], 32, self._fg, self._bg)\n if (uncoverX and uncoverY):\n self.draw_rect(uncoverX[0], uncoverY[0], uncoverX[1], uncoverY[1], 32, self._fg, self._bg)", "docstring": "Scroll the contents of the console in the direction of x,y.\n\nUncovered areas will be cleared to the default background color.\nDoes not move the virutal cursor.\n\nArgs:\nx (int): Distance to scroll along the x-axis.\ny (int): Distance to scroll along the y-axis.\n\nReturns:\nIterator[Tuple[int, int]]: An iterator over the (x, y) coordinates\nof any tile uncovered after scrolling.\n\n.. seealso:: :any:`set_colors`", "source": "codesearchnet"} {"code": "def clean_decodes(ids, vocab_size, eos_id=1):\n ret = []\n for i in ids:\n if (i == eos_id):\n break\n if (i >= vocab_size):\n break\n ret.append(int(i))\n return ret", "docstring": "Stop at EOS or padding or OOV.\n\nArgs:\nids: a list of integers\nvocab_size: an integer\neos_id: EOS id\n\nReturns:\na list of integers", "source": "codesearchnet"} {"code": "def pivot_and_annotate(self, values, gpl, annotation_column, gpl_on='ID', gsm_on='ID_REF'):\n if isinstance(gpl, GPL):\n annotation_table = gpl.table\n elif isinstance(gpl, DataFrame):\n annotation_table = gpl\n else:\n raise TypeError('gpl should be a GPL object or a pandas.DataFrame')\n pivoted_samples = self.pivot_samples(values=values, index=gsm_on)\n ndf = pivoted_samples.reset_index().merge(annotation_table[[gpl_on, annotation_column]], left_on=gsm_on, right_on=gpl_on).set_index(gsm_on)\n del ndf[gpl_on]\n ndf.columns.name = 'name'\n return ndf", "docstring": "Annotate GSM with provided GPL.\n\nArgs:\nvalues (:obj:`str`): Column to use as values eg. 
\"VALUES\"\ngpl (:obj:`pandas.DataFrame` or :obj:`GEOparse.GPL`): A Platform or\nDataFrame to annotate with.\nannotation_column (:obj:`str`): Column in table for annotation.\ngpl_on (:obj:`str`, optional): Use this column in GPL to merge.\nDefaults to \"ID\".\ngsm_on (:obj:`str`, optional): Use this column in GSM to merge.\nDefaults to \"ID_REF\".\n\nReturns:\npandas.DataFrame: Pivoted and annotated table of results", "source": "codesearchnet"} {"code": "def __init__(self, num_packs=1):\n if num_packs < 0:\n raise ValueError('NCCL all-reduce requires num_packs >= 0, but {} is specified'.format(num_packs))\n super(NcclAllReduce, self).__init__(all_reduce_alg='nccl', num_packs=num_packs)", "docstring": "Initializes the object.\n\nArgs:\nnum_packs: a non-negative integer. The number of packs to split values\ninto. If zero, no packing will be done.\n\nRaises:\nValueError: if `num_packs` is negative.", "source": "github-repos"} {"code": "def _create_initial_state(self, initial_ids, initial_cache):\n \n \n cur_index = tf.constant(0)\n\n \n alive_seq = _expand_to_beam_size(initial_ids, self.beam_size)\n alive_seq = tf.expand_dims(alive_seq, axis=2)\n\n \n \n initial_log_probs = tf.constant(\n [[0.] + [-float(\"inf\")] * (self.beam_size - 1)])\n alive_log_probs = tf.tile(initial_log_probs, [self.batch_size, 1])\n\n \n \n alive_cache = nest.map_structure(\n lambda t: _expand_to_beam_size(t, self.beam_size), initial_cache)\n\n \n finished_seq = tf.zeros(tf.shape(alive_seq), tf.int32)\n\n \n finished_scores = tf.ones([self.batch_size, self.beam_size]) * -INF\n\n \n finished_flags = tf.zeros([self.batch_size, self.beam_size], tf.bool)\n\n \n state = {\n _StateKeys.CUR_INDEX: cur_index,\n _StateKeys.ALIVE_SEQ: alive_seq,\n _StateKeys.ALIVE_LOG_PROBS: alive_log_probs,\n _StateKeys.ALIVE_CACHE: alive_cache,\n _StateKeys.FINISHED_SEQ: finished_seq,\n _StateKeys.FINISHED_SCORES: finished_scores,\n _StateKeys.FINISHED_FLAGS: finished_flags\n }\n\n \n \n \n \n \n state_shape_invariants = {\n _StateKeys.CUR_INDEX: tf.TensorShape([]),\n _StateKeys.ALIVE_SEQ: tf.TensorShape([None, self.beam_size, None]),\n _StateKeys.ALIVE_LOG_PROBS: tf.TensorShape([None, self.beam_size]),\n _StateKeys.ALIVE_CACHE: nest.map_structure(\n _get_shape_keep_last_dim, alive_cache),\n _StateKeys.FINISHED_SEQ: tf.TensorShape([None, self.beam_size, None]),\n _StateKeys.FINISHED_SCORES: tf.TensorShape([None, self.beam_size]),\n _StateKeys.FINISHED_FLAGS: tf.TensorShape([None, self.beam_size])\n }\n\n return state, state_shape_invariants", "docstring": "Return initial state dictionary and its shape invariants.\n\nArgs:\ninitial_ids: initial ids to pass into the symbols_to_logits_fn.\nint tensor with shape [batch_size, 1]\ninitial_cache: dictionary storing values to be passed into the\nsymbols_to_logits_fn.\n\nReturns:\nstate and shape invariant dictionaries with keys from _StateKeys", "source": "juraj-google-style"} {"code": "def variables(self):\n current_graph = ops.get_default_graph()\n\n def _from_current_graph(variable):\n if variable._in_graph_mode:\n return variable.op.graph is current_graph\n else:\n return variable._graph_key == current_graph._graph_key\n optimizer_variables = [v for v in self._non_slot_variables() if _from_current_graph(v)]\n for _, variable_dict in self._slots.items():\n for _, slot_for_variable in variable_dict.items():\n if _from_current_graph(slot_for_variable):\n optimizer_variables.append(slot_for_variable)\n return sorted(optimizer_variables, key=lambda v: v.name)", "docstring": "A list of variables which 
encode the current state of `Optimizer`.\n\nIncludes slot variables and additional global variables created by the\noptimizer in the current default graph.\n\nReturns:\nA list of variables.", "source": "github-repos"} {"code": "def _get_bond_data(line):\n line = line.split()\n length = float(line[2])\n sites = line[0].replace('/', '-').split('-')\n site_indices = tuple(((int(ind) - 1) for ind in sites[1:4:2]))\n species = tuple((re.split('\\\\d+', spec)[0] for spec in sites[0:3:2]))\n label = ('%s%d-%s%d' % (species[0], (site_indices[0] + 1), species[1], (site_indices[1] + 1)))\n return (label, length, site_indices)", "docstring": "Subroutine to extract bond label, site indices, and length from\na COPL header line. The site indices are zero-based, so they\ncan be easily used with a Structure object.\n\nExample header line: Fe-1/Fe-1-tr(-1,-1,-1) : 2.482 Ang.\n\nArgs:\nline: line in the COHPCAR header describing the bond.\n\nReturns:\nThe bond label, the bond length and a tuple of the site\nindices.", "source": "codesearchnet"} {"code": "def get_dns_zone_ids(env='dev', facing='internal'):\n client = boto3.Session(profile_name=env).client('route53')\n zones = client.list_hosted_zones_by_name(DNSName='.'.join([env, DOMAIN]))\n zone_ids = []\n for zone in zones['HostedZones']:\n LOG.debug('Found Hosted Zone: %s', zone)\n if ((facing == 'external') or zone['Config']['PrivateZone']):\n LOG.info('Using %(Id)s for \"%(Name)s\", %(Config)s', zone)\n zone_ids.append(zone['Id'])\n LOG.debug('Zone IDs: %s', zone_ids)\n return zone_ids", "docstring": "Get Route 53 Hosted Zone IDs for _env_.\n\nArgs:\nenv (str): Deployment environment.\nfacing (str): Type of ELB, external or internal.\n\nReturns:\nlist: Hosted Zone IDs for _env_. Only *PrivateZone* when _facing_ is\ninternal.", "source": "codesearchnet"} {"code": "def _mutation(candidate, rate=0.1):\n sample_index = np.random.choice(len(candidate))\n sample = candidate[sample_index]\n idx_list = []\n for i in range(int(max((len(sample) * rate), 1))):\n idx = np.random.choice(len(sample))\n idx_list.append(idx)\n field = sample[idx]\n field[np.argmax(field)] = 0\n bit = np.random.choice(field.shape[0])\n field[bit] = 1\n logger.info((LOGGING_PREFIX + 'Perform mutation on %sth at index=%s'), sample_index, str(idx_list))\n return sample", "docstring": "Perform mutation action to candidates.\n\nFor example, randomly change 10% of original sample\n\nArgs:\ncandidate: List of candidate genes (encodings).\nrate: Percentage of mutation bits\n\nExamples:\n>>> # Genes that represent 3 parameters\n>>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])\n>>> new_gene = _mutation([gene1])\n>>> # new_gene could be the gene1 with the 3rd parameter changed\n>>> # new_gene[0] = gene1[0]\n>>> # new_gene[1] = gene1[1]\n>>> # new_gene[2] = [0, 1] != gene1[2]\n\nReturns:\nNew gene (encoding)", "source": "codesearchnet"} {"code": "def build_listen(self, listen_node):\n \n proxy_name = listen_node.listen_header.proxy_name.text\n service_address_node = listen_node.listen_header.service_address\n\n \n config_block_lines = self.__build_config_block(\n listen_node.config_block)\n\n \n host, port = '', ''\n if isinstance(service_address_node, pegnode.ServiceAddress):\n host = service_address_node.host.text\n port = service_address_node.port.text\n else:\n \n \n for line in config_block_lines:\n if isinstance(line, config.Bind):\n host, port = line.host, line.port\n break\n else:\n raise Exception(\n 'Not specify host and port in `listen` definition')\n return config.Listen(\n 
name=proxy_name, host=host, port=port,\n config_block=config_block_lines)", "docstring": "parse `listen` sections, and return a config.Listen\n\nArgs:\nlisten_node (TreeNode): Description\n\nReturns:\nconfig.Listen: an object", "source": "juraj-google-style"} {"code": "def parent_index(self, relations=None):\n g = None\n if (relations is None):\n g = self.get_graph()\n else:\n g = self.get_filtered_graph(relations)\n l = []\n for n in g:\n l.append(([n] + list(g.predecessors(n))))\n return l", "docstring": "Returns a mapping of nodes to all direct parents\n\nArguments\n---------\nrelations : list[str]\nlist of relations used to filter\n\nReturns:\nlist\nlist of lists [[CLASS_1, PARENT_1,1, ..., PARENT_1,N], [CLASS_2, PARENT_2,1, PARENT_2,2, ... ] ... ]", "source": "codesearchnet"} {"code": "def remove(text, exclude):\n exclude = ''.join((str(symbol) for symbol in exclude))\n return text.translate(str.maketrans('', '', exclude))", "docstring": "Remove ``exclude`` symbols from ``text``.\n\nExample:\n>>> remove(\"example text\", string.whitespace)\n'exampletext'\n\nArgs:\ntext (str): The text to modify\nexclude (iterable): The symbols to exclude\n\nReturns:\n``text`` with ``exclude`` symbols removed", "source": "codesearchnet"} {"code": "def _ParseEntryArrayObject(self, file_object, file_offset):\n entry_array_object_map = self._GetDataTypeMap('systemd_journal_entry_array_object')\n try:\n (entry_array_object, _) = self._ReadStructureFromFileObject(file_object, file_offset, entry_array_object_map)\n except (ValueError, errors.ParseError) as exception:\n raise errors.ParseError('Unable to parse entry array object at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))\n if (entry_array_object.object_type != self._OBJECT_TYPE_ENTRY_ARRAY):\n raise errors.ParseError('Unsupported object type: {0:d}.'.format(entry_array_object.object_type))\n if (entry_array_object.object_flags != 0):\n raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(entry_array_object.object_flags))\n return entry_array_object", "docstring": "Parses an entry array object.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object.\nfile_offset (int): offset of the entry array object relative to the start\nof the file-like object.\n\nReturns:\nsystemd_journal_entry_array_object: entry array object.\n\nRaises:\nParseError: if the entry array object cannot be parsed.", "source": "codesearchnet"} {"code": "def merge_tags(left, right, factory=Tags):\n if isinstance(left, Mapping):\n tags = dict(left)\n elif hasattr(left, 'tags'):\n tags = _tags_to_dict(left.tags)\n else:\n tags = _tags_to_dict(left)\n if isinstance(right, Mapping):\n tags.update(right)\n elif hasattr(left, 'tags'):\n tags.update(_tags_to_dict(right.tags))\n else:\n tags.update(_tags_to_dict(right))\n return factory(**tags)", "docstring": "Merge two sets of tags into a new troposphere object\n\nArgs:\nleft (Union[dict, troposphere.Tags]): dictionary or Tags object to be\nmerged with lower priority\nright (Union[dict, troposphere.Tags]): dictionary or Tags object to be\nmerged with higher priority\nfactory (type): Type of object to create. Defaults to the troposphere\nTags class.", "source": "codesearchnet"} {"code": "def write_uint32(self, value, little_endian=True):\n if little_endian:\n endian = '<'\n else:\n endian = '>'\n return self.pack(('%sI' % endian), value)", "docstring": "Pack the value as an unsigned integer and write 4 bytes to the stream.\n\nArgs:\nvalue:\nlittle_endian (bool): specify the endianness. 
(Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "codesearchnet"} {"code": "def _ParseItems(self, parser_mediator, msiecf_file):\n \n format_version = msiecf_file.format_version\n\n decode_error = False\n cache_directories = []\n for cache_directory_name in iter(msiecf_file.cache_directories):\n try:\n cache_directory_name = cache_directory_name.decode('ascii')\n except UnicodeDecodeError:\n decode_error = True\n cache_directory_name = cache_directory_name.decode(\n 'ascii', errors='replace')\n\n cache_directories.append(cache_directory_name)\n\n if decode_error:\n parser_mediator.ProduceExtractionWarning((\n 'unable to decode cache directory names. Characters that cannot '\n 'be decoded will be replaced with \"?\" or \"\\\\ufffd\".'))\n\n for item_index in range(0, msiecf_file.number_of_items):\n try:\n msiecf_item = msiecf_file.get_item(item_index)\n if isinstance(msiecf_item, pymsiecf.leak):\n self._ParseLeak(parser_mediator, cache_directories, msiecf_item)\n\n elif isinstance(msiecf_item, pymsiecf.redirected):\n self._ParseRedirected(parser_mediator, msiecf_item)\n\n elif isinstance(msiecf_item, pymsiecf.url):\n self._ParseUrl(\n parser_mediator, format_version, cache_directories, msiecf_item)\n\n except IOError as exception:\n parser_mediator.ProduceExtractionWarning(\n 'Unable to parse item: {0:d} with error: {1!s}'.format(\n item_index, exception))\n\n for item_index in range(0, msiecf_file.number_of_recovered_items):\n try:\n msiecf_item = msiecf_file.get_recovered_item(item_index)\n if isinstance(msiecf_item, pymsiecf.leak):\n self._ParseLeak(\n parser_mediator, cache_directories, msiecf_item, recovered=True)\n\n elif isinstance(msiecf_item, pymsiecf.redirected):\n self._ParseRedirected(parser_mediator, msiecf_item, recovered=True)\n\n elif isinstance(msiecf_item, pymsiecf.url):\n self._ParseUrl(\n parser_mediator, format_version, cache_directories, msiecf_item,\n recovered=True)\n\n except IOError as exception:\n parser_mediator.ProduceExtractionWarning(\n 'Unable to parse recovered item: {0:d} with error: {1!s}'.format(\n item_index, exception))", "docstring": "Parses a MSIE Cache File (MSIECF) items.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmsiecf_file (pymsiecf.file): MSIECF file.", "source": "juraj-google-style"} {"code": "def trace_cpu(self, graph, tensor_fetches, op_fetches=None):\n if isinstance(graph, func_graph.FuncGraph) or isinstance(graph, function._FuncGraph):\n logging.warning('Tensor Tracer is not supported for tracing FuncGraphs. 
Ignoring tracing.')\n return tensor_fetches\n if graph in TensorTracer._traced_graphs:\n logging.warning('Graph is already rewritten with tensor tracer, ignoring multiple calls.')\n return tensor_fetches\n else:\n TensorTracer._traced_graphs.add(graph)\n self._parameters = tensor_tracer_flags.TTParameters()\n self._tt_config.device_type = _DEVICE_TYPE_CPU\n self._tt_config.num_replicas = 1\n self._tt_config.num_replicas_per_host = 1\n self._tt_config.num_hosts = 1\n self._replica_id = 0\n if self._parameters.graph_dump_path:\n graph_io.write_graph(graph, self._parameters.graph_dump_path, 'graph_before_tt.pbtxt')\n with graph.as_default():\n tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches, on_tpu=False)\n if self._parameters.graph_dump_path:\n graph_io.write_graph(graph, self._parameters.graph_dump_path, 'graph_after_tt.pbtxt')\n return tensor_fetches", "docstring": "Traces the tensors generated by CPU Ops in a TF graph.\n\nArgs:\ngraph: the graph of Ops executed on the CPU.\ntensor_fetches: a (list, tuple, or a single object) of tensor fetches\nreturned by model_fn given to session.run. Function must be provided\nwith at least one tensor to fetch.\nop_fetches: A list of op fetches returned by model_fn given to\nsession.run. op_fetches and tensor_fetches are used to determine the\nnodes that will be executed. Can be None.\n\nReturns:\ntensor_fetches: an exact copy of tensor_fetches that has additional\ndependencies.", "source": "github-repos"} {"code": "def fitness(self, width, height): \n \n assert(width > 0 and height > 0)\n \n rect, max_rect = self._select_position(width, height)\n if rect is None:\n return None\n\n \n return self._rect_fitness(max_rect, rect.width, rect.height)", "docstring": "Metric used to rate how much space is wasted if a rectangle is placed.\nReturns a value greater or equal to zero, the smaller the value the more\n'fit' is the rectangle. 
If the rectangle can't be placed, returns None.\n\nArguments:\nwidth (int, float): Rectangle width\nheight (int, float): Rectangle height\n\nReturns:\nint, float: Rectangle fitness\nNone: Rectangle can't be placed", "source": "juraj-google-style"} {"code": "def _create_events_writer(self, directory):\n \n total_size = 0\n events_files = self._fetch_events_files_on_disk()\n for file_name in events_files:\n file_path = os.path.join(self._events_directory, file_name)\n total_size += tf.io.gfile.stat(file_path).length\n\n if total_size >= self.total_file_size_cap_bytes:\n \n \n for file_name in events_files:\n if total_size < self.total_file_size_cap_bytes:\n break\n\n file_path = os.path.join(self._events_directory, file_name)\n file_size = tf.io.gfile.stat(file_path).length\n try:\n tf.io.gfile.remove(file_path)\n total_size -= file_size\n logger.info(\n \"Deleted %s because events files take up over %d bytes\",\n file_path, self.total_file_size_cap_bytes)\n except IOError as err:\n logger.error(\"Deleting %s failed: %s\", file_path, err)\n\n \n self._events_file_count += 1\n file_path = \"%s.%d.%d\" % (\n os.path.join(directory, DEBUGGER_EVENTS_FILE_STARTING_TEXT),\n time.time(), self._events_file_count)\n logger.info(\"Creating events file %s\", file_path)\n return pywrap_tensorflow.EventsWriter(tf.compat.as_bytes(file_path))", "docstring": "Creates a new events writer.\n\nArgs:\ndirectory: The directory in which to write files containing events.\n\nReturns:\nA new events writer, which corresponds to a new events file.", "source": "juraj-google-style"} {"code": "def iter_cast(inputs, dst_type, return_type=None):\n \n if not isinstance(inputs, collections_abc.Iterable):\n raise TypeError('inputs must be an iterable object')\n if not isinstance(dst_type, type):\n raise TypeError('\"dst_type\" must be a valid type')\n\n out_iterable = six.moves.map(dst_type, inputs)\n\n if return_type is None:\n return out_iterable\n else:\n return return_type(out_iterable)", "docstring": "Cast elements of an iterable object into some type.\n\nArgs:\ninputs (Iterable): The input object.\ndst_type (type): Destination type.\nreturn_type (type, optional): If specified, the output object will be\nconverted to this type, otherwise an iterator.\n\nReturns:\niterator or specified type: The converted object.", "source": "juraj-google-style"} {"code": "def __init__(\n self, password=None, parent=None, recovery_password=None,\n startup_key=None, **kwargs):\n \n if not parent:\n raise ValueError('Missing parent value.')\n\n super(BDEPathSpec, self).__init__(parent=parent, **kwargs)\n self.password = password\n self.recovery_password = recovery_password\n self.startup_key = startup_key", "docstring": "Initializes a path specification.\n\nNote that the BDE path specification must have a parent.\n\nArgs:\npassword (Optional[str]): password.\nparent (Optional[PathSpec]): parent path specification.\nrecovery_password (Optional[str]): recovery password.\nstartup_key (Optional[str]): name of the startup key file.\n\nRaises:\nValueError: when parent is not set.", "source": "juraj-google-style"} {"code": "def install_antivirus(version=None, latest=False, synch=False, skip_commit=False):\n if ((not version) and (latest is False)):\n raise CommandExecutionError('Version option must not be none.')\n if (synch is True):\n s = 'yes'\n else:\n s = 'no'\n if (skip_commit is True):\n c = 'yes'\n else:\n c = 'no'\n if (latest is True):\n query = {'type': 'op', 'cmd': '{0}{1}latest'.format(c, s)}\n else:\n query = {'type': 'op', 'cmd': 
'{0}{1}{2}'.format(c, s, version)}\n return _get_job_results(query)", "docstring": "Install anti-virus packages.\n\nArgs:\nversion(str): The version of the PANOS file to install.\n\nlatest(bool): If true, the latest anti-virus file will be installed.\nThe specified version option will be ignored.\n\nsynch(bool): If true, the anti-virus will synch to the peer unit.\n\nskip_commit(bool): If true, the install will skip committing to the device.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' panos.install_antivirus 8.0.0", "source": "codesearchnet"} {"code": "def answer(self, c, details):\n \n if c in [Settings.SUCCESS, Settings.CREATED, Settings.ACCEPTED]:\n return details\n elif c == Settings.BAD_REQUEST:\n raise ErrAtlasBadRequest(c, details)\n elif c == Settings.UNAUTHORIZED:\n raise ErrAtlasUnauthorized(c, details)\n elif c == Settings.FORBIDDEN:\n raise ErrAtlasForbidden(c, details)\n elif c == Settings.NOTFOUND:\n raise ErrAtlasNotFound(c, details)\n elif c == Settings.METHOD_NOT_ALLOWED:\n raise ErrAtlasMethodNotAllowed(c, details)\n elif c == Settings.CONFLICT:\n raise ErrAtlasConflict(c, details)\n else:\n \n raise ErrAtlasServerErrors(c, details)", "docstring": "Answer will provide all necessary feedback for the caller\n\nArgs:\nc (int): HTTP Code\ndetails (dict): Response payload\n\nReturns:\ndict: Response payload\n\nRaises:\nErrAtlasBadRequest\nErrAtlasUnauthorized\nErrAtlasForbidden\nErrAtlasNotFound\nErrAtlasMethodNotAllowed\nErrAtlasConflict\nErrAtlasServerErrors", "source": "juraj-google-style"} {"code": "def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443, sslflags=0):\n name = six.text_type(name).upper()\n binding_info = _get_binding_info(hostheader, ipaddress, port)\n if (_iisVersion() < 8):\n binding_info = (binding_info.rpartition(':')[0] + ':')\n binding_path = 'IIS:\\\\SslBindings\\\\{0}'.format(binding_info.replace(':', '!'))\n if (sslflags not in _VALID_SSL_FLAGS):\n message = \"Invalid sslflags '{0}' specified. 
Valid sslflags range: {1}..{2}\".format(sslflags, _VALID_SSL_FLAGS[0], _VALID_SSL_FLAGS[(- 1)])\n raise SaltInvocationError(message)\n current_bindings = list_bindings(site)\n if (binding_info not in current_bindings):\n log.error('Binding not present: %s', binding_info)\n return False\n current_name = None\n for current_binding in current_bindings:\n if (binding_info == current_binding):\n current_name = current_bindings[current_binding]['certificatehash']\n log.debug('Current certificate thumbprint: %s', current_name)\n log.debug('New certificate thumbprint: %s', name)\n if (name == current_name):\n log.debug('Certificate already present for binding: %s', name)\n return True\n certs = _list_certs()\n if (name not in certs):\n log.error('Certificate not present: %s', name)\n return False\n if (_iisVersion() < 8):\n iis7path = binding_path.replace('\\\\*!', '\\\\0.0.0.0!')\n if iis7path.endswith('!'):\n iis7path = iis7path[:(- 1)]\n ps_cmd = ['New-Item', '-Path', \"'{0}'\".format(iis7path), '-Thumbprint', \"'{0}'\".format(name)]\n else:\n ps_cmd = ['New-Item', '-Path', \"'{0}'\".format(binding_path), '-Thumbprint', \"'{0}'\".format(name), '-SSLFlags', '{0}'.format(sslflags)]\n cmd_ret = _srvmgr(ps_cmd)\n if (cmd_ret['retcode'] != 0):\n msg = 'Unable to create certificate binding: {0}\\nError: {1}'.format(name, cmd_ret['stderr'])\n raise CommandExecutionError(msg)\n new_cert_bindings = list_cert_bindings(site)\n if (binding_info not in new_cert_bindings):\n log.error('Binding not present: %s', binding_info)\n return False\n if (name == new_cert_bindings[binding_info]['certificatehash']):\n log.debug('Certificate binding created successfully: %s', name)\n return True\n log.error('Unable to create certificate binding: %s', name)\n return False", "docstring": "Assign a certificate to an IIS Web Binding.\n\n.. versionadded:: 2016.11.0\n\n.. note::\n\nThe web binding that the certificate is being assigned to must already\nexist.\n\nArgs:\nname (str): The thumbprint of the certificate.\nsite (str): The IIS site name.\nhostheader (str): The host header of the binding.\nipaddress (str): The IP address of the binding.\nport (int): The TCP port of the binding.\nsslflags (int): Flags representing certificate type and certificate storage of the binding.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. 
code-block:: bash\n\nsalt '*' win_iis.create_cert_binding name='AAA000' site='site0' hostheader='example.com' ipaddress='*' port='443'", "source": "codesearchnet"} {"code": "def _Stat(self, path, ext_attrs=False):\n \n \n local_path = client_utils.CanonicalPathToLocalPath(path)\n result = client_utils.StatEntryFromPath(\n local_path, self.pathspec, ext_attrs=ext_attrs)\n\n \n try:\n result.symlink = utils.SmartUnicode(os.readlink(local_path))\n except (OSError, AttributeError):\n pass\n\n return result", "docstring": "Returns stat information of a specific path.\n\nArgs:\npath: A unicode string containing the path.\next_attrs: Whether the call should also collect extended attributes.\n\nReturns:\na StatResponse proto\n\nRaises:\nIOError when call to os.stat() fails", "source": "juraj-google-style"} {"code": "def _context_callbacks(app, key, original_context=_CONTEXT_MISSING):\n\n def _get_context(dummy_app):\n 'Set the context proxy so that it points to a specific context.\\n '\n _CONTEXT_LOCALS.context = _CONTEXT_LOCALS(key)\n\n def _clear_context(dummy_app):\n 'Remove the context proxy that points to a specific context and\\n restore the original context, if there was one.\\n '\n try:\n del _CONTEXT_LOCALS.context\n except AttributeError:\n pass\n if (original_context is not _CONTEXT_MISSING):\n setattr(_CONTEXT_LOCALS, key, original_context)\n _CONTEXT_CALLBACK_MAP[app] = (_get_context, _clear_context)\n appcontext_pushed.connect(_get_context, app)\n appcontext_popped.connect(_clear_context, app)\n return (_get_context, _clear_context)", "docstring": "Register the callbacks we need to properly pop and push the\napp-local context for a component.\n\nArgs:\napp (flask.Flask): The app who this context belongs to. This is the\nonly sender our Blinker signal will listen to.\nkey (str): The key on ``_CONTEXT_LOCALS`` that this app's context\nlistens to.\n\nKwargs:\noriginal_context (dict): The original context present whenever\nthese callbacks were registered. We will restore the context to\nthis value whenever the app context gets popped.\n\nReturns:\n(function, function): A two-element tuple of the dynamic functions\nwe generated as appcontext callbacks. 
The first element is the\ncallback for ``appcontext_pushed`` (i.e., get and store the\ncurrent context) and the second element is the callback for\n``appcontext_popped`` (i.e., restore the current context to\nto it's original value).", "source": "codesearchnet"} {"code": "def parse_variant(store, institute_obj, case_obj, variant_obj, update=False, genome_build='37',\n get_compounds = True):\n \n has_changed = False\n compounds = variant_obj.get('compounds', [])\n if compounds and get_compounds:\n \n \n if 'not_loaded' not in compounds[0]:\n new_compounds = store.update_variant_compounds(variant_obj)\n variant_obj['compounds'] = new_compounds\n has_changed = True\n\n \n variant_obj['compounds'] = sorted(variant_obj['compounds'],\n key=lambda compound: -compound['combined_score'])\n\n \n variant_genes = variant_obj.get('genes')\n if variant_genes is not None:\n for gene_obj in variant_genes:\n \n if not gene_obj['hgnc_id']:\n continue\n \n if gene_obj.get('hgnc_symbol') is None:\n hgnc_gene = store.hgnc_gene(gene_obj['hgnc_id'], build=genome_build)\n if not hgnc_gene:\n continue\n has_changed = True\n gene_obj['hgnc_symbol'] = hgnc_gene['hgnc_symbol']\n\n \n \n if update and has_changed:\n variant_obj = store.update_variant(variant_obj)\n\n variant_obj['comments'] = store.events(institute_obj, case=case_obj,\n variant_id=variant_obj['variant_id'], comments=True)\n\n if variant_genes:\n variant_obj.update(get_predictions(variant_genes))\n if variant_obj.get('category') == 'cancer':\n variant_obj.update(get_variant_info(variant_genes))\n\n for compound_obj in compounds:\n compound_obj.update(get_predictions(compound_obj.get('genes', [])))\n\n if isinstance(variant_obj.get('acmg_classification'), int):\n acmg_code = ACMG_MAP[variant_obj['acmg_classification']]\n variant_obj['acmg_classification'] = ACMG_COMPLETE_MAP[acmg_code]\n\n\n \n variant_length = variant_obj.get('length')\n variant_obj['length'] = {100000000000: 'inf', -1: 'n.d.'}.get(variant_length, variant_length)\n if not 'end_chrom' in variant_obj:\n variant_obj['end_chrom'] = variant_obj['chromosome']\n\n return variant_obj", "docstring": "Parse information about variants.\n\n- Adds information about compounds\n- Updates the information about compounds if necessary and 'update=True'\n\nArgs:\nstore(scout.adapter.MongoAdapter)\ninstitute_obj(scout.models.Institute)\ncase_obj(scout.models.Case)\nvariant_obj(scout.models.Variant)\nupdate(bool): If variant should be updated in database\ngenome_build(str)", "source": "juraj-google-style"} {"code": "def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):\n split_image = images_kwargs.get('split_image', None) or self.split_image\n max_image_size = images_kwargs.get('max_image_size', None) or self.max_image_size\n resized_height, resized_width = select_best_resolution((height, width), self.split_resolutions)\n num_patches = 1 if not split_image else resized_height \n return num_patches", "docstring": "A utility that returns number of image patches for a given image size.\n\nArgs:\nheight (`int`):\nHeight of the input image.\nwidth (`int`):\nWidth of the input image.\nimages_kwargs (`dict`, *optional*)\nAny kwargs to override defaults of the image processor.\nReturns:\n`int`: Number of patches per image.", "source": "github-repos"} {"code": "def _get_weights(max_length):\n weights = [1]\n for i in range(1, max_length):\n weights.append(((weights[(i - 1)] * len(_ALPHABET)) + 1))\n weights.reverse()\n return weights", "docstring": "Get weights for each offset in str 
of certain max length.\n\nArgs:\nmax_length: max length of the strings.\n\nReturns:\nA list of ints as weights.\n\nExample:\nIf max_length is 2 and alphabet is \"ab\", then we have order \"\", \"a\", \"aa\",\n\"ab\", \"b\", \"ba\", \"bb\". So the weight for the first char is 3.", "source": "codesearchnet"} {"code": "def rmdir(path, dir_fd=None):\n \n system = get_instance(path)\n system.remove(system.ensure_dir_path(path))", "docstring": "Remove a directory.\n\nEquivalent to \"os.rmdir\".\n\nArgs:\npath (path-like object): Path or URL.\ndir_fd: directory descriptors;\nsee the os.rmdir() description for how it is interpreted.\nNot supported on cloud storage objects.", "source": "juraj-google-style"} {"code": "def update_state(self, *args, **kwargs):\n raise NotImplementedError('Must be implemented in subclasses.')", "docstring": "Accumulates statistics for the metric.\n\nNote: This function is executed as a graph function in graph mode.\nThis means:\na) Operations on the same resource are executed in textual order.\nThis should make it easier to do things like add the updated\nvalue of a variable to another, for example.\nb) You don't need to worry about collecting the update ops to execute.\nAll update ops added to the graph by this function will be executed.\nAs a result, code should generally work the same way with graph or\neager execution.\n\nArgs:\n*args:\n**kwargs: A mini-batch of inputs to the Metric.", "source": "github-repos"} {"code": "def plot(self, figure_list):\n \n \n if not self.data:\n return\n\n \n if not self.is_running:\n self._plot_refresh = True\n\n axes_list = self.get_axes_layout(figure_list)\n if self._plot_refresh is True:\n self._plot(axes_list)\n self._plot_refresh = False\n for figure in figure_list:\n if figure.axes:\n figure.set_tight_layout(True)\n else:\n self._update_plot(axes_list)", "docstring": "plots the data contained in self.data, which should be a dictionary or a deque of dictionaries\nfor the latter use the last entry\nArgs:\nfigure_list: list of figure objects that are passed to self.get_axes_layout to get axis objects for plotting", "source": "juraj-google-style"} {"code": "def allzeros(msg):\n d = hex2bin(data(msg))\n if (bin2int(d) > 0):\n return False\n else:\n return True", "docstring": "check if the data bits are all zeros\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "codesearchnet"} {"code": "def _config_parser_to_defaultdict(config_parser):\n config = defaultdict(defaultdict)\n for (section, section_content) in config_parser.items():\n if (section != 'DEFAULT'):\n for (option, option_value) in section_content.items():\n config[section][option] = option_value\n return config", "docstring": "Convert a ConfigParser to a defaultdict.\n\nArgs:\nconfig_parser (ConfigParser): A ConfigParser.", "source": "codesearchnet"} {"code": "def _save_sorted_results(self, run_stats, scores, image_count, filename):\n \n with open(filename, 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['SubmissionID', 'ExternalTeamId', 'Score',\n 'MedianTime', 'ImageCount'])\n\n def get_second(x):\n \n return x[1]\n for s_id, score in sorted(iteritems(scores),\n key=get_second, reverse=True):\n external_id = self.submissions.get_external_id(s_id)\n stat = run_stats.get(\n s_id, collections.defaultdict(lambda: float('NaN')))\n writer.writerow([s_id, external_id, score,\n stat['median_eval_time'],\n image_count[s_id]])", "docstring": "Saves sorted (by score) results of the evaluation.\n\nArgs:\nrun_stats: 
dictionary with runtime statistics for submissions,\ncan be generated by WorkPiecesBase.compute_work_statistics\nscores: dictionary mapping submission ids to scores\nimage_count: dictionary with number of images processed by submission\nfilename: output filename", "source": "juraj-google-style"} {"code": "def scale(self, scalar, ignored_terms=None):\n \n\n if ignored_terms is None:\n ignored_terms = set()\n else:\n ignored_terms = {asfrozenset(term) for term in ignored_terms}\n\n for term in self:\n if term not in ignored_terms:\n self[term] *= scalar", "docstring": "Multiply the polynomial by the given scalar.\n\nArgs:\nscalar (number):\nValue to multiply the polynomial by.\n\nignored_terms (iterable, optional):\nBiases associated with these terms are not scaled.", "source": "juraj-google-style"} {"code": "def build_transaction(self, inputs, outputs):\n \n \n inputs = [{'output': '{}:{}'.format(input['txid'], input['vout']),\n 'value': input['amount']} for input in inputs]\n tx = bitcoin.mktx(inputs, outputs)\n return tx", "docstring": "Thin wrapper around ``bitcoin.mktx(inputs, outputs)``\n\nArgs:\ninputs (dict): inputs in the form of\n``{'output': 'txid:vout', 'value': amount in satoshi}``\noutputs (dict): outputs in the form of\n``{'address': to_address, 'value': amount in satoshi}``\nReturns:\ntransaction", "source": "juraj-google-style"} {"code": "def create_epub(self, output_directory, epub_name=None):\n\n def createTOCs_and_ContentOPF():\n for (epub_file, name) in ((self.toc_html, 'toc.html'), (self.toc_ncx, 'toc.ncx'), (self.opf, 'content.opf')):\n epub_file.add_chapters(self.chapters)\n epub_file.write(os.path.join(self.OEBPS_DIR, name))\n\n def create_zip_archive(epub_name):\n try:\n assert (isinstance(epub_name, basestring) or (epub_name is None))\n except AssertionError:\n raise TypeError('epub_name must be string or None')\n if (epub_name is None):\n epub_name = self.title\n epub_name = ''.join([c for c in epub_name if (c.isalpha() or c.isdigit() or (c == ' '))]).rstrip()\n epub_name_with_path = os.path.join(output_directory, epub_name)\n try:\n os.remove(os.path.join(epub_name_with_path, '.zip'))\n except OSError:\n pass\n shutil.make_archive(epub_name_with_path, 'zip', self.EPUB_DIR)\n return (epub_name_with_path + '.zip')\n\n def turn_zip_into_epub(zip_archive):\n epub_full_name = (zip_archive.strip('.zip') + '.epub')\n try:\n os.remove(epub_full_name)\n except OSError:\n pass\n os.rename(zip_archive, epub_full_name)\n return epub_full_name\n createTOCs_and_ContentOPF()\n epub_path = turn_zip_into_epub(create_zip_archive(epub_name))\n return epub_path", "docstring": "Create an epub file from this object.\n\nArgs:\noutput_directory (str): Directory to output the epub file to\nepub_name (Option[str]): The file name of your epub. This should not contain\n.epub at the end. 
If this argument is not provided, defaults to the title of the epub.", "source": "codesearchnet"} {"code": "def render(self, mode='human'):\n if (mode == 'human'):\n if (self.viewer is None):\n from ._image_viewer import ImageViewer\n if (self.spec is None):\n caption = self._rom_path.split('/')[(- 1)]\n else:\n caption = self.spec.id\n self.viewer = ImageViewer(caption=caption, height=SCREEN_HEIGHT, width=SCREEN_WIDTH)\n self.viewer.show(self.screen)\n elif (mode == 'rgb_array'):\n return self.screen\n else:\n render_modes = [repr(x) for x in self.metadata['render.modes']]\n msg = 'valid render modes are: {}'.format(', '.join(render_modes))\n raise NotImplementedError(msg)", "docstring": "Render the environment.\n\nArgs:\nmode (str): the mode to render with:\n- human: render to the current display\n- rgb_array: Return an numpy.ndarray with shape (x, y, 3),\nrepresenting RGB values for an x-by-y pixel image\n\nReturns:\na numpy array if mode is 'rgb_array', None otherwise", "source": "codesearchnet"} {"code": "def VerifyStructure(self, parser_mediator, lines):\n try:\n structure = self._GDS_LINE.parseString(lines)\n except pyparsing.ParseException as exception:\n logger.debug('Not a Google Drive Sync log file: {0!s}'.format(exception))\n return False\n date_time = dfdatetime_time_elements.TimeElementsInMilliseconds()\n try:\n datetime_iso8601 = self._GetISO8601String(structure.date_time)\n date_time.CopyFromStringISO8601(datetime_iso8601)\n except ValueError as exception:\n logger.debug('Not a Google Drive Sync log file, invalid date/time: {0!s} with error: {1!s}'.format(structure.date_time, exception))\n return False\n return True", "docstring": "Verify that this file is a Google Drive Sync log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nlines (str): one or more lines from the text file.\n\nReturns:\nbool: True if this is the correct parser, False otherwise.", "source": "codesearchnet"} {"code": "def _average_precision(self, rec, prec):\n mrec = np.concatenate(([0.0], rec, [1.0]))\n mpre = np.concatenate(([0.0], prec, [0.0]))\n for i in range((mpre.size - 1), 0, (- 1)):\n mpre[(i - 1)] = np.maximum(mpre[(i - 1)], mpre[i])\n i = np.where((mrec[1:] != mrec[:(- 1)]))[0]\n ap = np.sum(((mrec[(i + 1)] - mrec[i]) * mpre[(i + 1)]))\n return ap", "docstring": "calculate average precision\n\nParams:\n----------\nrec : numpy.array\ncumulated recall\nprec : numpy.array\ncumulated precision\nReturns:\n----------\nap as float", "source": "codesearchnet"} {"code": "def GetParserObjectByName(cls, parser_name):\n \n parser_class = cls._parser_classes.get(parser_name, None)\n if parser_class:\n return parser_class()\n return None", "docstring": "Retrieves a specific parser object by its name.\n\nArgs:\nparser_name (str): name of the parser.\n\nReturns:\nBaseParser: parser object or None.", "source": "juraj-google-style"} {"code": "def create_video(video_data):\n serializer = VideoSerializer(data=video_data)\n if serializer.is_valid():\n serializer.save()\n return video_data.get('edx_video_id')\n else:\n raise ValCannotCreateError(serializer.errors)", "docstring": "Called on to create Video objects in the database\n\ncreate_video is used to create Video objects whose children are EncodedVideo\nobjects which are linked to Profile objects. This is an alternative to the HTTP\nrequests so it can be used internally. The VideoSerializer is used to\ndeserialize this object. 
If there are duplicate profile_names, the entire\ncreation will be rejected. If the profile is not found in the database, the\nvideo will not be created.\nArgs:\nvideo_data (dict):\n{\nurl: api url to the video\nedx_video_id: ID of the video\nduration: Length of video in seconds\nclient_video_id: client ID of video\nencoded_video: a list of EncodedVideo dicts\nurl: url of the video\nfile_size: size of the video in bytes\nprofile: ID of the profile\ncourses: Courses associated with this video\nimage: poster image file name for a particular course\n}\n\nRaises:\nRaises ValCannotCreateError if the video cannot be created.\n\nReturns the successfully created Video object", "source": "codesearchnet"} {"code": "def load_from_checkpoint(self, sess, latest_filename=None):\n self._create_initializers()\n if self._save_path:\n ckpt = tf.train.get_checkpoint_state(os.path.dirname(self._save_path), latest_filename)\n if (ckpt and ckpt.all_model_checkpoint_paths):\n self._saver = tf.train.Saver(saver_def=self._saver.as_saver_def())\n self._saver.set_last_checkpoints(list(ckpt.all_model_checkpoint_paths))\n if self._saver.last_checkpoints:\n self._saver.restore(sess, self._saver.last_checkpoints[(- 1)])\n return self._saver.last_checkpoints[(- 1)]\n else:\n return None", "docstring": "Loads the model from the most recent checkpoint.\n\nThis gets the most current list of checkpoints each time it is called.\n\nArgs:\nsess: The current session.\nlatest_filename: The filename for the latest set of checkpoints, defaults\nto 'checkpoints'.\nReturns:\nThe loaded checkpoint or None if it failed to load.", "source": "codesearchnet"} {"code": "def _CalculateDigestHash(self, file_entry, data_stream_name):\n \n file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)\n if not file_object:\n return None\n\n try:\n file_object.seek(0, os.SEEK_SET)\n\n hasher_object = hashers_manager.HashersManager.GetHasher('sha256')\n\n data = file_object.read(self._READ_BUFFER_SIZE)\n while data:\n hasher_object.Update(data)\n data = file_object.read(self._READ_BUFFER_SIZE)\n\n finally:\n file_object.close()\n\n return hasher_object.GetStringDigest()", "docstring": "Calculates a SHA-256 digest of the contents of the file entry.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry whose content will be hashed.\ndata_stream_name (str): name of the data stream whose content is to be\nhashed.\n\nReturns:\nstr: hexadecimal representation of the SHA-256 hash or None if the digest\ncannot be determined.", "source": "juraj-google-style"} {"code": "def df_categorical_column(category_values, num_rows=100, probabilities=None):\n \n splitter = np.random.choice(range(len(category_values)), num_rows, p=probabilities)\n return pd.Series(pd.Categorical.from_codes(splitter, categories=category_values))", "docstring": "Generate a categorical column with random data\nArgs:\ncategory_values (list): A list of category values (e.g. ['red', 'blue', 'green'])\nnum_rows (int): The number of rows to generate (default = 100)\nprobabilities (list): A list of probabilities of each value (e.g. 
[0.6, 0.2, 0.2]) (default=None, which gives each value an equal probability)", "source": "juraj-google-style"} {"code": "def getWeights(self, term_i=None):\n assert self.init, 'GP not initialised'\n if (term_i == None):\n if (self.gp.mean.n_terms == 1):\n term_i = 0\n else:\n print('VarianceDecomposition: Specify fixed effect term index')\n return self.gp.mean.B[term_i]", "docstring": "Return weights for fixed effect term term_i\n\nArgs:\nterm_i: fixed effect term index\nReturns:\nweights of the specified fixed effect term.\nThe output will be a KxL matrix of weights,\nwhere K is F.shape[1] and L is A.shape[1] of the corresponding fixed effect term\n(L will always be 1 for single-trait analysis).", "source": "codesearchnet"} {"code": "class DisjunctiveConstraint(Constraint):\n\n def __init__(self, nested_token_ids: List[List[int]]):\n super(Constraint, self).__init__()\n if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:\n raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.')\n if any((not isinstance(token_ids, list) for token_ids in nested_token_ids)):\n raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.')\n if any((any((not isinstance(token_id, int) or token_id < 0 for token_id in token_ids)) for token_ids in nested_token_ids)):\n raise ValueError(f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.')\n self.trie = DisjunctiveTrie(nested_token_ids)\n self.token_ids = nested_token_ids\n self.seqlen = self.trie.max_height\n self.current_seq = []\n self.completed = False\n\n def advance(self):\n token_list = self.trie.next_tokens(self.current_seq)\n if len(token_list) == 0:\n return None\n else:\n return token_list\n\n def does_advance(self, token_id: int):\n if not isinstance(token_id, int):\n raise TypeError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}')\n next_tokens = self.trie.next_tokens(self.current_seq)\n return token_id in next_tokens\n\n def update(self, token_id: int):\n if not isinstance(token_id, int):\n raise TypeError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}')\n stepped = False\n completed = False\n reset = False\n if self.does_advance(token_id):\n self.current_seq.append(token_id)\n stepped = True\n else:\n reset = True\n self.reset()\n completed = self.trie.reached_leaf(self.current_seq)\n self.completed = completed\n return (stepped, completed, reset)\n\n def reset(self):\n self.completed = False\n self.current_seq = []\n\n def remaining(self):\n if self.completed:\n return 0\n else:\n return self.seqlen - len(self.current_seq)\n\n def copy(self, stateful=False):\n new_constraint = DisjunctiveConstraint(self.token_ids)\n if stateful:\n new_constraint.seq_len = self.seqlen\n new_constraint.current_seq = self.current_seq\n new_constraint.completed = self.completed\n return new_constraint", "docstring": "A special [`Constraint`] that is fulfilled by fulfilling just one of several constraints.\n\nArgs:\nnested_token_ids (`List[List[int]]`):\nA list of words, where each word is a list of ids. 
This constraint is fulfilled by generating just one from\nthe list of words.", "source": "github-repos"} {"code": "def _get_formatted_date(dataset_date, date_format=None):\n if dataset_date:\n if date_format:\n return dataset_date.strftime(date_format)\n else:\n return dataset_date.date().isoformat()\n else:\n return None", "docstring": "Get supplied dataset date as string in specified format.\nIf no format is supplied, an ISO 8601 string is returned.\n\nArgs:\ndataset_date (Optional[datetime.datetime]): dataset date in datetime.datetime format\ndate_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None.\n\nReturns:\nOptional[str]: Dataset date string or None if no date is set", "source": "codesearchnet"} {"code": "def resize_annotation(self, annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float=0.5, interpolation: 'F.InterpolationMode'=None):\n interpolation = interpolation if interpolation is not None else F.InterpolationMode.NEAREST\n ratio_height, ratio_width = [target / orig for target, orig in zip(target_size, orig_size)]\n new_annotation = {}\n new_annotation['size'] = target_size\n for key, value in annotation.items():\n if key == 'boxes':\n boxes = value\n scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height], dtype=torch.float32, device=boxes.device)\n new_annotation['boxes'] = scaled_boxes\n elif key == 'area':\n area = value\n scaled_area = area * (ratio_width * ratio_height)\n new_annotation['area'] = scaled_area\n elif key == 'masks':\n masks = value[:, None]\n masks = [F.resize(mask, target_size, interpolation=interpolation) for mask in masks]\n masks = torch.stack(masks).to(torch.float32)\n masks = masks[:, 0] > threshold\n new_annotation['masks'] = masks\n elif key == 'size':\n new_annotation['size'] = target_size\n else:\n new_annotation[key] = value\n return new_annotation", "docstring": "Resizes an annotation to a target size.\n\nArgs:\nannotation (`Dict[str, Any]`):\nThe annotation dictionary.\norig_size (`Tuple[int, int]`):\nThe original size of the input image.\ntarget_size (`Tuple[int, int]`):\nThe target size of the image, as returned by the preprocessing `resize` step.\nthreshold (`float`, *optional*, defaults to 0.5):\nThe threshold used to binarize the segmentation masks.\nresample (`InterpolationMode`, defaults to `InterpolationMode.NEAREST`):\nThe resampling filter to use when resizing the masks.", "source": "github-repos"} {"code": "def _GetCachedFileByPath(self, key_path_upper):\n \n longest_key_path_prefix_upper = ''\n longest_key_path_prefix_length = len(longest_key_path_prefix_upper)\n for key_path_prefix_upper in self._registry_files:\n if key_path_upper.startswith(key_path_prefix_upper):\n key_path_prefix_length = len(key_path_prefix_upper)\n if key_path_prefix_length > longest_key_path_prefix_length:\n longest_key_path_prefix_upper = key_path_prefix_upper\n longest_key_path_prefix_length = key_path_prefix_length\n\n if not longest_key_path_prefix_upper:\n return None, None\n\n registry_file = self._registry_files.get(\n longest_key_path_prefix_upper, None)\n return longest_key_path_prefix_upper, registry_file", "docstring": "Retrieves a cached Windows Registry file for a key path.\n\nArgs:\nkey_path_upper (str): Windows Registry key path, in upper case with\na resolved root key alias.\n\nReturns:\ntuple: consist:\n\nstr: key path prefix\nWinRegistryFile: corresponding Windows Registry file or None if not\navailable.", "source": 
"juraj-google-style"} {"code": "def patch_on_path(src: symbolic.Symbolic, regex: str, value: Any=None, value_fn: Optional[Callable[[Any], Any]]=None, skip_notification: Optional[bool]=None) -> Any:\n regex = re.compile(regex)\n return _conditional_patch(src, lambda k, v, p: regex.match(str(k)), value, value_fn, skip_notification)", "docstring": "Recursively patch values on matched paths.\n\nExample::\n\nd = pg.Dict(a={'x': 1}, b=2)\nprint(pg.patching.patch_on_path(d, '.*x', value=3))\n# {a={x=1}, b=2}\n\nArgs:\nsrc: symbolic value to patch.\nregex: Regex for key path.\nvalue: New value for field that satisfy `condition`.\nvalue_fn: Callable object that produces new value based on old value.\nIf not None, `value` must be None.\nskip_notification: If True, `on_change` event will not be triggered for this\noperation. If None, the behavior is decided by `pg.notify_on_rebind`.\nPlease see `symbolic.Symbolic.rebind` for details.\n\nReturns:\n`src` after being patched.", "source": "github-repos"} {"code": "def set_user(uid=None, username=None, password=None, priv=None, status=None):\n \n\n conf = \"\"\n if not uid:\n raise salt.exceptions.CommandExecutionError(\"The user ID must be specified.\")\n\n if status:\n conf += ' accountStatus=\"{0}\"'.format(status)\n\n if username:\n conf += ' name=\"{0}\"'.format(username)\n\n if priv:\n conf += ' priv=\"{0}\"'.format(priv)\n\n if password:\n conf += ' pwd=\"{0}\"'.format(password)\n\n dn = \"sys/user-ext/user-{0}\".format(uid)\n\n inconfig = .format(uid,\n conf)\n\n ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False)\n\n return ret", "docstring": "Sets a CIMC user with specified configurations.\n\n.. versionadded:: 2019.2.0\n\nArgs:\nuid(int): The user ID slot to create the user account in.\n\nusername(str): The name of the user.\n\npassword(str): The clear text password of the user.\n\npriv(str): The privilege level of the user.\n\nstatus(str): The account status of the user.\n\nCLI Example:\n\n.. 
code-block:: bash\n\nsalt '*' cimc.set_user 11 username=admin password=foobar priv=admin active", "source": "juraj-google-style"} {"code": "def forward(self, layer_input):\n bsz, length, emb_size = layer_input.size()\n layer_input = layer_input.reshape(-1, emb_size)\n _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input)\n expert_inputs = layer_input[batch_index]\n hidden_states = self.input_linear(expert_inputs, expert_size)\n chunked_hidden_states = hidden_states.chunk(2, dim=-1)\n hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]\n expert_outputs = self.output_linear(hidden_states, expert_size)\n expert_outputs = expert_outputs * batch_gates[:, None]\n zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device)\n layer_output = zeros.index_add(0, batch_index, expert_outputs)\n layer_output = layer_output.view(bsz, length, self.input_size)\n layer_output = layer_output + self.bias\n return (layer_output, router_logits)", "docstring": "Forward pass of the mixture of experts layer.\n\nArgs:\nlayer_input (Tensor):\nInput tensor.\n\nReturns:\nTensor:\nOutput tensor.\nTensor:\nRouter logits.", "source": "github-repos"} {"code": "def business_days_in_period(self, date_tensor, period_tensor):\n return self.business_days_between(date_tensor, date_tensor + period_tensor)", "docstring": "Calculates number of business days in a period.\n\nIncludes the dates in `date_tensor`, but excludes final dates resulting from\naddition of `period_tensor`.\n\nArgs:\ndate_tensor: `DateTensor` of starting dates.\nperiod_tensor: PeriodTensor, should be broadcastable to `date_tensor`.\n\nReturns:\nAn int32 Tensor with the number of business days in given periods that\nstart at given dates.", "source": "github-repos"} {"code": "def get_widget_or_404(self):\n field_id = self.kwargs.get('field_id', self.request.GET.get('field_id', None))\n if (not field_id):\n raise Http404('No \"field_id\" provided.')\n try:\n key = signing.loads(field_id)\n except BadSignature:\n raise Http404('Invalid \"field_id\".')\n else:\n cache_key = ('%s%s' % (settings.SELECT2_CACHE_PREFIX, key))\n widget_dict = cache.get(cache_key)\n if (widget_dict is None):\n raise Http404('field_id not found')\n if (widget_dict.pop('url') != self.request.path):\n raise Http404('field_id was issued for the view.')\n (qs, qs.query) = widget_dict.pop('queryset')\n self.queryset = qs.all()\n widget_dict['queryset'] = self.queryset\n widget_cls = widget_dict.pop('cls')\n return widget_cls(**widget_dict)", "docstring": "Get and return widget from cache.\n\nRaises:\nHttp404: If if the widget can not be found or no id is provided.\n\nReturns:\nModelSelect2Mixin: Widget from cache.", "source": "codesearchnet"} {"code": "def CheckPath(self, path, path_segment_separator=None):\n \n if not self._case_sensitive:\n path = path.lower()\n\n if path_segment_separator is None:\n path_segment_separator = self._path_segment_separator\n\n path_segments = path.split(path_segment_separator)\n number_of_path_segments = len(path_segments)\n\n scan_object = self._root_node\n while scan_object:\n if isinstance(scan_object, py2to3.STRING_TYPES):\n break\n\n if scan_object.path_segment_index >= number_of_path_segments:\n scan_object = scan_object.default_value\n continue\n\n path_segment = path_segments[scan_object.path_segment_index]\n scan_object = scan_object.GetScanObject(path_segment)\n\n if not isinstance(scan_object, py2to3.STRING_TYPES):\n return False\n\n 
filter_path_segments = scan_object.split(self._path_segment_separator)\n return filter_path_segments == path_segments", "docstring": "Checks if a path matches the scan tree-based path filter.\n\nArgs:\npath: a string containing the path.\npath_segment_separator: optional string containing the path segment\nseparator. None defaults to the path segment\nseparator that was set when the path filter\nscan tree was initialized.\n\nReturns:\nA boolean indicating if the path matches the filter.", "source": "juraj-google-style"} {"code": "def _gather_saveables_for_checkpoint(self) -> Dict[str, Callable[..., Any]]:\n\n def _saveable_factory(name=self._common_name):\n saveables = []\n num_shards = len(self.values)\n for shard_id in range(num_shards):\n saveables.append(TPUEmbeddingShardedSaveable(self.values[shard_id], shard_id, num_shards, self.shard_dim, name))\n return saveables\n return {base.VARIABLE_VALUE_KEY: _saveable_factory}", "docstring": "Overrides Trackable method.\n\nReturns:\nA dictionary mapping attribute names to `SaveableObject` factories.", "source": "github-repos"} {"code": "def convert_to_layout_rules(x):\n if isinstance(x, LayoutRules):\n return x\n if isinstance(x, str):\n x = _parse_string_to_list_of_pairs(x)\n return LayoutRules(x)", "docstring": "Converts input to a LayoutRules.\n\nArgs:\nx: LayoutRules, str, or set-like of string pairs.\n\nReturns:\nLayoutRules.", "source": "codesearchnet"} {"code": "def filter_set(self, name):\n \n\n filter_set = filter_sets[name]\n for name, filter in iter(filter_set.filters.items()):\n self.filters[name] = filter\n self.descriptions += filter_set.descriptions", "docstring": "Adds filters from a particular global :class:`FilterSet`.\n\nArgs:\nname (str): The name of the set whose filters should be added.", "source": "juraj-google-style"} {"code": "def execute_task(self, task, workflow_id, data=None):\n \n start_time = datetime.utcnow()\n\n store_doc = DataStore(**self.app.user_options['config'].data_store,\n auto_connect=True).get(workflow_id)\n store_loc = 'log.{}.tasks.{}'.format(task.dag_name, task.name)\n\n def handle_callback(message, event_type, exc=None):\n msg = '{}: {}'.format(message, str(exc)) if exc is not None else message\n\n \n if event_type == JobEventName.Stopped:\n logger.warning(msg)\n elif event_type == JobEventName.Aborted:\n logger.error(msg)\n else:\n logger.info(msg)\n\n current_time = datetime.utcnow()\n\n \n if event_type != JobEventName.Started:\n duration = (current_time - start_time).total_seconds()\n\n store_doc.set(key='{}.end_time'.format(store_loc),\n value=current_time,\n section=DataStoreDocumentSection.Meta)\n\n store_doc.set(key='{}.duration'.format(store_loc),\n value=duration,\n section=DataStoreDocumentSection.Meta)\n else:\n \n store_doc.set(key='{}.start_time'.format(store_loc),\n value=start_time,\n section=DataStoreDocumentSection.Meta)\n\n store_doc.set(key='{}.worker'.format(store_loc),\n value=self.request.hostname,\n section=DataStoreDocumentSection.Meta)\n\n store_doc.set(key='{}.queue'.format(store_loc),\n value=task.queue,\n section=DataStoreDocumentSection.Meta)\n duration = None\n\n \n self.send_event(event_type,\n job_type=JobType.Task,\n name=task.name,\n queue=task.queue,\n time=current_time,\n workflow_id=workflow_id,\n duration=duration)\n\n \n self.update_state(meta={'name': task.name,\n 'queue': task.queue,\n 'type': JobType.Task,\n 'workflow_id': workflow_id})\n\n \n handle_callback('Start task <{}>'.format(task.name), JobEventName.Started)\n\n \n return task._run(\n 
data=data,\n store=store_doc,\n signal=TaskSignal(Client(\n SignalConnection(**self.app.user_options['config'].signal, auto_connect=True),\n request_key=workflow_id),\n task.dag_name),\n context=TaskContext(task.name, task.dag_name, task.workflow_name,\n workflow_id, self.request.hostname),\n success_callback=partial(handle_callback,\n message='Complete task <{}>'.format(task.name),\n event_type=JobEventName.Succeeded),\n stop_callback=partial(handle_callback,\n message='Stop task <{}>'.format(task.name),\n event_type=JobEventName.Stopped),\n abort_callback=partial(handle_callback,\n message='Abort workflow <{}> by task <{}>'.format(\n task.workflow_name, task.name),\n event_type=JobEventName.Aborted))", "docstring": "Celery task that runs a single task on a worker.\n\nArgs:\nself (Task): Reference to itself, the celery task object.\ntask (BaseTask): Reference to the task object that performs the work\nin its run() method.\nworkflow_id (string): The unique ID of the workflow run that started this task.\ndata (MultiTaskData): An optional MultiTaskData object that contains the data\nthat has been passed down from upstream tasks.", "source": "juraj-google-style"} {"code": "def from_config(cls, config, custom_objects=None):\n with generic_utils.SharedObjectLoadingScope():\n input_tensors, output_tensors, created_layers = reconstruct_from_config(config, custom_objects)\n model = cls(inputs=input_tensors, outputs=output_tensors, name=config.get('name'))\n connect_ancillary_layers(model, created_layers)\n return model", "docstring": "Instantiates a Model from its config (output of `get_config()`).\n\nArgs:\nconfig: Model config dictionary.\ncustom_objects: Optional dictionary mapping names\n(strings) to custom classes or functions to be\nconsidered during deserialization.\n\nReturns:\nA model instance.\n\nRaises:\nValueError: In case of improperly formatted config dict.", "source": "github-repos"} {"code": "async def find_movie(self, query):\n \n params = OrderedDict([\n ('query', query), ('include_adult', False),\n ])\n url = self.url_builder('search/movie', {}, params)\n data = await self.get_data(url)\n if data is None:\n return\n return [\n Movie.from_json(item, self.config['data'].get('images'))\n for item in data.get('results', [])\n ]", "docstring": "Retrieve movie data by search query.\n\nArguments:\nquery (:py:class:`str`): Query to search for.\n\nReturns:\n:py:class:`list`: Possible matches.", "source": "juraj-google-style"} {"code": "def dataset_exists(client, dataset_reference):\n \n from google.cloud.exceptions import NotFound\n\n try:\n client.get_dataset(dataset_reference)\n return True\n except NotFound:\n return False", "docstring": "Return if a dataset exists.\n\nArgs:\nclient (google.cloud.bigquery.client.Client):\nA client to connect to the BigQuery API.\ndataset_reference (google.cloud.bigquery.dataset.DatasetReference):\nA reference to the dataset to look for.\n\nReturns:\nbool: ``True`` if the dataset exists, ``False`` otherwise.", "source": "juraj-google-style"} {"code": "def dispose(json_str):\n \n result_str = list(json_str)\n escaped = False\n normal = True\n sl_comment = False\n ml_comment = False\n quoted = False\n\n a_step_from_comment = False\n a_step_from_comment_away = False\n\n former_index = None\n\n for index, char in enumerate(json_str):\n\n if escaped: \n escaped = False\n continue\n\n if a_step_from_comment: \n if char != '/' and char != '*':\n a_step_from_comment = False\n normal = True\n continue\n\n if a_step_from_comment_away: \n if char != '/':\n 
a_step_from_comment_away = False\n\n if char == '\"':\n if normal and not escaped:\n \n quoted = True\n normal = False\n elif quoted and not escaped:\n \n quoted = False\n normal = True\n\n elif char == '\\\\':\n \n if normal or quoted:\n escaped = True\n\n elif char == '/':\n if a_step_from_comment:\n \n a_step_from_comment = False\n sl_comment = True\n normal = False\n former_index = index - 1\n elif a_step_from_comment_away:\n \n a_step_from_comment_away = False\n normal = True\n ml_comment = False\n for i in range(former_index, index + 1):\n result_str[i] = \"\"\n\n elif normal:\n \n a_step_from_comment = True\n normal = False\n\n elif char == '*':\n if a_step_from_comment:\n \n a_step_from_comment = False\n ml_comment = True\n normal = False\n former_index = index - 1\n elif ml_comment:\n a_step_from_comment_away = True\n elif char == '\\n':\n if sl_comment:\n sl_comment = False\n normal = True\n for i in range(former_index, index + 1):\n result_str[i] = \"\"\n elif char == ']' or char == '}':\n if normal:\n _remove_last_comma(result_str, index)\n\n \n return (\"\" if isinstance(json_str, str) else u\"\").join(result_str)", "docstring": "Clear all comments in json_str.\n\nClear JS-style comments like // and /**/ in json_str.\nAccept a str or unicode as input.\n\nArgs:\njson_str: A json string of str or unicode to clean up comment\n\nReturns:\nstr: The str without comments (or unicode if you pass in unicode)", "source": "juraj-google-style"} {"code": "def _PrintPreprocessingInformation(self, storage_reader, session_number=None):\n knowledge_base_object = knowledge_base.KnowledgeBase()\n storage_reader.ReadPreprocessingInformation(knowledge_base_object)\n system_configuration = knowledge_base_object.GetSystemConfigurationArtifact(session_identifier=session_number)\n if (not system_configuration):\n return\n title = 'System configuration'\n table_view = views.ViewsFactory.GetTableView(self._views_format_type, title=title)\n hostname = 'N/A'\n if system_configuration.hostname:\n hostname = system_configuration.hostname.name\n operating_system = (system_configuration.operating_system or 'N/A')\n operating_system_product = (system_configuration.operating_system_product or 'N/A')\n operating_system_version = (system_configuration.operating_system_version or 'N/A')\n code_page = (system_configuration.code_page or 'N/A')\n keyboard_layout = (system_configuration.keyboard_layout or 'N/A')\n time_zone = (system_configuration.time_zone or 'N/A')\n table_view.AddRow(['Hostname', hostname])\n table_view.AddRow(['Operating system', operating_system])\n table_view.AddRow(['Operating system product', operating_system_product])\n table_view.AddRow(['Operating system version', operating_system_version])\n table_view.AddRow(['Code page', code_page])\n table_view.AddRow(['Keyboard layout', keyboard_layout])\n table_view.AddRow(['Time zone', time_zone])\n table_view.Write(self._output_writer)\n title = 'User accounts'\n table_view = views.ViewsFactory.GetTableView(self._views_format_type, column_names=['Username', 'User directory'], title=title)\n for user_account in system_configuration.user_accounts:\n table_view.AddRow([user_account.username, user_account.user_directory])\n table_view.Write(self._output_writer)", "docstring": "Prints the details of the preprocessing information.\n\nArgs:\nstorage_reader (StorageReader): storage reader.\nsession_number (Optional[int]): session number.", "source": "codesearchnet"} {"code": "def get_common_register(start, end):\n \n registers = defaultdict(int)\n for line 
in lines(start, end):\n insn = line.insn\n\n for operand in insn.operands:\n\n if not operand.type.has_phrase:\n continue\n\n if not operand.base:\n continue\n\n register_name = operand.base\n registers[register_name] += 1\n\n return max(registers.iteritems(), key=operator.itemgetter(1))[0]", "docstring": "Get the register most commonly used in accessing structs.\n\nAccess to a struct is considered for every opcode that accesses memory\nat an offset from a register::\n\nmov eax, [ebx + 5]\n\nFor every access, the struct-referencing registers, in this case\n`ebx`, are counted. The most used one is returned.\n\nArgs:\nstart: The address to start at\nend: The address to finish at", "source": "juraj-google-style"} {"code": "def mme_delete(case_obj, mme_base_url, mme_token):\n server_responses = []\n if ((not mme_base_url) or (not mme_token)):\n return 'Please check that Matchmaker connection parameters are valid'\n for patient in case_obj['mme_submission']['patients']:\n patient_id = patient['id']\n url = ''.join([mme_base_url, '/patient/delete/', patient_id])\n resp = matchmaker_request(url=url, token=mme_token, method='DELETE')\n server_responses.append({'patient_id': patient_id, 'message': resp.get('message'), 'status_code': resp.get('status_code')})\n return server_responses", "docstring": "Delete all affected samples for a case from MatchMaker\n\nArgs:\ncase_obj(dict) a scout case object\nmme_base_url(str) base url of the MME server\nmme_token(str) auth token of the MME server\n\nReturns:\nserver_responses(list): a list of objects of this type:\n{\n'patient_id': patient_id\n'message': server_message,\n'status_code': server_status_code\n}", "source": "codesearchnet"} {"code": "def segment_to_vector(self, seg):\n ft_dict = {ft: val for (val, ft) in self.fts(seg)}\n return [ft_dict[name] for name in self.names]", "docstring": "Given a Unicode IPA segment, return a list of feature specifications\nin canonical order.\n\nArgs:\nseg (unicode): IPA consonant or vowel\n\nReturns:\nlist: feature specifications ('+'/'-'/'0') in the order from\n`FeatureTable.names`", "source": "codesearchnet"} {"code": "def measure(*qubits: raw_types.Qid, key: Optional[str]=None, invert_mask: Tuple[(bool, ...)]=()) -> gate_operation.GateOperation:\n for qubit in qubits:\n if isinstance(qubit, np.ndarray):\n raise ValueError('measure() was called a numpy ndarray. Perhaps you meant to call measure_state_vector on numpy array?')\n elif (not isinstance(qubit, raw_types.Qid)):\n raise ValueError('measure() was called with type different than Qid.')\n if (key is None):\n key = _default_measurement_key(qubits)\n return MeasurementGate(len(qubits), key, invert_mask).on(*qubits)", "docstring": "Returns a single MeasurementGate applied to all the given qubits.\n\nThe qubits are measured in the computational basis.\n\nArgs:\n*qubits: The qubits that the measurement gate should measure.\nkey: The string key of the measurement. If this is None, it defaults\nto a comma-separated list of the target qubits' str values.\ninvert_mask: A list of Truthy or Falsey values indicating whether\nthe corresponding qubits should be flipped. 
None indicates no\ninverting should be done.\n\nReturns:\nAn operation targeting the given qubits with a measurement.\n\nRaises:\nValueError if the qubits are not instances of Qid.", "source": "codesearchnet"} {"code": "def exists(self, workflow_id):\n try:\n db = self._client[self.database]\n col = db[WORKFLOW_DATA_COLLECTION_NAME]\n return (col.find_one({'_id': ObjectId(workflow_id)}) is not None)\n except ConnectionFailure:\n raise DataStoreNotConnected()", "docstring": "Checks whether a document with the specified workflow id already exists.\n\nArgs:\nworkflow_id (str): The workflow id that should be checked.\n\nRaises:\nDataStoreNotConnected: If the data store is not connected to the server.\n\nReturns:\nbool: ``True`` if a document with the specified workflow id exists.", "source": "codesearchnet"} {"code": "def compare_mim_panels(self, existing_panel, new_panel):\n existing_genes = set([gene['hgnc_id'] for gene in existing_panel['genes']])\n new_genes = set([gene['hgnc_id'] for gene in new_panel['genes']])\n return new_genes.difference(existing_genes)", "docstring": "Check if the latest version of OMIM differs from the most recent in database\nReturn all genes that where not in the previous version.\n\nArgs:\nexisting_panel(dict)\nnew_panel(dict)\n\nReturns:\nnew_genes(set(str))", "source": "codesearchnet"} {"code": "def call(self, input_ids: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, token_type_ids: Optional[tf.Tensor]=None, inputs_embeds: Optional[tf.Tensor]=None, past_key_values_length=0, training: bool=False) -> tf.Tensor:\n assert not (input_ids is None and inputs_embeds is None)\n if input_ids is not None:\n check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n input_shape = shape_list(inputs_embeds)[:-1]\n if token_type_ids is None:\n token_type_ids = tf.fill(dims=input_shape, value=0)\n if position_ids is None:\n position_ids = tf.expand_dims(tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0)\n position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n final_embeddings = inputs_embeds + position_embeds + token_type_embeds\n final_embeddings = self.LayerNorm(inputs=final_embeddings)\n final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\nfinal_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github-repos"} {"code": "def _add_unitary_single(self, gate, qubit):\n \n \n indexes = einsum_vecmul_index([qubit], self._number_of_qubits)\n \n gate_tensor = np.array(gate, dtype=complex)\n \n self._statevector = np.einsum(indexes, gate_tensor,\n self._statevector,\n dtype=complex,\n casting='no')", "docstring": "Apply an arbitrary 1-qubit unitary matrix.\n\nArgs:\ngate (matrix_like): a single qubit gate matrix\nqubit (int): the qubit to apply gate to", "source": "juraj-google-style"} {"code": "def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks):\n plan_from_length = []\n plan_num_rand_blocks = []\n if 2 * num_rand_blocks + 5 < from_seq_length \n plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size))\n plan_num_rand_blocks.append(num_rand_blocks)\n plan_from_length.append(from_seq_length)\n plan_num_rand_blocks.append(0)\n elif num_rand_blocks + 5 < 
from_seq_length \n plan_from_length.append(int((num_rand_blocks + 5) * from_block_size))\n plan_num_rand_blocks.append(num_rand_blocks \n plan_from_length.append(from_seq_length)\n plan_num_rand_blocks.append(num_rand_blocks - num_rand_blocks \n else:\n plan_from_length.append(from_seq_length)\n plan_num_rand_blocks.append(num_rand_blocks)\n return (plan_from_length, plan_num_rand_blocks)", "docstring": "Gives the plan of where to put random attention.\n\nArgs:\nfrom_seq_length: int. length of from sequence.\nfrom_block_size: int. size of block in from sequence.\nnum_rand_blocks: int. Number of random chunks per row.\n\nReturns:\nplan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for\neach block", "source": "github-repos"} {"code": "def plot_coordinates(self, X, ax=None, figsize=(6, 6), x_component=0, y_component=1, show_row_points=True, row_points_size=10, show_row_labels=False, show_column_points=True, column_points_size=30, show_column_labels=False, legend_n_cols=1):\n utils.validation.check_is_fitted(self, 'total_inertia_')\n if (ax is None):\n (fig, ax) = plt.subplots(figsize=figsize)\n ax = plot.stylize_axis(ax)\n if (show_row_points or show_row_labels):\n row_coords = self.row_coordinates(X)\n if show_row_points:\n ax.scatter(row_coords.iloc[(:, x_component)], row_coords.iloc[(:, y_component)], s=row_points_size, label=None, color=plot.GRAY['dark'], alpha=0.6)\n if show_row_labels:\n for (_, row) in row_coords.iterrows():\n ax.annotate(row.name, (row[x_component], row[y_component]))\n if (show_column_points or show_column_labels):\n col_coords = self.column_coordinates(X)\n x = col_coords[x_component]\n y = col_coords[y_component]\n prefixes = col_coords.index.str.split('_').map((lambda x: x[0]))\n for prefix in prefixes.unique():\n mask = (prefixes == prefix)\n if show_column_points:\n ax.scatter(x[mask], y[mask], s=column_points_size, label=prefix)\n if show_column_labels:\n for (i, label) in enumerate(col_coords[mask].index):\n ax.annotate(label, (x[mask][i], y[mask][i]))\n ax.legend(ncol=legend_n_cols)\n ax.set_title('Row and column principal coordinates')\n ei = self.explained_inertia_\n ax.set_xlabel('Component {} ({:.2f}% inertia)'.format(x_component, (100 * ei[x_component])))\n ax.set_ylabel('Component {} ({:.2f}% inertia)'.format(y_component, (100 * ei[y_component])))\n return ax", "docstring": "Plot row and column principal coordinates.\n\nArgs:\nax (matplotlib.Axis): A fresh one will be created and returned if not provided.\nfigsize ((float, float)): The desired figure size if `ax` is not provided.\nx_component (int): Number of the component used for the x-axis.\ny_component (int): Number of the component used for the y-axis.\nshow_row_points (bool): Whether to show row principal components or not.\nrow_points_size (float): Row principal components point size.\nshow_row_labels (bool): Whether to show row labels or not.\nshow_column_points (bool): Whether to show column principal components or not.\ncolumn_points_size (float): Column principal components point size.\nshow_column_labels (bool): Whether to show column labels or not.\nlegend_n_cols (int): Number of columns used for the legend.\n\nReturns:\nmatplotlib.Axis", "source": "codesearchnet"} {"code": "def __init__(self, name, description, *labels):\n super(StringGauge, self).__init__('StringGauge', _string_gauge_methods, len(labels), name, description, *labels)", "docstring": "Creates a new StringGauge.\n\nArgs:\nname: name of the new metric.\ndescription: 
description of the new metric.\n*labels: The label list of the new metric.", "source": "github-repos"} {"code": "def join_tokens_to_sentences(tokens):\n text = ''\n for (entry, next_entry) in zip(tokens, tokens[1:]):\n text += entry\n if (next_entry not in SENTENCE_STOPS):\n text += ' '\n text += tokens[(- 1)]\n return text", "docstring": "Correctly joins tokens to multiple sentences\n\nInstead of always placing white-space between the tokens, it will distinguish\nbetween the next symbol and *not* insert whitespace if it is a sentence\nsymbol (e.g. '.', or '?')\n\nArgs:\ntokens: array of string tokens\nReturns:\nJoint sentences as one string", "source": "codesearchnet"} {"code": "def _run_task_hook(hooks, method, task, queue_name):\n \n if hooks is not None:\n try:\n getattr(hooks, method)(task, queue_name)\n except NotImplementedError:\n \n return False\n\n return True\n return False", "docstring": "Invokes hooks.method(task, queue_name).\n\nArgs:\nhooks: A hooks.Hooks instance or None.\nmethod: The name of the method to invoke on the hooks class e.g.\n\"enqueue_kickoff_task\".\ntask: The taskqueue.Task to pass to the hook method.\nqueue_name: The name of the queue to pass to the hook method.\n\nReturns:\nTrue if the hooks.Hooks instance handled the method, False otherwise.", "source": "juraj-google-style"} {"code": "def get_layer_timing_signal_sinusoid_1d(channels, layer, num_layers):\n \n\n signal = get_timing_signal_1d(num_layers, channels)\n layer_signal = tf.expand_dims(signal[:, layer, :], axis=1)\n\n return layer_signal", "docstring": "Add sinusoids of different frequencies as layer (vertical) timing signal.\n\nArgs:\nchannels: dimension of the timing signal\nlayer: layer num\nnum_layers: total number of layers\n\nReturns:\na Tensor of timing signals [1, 1, channels].", "source": "juraj-google-style"} {"code": "def index_subdirectory(directory, class_indices, follow_links, formats):\n dirname = os.path.basename(directory)\n valid_files = iter_valid_files(directory, follow_links, formats)\n labels = []\n filenames = []\n for root, fname in valid_files:\n labels.append(class_indices[dirname])\n absolute_path = tf.io.gfile.join(root, fname)\n relative_path = tf.io.gfile.join(dirname, os.path.relpath(absolute_path, directory))\n filenames.append(relative_path)\n return (filenames, labels)", "docstring": "Recursively walks directory and list image paths and their class index.\n\nArgs:\ndirectory: string, target directory.\nclass_indices: dict mapping class names to their index.\nfollow_links: boolean, whether to recursively follow subdirectories\n(if False, we only list top-level images in `directory`).\nformats: Allowlist of file extensions to index (e.g. \".jpg\", \".txt\").\n\nReturns:\ntuple `(filenames, labels)`. `filenames` is a list of relative file\npaths, and `labels` is a list of integer labels corresponding\nto these files.", "source": "github-repos"} {"code": "def timeseries_from_mat(filename, varname=None, fs=1.0):\n import scipy.io as sio\n if (varname is None):\n mat_dict = sio.loadmat(filename)\n if (len(mat_dict) > 1):\n raise ValueError('Must specify varname: file contains more than one variable. ')\n else:\n mat_dict = sio.loadmat(filename, variable_names=(varname,))\n array = mat_dict.popitem()[1]\n return Timeseries(array, fs=fs)", "docstring": "load a multi-channel Timeseries from a MATLAB .mat file\n\nArgs:\nfilename (str): .mat file to load\nvarname (str): variable name. 
only needed if there is more than one\nvariable saved in the .mat file\nfs (scalar): sample rate of timeseries in Hz. (constant timestep assumed)\n\nReturns:\nTimeseries", "source": "codesearchnet"} {"code": "def __init__(self, token, vendor='test'):\n \n self.token = token\n self.vendor = vendor", "docstring": "Construct Retsly client\n\nArgs:\ntoken (string): access token\nvendor (string): vendor ID", "source": "juraj-google-style"} {"code": "def map_on_gpu(map_func):\n\n def _apply_fn(dataset):\n return _MapOnGpuDataset(dataset, map_func)\n return _apply_fn", "docstring": "Maps `map_func` across the elements of this dataset.\n\nNOTE: This is a highly experimental version of `tf.data.Dataset.map` that runs\n`map_func` on GPU. It must be used after applying the\n`tf.data.experimental.copy_to_device` transformation with a GPU device\nargument.\n\nArgs:\nmap_func: A function mapping a nested structure of tensors (having shapes\nand types defined by `self.output_shapes` and `self.output_types`) to\nanother nested structure of tensors.\n\nReturns:\nA `Dataset` transformation function, which can be passed to\n`tf.data.Dataset.apply`.", "source": "github-repos"} {"code": "def expand_with_style(template, style, data, body_subtree='body'):\n \n if template.has_defines:\n return template.expand(data, style=style)\n else:\n tokens = []\n execute_with_style_LEGACY(template, style, data, tokens.append,\n body_subtree=body_subtree)\n return JoinTokens(tokens)", "docstring": "Expand a data dictionary with a template AND a style.\n\nDEPRECATED -- Remove this entire function in favor of expand(d, style=style)\n\nA style is a Template instance that factors out the common strings in several\n\"body\" templates.\n\nArgs:\ntemplate: Template instance for the inner \"page content\"\nstyle: Template instance for the outer \"page style\"\ndata: Data dictionary, with a 'body' key (or body_subtree", "source": "juraj-google-style"} {"code": "def wrap_py_func(f, args, kwargs=None):\n tensor_args = []\n tensor_args_idx = {}\n n_args = len(args)\n arg_is_tensor = tuple(map(tensor_util.is_tf_type, args))\n for i in range(n_args):\n if arg_is_tensor[i]:\n tensor_args_idx[i] = len(tensor_args)\n tensor_args.append(args[i])\n if kwargs:\n kwarg_keys = tuple(kwargs.keys())\n kwarg_is_tensor = {k: tensor_util.is_tf_type(kwargs[k]) for k in kwarg_keys}\n for k in kwarg_keys:\n if kwarg_is_tensor[k]:\n tensor_args_idx[k] = len(tensor_args)\n tensor_args.append(kwargs[k])\n else:\n kwarg_keys = ()\n\n def f_wrapper(*tensor_args):\n f_args = tuple((tensor_args[tensor_args_idx[i]] if arg_is_tensor[i] else a for i, a in enumerate(args)))\n f_kwargs = {k: tensor_args[tensor_args_idx[k]] if kwarg_is_tensor[k] else kwargs[k] for i, k in enumerate(kwarg_keys)}\n f(*f_args, **f_kwargs)\n return 1\n return script_ops.eager_py_func(f_wrapper, tensor_args, dtypes.int32)", "docstring": "Helper that wraps a callable to py_func.\n\nThe helper passes tensor arguments through the py_func interface. Non-tensor\narguments are allowed, and will be passed to f directly. Note that non-tensor\narguments are captured by f will not update every time the wrapper is\ncalled (this is consistent with its argument list, which only includes\nthe tensor arguments). In general, it's safest not to reuse this wrapper.\n\nArgs:\nf: Callable\nargs: Positional arguments for f, as list or tuple.\nkwargs: Keyword arguments for f, as dict with string keys. 
May be None.\n\nReturns:\nThe return values of f converted to tensor.\nRaises:\nValueError: if any of the arguments are incorrect.", "source": "github-repos"} {"code": "def cut3d(self, cut3d_input, workdir):\n (self.stdin_fname, self.stdout_fname, self.stderr_fname) = map(os.path.join, (3 * [os.path.abspath(workdir)]), ['cut3d.stdin', 'cut3d.stdout', 'cut3d.stderr'])\n cut3d_input.write(self.stdin_fname)\n retcode = self._execute(workdir, with_mpirun=False)\n if (retcode != 0):\n raise RuntimeError(('Error while running cut3d in %s.' % workdir))\n output_filepath = cut3d_input.output_filepath\n if (output_filepath is not None):\n if (not os.path.isabs(output_filepath)):\n output_filepath = os.path.abspath(os.path.join(workdir, output_filepath))\n if (not os.path.isfile(output_filepath)):\n raise RuntimeError(('The file was not converted correctly in %s.' % workdir))\n return (self.stdout_fname, output_filepath)", "docstring": "Runs cut3d with a Cut3DInput\n\nArgs:\ncut3d_input: a Cut3DInput object.\nworkdir: directory where cut3d is executed.\n\nReturns:\n(string) absolute path to the standard output of the cut3d execution.\n(string) absolute path to the output filepath. None if output is required.", "source": "codesearchnet"} {"code": "def message(self, value):\n \n if value == self._defaults['message'] and 'message' in self._values:\n del self._values['message']\n else:\n self._values['message'] = value", "docstring": "The message property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"} {"code": "def detect_events(self, max_attempts=3):\n for _ in xrange(max_attempts):\n try:\n with KindleCloudReaderAPI.get_instance(self.uname, self.pword) as kcr:\n self.books = kcr.get_library_metadata()\n self.progress = kcr.get_library_progress()\n except KindleAPIError:\n continue\n else:\n break\n else:\n return None\n progress_map = {book.asin: self.progress[book.asin].locs[1] for book in self.books}\n new_events = self._snapshot.calc_update_events(progress_map)\n update_event = UpdateEvent(datetime.now().replace(microsecond=0))\n new_events.append(update_event)\n self._event_buf.extend(new_events)\n return new_events", "docstring": "Returns a list of `Event`s detected from differences in state\nbetween the current snapshot and the Kindle Library.\n\n`books` and `progress` attributes will be set with the latest API\nresults upon successful completion of the function.\n\nReturns:\nIf failed to retrieve progress, None\nElse, the list of `Event`s", "source": "codesearchnet"} {"code": "def from_optimize_result(cls, result, n, m, index=None):\n \n coords = pd.DataFrame(result.x.reshape((m, n)), index=index)\n projection = cls(coords)\n projection.stress = result.fun\n return projection", "docstring": "Construct a Projection from the output of an optimization.\n\nArgs:\nresult (:py:class:`scipy.optimize.OptimizeResult`): Object\nreturned by :py:func:`scipy.optimize.minimize`.\nn (`int`): Number of dimensions.\nm (`int`): Number of samples.\nindex (`list-like`): Names of samples. 
(Optional).\n\nReturns:\n:py:class:`pymds.Projection`", "source": "juraj-google-style"} {"code": "def set(self, key, value):\n changed = super().set(key=key, value=value)\n if (not changed):\n return False\n self._log.info('Saving configuration to \"%s\"...', self._filename)\n with open(self._filename, 'w') as stream:\n stream.write(self.content)\n self._log.info('Saved configuration to \"%s\".', self._filename)\n return True", "docstring": "Updates the value of the given key in the file.\n\nArgs:\nkey (str): Key of the property to update.\nvalue (str): New value of the property.\n\nReturn:\nbool: Indicates whether or not a change was made.", "source": "codesearchnet"} {"code": "def bridge_to_vlan(br):\n \n cmd = 'ovs-vsctl br-to-vlan {0}'.format(br)\n result = __salt__['cmd.run_all'](cmd)\n if result['retcode'] != 0:\n return False\n return int(result['stdout'])", "docstring": "Returns the VLAN ID of a bridge.\n\nArgs:\nbr: A string - bridge name\n\nReturns:\nVLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake\nbridge. If the bridge does not exist, False is returned.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' openvswitch.bridge_to_parent br0", "source": "juraj-google-style"} {"code": "def reset(self):\n self.lattice.reset()\n for atom in self.atoms.atoms:\n atom.reset()", "docstring": "Reset all counters for this simulation.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "codesearchnet"} {"code": "def dedupe_all_lists(obj, exclude_keys=()):\n squared_dedupe_len = 10\n if isinstance(obj, dict):\n new_obj = {}\n for (key, value) in obj.items():\n if (key in exclude_keys):\n new_obj[key] = value\n else:\n new_obj[key] = dedupe_all_lists(value)\n return new_obj\n elif isinstance(obj, (list, tuple, set)):\n new_elements = [dedupe_all_lists(v) for v in obj]\n if (len(new_elements) < squared_dedupe_len):\n new_obj = dedupe_list(new_elements)\n else:\n new_obj = dedupe_list_of_dicts(new_elements)\n return type(obj)(new_obj)\n else:\n return obj", "docstring": "Recursively remove duplucates from all lists.\n\nArgs:\nobj: collection to deduplicate\nexclude_keys (Container[str]): key names to ignore for deduplication", "source": "codesearchnet"} {"code": "def easeInOutQuint(n):\n _checkRange(n)\n n = (2 * n)\n if (n < 1):\n return (0.5 * (n ** 5))\n else:\n n = (n - 2)\n return (0.5 * ((n ** 5) + 2))", "docstring": "A quintic tween function that accelerates, reaches the midpoint, and then decelerates.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. 
Suitable for passing to getPointOnLine().", "source": "codesearchnet"} {"code": "def _create_ssh_keys(self):\n (ret, _, _) = utils.run_command(['ssh-keygen', '-t', 'rsa', '-m', 'PEM', '-N', '', '-f', self.paths.ssh_id_rsa()])\n if (ret != 0):\n raise RuntimeError('Failed to crate ssh keys at %s', self.paths.ssh_id_rsa())", "docstring": "Generate a pair of ssh keys for this prefix\n\nReturns:\nNone\n\nRaises:\nRuntimeError: if it fails to create the keys", "source": "codesearchnet"} {"code": "def packet_meta_data(self):\n \n\n \n for item in self.input_stream:\n\n \n output = {}\n\n \n timestamp = item['timestamp']\n buf = item['raw_buf']\n\n \n output['timestamp'] = datetime.datetime.utcfromtimestamp(timestamp)\n\n \n eth = dpkt.ethernet.Ethernet(buf)\n output['eth'] = {'src': eth.src, 'dst': eth.dst, 'type':eth.type, 'len': len(eth)}\n\n \n packet = eth.data\n\n \n if hasattr(packet, 'data'):\n output['packet'] = {'type': packet.__class__.__name__, 'data': packet.data}\n else:\n output['packet'] = {'type': None, 'data': None}\n\n \n if output['packet']['type'] == 'IP':\n\n \n df = bool(packet.off & dpkt.ip.IP_DF)\n mf = bool(packet.off & dpkt.ip.IP_MF)\n offset = packet.off & dpkt.ip.IP_OFFMASK\n\n \n output['packet'].update({'src':packet.src, 'dst':packet.dst, 'p': packet.p, 'len':packet.len, 'ttl':packet.ttl,\n 'df':df, 'mf': mf, 'offset': offset, 'checksum': packet.sum})\n\n \n elif output['packet']['type'] == 'IP6':\n\n \n output['packet'].update({'src':packet.src, 'dst':packet.dst, 'p': packet.p, 'len':packet.plen, 'ttl':packet.hlim})\n\n \n else:\n output['packet'].update(data_utils.make_dict(packet))\n\n \n \n output['transport'] = None\n\n \n \n output['application'] = None\n\n \n yield output", "docstring": "Pull out the metadata about each packet from the input_stream\nArgs:\nNone\nReturns:\ngenerator (dictionary): a generator that contains packet meta data in the form of a dictionary", "source": "juraj-google-style"} {"code": "def make_repr(inst, attrs):\n \n \n arg_str = \", \".join(\n \"%s=%r\" % (a, getattr(inst, a)) for a in attrs if hasattr(inst, a))\n repr_str = \"%s(%s)\" % (inst.__class__.__name__, arg_str)\n return repr_str", "docstring": "Create a repr from an instance of a class\n\nArgs:\ninst: The class instance we are generating a repr of\nattrs: The attributes that should appear in the repr", "source": "juraj-google-style"} {"code": "def set_category(self, category):\n \n \n \n if isinstance(category, Category):\n name = category.name\n else:\n name = category\n self.find(\"category\").text = name", "docstring": "Set package category\n\nArgs:\ncategory: String of an existing category's name, or a\nCategory object.", "source": "juraj-google-style"} {"code": "def convert_relu(params, w_name, scope_name, inputs, layers, weights, names):\n print('Converting relu ...')\n if (names == 'short'):\n tf_name = ('RELU' + random_string(4))\n elif (names == 'keep'):\n tf_name = w_name\n else:\n tf_name = (w_name + str(random.random()))\n relu = keras.layers.Activation('relu', name=tf_name)\n layers[scope_name] = relu(layers[inputs[0]])", "docstring": "Convert relu layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"} {"code": "def _sideral(date, longitude=0.0, model='mean', eop_correction=True, terms=106):\n t = 
date.change_scale('UT1').julian_century\n theta = (((67310.54841 + (((876600 * 3600) + 8640184.812866) * t)) + (0.093104 * (t ** 2))) - (6.2e-06 * (t ** 3)))\n theta /= 240.0\n if (model == 'apparent'):\n theta += equinox(date, eop_correction, terms)\n theta += longitude\n theta %= 360.0\n return theta", "docstring": "Get the sideral time at a defined date\n\nArgs:\ndate (Date):\nlongitude (float): Longitude of the observer (in degrees)\nEast positive/West negative.\nmodel (str): 'mean' or 'apparent' for GMST and GAST respectively\nReturn:\nfloat: Sideral time in degrees\n\nGMST: Greenwich Mean Sideral Time\nLST: Local Sideral Time (Mean)\nGAST: Greenwich Apparent Sideral Time", "source": "codesearchnet"} {"code": "def read_field_h5(xdmf_file, fieldname, snapshot, header=None):\n if (header is None):\n (header, xdmf_root) = read_geom_h5(xdmf_file, snapshot)\n else:\n xdmf_root = xmlET.parse(str(xdmf_file)).getroot()\n npc = (header['nts'] \n flds = np.zeros(_flds_shape(fieldname, header))\n data_found = False\n for elt_subdomain in xdmf_root[0][0][snapshot].findall('Grid'):\n ibk = int(elt_subdomain.get('Name').startswith('meshYang'))\n for data_attr in elt_subdomain.findall('Attribute'):\n if (data_attr.get('Name') != fieldname):\n continue\n (icore, fld) = _get_field(xdmf_file, data_attr.find('DataItem'))\n fld = fld.T\n shp = fld.shape\n if ((shp[(- 1)] == 1) and (header['nts'][0] == 1)):\n fld = fld.reshape((shp[0], 1, shp[1], shp[2]))\n if (header['rcmb'] < 0):\n fld = fld[((2, 0, 1), ...)]\n elif (shp[(- 1)] == 1):\n fld = fld.reshape((shp[0], shp[1], 1, shp[2]))\n if (header['rcmb'] < 0):\n fld = fld[((0, 2, 1), ...)]\n elif (header['nts'][1] == 1):\n fld = fld.reshape((1, shp[0], 1, shp[1]))\n ifs = [(((icore \n if header['zp']:\n fld = fld[(:, :, :, :(- 1))]\n flds[(:, ifs[0]:((ifs[0] + npc[0]) + header['xp']), ifs[1]:((ifs[1] + npc[1]) + header['yp']), ifs[2]:(ifs[2] + npc[2]), ibk)] = fld\n data_found = True\n flds = _post_read_flds(flds, header)\n return ((header, flds) if data_found else None)", "docstring": "Extract field data from hdf5 files.\n\nArgs:\nxdmf_file (:class:`pathlib.Path`): path of the xdmf file.\nfieldname (str): name of field to extract.\nsnapshot (int): snapshot number.\nheader (dict): geometry information.\nReturns:\n(dict, numpy.array): geometry information and field data. 
None\nis returned if data is unavailable.", "source": "codesearchnet"} {"code": "def populate_settings_dir(force: bool = False) -> bool:\n \n res = False\n if _default_settings_path == _settings_path:\n return res\n\n for src in list(_default_settings_path.glob('**/*.json')):\n dest = _settings_path / src.relative_to(_default_settings_path)\n if not force and dest.exists():\n continue\n res = True\n dest.parent.mkdir(parents=True, exist_ok=True)\n shutil.copy(src, dest)\n return res", "docstring": "Populate settings directory with default settings files\n\nArgs:\nforce: if ``True``, replace existing settings files with default ones\n\nReturns:\n``True`` if any files were copied and ``False`` otherwise", "source": "juraj-google-style"} {"code": "def save(self, out_path):\n \n\n out = {\n 'selectors': [str(x) for x in self.selectors],\n 'trace': [{'stream': str(DataStream.FromEncoded(x.stream)), 'time': x.raw_time, 'value': x.value, 'reading_id': x.reading_id} for x in self]\n }\n\n with open(out_path, \"wb\") as outfile:\n json.dump(out, outfile, indent=4)", "docstring": "Save an ascii representation of this simulation trace.\n\nArgs:\nout_path (str): The output path to save this simulation trace.", "source": "juraj-google-style"} {"code": "def get(self, key):\n \n\n \n value = self.child_datastore.get(key)\n return self.deserializedValue(value)", "docstring": "Return the object named by key or None if it does not exist.\nRetrieves the value from the ``child_datastore``, and de-serializes\nit on the way out.\n\nArgs:\nkey: Key naming the object to retrieve\n\nReturns:\nobject or None", "source": "juraj-google-style"} {"code": "def order_for(self, qubits: Iterable[raw_types.Qid]) -> Tuple[(raw_types.Qid, ...)]:\n return self._explicit_func(qubits)", "docstring": "Returns a qubit tuple ordered corresponding to the basis.\n\nArgs:\nqubits: Qubits that should be included in the basis. (Additional\nqubits may be added into the output by the basis.)\n\nReturns:\nA tuple of qubits in the same order that their single-qubit\nmatrices would be passed into `np.kron` when producing a matrix for\nthe entire system.", "source": "codesearchnet"} {"code": "def threat(self, name, owner=None, **kwargs):\n return Threat(self.tcex, name, owner=owner, **kwargs)", "docstring": "Create the Threat TI object.\n\nArgs:\nowner:\nname:\n**kwargs:\n\nReturn:", "source": "codesearchnet"} {"code": "def angles( self ):\n \n ( a, b, c ) = [ row for row in self.matrix ]\n return [ angle( b, c ), angle( a, c ), angle( a, b ) ]", "docstring": "The cell angles (in degrees).\n\nArgs:\nNone\n\nReturns:\n(list(alpha,beta,gamma)): The cell angles.", "source": "juraj-google-style"} {"code": "def add_user(\n self, user, first_name=None, last_name=None, email=None, password=None):\n \n self.service.add_user(\n user, first_name, last_name, email, password,\n self.url_prefix, self.auth, self.session, self.session_send_opts)", "docstring": "Add a new user.\n\nArgs:\nuser (string): User name.\nfirst_name (optional[string]): User's first name. Defaults to None.\nlast_name (optional[string]): User's last name. Defaults to None.\nemail: (optional[string]): User's email address. Defaults to None.\npassword: (optional[string]): User's password. 
Defaults to None.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"} {"code": "def add(self, other, axis=\"columns\", level=None, fill_value=None):\n \n return self._binary_op(\n \"add\", other, axis=axis, level=level, fill_value=fill_value\n )", "docstring": "Add this DataFrame to another or a scalar/list.\n\nArgs:\nother: What to add this this DataFrame.\naxis: The axis to apply addition over. Only applicaable to Series\nor list 'other'.\nlevel: A level in the multilevel axis to add over.\nfill_value: The value to fill NaN.\n\nReturns:\nA new DataFrame with the applied addition.", "source": "juraj-google-style"} {"code": "def on_value_event(self, event):\n \n if not event.summary.value:\n logger.warn(\"The summary of the event lacks a value.\")\n return\n\n \n \n watch_key = event.summary.value[0].node_name\n if not watch_key.endswith(constants.DEBUG_NUMERIC_SUMMARY_SUFFIX):\n \n \n return\n\n \n \n \n node_name_and_output_slot = watch_key[\n :-len(constants.DEBUG_NUMERIC_SUMMARY_SUFFIX)]\n\n shape = tensor_util.make_ndarray(event.summary.value[0].tensor).shape\n if (len(shape) != 1 or\n shape[0] < constants.MIN_DEBUG_NUMERIC_SUMMARY_TENSOR_LENGTH):\n logger.warn(\"Health-pill tensor either lacks a dimension or is \"\n \"shaped incorrectly: %s\" % shape)\n return\n\n match = re.match(r\"^(.*):(\\d+)$\", node_name_and_output_slot)\n if not match:\n logger.warn(\n (\"A event with a health pill has an invalid node name and output \"\n \"slot combination, (i.e., an unexpected debug op): %r\"),\n node_name_and_output_slot)\n return\n\n if self._session_run_index >= 0:\n event.step = self._session_run_index\n else:\n \n \n \n event.step = int(time.time() * 1e6)\n\n \n \n self._events_writer_manager.write_event(event)\n\n alert = numerics_alert.extract_numerics_alert(event)\n if self._numerics_alert_callback and alert:\n self._numerics_alert_callback(alert)", "docstring": "Records the summary values based on an updated message from the debugger.\n\nLogs an error message if writing the event to disk fails.\n\nArgs:\nevent: The Event proto to be processed.", "source": "juraj-google-style"} {"code": "def set_instrument(self, instrument=None):\n \n if instrument is None:\n instrument = self.tester\n\n if instrument in [\"arbin\", \"arbin_res\"]:\n self._set_arbin()\n self.tester = \"arbin\"\n\n elif instrument == \"arbin_sql\":\n self._set_arbin_sql()\n self.tester = \"arbin\"\n\n elif instrument == \"arbin_experimental\":\n self._set_arbin_experimental()\n self.tester = \"arbin\"\n\n elif instrument in [\"pec\", \"pec_csv\"]:\n self._set_pec()\n self.tester = \"pec\"\n\n elif instrument in [\"biologics\", \"biologics_mpr\"]:\n self._set_biologic()\n self.tester = \"biologic\"\n\n elif instrument == \"custom\":\n self._set_custom()\n self.tester = \"custom\"\n\n else:\n raise Exception(f\"option does not exist: '{instrument}'\")", "docstring": "Set the instrument (i.e. tell cellpy the file-type you use).\n\nArgs:\ninstrument: (str) in [\"arbin\", \"bio-logic-csv\", \"bio-logic-bin\",...]\n\nSets the instrument used for obtaining the data (i.e. 
sets fileformat)", "source": "juraj-google-style"} {"code": "def _prefer_static_concat_shape(first_shape, second_shape_int_list):\n second_shape_int_list_static = [tensor_util.constant_value(s) for s in second_shape_int_list]\n if isinstance(first_shape, tensor_shape.TensorShape) and all((s is not None for s in second_shape_int_list_static)):\n return first_shape.concatenate(second_shape_int_list_static)\n return array_ops.concat([first_shape, second_shape_int_list], axis=0)", "docstring": "Concatenate a shape with a list of integers as statically as possible.\n\nArgs:\nfirst_shape: `TensorShape` or `Tensor` instance. If a `TensorShape`,\n`first_shape.is_fully_defined()` must return `True`.\nsecond_shape_int_list: `list` of scalar integer `Tensor`s.\n\nReturns:\n`Tensor` representing concatenating `first_shape` and\n`second_shape_int_list` as statically as possible.", "source": "github-repos"} {"code": "def partial_derivative(self, X, y=0):\n \n self.check_fit()\n\n U, V = self.split_matrix(X)\n\n if self.theta == 0:\n return V\n\n else:\n num = np.multiply(self._g(U), self._g(V)) + self._g(U)\n den = np.multiply(self._g(U), self._g(V)) + self._g(1)\n return (num / den) - y", "docstring": "Compute partial derivative :math:`C(u|v)` of cumulative distribution.\n\nArgs:\nX: `np.ndarray`\ny: `float`\n\nReturns:\nnp.ndarray", "source": "juraj-google-style"} {"code": "def processes(self, processes):\n if (self._processes > 1):\n self._pool.close()\n self._pool.join()\n self._pool = multiprocessing.Pool(processes)\n else:\n self._pool = None\n self._logger.log('debug', 'Number of processes set to {}'.format(processes))", "docstring": "Set the number of concurrent processes the ABC will utilize for\nfitness function evaluation; if <= 1, single process is used\n\nArgs:\nprocesses (int): number of concurrent processes", "source": "codesearchnet"} {"code": "def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]]=None):\n logits = outputs.logits\n if target_sizes is not None:\n if len(logits) != len(target_sizes):\n raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n semantic_segmentation = []\n for idx in range(len(logits)):\n resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)\n semantic_map = resized_logits[0].argmax(dim=0)\n semantic_segmentation.append(semantic_map)\n else:\n semantic_segmentation = logits.argmax(dim=1)\n semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]\n return semantic_segmentation", "docstring": "Converts the output of [`MobileNetV2ForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.\n\nArgs:\noutputs ([`MobileNetV2ForSemanticSegmentation`]):\nRaw outputs of the model.\ntarget_sizes (`List[Tuple]` of length `batch_size`, *optional*):\nList of tuples corresponding to the requested final size (height, width) of each prediction. If unset,\npredictions will not be resized.\n\nReturns:\nsemantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic\nsegmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is\nspecified). 
Each entry of each `torch.Tensor` correspond to a semantic class id.", "source": "github-repos"} {"code": "def __enter__(self) -> str:\n if self._name is None and self._values is not None:\n raise ValueError('At least one of name (%s) and default_name (%s) must be provided.' % (self._name, self._default_name))\n g = get_default_graph()\n if self._values and (not g.building_function):\n g_from_inputs = _get_graph_from_inputs(self._values)\n if g_from_inputs is not g:\n g = g_from_inputs\n self._g_manager = g.as_default()\n self._g_manager.__enter__()\n else:\n self._g_manager = None\n else:\n self._g_manager = None\n try:\n self._name_scope = g.name_scope(self._name)\n return self._name_scope.__enter__()\n except:\n if self._g_manager is not None:\n self._g_manager.__exit__(*sys.exc_info())\n raise", "docstring": "Start the scope block.\n\nReturns:\nThe scope name.\n\nRaises:\nValueError: if neither `name` nor `default_name` is provided\nbut `values` are.", "source": "github-repos"} {"code": "def stop_workflow(config, *, names=None):\n jobs = list_jobs(config, filter_by_type=JobType.Workflow)\n if (names is not None):\n filtered_jobs = []\n for job in jobs:\n if ((job.id in names) or (job.name in names) or (job.workflow_id in names)):\n filtered_jobs.append(job)\n else:\n filtered_jobs = jobs\n success = []\n failed = []\n for job in filtered_jobs:\n client = Client(SignalConnection(**config.signal, auto_connect=True), request_key=job.workflow_id)\n if client.send(Request(action='stop_workflow')).success:\n success.append(job)\n else:\n failed.append(job)\n return (success, failed)", "docstring": "Stop one or more workflows.\n\nArgs:\nconfig (Config): Reference to the configuration object from which the\nsettings for the workflow are retrieved.\nnames (list): List of workflow names, workflow ids or workflow job ids for the\nworkflows that should be stopped. 
If all workflows should be\nstopped, set it to None.\n\nReturns:\ntuple: A tuple of the workflow jobs that were successfully stopped and the ones\nthat could not be stopped.", "source": "codesearchnet"} {"code": "async def verify_chain_of_trust(chain):\n \n log_path = os.path.join(chain.context.config[\"task_log_dir\"], \"chain_of_trust.log\")\n scriptworker_log = logging.getLogger('scriptworker')\n with contextual_log_handler(\n chain.context, path=log_path, log_obj=scriptworker_log,\n formatter=AuditLogFormatter(\n fmt=chain.context.config['log_fmt'],\n datefmt=chain.context.config['log_datefmt'],\n )\n ):\n try:\n \n await build_task_dependencies(chain, chain.task, chain.name, chain.task_id)\n \n await download_cot(chain)\n \n verify_cot_signatures(chain)\n \n await download_cot_artifacts(chain)\n \n task_count = await verify_task_types(chain)\n check_num_tasks(chain, task_count)\n \n await verify_worker_impls(chain)\n await trace_back_to_tree(chain)\n except (BaseDownloadError, KeyError, AttributeError) as exc:\n log.critical(\"Chain of Trust verification error!\", exc_info=True)\n if isinstance(exc, CoTError):\n raise\n else:\n raise CoTError(str(exc))\n log.info(\"Good.\")", "docstring": "Build and verify the chain of trust.\n\nArgs:\nchain (ChainOfTrust): the chain we're operating on\n\nRaises:\nCoTError: on failure", "source": "juraj-google-style"} {"code": "def async_noop(name=None):\n with ops.name_scope(name, 'async_noop') as name:\n cond_init_value = constant_op.constant(False, name='cond_init_value')\n func_graph_signature = [tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool)]\n cond_graph = func_graph_module.func_graph_from_py_func('cond_graph', lambda x: x, [cond_init_value], {}, signature=func_graph_signature, func_graph=util.WhileCondFuncGraph('cond_graph', collections=ops.get_default_graph()._collections), add_control_dependencies=False)\n body_graph = func_graph_module.func_graph_from_py_func('body_graph', lambda x: x, [cond_init_value], {}, signature=func_graph_signature, func_graph=util.WhileBodyFuncGraph('body_graph', collections=ops.get_default_graph()._collections), add_control_dependencies=False)\n while_op, _ = util.get_op_and_outputs(gen_functional_ops._while([cond_init_value], util.create_new_tf_function(cond_graph), util.create_new_tf_function(body_graph), output_shapes=[[]], name=name))\n util.maybe_set_lowering_attr(while_op, lower_using_switch_merge=False)\n return while_op", "docstring": "Returns a no-op that is implemented as an async kernel.\n\nThis operation may be useful to implement \"aggressive inter-op parallelism\"\nbecause it will cause any immediate downstream operations to be scheduled\non different threads.\n\nArgs:\nname: The name of the operation.", "source": "github-repos"} {"code": "def splitdrive(self, path):\n \n path = make_string_path(path)\n if self.is_windows_fs:\n if len(path) >= 2:\n path = self.normcase(path)\n sep = self._path_separator(path)\n \n \n if sys.version_info >= (2, 7, 8):\n if (path[0:2] == sep * 2) and (\n path[2:3] != sep):\n \n \n sep_index = path.find(sep, 2)\n if sep_index == -1:\n return path[:0], path\n sep_index2 = path.find(sep, sep_index + 1)\n if sep_index2 == sep_index + 1:\n return path[:0], path\n if sep_index2 == -1:\n sep_index2 = len(path)\n return path[:sep_index2], path[sep_index2:]\n if path[1:2] == self._matching_string(path, ':'):\n return path[:2], path[2:]\n return path[:0], path", "docstring": "Splits the path into the drive part and the rest of the path.\n\nTaken from Windows specific 
implementation in Python 3.5\nand slightly adapted.\n\nArgs:\npath: the full path to be split.\n\nReturns:\nA tuple of the drive part and the rest of the path, or of\nan empty string and the full path if drive letters are\nnot supported or no drive is present.", "source": "juraj-google-style"} {"code": "def __init__(self, symbol, precedence, associative=False):\n \n self.symbol = symbol\n self.precedence = precedence\n self.associative = associative", "docstring": "Constructor.\n\nArgs:\nsymbol: The character which represents this operation, such as '+' for\naddition.\nprecedence: Operator precedence. This will determine where parentheses\nare used.\nassociative: If true, the order of the operands does not matter.", "source": "juraj-google-style"} {"code": "def copy(self, source_file_names, destination_file_names):\n if not len(source_file_names) == len(destination_file_names):\n message = 'Unable to copy unequal number of sources and destinations'\n raise BeamIOError(message)\n src_dest_pairs = list(zip(source_file_names, destination_file_names))\n return s3io.S3IO(options=self._options).copy_paths(src_dest_pairs)", "docstring": "Recursively copy the file tree from the source to the destination\n\nArgs:\nsource_file_names: list of source file objects that need to be copied\ndestination_file_names: list of destinations for the new objects\n\nRaises:\n``BeamIOError``: if any of the copy operations fail", "source": "github-repos"} {"code": "def highlight(text: str, color_code: int, bold: bool=False) -> str:\n \n return '{}\\033[{}m{}\\033[0m'.format(\n '\\033[1m' if bold else '',\n color_code,\n text,)", "docstring": "Wraps the given string with terminal color codes.\n\nArgs:\ntext: The content to highlight.\ncolor_code: The color to highlight with, e.g. 'shelltools.RED'.\nbold: Whether to bold the content in addition to coloring.\n\nReturns:\nThe highlighted string.", "source": "juraj-google-style"} {"code": "def can_convert_arrays(arrays):\n return all(tree.flatten(tree.map_structure(array_slicing.can_slice_array, arrays)))", "docstring": "Check if array-like inputs can be handled by `ArrayDataAdapter`\n\nArgs:\narrays: Structure of `Tensor`s, NumPy arrays, or tensor-like.\n\nReturns:\n`True` if `arrays` can be handled by `ArrayDataAdapter`, `False`\notherwise.", "source": "github-repos"} {"code": "def readDivPressure(fileName):\n try:\n df = pandas.read_csv(fileName, sep=None, engine='python')\n pandasformat = True\n except ValueError:\n pandasformat = False\n df.columns = ['site', 'divPressureValue']\n scaleFactor = max(df['divPressureValue'].abs())\n if (scaleFactor > 0):\n df['divPressureValue'] = [(x / scaleFactor) for x in df['divPressureValue']]\n assert (len(df['site'].tolist()) == len(set(df['site'].tolist()))), 'There is at least one non-unique site in {0}'.format(fileName)\n assert (max(df['divPressureValue'].abs()) <= 1), 'The scaling produced a diversifying pressure value with an absolute value greater than one.'\n sites = df['site'].tolist()\n divPressure = {}\n for r in sites:\n divPressure[r] = df[(df['site'] == r)]['divPressureValue'].tolist()[0]\n return divPressure", "docstring": "Reads in diversifying pressures from a file.\n\nScale diversifying pressure values so absolute value of the max value is 1,\nunless all values are zero.\n\nArgs:\n`fileName` (string or readable file-like object)\nFile holding diversifying pressure values. Can be a\ncomma-, space-, or tab-separated file. 
The first column\nis the site (consecutively numbered, sites starting\nwith one) and the second column is the diversifying pressure values.\n\nReturns:\n`divPressure` (dict keyed by ints)\n`divPressure[r][v]` is the diversifying pressure value of site `r`.", "source": "codesearchnet"} {"code": "def create_per_test_excerpt(self, current_test_info):\n \n self.pause()\n dest_path = current_test_info.output_path\n utils.create_dir(dest_path)\n self._ad.log.debug('AdbLog excerpt location: %s', dest_path)\n shutil.move(self.adb_logcat_file_path, dest_path)\n self.resume()", "docstring": "Convenient method for creating excerpts of adb logcat.\n\nTo use this feature, call this method at the end of: `setup_class`,\n`teardown_test`, and `teardown_class`.\n\nThis moves the current content of `self.adb_logcat_file_path` to the\nlog directory specific to the current test.\n\nArgs:\ncurrent_test_info: `self.current_test_info` in a Mobly test.", "source": "juraj-google-style"} {"code": "def _reverse_seq(input_seq, lengths):\n if lengths is None:\n return list(reversed(input_seq))\n flat_input_seq = tuple((nest.flatten(input_) for input_ in input_seq))\n flat_results = [[] for _ in range(len(input_seq))]\n for sequence in zip(*flat_input_seq):\n input_shape = tensor_shape.unknown_shape(rank=sequence[0].get_shape().rank)\n for input_ in sequence:\n input_shape.assert_is_compatible_with(input_.get_shape())\n input_.set_shape(input_shape)\n s_joined = array_ops_stack.stack(sequence)\n s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)\n result = array_ops_stack.unstack(s_reversed)\n for r, flat_result in zip(result, flat_results):\n r.set_shape(input_shape)\n flat_result.append(r)\n results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result) for input_, flat_result in zip(input_seq, flat_results)]\n return results", "docstring": "Reverse a list of Tensors up to specified lengths.\n\nArgs:\ninput_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)\nor nested tuples of tensors.\nlengths: A `Tensor` of dimension batch_size, containing lengths for each\nsequence in the batch. 
If \"None\" is specified, simply reverses the list.\n\nReturns:\ntime-reversed sequence", "source": "github-repos"} {"code": "def prepare_for_tokenization(self, artists: str, genres: str, lyrics: str, is_split_into_words: bool=False) -> Tuple[str, str, str, Dict[str, Any]]:\n for idx in range(len(self.version)):\n if self.version[idx] == 'v3':\n artists[idx] = artists[idx].lower()\n genres[idx] = [genres[idx].lower()]\n else:\n artists[idx] = self._normalize(artists[idx]) + '.v2'\n genres[idx] = [self._normalize(genre) + '.v2' for genre in genres[idx].split('_')]\n if self.version[0] == 'v2':\n self.out_of_vocab = regex.compile('[^A-Za-z0-9.,:;!?\\\\-\\'\\\\\"()\\\\[\\\\] \\\\t\\\\n]+')\n vocab = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\\'\"()[] \\t\\n'\n self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}\n self.vocab[''] = 0\n self.n_vocab = len(vocab) + 1\n self.lyrics_encoder = self.vocab\n self.lyrics_decoder = {v: k for k, v in self.vocab.items()}\n self.lyrics_decoder[0] = ''\n else:\n self.out_of_vocab = regex.compile('[^A-Za-z0-9.,:;!?\\\\-+\\'\\\\\"()\\\\[\\\\] \\\\t\\\\n]+')\n lyrics = self._run_strip_accents(lyrics)\n lyrics = lyrics.replace('\\\\', '\\n')\n lyrics = (self.out_of_vocab.sub('', lyrics), [], [])\n return (artists, genres, lyrics)", "docstring": "Performs any necessary transformations before tokenization.\n\nArgs:\nartist (`str`):\nThe artist name to prepare. This will mostly lower the string\ngenres (`str`):\nThe genre name to prepare. This will mostly lower the string.\nlyrics (`str`):\nThe lyrics to prepare.\nis_split_into_words (`bool`, *optional*, defaults to `False`):\nWhether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the\ntokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)\nwhich it will tokenize. This is useful for NER or token classification.", "source": "github-repos"} {"code": "def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)):\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n x, tf_data_format = _preprocess_conv2d_input(x, data_format)\n padding = _preprocess_padding(padding)\n if tf_data_format == 'NHWC':\n strides = (1,) + strides + (1,)\n else:\n strides = (1, 1) + strides\n x = nn.depthwise_conv2d(x, depthwise_kernel, strides=strides, padding=padding, rate=dilation_rate, data_format=tf_data_format)\n if data_format == 'channels_first' and tf_data_format == 'NHWC':\n x = array_ops.transpose(x, (0, 3, 1, 2))\n return x", "docstring": "2D convolution with separable filters.\n\nArgs:\nx: input tensor\ndepthwise_kernel: convolution kernel for the depthwise convolution.\nstrides: strides tuple (length 2).\npadding: string, `\"same\"` or `\"valid\"`.\ndata_format: string, `\"channels_last\"` or `\"channels_first\"`.\ndilation_rate: tuple of integers,\ndilation rates for the separable convolution.\n\nReturns:\nOutput tensor.\n\nRaises:\nValueError: if `data_format` is neither `channels_last` or\n`channels_first`.", "source": "github-repos"} {"code": "def pack(self, tensors):\n self._assert_eager()\n if len(tensors) != len(self.components):\n raise ValueError('Creating a parallel tensor requires one tensor per component. 
Got {} but was expecting {}.'.format(len(tensors), len(self.components)))\n with ops.device(None):\n tensors = variable_utils.convert_variables_to_tensors(tensors)\n return nest.map_structure(self._pack_tensor, *tensors, expand_composites=True)", "docstring": "Create a tensor on the parallel device from a sequence of tensors.\n\nArgs:\ntensors: A list of tensors, one per device in `self.components`. The list\ncan contain composite tensors and nests (lists, dicts, etc. supported by\n`tf.nest`) with the same structure for each device, but every component\nof nests must already be a `tf.Tensor` or composite. Passing\n`tf.Variable` objects reads their value, it does not share a mutable\nreference between the packed and unpacked forms.\n\nReturns:\nA tensor placed on the ParallelDevice. For nested structures, returns a\nsingle structure containing tensors placed on the ParallelDevice (same\nstructure as each component of `tensors`).\n\nRaises:\nValueError: If the length of `tensors` does not match the number of\ncomponent devices, or if there are non-tensor inputs.", "source": "github-repos"} {"code": "def decode_event(self, log_topics, log_data):\n if ((not len(log_topics)) or (log_topics[0] not in self.event_data)):\n raise ValueError('Unknown log type')\n event_id_ = log_topics[0]\n event = self.event_data[event_id_]\n unindexed_types = [type_ for (type_, indexed) in zip(event['types'], event['indexed']) if (not indexed)]\n unindexed_args = decode_abi(unindexed_types, log_data)\n indexed_count = 1\n result = {}\n for (name, type_, indexed) in zip(event['names'], event['types'], event['indexed']):\n if indexed:\n topic_bytes = utils.zpad(utils.encode_int(log_topics[indexed_count]), 32)\n indexed_count += 1\n value = decode_single(process_type(type_), topic_bytes)\n else:\n value = unindexed_args.pop(0)\n result[name] = value\n result['_event_type'] = utils.to_string(event['name'])\n return result", "docstring": "Return a dictionary representation the log.\n\nNote:\nThis function won't work with anonymous events.\n\nArgs:\nlog_topics (List[bin]): The log's indexed arguments.\nlog_data (bin): The encoded non-indexed arguments.", "source": "codesearchnet"} {"code": "def write(self, string):\n (x, y) = self._normalizeCursor(*self._cursor)\n (width, height) = self.get_size()\n wrapper = _textwrap.TextWrapper(initial_indent=(' ' * x), width=width)\n writeLines = []\n for line in string.split('\\n'):\n if line:\n writeLines += wrapper.wrap(line)\n wrapper.initial_indent = ''\n else:\n writeLines.append([])\n for line in writeLines:\n (x, y) = self._normalizeCursor(x, y)\n self.draw_str(x, y, line[x:], self._fg, self._bg)\n y += 1\n x = 0\n y -= 1\n self._cursor = (x, y)", "docstring": "This method mimics basic file-like behaviour.\n\nBecause of this method you can replace sys.stdout or sys.stderr with\na :any:`Console` or :any:`Window` instance.\n\nThis is a convoluted process and behaviour seen now can be excepted to\nchange on later versions.\n\nArgs:\nstring (Text): The text to write out.\n\n.. seealso:: :any:`set_colors`, :any:`set_mode`, :any:`Window`", "source": "codesearchnet"} {"code": "def projects(self, term, field=None, **kwargs):\n \n params = kwargs\n params['q'] = term\n if field:\n params['f'] = self._FIELD_MAP[field]\n else:\n params['f'] = 'pro.t'\n baseuri = self._BASE_URI + 'projects'\n res = self.session.get(baseuri, params=params)\n self.handle_http_error(res)\n return res", "docstring": "Search for projects. Defaults to project_title. 
Other fields\nare:\nproject_reference\nproject_abstract\n\nArgs:\nterm (str): Term to search for.\nkwargs (dict): additional keywords passed into\nrequests.session.get params keyword.", "source": "juraj-google-style"} {"code": "def _on_status_message(self, sequence, topic, message):\n self._logger.debug(('Received message on (topic=%s): %s' % (topic, message)))\n try:\n conn_key = self._find_connection(topic)\n except ArgumentError:\n self._logger.warn('Dropping message that does not correspond with a known connection, message=%s', message)\n return\n if messages.ConnectionResponse.matches(message):\n if (self.name != message['client']):\n self._logger.debug('Connection response received for a different client, client=%s, name=%s', message['client'], self.name)\n return\n self.conns.finish_connection(conn_key, message['success'], message.get('failure_reason', None))\n else:\n self._logger.warn('Dropping message that did not correspond with a known schema, message=%s', message)", "docstring": "Process a status message received\n\nArgs:\nsequence (int): The sequence number of the packet received\ntopic (string): The topic this message was received on\nmessage (dict): The message itself", "source": "codesearchnet"} {"code": "def to_css(self):\n if (self.a == 1.0):\n return ('rgb(%d, %d, %d)' % (self.r, self.g, self.b))\n else:\n return ('rgba(%d, %d, %d, %s)' % (self.r, self.g, self.b, self.a))", "docstring": "Generate the CSS representation of this RGB color.\n\nReturns:\nstr, ``\"rgb(...)\"`` or ``\"rgba(...)\"``", "source": "codesearchnet"} {"code": "def _estimate_step_duration(self, current, now):\n if current:\n if self._time_after_first_step is not None and current > 1:\n time_per_unit = (now - self._time_after_first_step) / (current - 1)\n else:\n time_per_unit = (now - self._start) / current\n if current == 1:\n self._time_after_first_step = now\n return time_per_unit\n else:\n return 0", "docstring": "Estimate the duration of a single step.\n\nGiven the step number `current` and the corresponding time `now`\nthis function returns an estimate for how long a single step\ntakes. If this is called before one step has been completed\n(i.e. `current == 0`) then zero is given as an estimate. The duration\nestimate ignores the duration of the (assumed to be non-representative)\nfirst step for estimates when more steps are available (i.e. 
`current>1`).\nArgs:\ncurrent: Index of current step.\nnow: The current time.\nReturns: Estimate of the duration of a single step.", "source": "github-repos"} {"code": "def process_rewards(self, rewards):\n \n\n min_reward, max_reward = self.reward_range\n\n \n rewards = np.clip(rewards, min_reward, max_reward)\n \n rewards = np.around(rewards, decimals=0).astype(np.int64)\n return rewards", "docstring": "Clips, rounds, and changes to integer type.\n\nArgs:\nrewards: numpy array of raw (float) rewards.\n\nReturns:\nprocessed_rewards: numpy array of np.int64", "source": "juraj-google-style"} {"code": "def _filter_top_k(x, k):\n _, top_k_idx = ops.top_k(x, k)\n top_k_mask = ops.sum(ops.one_hot(top_k_idx, ops.shape(x)[-1], axis=-1), axis=-2)\n return x * top_k_mask + NEG_INF * (1 - top_k_mask)", "docstring": "Filters top-k values in the last dim of x and set the rest to NEG_INF.\n\nUsed for computing top-k prediction values in dense labels (which has the\nsame shape as predictions) for recall and precision top-k metrics.\n\nArgs:\nx: tensor with any dimensions.\nk: the number of values to keep.\n\nReturns:\ntensor with same shape and dtype as x.", "source": "github-repos"} {"code": "def _get_nan_block_id(partition_class, n_row=1, n_col=1, transpose=False):\n global _NAN_BLOCKS\n if transpose:\n (n_row, n_col) = (n_col, n_row)\n shape = (n_row, n_col)\n if (shape not in _NAN_BLOCKS):\n arr = np.tile(np.array(np.NaN), shape)\n _NAN_BLOCKS[shape] = partition_class.put(pandas.DataFrame(data=arr))\n return _NAN_BLOCKS[shape]", "docstring": "A memory efficient way to get a block of NaNs.\n\nArgs:\npartition_class (BaseFramePartition): The class to use to put the object\nin the remote format.\nn_row(int): The number of rows.\nn_col(int): The number of columns.\ntranspose(bool): If true, swap rows and columns.\nReturns:\nObjectID of the NaN block.", "source": "codesearchnet"} {"code": "def is_user_in_group(self, user, group):\n \n search_url = \"%s/%s/%s/%s/%s\" % (self.url, \"group\", group,\n \"user\", user)\n response = self.jss.get(search_url)\n \n length = len(response)\n result = False\n if length == 1:\n \n pass\n elif length == 2:\n if response.findtext(\"ldap_user/username\") == user:\n if response.findtext(\"ldap_user/is_member\") == \"Yes\":\n result = True\n elif len(response) >= 2:\n raise JSSGetError(\"Unexpected response.\")\n return result", "docstring": "Test for whether a user is in a group.\n\nThere is also the ability in the API to test for whether\nmultiple users are members of an LDAP group, but you should just\ncall is_user_in_group over an enumerated list of users.\n\nArgs:\nuser: String username.\ngroup: String group name.\n\nReturns bool.", "source": "juraj-google-style"} {"code": "def _GetAccountsData(self, metadata_dict):\n (instance_data, project_data) = self._GetInstanceAndProjectAttributes(metadata_dict)\n valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]\n block_project = instance_data.get('block-project-ssh-keys', '').lower()\n if ((block_project != 'true') and (not instance_data.get('sshKeys'))):\n valid_keys.append(project_data.get('ssh-keys'))\n valid_keys.append(project_data.get('sshKeys'))\n accounts_data = '\\n'.join([key for key in valid_keys if key])\n return self._ParseAccountsData(accounts_data)", "docstring": "Get the user accounts specified in metadata server contents.\n\nArgs:\nmetadata_dict: json, the deserialized contents of the metadata server.\n\nReturns:\ndict, a mapping of the form: {'username': ['sshkey1, 'sshkey2', 
...]}.", "source": "codesearchnet"} {"code": "def quad_genz_keister_24(order):\n order = sorted(GENZ_KEISTER_24.keys())[order]\n (abscissas, weights) = GENZ_KEISTER_24[order]\n abscissas = numpy.array(abscissas)\n weights = numpy.array(weights)\n weights /= numpy.sum(weights)\n abscissas *= numpy.sqrt(2)\n return (abscissas, weights)", "docstring": "Hermite Genz-Keister 24 rule.\n\nArgs:\norder (int):\nThe quadrature order. Must be in the interval (0, 8).\n\nReturns:\n(:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):\nAbscissas and weights\n\nExamples:\n>>> abscissas, weights = quad_genz_keister_24(1)\n>>> print(numpy.around(abscissas, 4))\n[-1.7321 0. 1.7321]\n>>> print(numpy.around(weights, 4))\n[0.1667 0.6667 0.1667]", "source": "codesearchnet"} {"code": "def preemphasis(signal, shift=1, cof=0.98):\n rolled_signal = np.roll(signal, shift)\n return (signal - (cof * rolled_signal))", "docstring": "preemphasising on the signal.\n\nArgs:\nsignal (array): The input signal.\nshift (int): The shift step.\ncof (float): The preemphasising coefficient. 0 equals to no filtering.\n\nReturns:\narray: The pre-emphasized signal.", "source": "codesearchnet"} {"code": "def _ParseFSMVariables(self, template):\n \n\n self.values = []\n\n for line in template:\n self._line_num += 1\n line = line.rstrip()\n\n \n if not line:\n return\n\n \n if self.comment_regex.match(line):\n continue\n\n if line.startswith('Value '):\n try:\n value = TextFSMValue(\n fsm=self, max_name_len=self.MAX_NAME_LEN,\n options_class=self._options_cls)\n value.Parse(line)\n except TextFSMTemplateError as error:\n raise TextFSMTemplateError('%s Line %s.' % (error, self._line_num))\n\n if value.name in self.header:\n raise TextFSMTemplateError(\n \"Duplicate declarations for Value '%s'. Line: %s.\"\n % (value.name, self._line_num))\n\n try:\n self._ValidateOptions(value)\n except TextFSMTemplateError as error:\n raise TextFSMTemplateError('%s Line %s.' % (error, self._line_num))\n\n self.values.append(value)\n self.value_map[value.name] = value.template\n \n elif not self.values:\n raise TextFSMTemplateError('No Value definitions found.')\n else:\n raise TextFSMTemplateError(\n 'Expected blank line after last Value entry. 
Line: %s.'\n % (self._line_num))", "docstring": "Extracts Variables from start of template file.\n\nValues are expected as a contiguous block at the head of the file.\nThese will be line separated from the State definitions that follow.\n\nArgs:\ntemplate: Valid template file, with Value definitions at the top.\n\nRaises:\nTextFSMTemplateError: If syntax or semantic errors are found.", "source": "juraj-google-style"} {"code": "def __init__(self, df, grouping_column_names):\n \n \n self.df = df\n self.grouping_columns = []\n self.grouping_column_types = []\n\n if isinstance(grouping_column_names, str):\n grouping_column_names = [grouping_column_names]\n for column_name in grouping_column_names:\n column = df[column_name]\n if isinstance(column, LazyOpResult):\n self.grouping_column_types.append(column.weld_type)\n self.grouping_columns.append(column.expr)\n elif isinstance(column, np.ndarray):\n column_type = numpyImpl.numpy_to_weld_type_mapping[\n str(column.dtype)]\n self.grouping_column_types.append(column_type)\n self.grouping_columns.append(column)\n\n self.grouping_column_names = grouping_column_names\n self.column_names = []\n for x in df._get_column_names():\n if x not in self.grouping_column_names:\n self.column_names.append(x)\n\n self.columns = []\n self.column_types = []\n for column_name in self.column_names:\n column = df[column_name]\n column_type = None\n if isinstance(column, LazyOpResult):\n column_type = column.weld_type\n column = column.expr\n elif isinstance(column, np.ndarray):\n column_type = numpyImpl.numpy_to_weld_type_mapping[\n str(column.dtype)]\n\n self.columns.append(column)\n self.column_types.append(column_type)", "docstring": "Summary\n\nArgs:\ndf (TYPE): Description\ngrouping_column_name (TYPE): Description", "source": "juraj-google-style"} {"code": "def _DrawStations(self, color=\"\n \n stations=self._stations\n tmpstrs = []\n for y in stations:\n tmpstrs.append(' ' %(color,20,20+y+.5,self._gwidth+20,20+y+.5))\n return \"\".join(tmpstrs)", "docstring": "Generates svg with a horizontal line for each station/stop.\n\nArgs:\n# Class Stop is defined in transitfeed.py\nstations: [Stop, Stop, ...]\n\nReturns:\n# A string containing a polyline tag for each stop\n\" BELAst:\n \n ast_subject = ast_dict.get(\"subject\", None)\n ast_object = ast_dict.get(\"object\", None)\n\n bel_subject = None\n bel_object = None\n bel_relation = ast_dict.get(\"relation\")\n\n if ast_subject:\n bel_subject = function_ast_to_objects(ast_subject, bel_obj)\n\n if ast_object:\n bel_object = function_ast_to_objects(ast_object, bel_obj)\n\n ast_obj = BELAst(bel_subject, bel_relation, bel_object, bel_obj.spec)\n\n return ast_obj", "docstring": "Convert Tatsu AST dictionary to BEL AST object\n\nArgs:\nast_dict (Mapping[str, Any])\n\nReturns:\nBELAst: object representing the BEL Statement AST", "source": "juraj-google-style"} {"code": "def __init__(self, message=None, orig_exc=None, context=None):\n \n self.orig_exc = orig_exc\n if message is not None:\n self.error_message = message\n elif orig_exc is not None:\n \n self.error_message = \"%s\" % orig_exc\n\n self.context = context or StatikErrorContext()\n if not isinstance(self.context, StatikErrorContext):\n raise TypeError(\"Statik error context must be of type StatikErrorContext\")", "docstring": "Constructor.\n\nArgs:\nmessage: An optional message to override the predefined error message.\norig_exc: The original exception from which this error was generated.\ncontext: An optional ErrorContext instance to provide additional information 
during\nerror rendering.", "source": "juraj-google-style"} {"code": "def _get_node_attribute_at_index(self, node_index, attr, attr_name):\n if not self._inbound_nodes:\n raise AttributeError(f'The layer {self.name} has never been called and thus has no defined {attr_name}.')\n if not len(self._inbound_nodes) > node_index:\n raise ValueError(f'Asked to get {attr_name} at node {node_index}, but the operation has only {len(self._inbound_nodes)} inbound nodes.')\n values = getattr(self._inbound_nodes[node_index], attr)\n if isinstance(values, list) and len(values) == 1:\n return values[0]\n else:\n return values", "docstring": "Private utility to retrieves an attribute (e.g. inputs) from a node.\n\nThis is used to implement the properties:\n- output\n- input\n\nArgs:\nnode_index: Integer index of the node from which\nto retrieve the attribute.\nattr: Exact node attribute name.\nattr_name: Human-readable attribute name, for error messages.\n\nReturns:\nThe operation's attribute `attr` at the node of index `node_index`.", "source": "github-repos"} {"code": "def get_evaluation_parameter(self, parameter_name, default_value=None):\n \n if \"evaluation_parameters\" in self._expectations_config and \\\n parameter_name in self._expectations_config['evaluation_parameters']:\n return self._expectations_config['evaluation_parameters'][parameter_name]\n else:\n return default_value", "docstring": "Get an evaluation parameter value that has been stored in meta.\n\nArgs:\nparameter_name (string): The name of the parameter to store.\ndefault_value (any): The default value to be returned if the parameter is not found.\n\nReturns:\nThe current value of the evaluation parameter.", "source": "juraj-google-style"} {"code": "def printMe(self, selfTag, selfValue):\n \n if len(selfValue) == 0:\n return ''\n \n \n elif len(selfValue) == 1 and not ancestor(selfValue[0]) is Single:\n text = '<{tag}>{value}\\n'.format(\n tag=selfTag, value=selfValue[0])\n return text\n else:\n valueText = ''\n for element in selfValue:\n \n \n \n \n if singleOrPair(element) == 'Single':\n \n valueText += element.printMe(element.tag, element.value)\n elif singleOrPair(element) == 'Pair':\n valueText += element.printMe(element.key, element.value)\n else:\n \n valueText += str(element) + '\\n'\n valueText = indent(valueText, 4)\n text = '<{tag}>\\n'.format(\n tag=selfTag) + valueText + '\\n'.format(tag=selfTag)\n return text", "docstring": "Parse the single and its value and return the parsed str.\n\nArgs:\nselfTag (str): The tag. Normally just ``self.tag``\nselfValue (list): a list of value elements(single, subclasses, str, int). 
Normally just ``self.value``\n\nReturns:\nstr: A parsed text", "source": "juraj-google-style"} {"code": "def load_obs(self, mask_threshold=0.5):\n \n print(\"Loading obs \", self.run_date, self.model_name, self.forecast_variable)\n start_date = self.run_date + timedelta(hours=self.start_hour)\n end_date = self.run_date + timedelta(hours=self.end_hour)\n mrms_grid = MRMSGrid(start_date, end_date, self.mrms_variable, self.mrms_path)\n mrms_grid.load_data()\n if len(mrms_grid.data) > 0:\n self.raw_obs[self.mrms_variable] = np.where(mrms_grid.data > 100, 100, mrms_grid.data)\n self.period_obs[self.mrms_variable] = self.raw_obs[self.mrms_variable].max(axis=0)\n if self.obs_mask:\n mask_grid = MRMSGrid(start_date, end_date, self.mask_variable, self.mrms_path)\n mask_grid.load_data()\n self.raw_obs[self.mask_variable] = np.where(mask_grid.data >= mask_threshold, 1, 0)\n self.period_obs[self.mask_variable] = self.raw_obs[self.mask_variable].max(axis=0)", "docstring": "Loads observations and masking grid (if needed).\n\nArgs:\nmask_threshold: Values greater than the threshold are kept, others are masked.", "source": "juraj-google-style"} {"code": "def _blocking_poll(self, timeout=None):\n \n if self._result_set:\n return\n\n retry_ = self._retry.with_deadline(timeout)\n\n try:\n retry_(self._done_or_raise)()\n except exceptions.RetryError:\n raise concurrent.futures.TimeoutError(\n \"Operation did not complete within the designated \" \"timeout.\"\n )", "docstring": "Poll and wait for the Future to be resolved.\n\nArgs:\ntimeout (int):\nHow long (in seconds) to wait for the operation to complete.\nIf None, wait indefinitely.", "source": "juraj-google-style"} {"code": "def run_compiler(self, compiler=GCC, inputs=None, output=None):\n prog = RunningProgram(self, *compiler_cmdline(compiler=compiler, inputs=inputs, output=output))\n prog.expect_exit_status(0)", "docstring": "Runs a compiler in the working directory.\n\nArgs:\ncompiler (tuple): The compiler program and its command-line arguments,\nincluding placeholders for output and input files.\ninputs (tuple): The list of input files for the compiler.\noutput (str): The name of the output file.", "source": "codesearchnet"} {"code": "def get_function_arguments(obj, func):\n func_name = '_inspect_%s' % func\n if hasattr(obj, func_name):\n f = getattr(obj, func_name)\n return f()\n f = getattr(obj, func)\n return get_function_args_defaults(f)", "docstring": "Return the function arguments based on the name provided. 
If they have\na _inspect_function attached to the class then use that otherwise default\nto the modified version of python inspect library.\n\nReturns:\nSame as get_function_args_defaults.", "source": "github-repos"} {"code": "def ExamineEvent(self, mediator, event):\n if (self._session_end_timestamp is None):\n self._session_end_timestamp = (event.timestamp + self._maximum_pause_microseconds)\n self._events_per_session.append(0)\n if (event.timestamp > self._session_end_timestamp):\n self._session_counter += 1\n self._events_per_session.append(0)\n self._session_end_timestamp = (event.timestamp + self._maximum_pause_microseconds)\n self._events_per_session[(- 1)] += 1\n label = 'session_{0:d}'.format(self._session_counter)\n event_tag = self._CreateEventTag(event, self._EVENT_TAG_COMMENT, [label])\n mediator.ProduceEventTag(event_tag)\n self._number_of_event_tags += 1", "docstring": "Analyzes an EventObject and tags it as part of a session.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between analysis\nplugins and other components, such as storage and dfvfs.\nevent (EventObject): event to examine.", "source": "codesearchnet"} {"code": "def set_file_idx_offset(self, file_idx_offset=0):\n if isinstance(file_idx_offset, int):\n self.file_idx_offset = file_idx_offset\n elif (file_idx_offset == 'auto'):\n self.file_idx_offset = self.storage.max_file_idx()\n else:\n raise ValueError('\"file_idx_offset\" must be an integer or `auto`')", "docstring": "Set offset of file index.\n\nArgs:\nfile_idx_offset: It can be either an integer or 'auto'. If set\nto an integer, the filename will start from\n``file_idx_offset`` + 1. If set to ``'auto'``, the filename\nwill start from existing max file index plus 1.", "source": "codesearchnet"} {"code": "def execute_interactive_code(elem, doc):\n code_lines = [l[4:] for l in elem.text.split('\\n')]\n code_blocks = [[code_lines[0]]]\n for line in code_lines[1:]:\n if (line.startswith(' ') or (line == '')):\n code_blocks[(- 1)].append(line)\n else:\n code_blocks.append([line])\n final_code = []\n try:\n child = replwrap.REPLWrapper('python', '>>> ', None)\n except NameError:\n pf.debug(('Can not run interactive session. No output produced ' + '(Code was:\\n{!s}\\n)'.format(elem)))\n pf.debug('Please pip install pexpect.')\n return ''\n for code_block in code_blocks:\n result = child.run_command(('\\n'.join(code_block) + '\\n')).rstrip('\\r\\n')\n final_code += [(('>>> ' if (i == 0) else '... 
') + l) for (i, l) in enumerate(code_block)]\n if result:\n final_code += [r for r in result.split('\\n') if (r.strip() not in code_block)]\n return '\\n'.join(final_code)", "docstring": "Executes code blocks for a python shell.\n\nParses the code in `elem.text` into blocks and\nexecutes them.\n\nArgs:\nelem The AST element.\ndoc The document.\n\nReturn:\nThe code with inline results.", "source": "codesearchnet"} {"code": "def write_file(self, filename):\n with open(filename, 'w') as f:\n f.write(self.__str__())", "docstring": "Write the PWSCF input file.\n\nArgs:\nfilename (str): The string filename to output to.", "source": "codesearchnet"} {"code": "def __init__(self, device: 'cirq.google.XmonDevice', seed=None) -> None:\n \n self._c = device.qubits\n self._c_adj = chip_as_adjacency_list(device)\n self._rand = np.random.RandomState(seed)", "docstring": "Greedy sequence search constructor.\n\nArgs:\ndevice: Chip description.\nseed: Optional seed value for random number generator.", "source": "juraj-google-style"} {"code": "def from_grid_locator(locator):\n \n if not len(locator) in (4, 6, 8):\n raise ValueError('Locator must be 4, 6 or 8 characters long %r'\n % locator)\n\n \n \n locator = list(locator)\n\n \n locator[0] = ord(locator[0]) - 65\n locator[1] = ord(locator[1]) - 65\n\n \n locator[2] = int(locator[2])\n locator[3] = int(locator[3])\n\n if len(locator) >= 6:\n \n \n locator[4] = ord(locator[4].lower()) - 97\n locator[5] = ord(locator[5].lower()) - 97\n\n if len(locator) == 8:\n \n locator[6] = int(locator[6])\n locator[7] = int(locator[7])\n\n \n \n if not 0 <= locator[0] <= 17 \\\n or not 0 <= locator[1] <= 17 \\\n or not 0 <= locator[2] <= 9 \\\n or not 0 <= locator[3] <= 9:\n raise ValueError('Invalid values in locator %r' % locator)\n\n \n if len(locator) >= 6:\n if not 0 <= locator[4] <= 23 \\\n or not 0 <= locator[5] <= 23:\n raise ValueError('Invalid values in locator %r' % locator)\n\n \n if len(locator) == 8:\n if not 0 <= locator[6] <= 9 \\\n or not 0 <= locator[7] <= 9:\n raise ValueError('Invalid values in locator %r' % locator)\n\n longitude = LONGITUDE_FIELD * locator[0] \\\n + LONGITUDE_SQUARE * locator[2]\n latitude = LATITUDE_FIELD * locator[1] \\\n + LATITUDE_SQUARE * locator[3]\n\n if len(locator) >= 6:\n longitude += LONGITUDE_SUBSQUARE * locator[4]\n latitude += LATITUDE_SUBSQUARE * locator[5]\n\n if len(locator) == 8:\n longitude += LONGITUDE_EXTSQUARE * locator[6] + LONGITUDE_EXTSQUARE / 2\n latitude += LATITUDE_EXTSQUARE * locator[7] + LATITUDE_EXTSQUARE / 2\n else:\n longitude += LONGITUDE_EXTSQUARE * 5\n latitude += LATITUDE_EXTSQUARE * 5\n\n \n longitude -= 180\n latitude -= 90\n\n return latitude, longitude", "docstring": "Calculate geodesic latitude/longitude from Maidenhead locator.\n\nArgs:\nlocator (str): Maidenhead locator string\n\nReturns:\ntuple of float: Geodesic latitude and longitude values\n\nRaises:\nValueError: Incorrect grid locator length\nValueError: Invalid values in locator string", "source": "juraj-google-style"} {"code": "def _reset_offset(self, partition):\n \n timestamp = self._subscriptions.assignment[partition].reset_strategy\n if timestamp is OffsetResetStrategy.EARLIEST:\n strategy = 'earliest'\n elif timestamp is OffsetResetStrategy.LATEST:\n strategy = 'latest'\n else:\n raise NoOffsetForPartitionError(partition)\n\n log.debug(\"Resetting offset for partition %s to %s offset.\",\n partition, strategy)\n offsets = self._retrieve_offsets({partition: timestamp})\n if partition not in offsets:\n raise 
NoOffsetForPartitionError(partition)\n offset = offsets[partition][0]\n\n \n \n if self._subscriptions.is_assigned(partition):\n self._subscriptions.seek(partition, offset)", "docstring": "Reset offsets for the given partition using the offset reset strategy.\n\nArguments:\npartition (TopicPartition): the partition that needs its offset reset\n\nRaises:\nNoOffsetForPartitionError: if no offset reset strategy is defined", "source": "juraj-google-style"} {"code": "def get_static_value(x):\n if isinstance(x, core.Tensor) and (x.dtype.is_floating or x.dtype.is_complex):\n return None\n return tensor_util.constant_value(x)", "docstring": "A version of tf.get_static_value that returns None on float dtypes.\n\nIt returns None on float dtypes in order to avoid breaking gradients.\n\nArgs:\nx: a tensor.\n\nReturns:\nSame as `tf.get_static_value`, except that it returns None when `x` has a\nfloat dtype.", "source": "github-repos"} {"code": "def db_get(table, record, column, if_exists=False):\n cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]\n if if_exists:\n cmd += ['--if-exists']\n cmd += ['list', table, record]\n result = __salt__['cmd.run_all'](cmd)\n if (result['retcode'] != 0):\n raise CommandExecutionError(result['stderr'])\n output = _stdout_parse_json(result['stdout'])\n if (output['data'] and output['data'][0]):\n return output['data'][0][0]\n else:\n return None", "docstring": "Gets a column's value for a specific record.\n\nArgs:\ntable: A string - name of the database table.\nrecord: A string - identifier of the record.\ncolumn: A string - name of the column.\nif_exists: A boolean - if True, it is not an error if the record does\nnot exist.\n\nReturns:\nThe column's value.\n\nCLI Example:\n.. code-block:: bash\n\nsalt '*' openvswitch.db_get Port br0 vlan_mode", "source": "codesearchnet"} {"code": "def remove(self, word):\n self._dictionary.pop(word.lower())\n self._update_dictionary()", "docstring": "Remove a word from the word frequency list\n\nArgs:\nword (str): The word to remove", "source": "codesearchnet"} {"code": "def fileToMD5(filename, block_size=(256 * 128), binary=False):\n md5 = hashlib.md5()\n with open(filename, 'rb') as f:\n for chunk in iter((lambda : f.read(block_size)), b''):\n md5.update(chunk)\n if (not binary):\n return md5.hexdigest()\n return md5.digest()", "docstring": "A function that calculates the MD5 hash of a file.\n\nArgs:\n-----\nfilename: Path to the file.\nblock_size: Chunks of suitable size. Block size directly depends on\nthe block size of your filesystem to avoid performance issues.\nBlocks of 4096 octets (Default NTFS).\nbinary: A boolean representing whether the returned info is in binary\nformat or not.\n\nReturns:\n--------\nstring: The MD5 hash of the file.", "source": "codesearchnet"} {"code": "def pluralize(wordtext, num=2, plural_suffix='s'):\n if (num == 1):\n return wordtext\n elif wordtext.endswith(\"'s\"):\n return (wordtext[:(- 2)] + \"s'\")\n else:\n return (wordtext + plural_suffix)", "docstring": "Heuristically changes a word to its plural form if `num` is not 1\n\nArgs:\nwordtext (str): word in singular form\nnum (int): a length of an associated list if applicable (default = 2)\nplural_suffix (str): heuristic plural form (default = 's')\n\nReturns:\nstr: pluralized form. 
Can handle some genitive cases\n\nCommandLine:\npython -m utool.util_str pluralize\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_str import * # NOQA\n>>> wordtext = 'foo'\n>>> result = pluralize(wordtext)\n>>> print(result)\nfoos", "source": "codesearchnet"} {"code": "def price(self, valuation_date, market, model=None, name=None):\n name = name or self._name + '_price'\n with tf.name_scope(name):\n discount_curve = market.discount_curve\n coupon_cf = self._cashflows.price(valuation_date, market, model)\n principal_cf = self._notional * discount_curve.get_discount_factor(self._maturity_date)\n return coupon_cf + principal_cf", "docstring": "Returns the dirty price of the bonds on the valuation date.\n\nArgs:\nvaluation_date: A scalar `DateTensor` specifying the date on which\nvaluation is being desired.\nmarket: A namedtuple of type `InterestRateMarket` which contains the\nnecessary information for pricing the bonds.\nmodel: Reserved for future use.\nname: Python str. The name to give to the ops created by this function.\nDefault value: `None` which maps to 'price'.\n\nReturns:\nA Rank 1 `Tensor` of real dtype containing the dirty price of each bond\nbased on the input market data.", "source": "github-repos"} {"code": "def get_concept_item_mapping(self, concepts=None, lang=None):\n \n if concepts is None:\n concepts = self.filter(active=True)\n if lang is not None:\n concepts = concepts.filter(lang=lang)\n if lang is None:\n languages = set([concept.lang for concept in concepts])\n if len(languages) > 1:\n raise Exception('Concepts has multiple languages')\n lang = list(languages)[0]\n item_lists = Item.objects.filter_all_reachable_leaves_many([json.loads(concept.query)\n for concept in concepts], lang)\n return dict(zip([c.pk for c in concepts], item_lists))", "docstring": "Get mapping of concepts to items belonging to concept.\n\nArgs:\nconcepts (list of Concept): Defaults to None meaning all concepts\nlang (str): language of concepts, if None use language of concepts\n\nReturns:\ndict: concept (int) -> list of item ids (int)", "source": "juraj-google-style"} {"code": "def getCurrentStrDatetime():\n i = datetime.datetime.now()\n strTime = ('%s-%s-%s_%sh%sm' % (i.year, i.month, i.day, i.hour, i.minute))\n return strTime", "docstring": "Generating the current Datetime with a given format\n\nReturns:\n--------\nstring: The string of a date.", "source": "codesearchnet"} {"code": "def get(self, uri):\n uri = (self.URI + uri)\n return self._client.get(uri)", "docstring": "Gets an index resource by URI.\n\nArgs:\nuri: The resource URI.\n\nReturns:\ndict: The index resource.", "source": "codesearchnet"} {"code": "def element_wise(self, func, *args, **kwargs):\n s = self.shape\n emat = [func(o, *args, **kwargs) for o in self.matrix.ravel()]\n return Matrix(np_array(emat).reshape(s))", "docstring": "Apply a function to each matrix element and return the result in a\nnew operator matrix of the same shape.\n\nArgs:\nfunc (FunctionType): A function to be applied to each element. 
It\nmust take the element as its first argument.\nargs: Additional positional arguments to be passed to `func`\nkwargs: Additional keyword arguments to be passed to `func`\n\nReturns:\nMatrix: Matrix with results of `func`, applied element-wise.", "source": "codesearchnet"} {"code": "def get_committed_signatures(vcs):\n \n committed_path = _get_committed_history_path(vcs)\n known_signatures = []\n if os.path.exists(committed_path):\n with open(committed_path, 'r') as f:\n known_signatures = f.read().split()\n return known_signatures", "docstring": "Get the list of committed signatures\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\n\nReturns:\nlist(basestring) - list of signatures", "source": "juraj-google-style"} {"code": "def add(self, command, *args):\n \n\n cmd = Command(command, args)\n self.commands.append(cmd)", "docstring": "Add a command to this command file.\n\nArgs:\ncommand (str): The command to add\n*args (str): The parameters to call the command with", "source": "juraj-google-style"} {"code": "def _expanded_sql(self, sampling=None):\n udfs = []\n subqueries = []\n expanded_sql = ''\n\n def _recurse_subqueries(query):\n 'Recursively scan subqueries and add their pieces to global scope udfs and subqueries\\n '\n if query._subqueries:\n for subquery in query._subqueries:\n _recurse_subqueries(subquery[1])\n subqueries.extend([s for s in query._subqueries if (s not in subqueries)])\n if query._udfs:\n udfs.extend([u[1] for u in query._udfs if (u[1] not in udfs)])\n _recurse_subqueries(self)\n if udfs:\n expanded_sql += '\\n'.join([udf._expanded_sql() for udf in udfs])\n expanded_sql += '\\n'\n\n def _indent_query(subquery):\n return (' ' + subquery._sql.replace('\\n', '\\n '))\n if subqueries:\n expanded_sql += ('WITH ' + '\\n),\\n'.join([('%s AS (\\n%s' % (sq[0], _indent_query(sq[1]))) for sq in subqueries]))\n expanded_sql += '\\n)\\n\\n'\n expanded_sql += (sampling(self._sql) if sampling else self._sql)\n return expanded_sql", "docstring": "Get the expanded SQL of this object, including all subqueries, UDFs, and external datasources\n\nReturns:\nThe expanded SQL string of this object", "source": "codesearchnet"} {"code": "def GetMessages(self, files):\n \n result = {}\n for file_name in files:\n file_desc = self.pool.FindFileByName(file_name)\n for desc in file_desc.message_types_by_name.values():\n result[desc.full_name] = self.GetPrototype(desc)\n\n \n \n \n \n \n \n \n \n\n for extension in file_desc.extensions_by_name.values():\n if extension.containing_type.full_name not in self._classes:\n self.GetPrototype(extension.containing_type)\n extended_class = self._classes[extension.containing_type.full_name]\n extended_class.RegisterExtension(extension)\n return result", "docstring": "Gets all the messages from a specified file.\n\nThis will find and resolve dependencies, failing if the descriptor\npool cannot satisfy them.\n\nArgs:\nfiles: The file names to extract messages from.\n\nReturns:\nA dictionary mapping proto names to the message classes. 
This will include\nany dependent messages as well as any messages defined in the same file as\na specified message.", "source": "juraj-google-style"} {"code": "def load_and_save_resfile(filename, outfile=None, outdir=None, mass=1.00):\n \n d = CellpyData()\n\n if not outdir:\n outdir = prms.Paths[\"cellpydatadir\"]\n\n if not outfile:\n outfile = os.path.basename(filename).split(\".\")[0] + \".h5\"\n outfile = os.path.join(outdir, outfile)\n\n print(\"filename:\", filename)\n print(\"outfile:\", outfile)\n print(\"outdir:\", outdir)\n print(\"mass:\", mass, \"mg\")\n\n d.from_raw(filename)\n d.set_mass(mass)\n d.make_step_table()\n d.make_summary()\n d.save(filename=outfile)\n d.to_csv(datadir=outdir, cycles=True, raw=True, summary=True)\n return outfile", "docstring": "Load a raw data file and save it as cellpy-file.\n\nArgs:\nmass (float): active material mass [mg].\noutdir (path): optional, path to directory for saving the hdf5-file.\noutfile (str): optional, name of hdf5-file.\nfilename (str): name of the resfile.\n\nReturns:\nout_file_name (str): name of saved file.", "source": "juraj-google-style"} {"code": "def highest_stored_id(self):\n shared = [0]\n\n def _keep_max(_i, reading):\n if (reading.reading_id > shared[0]):\n shared[0] = reading.reading_id\n self.engine.scan_storage('storage', _keep_max)\n self.engine.scan_storage('streaming', _keep_max)\n return shared[0]", "docstring": "Scan through the stored readings and report the highest stored id.\n\nReturns:\nint: The highest stored id.", "source": "codesearchnet"} {"code": "def validate(cls, mapper_spec):\n \n if mapper_spec.input_reader_class() != cls:\n raise BadReaderParamsError(\"Input reader class mismatch\")\n params = _get_params(mapper_spec)\n if cls.BATCH_SIZE_PARAM in params:\n try:\n batch_size = int(params[cls.BATCH_SIZE_PARAM])\n if batch_size < 1:\n raise BadReaderParamsError(\"Bad batch size: %s\" % batch_size)\n except ValueError, e:\n raise BadReaderParamsError(\"Bad batch size: %s\" % e)", "docstring": "Validates mapper spec.\n\nArgs:\nmapper_spec: The MapperSpec for this InputReader.\n\nRaises:\nBadReaderParamsError: required parameters are missing or invalid.", "source": "juraj-google-style"} {"code": "def _CreateFeedMapping(client, feed_details):\n \n \n feed_mapping_service = client.GetService('FeedMappingService',\n version='v201809')\n\n \n operation = {\n \n 'operand': {\n 'criterionType': DSA_PAGE_FEED_CRITERION_TYPE,\n 'feedId': feed_details.feed_id,\n \n 'attributeFieldMappings': [\n {\n 'feedAttributeId': feed_details.url_attribute_id,\n 'fieldId': DSA_PAGE_URLS_FIELD_ID\n },\n {\n 'feedAttributeId': feed_details.label_attribute_id,\n 'fieldId': DSA_LABEL_FIELD_ID\n }\n ]\n },\n 'operator': 'ADD'\n }\n\n \n feed_mapping_service.mutate([operation])", "docstring": "Creates the feed mapping for DSA page feeds.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed_details: a _DSAFeedDetails instance.", "source": "juraj-google-style"} {"code": "def open_channel_with_funding(self, registry_address_hex, token_address_hex, peer_address_hex, total_deposit, settle_timeout=None):\n registry_address = decode_hex(registry_address_hex)\n peer_address = decode_hex(peer_address_hex)\n token_address = decode_hex(token_address_hex)\n try:\n self._discovery.get(peer_address)\n except KeyError:\n print('Error: peer {} not found in discovery'.format(peer_address_hex))\n return None\n self._api.channel_open(registry_address, token_address, peer_address, settle_timeout=settle_timeout)\n return 
self._api.set_total_channel_deposit(registry_address, token_address, peer_address, total_deposit)", "docstring": "Convenience method to open a channel.\n\nArgs:\nregistry_address_hex (str): hex encoded address of the registry for the channel.\ntoken_address_hex (str): hex encoded address of the token for the channel.\npeer_address_hex (str): hex encoded address of the channel peer.\ntotal_deposit (int): amount of total funding for the channel.\nsettle_timeout (int): amount of blocks for the settle time (if None use app defaults).\n\nReturn:\nnetting_channel: the (newly opened) netting channel object.", "source": "codesearchnet"} {"code": "def register_backend(self, config_contents):\n \n if config_contents is None:\n return\n self.__register_class(config_contents)\n self.__api_configs.append(config_contents)\n self.__register_methods(config_contents)", "docstring": "Register a single API and its config contents.\n\nArgs:\nconfig_contents: Dict containing API configuration.", "source": "juraj-google-style"} {"code": "def setup(argv):\n \n parser = argparse.ArgumentParser(\n description='Compute Jekyl- and prose-aware wordcounts',\n epilog='Accepted filetypes: plaintext, markdown, markdown (Jekyll)')\n parser.add_argument('-S', '--split-hyphens', action='store_true',\n dest='split_hyphens',\n help='split hyphenated words rather than counting '\n 'them as one word (\"non-trivial\" counts as two words '\n 'rather than one)')\n parser.add_argument('-u', '--update', action='store_true',\n help='update the jekyll file in place with the counts.'\n ' Does nothing if the file is not a Jekyll markdown '\n 'file. Implies format=yaml, invalid with input '\n 'from STDIN and non-Jekyll files.')\n parser.add_argument('-f', '--format', nargs='?',\n choices=['yaml', 'json', 'default'], default='default',\n help='output format.')\n parser.add_argument('-i', '--indent', type=int, nargs='?', default=4,\n help='indentation depth (default: 4).')\n parser.add_argument('file', type=argparse.FileType('rb'),\n help='file to parse (or - for STDIN)')\n return parser.parse_args(argv)", "docstring": "Sets up the ArgumentParser.\n\nArgs:\nargv: an array of arguments", "source": "juraj-google-style"} {"code": "def _lookup_in_all_namespaces(self, symbol):\n namespace = self.namespaces\n namespace_stack = []\n for current in symbol.namespace_stack:\n namespace = namespace.get(current)\n if ((namespace is None) or (not isinstance(namespace, dict))):\n break\n namespace_stack.append(namespace)\n for namespace in reversed(namespace_stack):\n try:\n return self._lookup_namespace(symbol, namespace)\n except Error:\n pass\n return None", "docstring": "Helper for lookup_symbol that looks for symbols in all namespaces.\n\nArgs:\nsymbol: Symbol", "source": "codesearchnet"} {"code": "def expand_by_device(original_parallelism, device_parallelism, data):\n \n device_to_datum = {\n device_parallelism.devices[i]: data[i]\n for i in range(device_parallelism.n)}\n return [device_to_datum[d] for d in original_parallelism.devices]", "docstring": "Opposite of reduce_by_device().\n\nArgs:\noriginal_parallelism: a expert_utils.Parallelism object.\ndevice_parallelism: a expert_utils.Parallelism object.\ndata: a list of tensors with length device_parallelism.n\n\nReturns:\na list of Tensors with length original_parallelism.n", "source": "juraj-google-style"} {"code": "def _FormatInAddrExToken(self, token_data):\n \n protocol = bsmtoken.BSM_PROTOCOLS.get(token_data.net_type, 'UNKNOWN')\n if token_data.net_type == 4:\n ip_address = 
self._FormatPackedIPv6Address(token_data.ip_address[:4])\n elif token_data.net_type == 16:\n ip_address = self._FormatPackedIPv6Address(token_data.ip_address)\n return {\n 'protocols': protocol,\n 'net_type': token_data.net_type,\n 'address': ip_address}", "docstring": "Formats an extended IPv4 address token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_in_addr_ex): AUT_IN_ADDR_EX token data.\n\nReturns:\ndict[str, str]: token values.", "source": "juraj-google-style"} {"code": "def from_string(string):\n lines = string.split('\\n')\n toks = lines[0].split()\n lengths = [float(i) for i in toks]\n toks = lines[1].split()\n angles = [float(i) for i in toks[0:3]]\n a = lengths.pop((- 1))\n lengths.insert(0, a)\n alpha = angles.pop((- 1))\n angles.insert(0, alpha)\n latt = Lattice.from_lengths_and_angles(lengths, angles)\n sp = []\n coords = []\n chrg = []\n for l in lines[4:]:\n m = re.match(('\\\\d+\\\\s+(\\\\w+)\\\\s+([0-9\\\\-\\\\.]+)\\\\s+([0-9\\\\-\\\\.]+)\\\\s+' + '([0-9\\\\-\\\\.]+)\\\\s+(?:0\\\\s+){8}([0-9\\\\-\\\\.]+)'), l.strip())\n if m:\n sp.append(m.group(1))\n coords.append([float(m.group(i)) for i in [3, 4, 2]])\n chrg.append(m.group(5))\n return ZeoCssr(Structure(latt, sp, coords, site_properties={'charge': chrg}))", "docstring": "Reads a string representation to a ZeoCssr object.\n\nArgs:\nstring: A string representation of a ZeoCSSR.\n\nReturns:\nZeoCssr object.", "source": "codesearchnet"} {"code": "def _ParseRedirected(\n self, parser_mediator, msiecf_item, recovered=False):\n \n date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n\n event_data = MSIECFRedirectedEventData()\n event_data.offset = msiecf_item.offset\n event_data.recovered = recovered\n event_data.url = msiecf_item.location\n\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extract data from a MSIE Cache Files (MSIECF) redirected item.\n\nEvery item is stored as an event object, one for each timestamp.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmsiecf_item (pymsiecf.redirected): MSIECF redirected item.\nrecovered (Optional[bool]): True if the item was recovered.", "source": "juraj-google-style"} {"code": "def __call__(self, w):\n return w", "docstring": "Applies the constraint to the input weight variable.\n\nBy default, the inputs weight variable is not modified.\nUsers should override this method to implement their own projection\nfunction.\n\nArgs:\nw: Input weight variable.\n\nReturns:\nProjected variable (by default, returns unmodified inputs).", "source": "github-repos"} {"code": "def get_data_source_instance(data_source, sagemaker_session):\n \n parsed_uri = urlparse(data_source)\n if parsed_uri.scheme == 'file':\n return LocalFileDataSource(parsed_uri.netloc + parsed_uri.path)\n elif parsed_uri.scheme == 's3':\n return S3DataSource(parsed_uri.netloc, parsed_uri.path, sagemaker_session)", "docstring": "Return an Instance of :class:`sagemaker.local.data.DataSource` that can handle\nthe provided data_source URI.\n\ndata_source can be either file:// or s3://\n\nArgs:\ndata_source (str): a valid URI that points to a data source.\nsagemaker_session (:class:`sagemaker.session.Session`): a SageMaker Session to interact with\nS3 if required.\n\nReturns\n:class:`sagemaker.local.data.DataSource`: an Instance of a Data Source", "source": "juraj-google-style"} {"code": "def 
__init__(self, channel):\n \n self.ListDocuments = channel.unary_unary(\n '/google.cloud.dialogflow.v2beta1.Documents/ListDocuments',\n request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_document__pb2.ListDocumentsRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_document__pb2.ListDocumentsResponse.FromString,\n )\n self.GetDocument = channel.unary_unary(\n '/google.cloud.dialogflow.v2beta1.Documents/GetDocument',\n request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_document__pb2.GetDocumentRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_document__pb2.Document.FromString,\n )\n self.CreateDocument = channel.unary_unary(\n '/google.cloud.dialogflow.v2beta1.Documents/CreateDocument',\n request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_document__pb2.CreateDocumentRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )\n self.DeleteDocument = channel.unary_unary(\n '/google.cloud.dialogflow.v2beta1.Documents/DeleteDocument',\n request_serializer=google_dot_cloud_dot_dialogflow__v2beta1_dot_proto_dot_document__pb2.DeleteDocumentRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"} {"code": "def _get_first_op_from_collection(self, key):\n try:\n op_list = ops.get_collection(key)\n if len(op_list) > 1:\n logging.info('Found %d %s operations. Returning the first one.', len(op_list), key)\n if op_list:\n return op_list[0]\n except LookupError:\n pass\n return None", "docstring": "Returns the first `Operation` from a collection.\n\nArgs:\nkey: A string collection key.\n\nReturns:\nThe first Op found in a collection, or `None` if the collection is empty.", "source": "github-repos"} {"code": "def request(self, subject, callback, msg=None):\n inbox = self._build_inbox()\n s = self.subscribe(inbox, callback)\n self.unsubscribe(s, 1)\n self.publish(subject, msg, inbox)\n return s", "docstring": "ublish a message with an implicit inbox listener as the reply.\nMessage is optional.\n\nArgs:\nsubject (string): a string with the subject\ncallback (function): callback to be called\nmsg (string=None): payload string", "source": "codesearchnet"} {"code": "def set_page_artid(self, page_start=None, page_end=None, artid=None):\n if (page_end and (not page_start)):\n raise ValueError('End_page provided without start_page')\n self._ensure_reference_field('publication_info', {})\n publication_info = self.obj['reference']['publication_info']\n if page_start:\n publication_info['page_start'] = page_start\n if page_end:\n publication_info['page_end'] = page_end\n if artid:\n publication_info['artid'] = artid", "docstring": "Add artid, start, end pages to publication info of a reference.\n\nArgs:\npage_start(Optional[string]): value for the field page_start\npage_end(Optional[string]): value for the field page_end\nartid(Optional[string]): value for the field artid\n\nRaises:\nValueError: when no start_page given for an end_page", "source": "codesearchnet"} {"code": "def bounce(sequence):\n \n N = len(sequence)\n def f(i):\n div, mod = divmod(i, N)\n if div % 2 == 0:\n return sequence[mod]\n else:\n return sequence[N-mod-1]\n return partial(force, sequence=_advance(f))", "docstring": "Return a driver function that can 
advance a \"bounced\" sequence\nof values.\n\n.. code-block:: none\n\nseq = [0, 1, 2, 3]\n\n# bounce(seq) => [0, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, ...]\n\nArgs:\nsequence (seq) : a sequence of values for the driver to bounce", "source": "juraj-google-style"} {"code": "def data_group_association(self, xid):\n \n groups = []\n group_data = None\n\n \n if self.groups.get(xid) is not None:\n group_data = self.groups.get(xid)\n del self.groups[xid]\n elif self.groups_shelf.get(xid) is not None:\n group_data = self.groups_shelf.get(xid)\n del self.groups_shelf[xid]\n\n if group_data is not None:\n \n group_data = self.data_group_type(group_data)\n groups.append(group_data)\n\n \n for assoc_xid in group_data.get('associatedGroupXid', []):\n groups.extend(self.data_group_association(assoc_xid))\n\n return groups", "docstring": "Return group dict array following all associations.\n\nArgs:\nxid (str): The xid of the group to retrieve associations.\n\nReturns:\nlist: A list of group dicts.", "source": "juraj-google-style"} {"code": "def update_ports(self, ports, id_or_uri):\n ports = merge_default_values(ports, {'type': 'port'})\n uri = (self._client.build_uri(id_or_uri) + '/update-ports')\n return self._client.update(uri=uri, resource=ports)", "docstring": "Updates the switch ports. Only the ports under the management of OneView and those that are unlinked are\nsupported for update.\n\nNote:\nThis method is available for API version 300 or later.\n\nArgs:\nports: List of Switch Ports.\nid_or_uri: Can be either the switch id or the switch uri.\n\nReturns:\ndict: Switch", "source": "codesearchnet"} {"code": "def feature_path(self, gff_path):\n if (not gff_path):\n self.feature_dir = None\n self.feature_file = None\n else:\n if (not op.exists(gff_path)):\n raise OSError('{}: file does not exist!'.format(gff_path))\n if (not op.dirname(gff_path)):\n self.feature_dir = '.'\n else:\n self.feature_dir = op.dirname(gff_path)\n self.feature_file = op.basename(gff_path)", "docstring": "Load a GFF file with information on a single sequence and store features in the ``features`` attribute\n\nArgs:\ngff_path: Path to GFF file.", "source": "codesearchnet"} {"code": "def serialize_keras_object(instance):\n _, instance = tf_decorator.unwrap(instance)\n if instance is None:\n return None\n supports_masking = getattr(instance, 'supports_masking', False) or (hasattr(instance, 'compute_mask') and (not is_default(instance.compute_mask)))\n if supports_masking and is_default(instance.get_config):\n warnings.warn('Custom mask layers require a config and must override get_config. 
When loading, the custom mask layer must be passed to the custom_objects argument.', category=CustomMaskWarning)\n if hasattr(instance, 'get_config'):\n name = get_registered_name(instance.__class__)\n try:\n config = instance.get_config()\n except NotImplementedError as e:\n if _SKIP_FAILED_SERIALIZATION:\n return serialize_keras_class_and_config(name, {_LAYER_UNDEFINED_CONFIG_KEY: True})\n raise e\n serialization_config = {}\n for key, item in config.items():\n if isinstance(item, str):\n serialization_config[key] = item\n continue\n try:\n serialized_item = serialize_keras_object(item)\n if isinstance(serialized_item, dict) and (not isinstance(item, dict)):\n serialized_item['__passive_serialization__'] = True\n serialization_config[key] = serialized_item\n except ValueError:\n serialization_config[key] = item\n name = get_registered_name(instance.__class__)\n return serialize_keras_class_and_config(name, serialization_config, instance)\n if hasattr(instance, '__name__'):\n return get_registered_name(instance)\n raise ValueError('Cannot serialize', instance)", "docstring": "Serialize a Keras object into a JSON-compatible representation.\n\nCalls to `serialize_keras_object` while underneath the\n`SharedObjectSavingScope` context manager will cause any objects re-used\nacross multiple layers to be saved with a special shared object ID. This\nallows the network to be re-created properly during deserialization.\n\nArgs:\ninstance: The object to serialize.\n\nReturns:\nA dict-like, JSON-compatible representation of the object's config.", "source": "github-repos"} {"code": "def _checkTimeValue( timevalue, maxvalue ): \n \n if maxvalue is None:\n raise TypeError('The maxvalue (for the time value) must not be None!')\n minimalmodbus._checkNumerical(timevalue, minvalue=0, maxvalue=maxvalue, description='time value')", "docstring": "Check that the given timevalue is valid.\n\nArgs:\n* timevalue (numerical): The time value to be checked. Must be positive.\n* maxvalue (numerical): Upper limit for time value. Must be positive.\n\nRaises:\nTypeError, ValueError", "source": "juraj-google-style"} {"code": "def __new__(mcs, classname, baseclasses, attrs):\n if not baseclasses:\n raise TypeError('Expected non-empty baseclass. 
Does Distribution not subclass _BaseDistribution?')\n which_base = [base for base in baseclasses if base == _BaseDistribution or issubclass(base, Distribution)]\n base = which_base[0]\n if base == _BaseDistribution:\n return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)\n if not issubclass(base, Distribution):\n raise TypeError(\"First parent class declared for %s must be Distribution, but saw '%s'\" % (classname, base.__name__))\n for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:\n special_attr = '_%s' % attr\n class_attr_value = attrs.get(attr, None)\n if attr in attrs:\n continue\n base_attr_value = getattr(base, attr, None)\n if not base_attr_value:\n raise AttributeError(\"Internal error: expected base class '%s' to implement method '%s'\" % (base.__name__, attr))\n class_special_attr_value = attrs.get(special_attr, None)\n if class_special_attr_value is None:\n continue\n class_special_attr_docstring = tf_inspect.getdoc(class_special_attr_value)\n if not class_special_attr_docstring:\n continue\n class_attr_value = _copy_fn(base_attr_value)\n class_attr_docstring = tf_inspect.getdoc(base_attr_value)\n if class_attr_docstring is None:\n raise ValueError('Expected base class fn to contain a docstring: %s.%s' % (base.__name__, attr))\n class_attr_value.__doc__ = _update_docstring(class_attr_value.__doc__, 'Additional documentation from `%s`:\\n\\n%s' % (classname, class_special_attr_docstring))\n attrs[attr] = class_attr_value\n return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)", "docstring": "Control the creation of subclasses of the Distribution class.\n\nThe main purpose of this method is to properly propagate docstrings\nfrom private Distribution methods, like `_log_prob`, into their\npublic wrappers as inherited by the Distribution base class\n(e.g. `log_prob`).\n\nArgs:\nclassname: The name of the subclass being created.\nbaseclasses: A tuple of parent classes.\nattrs: A dict mapping new attributes to their values.\n\nReturns:\nThe class object.\n\nRaises:\nTypeError: If `Distribution` is not a subclass of `BaseDistribution`, or\nthe new class is derived via multiple inheritance and the first\nparent class is not a subclass of `BaseDistribution`.\nAttributeError: If `Distribution` does not implement e.g. 
`log_prob`.\nValueError: If a `Distribution` public method lacks a docstring.", "source": "github-repos"} {"code": "def import_image_from_data(self, data, repository=None, tag=None,\n changes=None):\n \n\n u = self._url('/images/create')\n params = _import_image_params(\n repository, tag, src='-', changes=changes\n )\n headers = {'Content-Type': 'application/tar'}\n return self._result(\n self._post(\n u, data=data, params=params, headers=headers, timeout=None\n )\n )", "docstring": "Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but\nallows importing in-memory bytes data.\n\nArgs:\ndata (bytes collection): Bytes collection containing valid tar data\nrepository (str): The repository to create\ntag (str): The tag to apply", "source": "juraj-google-style"} {"code": "def eere_station(station_code):\n \n with open(env.SRC_PATH + '/eere_meta.csv') as eere_meta:\n stations = csv.DictReader(eere_meta)\n for station in stations:\n if station['station_code'] == station_code:\n return station\n raise KeyError('station not found')", "docstring": "Station information.\n\nArgs:\nstation_code (str): station code.\n\nReturns (dict): station information", "source": "juraj-google-style"} {"code": "def get_sample_dataset(dataset_properties):\n kwargs = dataset_properties.copy()\n data_type = kwargs.pop('type')\n if (data_type == 'multiclass'):\n try:\n (X, y) = datasets.make_classification(random_state=8, **kwargs)\n splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)\n except Exception as e:\n raise exceptions.UserError(repr(e))\n elif (data_type == 'iris'):\n (X, y) = datasets.load_iris(return_X_y=True)\n splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)\n elif (data_type == 'mnist'):\n (X, y) = datasets.load_digits(return_X_y=True)\n splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)\n elif (data_type == 'breast_cancer'):\n (X, y) = datasets.load_breast_cancer(return_X_y=True)\n splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)\n elif (data_type == 'boston'):\n (X, y) = datasets.load_boston(return_X_y=True)\n splits = model_selection.KFold(n_splits=2, random_state=8).split(X)\n elif (data_type == 'diabetes'):\n (X, y) = datasets.load_diabetes(return_X_y=True)\n splits = model_selection.KFold(n_splits=2, random_state=8).split(X)\n else:\n raise exceptions.UserError('Unknown dataset type {}'.format(dataset_properties['type']))\n return (X, y, splits)", "docstring": "Returns sample dataset\n\nArgs:\ndataset_properties (dict): Dictionary corresponding to the properties of the dataset\nused to verify the estimator and metric generators.\n\nReturns:\nX (array-like): Features array\n\ny (array-like): Labels array\n\nsplits (iterator): This is an iterator that returns train test splits for\ncross-validation purposes on ``X`` and ``y``.", "source": "codesearchnet"} {"code": "def _load_stop_words(self, language=None):\n \n self._logger.debug('Loading stop words')\n\n loaded = False\n\n if language:\n file_path = 'data/stop-' + language\n loaded = self._parse_stop_words_file(os.path.join(PATH, file_path))\n else:\n for file in os.listdir(os.path.join(PATH, 'data')):\n loaded = self._parse_stop_words_file(os.path.join(PATH, 'data', file)) or loaded\n\n return loaded", "docstring": "Load stop words into __stop_words set.\n\nStop words will be loaded according to the language code\nreceived during instantiation.\n\nArgs:\nlanguage: Language code.\n\nReturns:\nA boolean indicating 
whether a file was loaded.", "source": "juraj-google-style"} {"code": "def ParseCookieRow(self, parser_mediator, query, row, **unused_kwargs):\n \n query_hash = hash(query)\n\n cookie_data = self._GetRowValue(query_hash, row, 'value')\n cookie_name = self._GetRowValue(query_hash, row, 'name')\n\n hostname = self._GetRowValue(query_hash, row, 'host')\n if hostname.startswith('.'):\n hostname = hostname[1:]\n\n is_secure = bool(self._GetRowValue(query_hash, row, 'isSecure'))\n if is_secure:\n url_scheme = 'https'\n else:\n url_scheme = 'http'\n\n path = self._GetRowValue(query_hash, row, 'path')\n url = '{0:s}:\n\n event_data = FirefoxCookieEventData()\n event_data.cookie_name = cookie_name\n event_data.data = cookie_data\n event_data.host = hostname\n event_data.httponly = bool(self._GetRowValue(query_hash, row, 'isHttpOnly'))\n event_data.offset = self._GetRowValue(query_hash, row, 'id')\n event_data.path = path\n event_data.query = query\n event_data.secure = is_secure\n event_data.url = url\n\n timestamp = self._GetRowValue(query_hash, row, 'creationTime')\n if timestamp:\n date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_CREATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n timestamp = self._GetRowValue(query_hash, row, 'lastAccessed')\n if timestamp:\n date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n timestamp = self._GetRowValue(query_hash, row, 'expiry')\n if timestamp:\n \n \n \n \n \n \n\n date_time = dfdatetime_posix_time.PosixTime(\n timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_EXPIRATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n \n \n for cookie_plugin in self._cookie_plugins:\n try:\n cookie_plugin.UpdateChainAndProcess(\n parser_mediator, cookie_name=cookie_name, cookie_data=cookie_data,\n url=url)\n except errors.WrongPlugin:\n pass", "docstring": "Parses a cookie row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"} {"code": "def change_subscription(self, topics):\n if self._user_assignment:\n raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)\n if isinstance(topics, six.string_types):\n topics = [topics]\n if (self.subscription == set(topics)):\n log.warning('subscription unchanged by change_subscription(%s)', topics)\n return\n for t in topics:\n self._ensure_valid_topic_name(t)\n log.info('Updating subscribed topics to: %s', topics)\n self.subscription = set(topics)\n self._group_subscription.update(topics)\n for tp in set(self.assignment.keys()):\n if (tp.topic not in self.subscription):\n del self.assignment[tp]", "docstring": "Change the topic subscription.\n\nArguments:\ntopics (list of str): topics for subscription\n\nRaises:\nIllegalStateErrror: if assign_from_user has been used already\nTypeError: if a topic is None or a non-str\nValueError: if a topic is an empty string or\n- a topic name is '.' or '..' 
or\n- a topic name does not consist of ASCII-characters/'-'/'_'/'.'", "source": "codesearchnet"} {"code": "def _add_name_scope_wrapper(func, api_signature):\n if 'name' not in api_signature.parameters:\n return func\n func_signature = tf_inspect.signature(func)\n func_argspec = tf_inspect.getargspec(func)\n if 'name' in func_signature.parameters or func_argspec.keywords is not None:\n return func\n name_index = list(api_signature.parameters).index('name')\n\n def wrapped_func(*args, **kwargs):\n if name_index < len(args):\n name = args[name_index]\n args = args[:name_index] + args[name_index + 1:]\n else:\n name = kwargs.pop('name', None)\n if name is None:\n return func(*args, **kwargs)\n else:\n with ops.name_scope(name):\n return func(*args, **kwargs)\n wrapped_func = tf_decorator.make_decorator(func, wrapped_func)\n wrapped_func.__signature__ = func_signature.replace(parameters=list(func_signature.parameters.values()) + [api_signature.parameters['name']])\n del wrapped_func._tf_decorator\n return wrapped_func", "docstring": "Wraps `func` to expect a \"name\" arg, and use it to call `ops.name_scope`.\n\nIf `func` already expects a \"name\" arg, or if `api_signature` does not\nexpect a \"name\" arg, then returns `func` as-is.\n\nArgs:\nfunc: The function to wrap. Signature must match `api_signature` (except\nthe \"name\" parameter may be missing.\napi_signature: The signature of the original API (used to find the index for\nthe \"name\" parameter).\n\nReturns:\nThe wrapped function (or the original function if no wrapping is needed).", "source": "github-repos"} {"code": "def computeAccuracy(model, size, top):\n \n accuracy = []\n\n \n filename = os.path.join(os.path.dirname(__file__), \"msnbc990928.zip\")\n with zipfile.ZipFile(filename) as archive:\n with archive.open(\"msnbc990928.seq\") as datafile:\n \n for _ in xrange(7):\n next(datafile)\n\n \n for _ in xrange(LEARNING_RECORDS):\n next(datafile)\n\n \n \n for _ in xrange(size):\n pages = readUserSession(datafile)\n model.resetSequenceStates()\n for i in xrange(len(pages) - 1):\n result = model.run({\"page\": pages[i]})\n inferences = result.inferences[\"multiStepPredictions\"][1]\n\n \n predicted = sorted(inferences.items(), key=itemgetter(1), reverse=True)[:top]\n\n \n accuracy.append(1 if pages[i + 1] in zip(*predicted)[0] else 0)\n\n return np.mean(accuracy)", "docstring": "Compute prediction accuracy by checking if the next page in the sequence is\nwithin the top N predictions calculated by the model\nArgs:\nmodel: HTM model\nsize: Sample size\ntop: top N predictions to use\n\nReturns: Probability the next page in the sequence is within the top N\npredicted pages", "source": "juraj-google-style"} {"code": "def set_day_time(self, hour):\n \n self._should_write_to_command_buffer = True\n command_to_send = DayTimeCommand(hour % 24)\n self._commands.add_command(command_to_send)", "docstring": "Queue up a change day time command. It will be applied when `tick` or `step` is called next.\nBy the next tick, the lighting and the skysphere will be updated with the new hour. 
If there is no skysphere\nor directional light in the world, the command will not function properly but will not cause a crash.\n\nArgs:\nhour (int): The hour in military time, between 0 and 23 inclusive.", "source": "juraj-google-style"} {"code": "def create_token(self, *, holder_name, card_number, credit_card_cvv, expiration_date, token_type='credit_card', identity_document=None, billing_address=None, additional_details=None):\n headers = self.client._get_public_headers()\n payload = {'token_type': token_type, 'credit_card_cvv': credit_card_cvv, 'card_number': card_number, 'expiration_date': expiration_date, 'holder_name': holder_name, 'identity_document': identity_document, 'billing_address': billing_address, 'additional_details': additional_details}\n endpoint = '/tokens'\n return self.client._post((self.client.URL_BASE + endpoint), json=payload, headers=headers)", "docstring": "When creating a Token, remember to use the public-key header instead of the private-key header,\nand do not include the app-id header.\n\nArgs:\nholder_name: Name of the credit card holder.\ncard_number: Credit card number.\ncredit_card_cvv: The CVV number on the card (3 or 4 digits) to be encrypted.\nexpiration_date: Credit card expiration date. Possible formats: mm-yyyy, mm-yy, mm.yyyy,\nmm.yy, mm/yy, mm/yyyy, mm yyyy, or mm yy.\ntoken_type: The type of token\nbilling_address: Address.\nidentity_document: National identity document of the card holder.\nadditional_details: Optional additional data stored with your token in key/value pairs.\n\nReturns:", "source": "codesearchnet"} {"code": "def gpio_get(self, pins=None):\n \n if pins is None:\n pins = range(4)\n\n size = len(pins)\n indices = (ctypes.c_uint8 * size)(*pins)\n statuses = (ctypes.c_uint8 * size)()\n result = self._dll.JLINK_EMU_GPIO_GetState(ctypes.byref(indices),\n ctypes.byref(statuses),\n size)\n if result < 0:\n raise errors.JLinkException(result)\n\n return list(statuses)", "docstring": "Returns a list of states for the given pins.\n\nDefaults to the first four pins if an argument is not given.\n\nArgs:\nself (JLink): the ``JLink`` instance\npins (list): indices of the GPIO pins whose states are requested\n\nReturns:\nA list of states.\n\nRaises:\nJLinkException: on error.", "source": "juraj-google-style"} {"code": "def json_set_fields(recipe, variables):\n if isinstance(recipe, dict):\n for key, value in list(recipe.items()):\n if isinstance(value, dict) and 'field' in value:\n variable_value = variables.get(value['field']['name'], value['field'].get('default'))\n field_value = get_field_value(value, variable_value)\n if field_value is None and value.get('default') is None:\n del recipe[key]\n else:\n recipe[key] = field_value\n else:\n json_set_fields(value, variables)\n elif isinstance(recipe, list) or isinstance(recipe, tuple):\n for index, value in enumerate(recipe):\n if isinstance(value, dict) and 'field' in value:\n variable_value = variables.get(value['field']['name'], value['field'].get('default'))\n recipe[index] = get_field_value(value, variable_value)\n else:\n json_set_fields(value, variables)\n return recipe", "docstring": "Recursively replaces fields in script JSON with values provided.\n\nField has format: { \"field\":{ \"name\":\"???\", \"kind\":\"???\", \"default\":???,\n\"description\":\"???\" }}\n\nIf field value is empty and field default is null, the value is removed\nfrom JSON as a parameter,\nallowing the python task to pick a default value.
Allows optional\nparameters to exist.\n\nArgs:\nrecipe: (dict) A dictionary representation of the JSON script.\nvariables: (dict) A lookup table of all values to be replaced, key is name\nof field.\n\nReturns:\nNothing. Struct is modified in place.", "source": "github-repos"} {"code": "def has_no_checked_field(self, locator, **kwargs):\n kwargs['checked'] = True\n return self.has_no_selector('field', locator, **kwargs)", "docstring": "Checks if the page or current node has no radio button or checkbox with the given label,\nvalue, or id that is currently checked.\n\nArgs:\nlocator (str): The label, name, or id of a checked field.\n**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.\n\nReturns:\nbool: Whether it doesn't exist.", "source": "codesearchnet"} {"code": "def extrapolation_step():\n\n def step_fn(time, next_time, coord_grid, value_grid, boundary_conditions, second_order_coeff_fn, first_order_coeff_fn, zeroth_order_coeff_fn, inner_second_order_coeff_fn, inner_first_order_coeff_fn, num_steps_performed, dtype=None, name=None):\n \n del num_steps_performed\n name = name or 'extrapolation_step'\n return parabolic_equation_step(time, next_time, coord_grid, value_grid, boundary_conditions, second_order_coeff_fn, first_order_coeff_fn, zeroth_order_coeff_fn, inner_second_order_coeff_fn, inner_first_order_coeff_fn, time_marching_scheme=extrapolation_scheme, dtype=dtype, name=name)\n return step_fn", "docstring": "Creates a stepper function with Extrapolation time marching scheme.\n\nExtrapolation scheme combines two half-steps and the full time step to obtain\ndesirable properties. See more details below in `extrapolation_scheme`.\n\nIt is slower than Crank-Nicolson scheme, but deals better with value grids\nthat have discontinuities. Consider also `oscillation_damped_crank_nicolson`,\nan efficient combination of Crank-Nicolson and Extrapolation schemes.\n\nReturns:\nCallable to be used in finite-difference PDE solvers (see fd_solvers.py).", "source": "github-repos"} {"code": "def to_string(self, other_separation_char=None):\n \n separation_char = self._separation_char\n if other_separation_char is not None:\n separation_char = other_separation_char\n return separation_char.join(self._locations_list)", "docstring": "String representation of :class:`LocationDescriptor` object.\n\nArgs:\nother_separation_char: If needed, another separator character can be used.\n\nReturns:", "source": "juraj-google-style"} {"code": "def update_user(self, user_id,\n roles=None, netmask=None,\n secret=None, pubkey=None):\n \n arguments = {'roles': roles,\n 'netmask': netmask,\n 'secret': secret,\n 'pubkey': pubkey}\n return self.do_req('PUT',\n self.merchant_api_base_url + '/user/' +\n user_id + '/', arguments)", "docstring": "Update user.
Returns the raw response object.\n\nArguments:\nuser_id:\nUser id of user to update\nroles:\nRole\nnetmask:\nLimit user connections by netmask, for example 192.168.1.0/24\nsecret:\nSecret used when authenticating with mCASH\npubkey:\nRSA key used for authenticating by signing", "source": "juraj-google-style"} {"code": "class _LazyAutoMapping(OrderedDict[type[PretrainedConfig], _LazyAutoMappingValue]):\n\n def __init__(self, config_mapping, model_mapping) -> None:\n self._config_mapping = config_mapping\n self._reverse_config_mapping = {v: k for k, v in config_mapping.items()}\n self._model_mapping = model_mapping\n self._model_mapping._model_mapping = self\n self._extra_content = {}\n self._modules = {}\n\n def __len__(self) -> int:\n common_keys = set(self._config_mapping.keys()).intersection(self._model_mapping.keys())\n return len(common_keys) + len(self._extra_content)\n\n def __getitem__(self, key: type[PretrainedConfig]) -> _LazyAutoMappingValue:\n if key in self._extra_content:\n return self._extra_content[key]\n model_type = self._reverse_config_mapping[key.__name__]\n if model_type in self._model_mapping:\n model_name = self._model_mapping[model_type]\n return self._load_attr_from_module(model_type, model_name)\n model_types = [k for k, v in self._config_mapping.items() if v == key.__name__]\n for mtype in model_types:\n if mtype in self._model_mapping:\n model_name = self._model_mapping[mtype]\n return self._load_attr_from_module(mtype, model_name)\n raise KeyError(key)\n\n def _load_attr_from_module(self, model_type, attr):\n module_name = model_type_to_module_name(model_type)\n if module_name not in self._modules:\n self._modules[module_name] = importlib.import_module(f'.{module_name}', 'transformers.models')\n return getattribute_from_module(self._modules[module_name], attr)\n\n def keys(self) -> list[type[PretrainedConfig]]:\n mapping_keys = [self._load_attr_from_module(key, name) for key, name in self._config_mapping.items() if key in self._model_mapping.keys()]\n return mapping_keys + list(self._extra_content.keys())\n\n def get(self, key: type[PretrainedConfig], default: _T) -> Union[_LazyAutoMappingValue, _T]:\n try:\n return self.__getitem__(key)\n except KeyError:\n return default\n\n def __bool__(self) -> bool:\n return bool(self.keys())\n\n def values(self) -> list[_LazyAutoMappingValue]:\n mapping_values = [self._load_attr_from_module(key, name) for key, name in self._model_mapping.items() if key in self._config_mapping.keys()]\n return mapping_values + list(self._extra_content.values())\n\n def items(self) -> list[tuple[type[PretrainedConfig], _LazyAutoMappingValue]]:\n mapping_items = [(self._load_attr_from_module(key, self._config_mapping[key]), self._load_attr_from_module(key, self._model_mapping[key])) for key in self._model_mapping.keys() if key in self._config_mapping.keys()]\n return mapping_items + list(self._extra_content.items())\n\n def __iter__(self) -> Iterator[type[PretrainedConfig]]:\n return iter(self.keys())\n\n def __contains__(self, item: type) -> bool:\n if item in self._extra_content:\n return True\n if not hasattr(item, '__name__') or item.__name__ not in self._reverse_config_mapping:\n return False\n model_type = self._reverse_config_mapping[item.__name__]\n return model_type in self._model_mapping\n\n def register(self, key: type[PretrainedConfig], value: _LazyAutoMappingValue, exist_ok=False) -> None:\n \n if hasattr(key, '__name__') and key.__name__ in self._reverse_config_mapping:\n model_type = self._reverse_config_mapping[key.__name__]\n 
if model_type in self._model_mapping.keys() and (not exist_ok):\n raise ValueError(f\"'{key}' is already used by a Transformers model.\")\n self._extra_content[key] = value", "docstring": "\" A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed.\n\nArgs:\n- config_mapping: The map model type to config class\n- model_mapping: The map model type to model (or tokenizer) class", "source": "github-repos"} {"code": "def enable_plugin(self, name, timeout=0):\n \n url = self._url('/plugins/{0}/enable', name)\n params = {'timeout': timeout}\n res = self._post(url, params=params)\n self._raise_for_status(res)\n return True", "docstring": "Enable an installed plugin.\n\nArgs:\nname (string): The name of the plugin. The ``:latest`` tag is\noptional, and is the default if omitted.\ntimeout (int): Operation timeout (in seconds). Default: 0\n\nReturns:\n``True`` if successful", "source": "juraj-google-style"} {"code": "def read_from_hdx(identifier, configuration=None):\n showcase = Showcase(configuration=configuration)\n result = showcase._load_from_hdx('showcase', identifier)\n if result:\n return showcase\n return None", "docstring": "Reads the showcase given by identifier from HDX and returns Showcase object\n\nArgs:\nidentifier (str): Identifier of showcase\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nOptional[Showcase]: Showcase object if successful read, None if not", "source": "codesearchnet"} {"code": "def __init__(self, name, distribution_fn, required_gpus=None, required_physical_gpus=0, required_tpu=False, use_cloud_tpu=False, has_chief=False, num_workers=1, num_ps=0, share_gpu=True, pool_runner_fn=None, no_xla=False):\n object.__init__(self)\n self._name = name\n self._distribution_fn = distribution_fn\n self.required_gpus = required_gpus\n self.required_physical_gpus = required_physical_gpus\n self.required_tpu = required_tpu\n self.use_cloud_tpu = use_cloud_tpu\n self.has_chief = has_chief\n self.num_workers = num_workers\n self.num_ps = num_ps\n self.share_gpu = share_gpu\n self._pool_runner_fn = pool_runner_fn\n self.no_xla = no_xla", "docstring": "Initialize NamedDistribution.\n\nArgs:\nname: Name that will be a part of the name of the test case.\ndistribution_fn: A callable that creates a `tf.distribute.Strategy`.\nrequired_gpus: The number of GPUs that the strategy requires. Only one of\n`required_gpus` and `required_physical_gpus` should be set.\nrequired_physical_gpus: Number of physical GPUs required. 
Only one of\n`required_gpus` and `required_physical_gpus` should be set.\nrequired_tpu: Whether the strategy requires TPU.\nuse_cloud_tpu: Whether the strategy requires cloud TPU.\nhas_chief: Whether the strategy requires a chief worker.\nnum_workers: The number of workers that the strategy requires.\nnum_ps: The number of parameter servers.\nshare_gpu: Whether to share GPUs among workers.\npool_runner_fn: An optional callable that returns a MultiProcessPoolRunner\nto run the test.\nno_xla: Whether to skip in XLA tests.", "source": "github-repos"} {"code": "def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n super(ExtensionInformation, self).read(istream, kmip_version=kmip_version)\n tstream = BytearrayStream(istream.read(self.length))\n self.extension_name.read(tstream, kmip_version=kmip_version)\n if self.is_tag_next(Tags.EXTENSION_TAG, tstream):\n self.extension_tag = ExtensionTag()\n self.extension_tag.read(tstream, kmip_version=kmip_version)\n if self.is_tag_next(Tags.EXTENSION_TYPE, tstream):\n self.extension_type = ExtensionType()\n self.extension_type.read(tstream, kmip_version=kmip_version)\n self.is_oversized(tstream)\n self.validate()", "docstring": "Read the data encoding the ExtensionInformation object and decode it\ninto its constituent parts.\n\nArgs:\nistream (Stream): A data stream containing encoded object data,\nsupporting a read method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"} {"code": "def _ReformatMessageString(self, message_string):\n\n def _PlaceHolderSpecifierReplacer(match_object):\n 'Replaces message string place holders into Python format() style.'\n expanded_groups = []\n for group in match_object.groups():\n try:\n place_holder_number = (int(group, 10) - 1)\n expanded_group = '{{{0:d}:s}}'.format(place_holder_number)\n except ValueError:\n expanded_group = group\n expanded_groups.append(expanded_group)\n return ''.join(expanded_groups)\n if (not message_string):\n return None\n message_string = self._WHITE_SPACE_SPECIFIER_RE.sub('', message_string)\n message_string = self._TEXT_SPECIFIER_RE.sub('\\\\\\\\\\\\1', message_string)\n message_string = self._CURLY_BRACKETS.sub('\\\\1\\\\1', message_string)\n return self._PLACE_HOLDER_SPECIFIER_RE.sub(_PlaceHolderSpecifierReplacer, message_string)", "docstring": "Reformats the message string.\n\nArgs:\nmessage_string (str): message string.\n\nReturns:\nstr: message string in Python format() (PEP 3101) style.", "source": "codesearchnet"} {"code": "def evaluate_ising(linear, quad, state):\n if (_numpy and isinstance(state, np.ndarray)):\n return evaluate_ising(linear, quad, state.tolist())\n energy = 0.0\n for (index, value) in uniform_iterator(linear):\n energy += (state[index] * value)\n for ((index_a, index_b), value) in six.iteritems(quad):\n energy += ((value * state[index_a]) * state[index_b])\n return energy", "docstring": "Calculate the energy of a state given the Hamiltonian.\n\nArgs:\nlinear: Linear Hamiltonian terms.\nquad: Quadratic Hamiltonian terms.\nstate: Vector of spins describing the system state.\n\nReturns:\nEnergy of the state evaluated by the given energy function.", "source": "codesearchnet"} {"code": "def setColumn(self, header, values):\n if any((isinstance(value, basestring) for value in values)):\n values = list(map(str, values))\n self._impl.setColumnStr(header, values, len(values))\n elif all((isinstance(value, 
Real) for value in values)):\n values = list(map(float, values))\n self._impl.setColumnDbl(header, values, len(values))\n else:\n print(values)\n raise NotImplementedError", "docstring": "Set the values of a column.\n\nArgs:\nheader: The header of the column to be set.\n\nvalues: The values to set.", "source": "codesearchnet"} {"code": "def layer(self, queryset, stylename=None):\n \n cls = RasterLayer if hasattr(queryset, 'image') else VectorLayer\n layer = cls(queryset, style=stylename)\n try:\n style = self.map.find_style(layer.stylename)\n except KeyError:\n self.map.append_style(layer.stylename, layer.style())\n layer.styles.append(layer.stylename)\n self.map.layers.append(layer._layer)\n return layer", "docstring": "Returns a map Layer.\n\nArguments:\nqueryset -- QuerySet for Layer\nKeyword args:\nstylename -- str name of style to apply", "source": "juraj-google-style"} {"code": "def set_from_tree(self, address_value_dict):\n for (address, value) in address_value_dict.items():\n if (address in self._state):\n self._state[address].set_result(result=value, from_tree=True)", "docstring": "Set the result for each future at the given addresses with the value\nstored in the merkle database.\n\nArgs:\naddress_value_dict (dict of str: bytes): The unique\nfull addresses that the bytes values should be set with.", "source": "codesearchnet"} {"code": "def groups_invite(self, *, channel: str, user: str, **kwargs) -> SlackResponse:\n \n self._validate_xoxp_token()\n kwargs.update({\"channel\": channel, \"user\": user})\n return self.api_call(\"groups.invite\", json=kwargs)", "docstring": "Invites a user to a private channel.\n\nArgs:\nchannel (str): The group id. e.g. 'G1234567890'\nuser (str): The user id. e.g. 'U1234567890'", "source": "juraj-google-style"} {"code": "def PluginTagToContent(self, plugin_name):\n \n if plugin_name not in self._plugin_to_tag_to_content:\n raise KeyError('Plugin %r could not be found.' % plugin_name)\n return self._plugin_to_tag_to_content[plugin_name]", "docstring": "Returns a dict mapping tags to content specific to that plugin.\n\nArgs:\nplugin_name: The name of the plugin for which to fetch plugin-specific\ncontent.\n\nRaises:\nKeyError: if the plugin name is not found.\n\nReturns:\nA dict mapping tags to plugin-specific content (which are always strings).\nThose strings are often serialized protos.", "source": "juraj-google-style"} {"code": "def Draw(self, stoplist=None, triplist=None, height=520):\n \n output = str()\n if not triplist:\n triplist = []\n if not stoplist:\n stoplist = []\n\n if not self._cache or triplist or stoplist:\n self._gheight = height\n self._tlist=triplist\n self._slist=stoplist\n self._decorators = []\n self._stations = self._BuildStations(stoplist)\n self._cache = \"%s %s %s %s\" % (self._DrawBox(),\n self._DrawHours(),\n self._DrawStations(),\n self._DrawTrips(triplist))\n\n\n\n output = \"%s %s %s %s\" % (self._DrawHeader(),\n self._cache,\n self._DrawDecorators(),\n self._DrawFooter())\n return output", "docstring": "Main interface for drawing the marey graph.\n\nIf called without arguments, the data generated in the previous call\nwill be used. 
New decorators can be added between calls.\n\nArgs:\n# Class Stop is defined in transitfeed.py\nstoplist: [Stop, Stop, ...]\n# Class Trip is defined in transitfeed.py\ntriplist: [Trip, Trip, ...]\n\nReturns:\n# A string that contains a svg/xml web-page with a marey graph.", "source": "codesearchnet"} {"code": "def _padded_split(tensor, pieces):\n shape = tensor.shape\n if 1 != len(shape):\n raise ValueError('input tensor must be 1D')\n tensor_len = shape.dims[0].value\n with ops.colocate_with(tensor):\n if tensor_len % pieces != 0:\n chunk_size = 1 + tensor_len // pieces\n if pieces > tensor_len:\n pad_len = pieces - tensor_len\n extended_whole = array_ops.concat([tensor, array_ops.zeros([pad_len], dtype=tensor.dtype)], 0)\n parts = array_ops.split(extended_whole, pieces)\n return (parts, pad_len)\n elif (pieces - 1) * chunk_size >= tensor_len:\n pad_len = pieces * chunk_size % tensor_len\n extended_whole = array_ops.concat([tensor, array_ops.zeros([pad_len], dtype=tensor.dtype)], 0)\n parts = array_ops.split(extended_whole, pieces)\n return (parts, pad_len)\n else:\n last_chunk_size = tensor_len - (pieces - 1) * chunk_size\n pad_len = chunk_size - last_chunk_size\n piece_lens = [chunk_size for _ in range(pieces - 1)] + [last_chunk_size]\n parts = array_ops.split(tensor, piece_lens)\n parts[-1] = array_ops.concat([parts[-1], array_ops.zeros([pad_len], dtype=tensor.dtype)], 0)\n return (parts, pad_len)\n else:\n return (array_ops.split(tensor, pieces), 0)", "docstring": "Like split for 1D tensors but pads-out case where len % pieces != 0.\n\nArgs:\ntensor: `tf.Tensor` that must be 1D.\npieces: a positive integer specifying the number of pieces into which\ntensor should be split.\n\nReturns:\nlist of `tf.Tensor` of length pieces, which hold the values of\nthe input tensor, in order. The final tensor may\nbe zero-padded on the end to make its size equal to those of all\nof the other tensors.\n\nRaises:\nValueError: The input tensor is not 1D.", "source": "github-repos"} {"code": "def update_nanopubstore_start_dt(url: str, start_dt: str):\n hostname = urllib.parse.urlsplit(url)[1]\n start_dates_doc = state_mgmt.get(start_dates_doc_key)\n if (not start_dates_doc):\n start_dates_doc = {'_key': start_dates_doc_key, 'start_dates': [{'nanopubstore': hostname, 'start_dt': start_dt}]}\n state_mgmt.insert(start_dates_doc)\n else:\n for (idx, start_date) in enumerate(start_dates_doc['start_dates']):\n if (start_date['nanopubstore'] == hostname):\n start_dates_doc['start_dates'][idx]['start_dt'] = start_dt\n break\n else:\n start_dates_doc['start_dates'].append({'nanopubstore': hostname, 'start_dt': start_dt})\n state_mgmt.replace(start_dates_doc)", "docstring": "Add nanopubstore start_dt to belapi.state_mgmt collection\n\nArgs:\nurl: url of nanopubstore\nstart_dt: datetime of last query against nanopubstore for new ID's", "source": "codesearchnet"} {"code": "def clean(self, value, *_):\n \n\n if not value or not isinstance(value, LocalizedValue):\n return None\n\n \n is_all_null = True\n for lang_code, _ in settings.LANGUAGES:\n if value.get(lang_code) is not None:\n is_all_null = False\n break\n\n \n \n if is_all_null and self.null:\n return None\n\n return value", "docstring": "Cleans the specified value into something we\ncan store in the database.\n\nFor example, when all the language fields are\nleft empty, and the field is allowed to be null,\nwe will store None instead of empty keys.\n\nArguments:\nvalue:\nThe value to clean.\n\nReturns:\nThe cleaned value, ready for database storage.", "source": "juraj-google-style"} {"code": "def _findOptionValueAdvAudit(option):\n \n if 'lgpo.adv_audit_data' not in __context__:\n system_root = os.environ.get('SystemRoot', 'C:\\\\Windows')\n f_audit = os.path.join(system_root, 'security', 'audit', 'audit.csv')\n f_audit_gpo = os.path.join(system_root, 'System32',
'GroupPolicy',\n 'Machine', 'Microsoft', 'Windows NT',\n 'Audit', 'audit.csv')\n\n \n if not __salt__['file.file_exists'](f_audit):\n if __salt__['file.file_exists'](f_audit_gpo):\n \n __salt__['file.copy'](f_audit_gpo, f_audit)\n else:\n field_names = _get_audit_defaults('fieldnames')\n \n \n __salt__['file.makedirs'](f_audit)\n __salt__['file.write'](f_audit, ','.join(field_names))\n\n audit_settings = {}\n with salt.utils.files.fopen(f_audit, mode='r') as csv_file:\n reader = csv.DictReader(csv_file)\n\n for row in reader:\n audit_settings.update(\n {row['Subcategory']: row['Setting Value']})\n\n __context__['lgpo.adv_audit_data'] = audit_settings\n\n return __context__['lgpo.adv_audit_data'].get(option, None)", "docstring": "Get the Advanced Auditing policy as configured in\n``C:\\\\Windows\\\\Security\\\\Audit\\\\audit.csv``\n\nArgs:\noption (str): The name of the setting as it appears in audit.csv\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``", "source": "juraj-google-style"} {"code": "def flatten(self, d=None):\n \n if d is None:\n d = {}\n if self.name is not None:\n d[self.name] = self\n for child in self.children:\n child.flatten(d=d)\n return d", "docstring": "Flatten tree structure to a one level dictionary.\n\n\nArgs:\nd (dict, optional): output dictionary to update\n\nReturns:\ndict: Node.name -> Node. The returned dictionary includes the\ncurrent Node and all its children.", "source": "juraj-google-style"} {"code": "def _revoke(self, http):\n \n self._do_revoke(http, self.refresh_token or self.access_token)", "docstring": "Revokes this credential and deletes the stored copy (if it exists).\n\nArgs:\nhttp: an object to be used to make HTTP requests.", "source": "juraj-google-style"} {"code": "def __init__(self, *, content: content_api.ProcessorContentTypes | None=None, content_factory: PreambleFactory | None=None):\n if content is not None and content_factory is not None:\n raise ValueError('Only one of `content` and `content_factory` must be provided.')\n self._content = None if content is None else content_api.ProcessorContent(content)\n self._content_factory = content_factory", "docstring": "Constructs a Preamble processor.\n\nArgs:\ncontent: content to prepend.\ncontent_factory: function for returning a content given no input. This is\nhelpful for when contents are not fully known on __init__, e.g. 
if they\ndepend on the user or time of the request.\n\nRaises:\nValueError if both `content` and `content_factory` are provided.", "source": "github-repos"} {"code": "def get(self, request, customer_uuid):\n context = self._build_context(request, customer_uuid)\n manage_learners_form = ManageLearnersForm(user=request.user, enterprise_customer=context[self.ContextParameters.ENTERPRISE_CUSTOMER])\n context.update({self.ContextParameters.MANAGE_LEARNERS_FORM: manage_learners_form})\n return render(request, self.template, context)", "docstring": "Handle GET request - render linked learners list and \"Link learner\" form.\n\nArguments:\nrequest (django.http.request.HttpRequest): Request instance\ncustomer_uuid (str): Enterprise Customer UUID\n\nReturns:\ndjango.http.response.HttpResponse: HttpResponse", "source": "codesearchnet"} {"code": "def _safe_get(self, revision, key):\n \n if self.has_revision(revision):\n return self.raw_answers[revision].get(key)\n else:\n return None", "docstring": "Get an answer data (vote or rationale) by revision\n\nArgs:\nrevision (int): the revision number for student answer, could be\n0 (original) or 1 (revised)\nkey (str); key for retrieve answer data, could be VOTE_KEY or\nRATIONALE_KEY\n\nReturns:\nthe answer data or None if revision doesn't exists", "source": "juraj-google-style"} {"code": "def register_items(self, items):\n for item in items:\n item.set_parent(self)\n self.items.extend(items)", "docstring": "Bulk ``register_item``.\n\nArgs:\nitems (iterable[Tree]):\nSequence of nodes to be registered as children.", "source": "codesearchnet"} {"code": "def broadcast_recv_v2(shape, dtype, group_size, group_key, instance_key, communication_hint='auto', timeout=0):\n return gen_collective_ops.collective_bcast_recv_v2(T=dtype, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape, communication_hint=communication_hint.lower(), timeout_seconds=timeout)", "docstring": "Receives a broadcasts tensor, across devices.\n\nArgs:\nshape: an int tensor. Shape of the tensor to be received.\ndtype: Type of the tensor to be received.\ngroup_size: an int32 tensor. One plus the number of receiving tensors, i.e.\nthe total number of devices participating. Each tensor must reside on a\ndifferent device.\ngroup_key: an int32 tensor identifying the group of devices.\ninstance_key: an int32 tensor identifying the participating group of Ops.\ncommunication_hint: preferred collective communication. The implementation\nmay fall back to another mechanism. Options include `auto`, `ring`, and\n`nccl`.\ntimeout: If set to a non zero, set a completion timeout to detect staleness.\nIf the timer goes off, a DeadlineExceededError is raised.\nThe timeout value in seconds. 
This feature is experimental.\n\nReturns:\nAn Op implementing the broadcast receive.", "source": "github-repos"} {"code": "def _GetAccountsData(self, metadata_dict):\n \n instance_data, project_data = self._GetInstanceAndProjectAttributes(\n metadata_dict)\n valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]\n block_project = instance_data.get('block-project-ssh-keys', '').lower()\n if block_project != 'true' and not instance_data.get('sshKeys'):\n valid_keys.append(project_data.get('ssh-keys'))\n valid_keys.append(project_data.get('sshKeys'))\n accounts_data = '\\n'.join([key for key in valid_keys if key])\n return self._ParseAccountsData(accounts_data)", "docstring": "Get the user accounts specified in metadata server contents.\n\nArgs:\nmetadata_dict: json, the deserialized contents of the metadata server.\n\nReturns:\ndict, a mapping of the form: {'username': ['sshkey1, 'sshkey2', ...]}.", "source": "juraj-google-style"} {"code": "def _recompute_attrs_type_from_mro(self, all_attrs: dict[str, Attribute], type_params: 'dict[str | int, _base.BaseValue]') -> None:\n for typ_name, typ_obj in type_params.items():\n for attr in all_attrs.values():\n if typ_name == attr.typ.cls.name:\n attr.typ = typ_obj", "docstring": "Traverse the MRO and apply Generic type params to class attributes.\n\nThis IS REQUIRED for dataclass instances that inherits from a Generic.\n\nArgs:\nall_attrs: All __init__ attributes of a class.\ntype_params: List of ParameterizedClass instances that will override\nTypeVar attributes in all_attrs.", "source": "github-repos"} {"code": "def check_exists(self):\n response = self.repo.api.http_request('HEAD', self.uri)\n self.status_code = response.status_code\n if (self.status_code == 200):\n self.exists = True\n elif (self.status_code == 410):\n self.exists = False\n elif (self.status_code == 404):\n self.exists = False\n return self.exists", "docstring": "Check if resource exists, update self.exists, returns\n\nReturns:\nNone: sets self.exists", "source": "codesearchnet"} {"code": "def render_unregistered(error=None):\n \n return template(\n read_index_template(),\n registered=False,\n error=error,\n seeder_data=None,\n url_id=None,\n )", "docstring": "Render template file for the unregistered user.\n\nArgs:\nerror (str, default None): Optional error message.\n\nReturns:\nstr: Template filled with data.", "source": "juraj-google-style"} {"code": "def _hat_integral(self, x):\n x = tf.cast(x, self.power.dtype)\n t = (self.power - 1.0)\n return tf.exp((((- t) * tf.math.log1p(x)) - tf.math.log(t)))", "docstring": "Integral of the `hat` function, used for sampling.\n\nWe choose a `hat` function, h(x) = x^(-power), which is a continuous\n(unnormalized) density touching each positive integer at the (unnormalized)\npmf. 
This function implements `hat` integral: H(x) = int_x^inf h(t) dt;\nwhich is needed for sampling purposes.\n\nArguments:\nx: A Tensor of points x at which to evaluate H(x).\n\nReturns:\nA Tensor containing evaluation H(x) at x.", "source": "codesearchnet"} {"code": "def get(self, key):\n \n data = self._store.get(key)\n if not data:\n return None\n value, expire = data\n if expire and time.time() > expire:\n del self._store[key]\n return None\n return value", "docstring": "Get an item from the cache\nArgs:\nkey: item key\nReturns:\nthe value of the item or None if the item isn't in the cache", "source": "juraj-google-style"} {"code": "def ragged_shape(input: ragged_tensor.Ragged, name: Optional[str]=None, out_type=dtypes.int32) -> dynamic_ragged_shape.DynamicRaggedShape:\n with ops.name_scope(name, 'RaggedShape', [input]):\n return dynamic_ragged_shape.DynamicRaggedShape.from_tensor(input, out_type)", "docstring": "Returns the shape of a RaggedTensor.\n\nArgs:\ninput: A `RaggedTensor`\nname: A name for the operation (optional).\nout_type: dtype used to encode the shape.\n\nReturns:\nA `tf.experimental.DynamicRaggedShape`", "source": "github-repos"} {"code": "def rename_variables(expression: Expression, renaming: Dict[str, str]) -> Expression:\n \n if isinstance(expression, Operation):\n if hasattr(expression, 'variable_name'):\n variable_name = renaming.get(expression.variable_name, expression.variable_name)\n return create_operation_expression(\n expression, [rename_variables(o, renaming) for o in op_iter(expression)], variable_name=variable_name\n )\n operands = [rename_variables(o, renaming) for o in op_iter(expression)]\n return create_operation_expression(expression, operands)\n elif isinstance(expression, Expression):\n expression = expression.__copy__()\n expression.variable_name = renaming.get(expression.variable_name, expression.variable_name)\n return expression", "docstring": "Rename the variables in the expression according to the given dictionary.\n\nArgs:\nexpression:\nThe expression in which the variables are renamed.\nrenaming:\nThe renaming dictionary. 
Maps old variable names to new ones.\nVariable names not occurring in the dictionary are left unchanged.\n\nReturns:\nThe expression with renamed variables.", "source": "juraj-google-style"} {"code": "def config_from_url(u, **kwargs):\n path = u.path.lstrip('/').split('/')\n if ((len(path) > 2) or (not path)):\n raise AssertionError('zmq url format: zmq://<pub|sub|req|rep|push|pull>/<topic>')\n typ = path[0].upper()\n try:\n topic = path[1]\n except IndexError as _:\n topic = ''\n param = dict(urllib.parse.parse_qsl(u.query))\n transport = param.get('transport', 'tcp')\n _id = ('%s-%s-%s-%s' % (typ, topic, transport, u.netloc))\n if (kwargs.get('prefix') is not None):\n _id = ('%s-%s' % (kwargs.get('prefix'), _id))\n return {'id': _id, 'typ_str': typ, 'typ': getattr(zmq, typ), 'topic': topic, 'transport': transport, 'url': ('%s://%s' % (transport, u.netloc))}", "docstring": "Returns dict containing zmq configuration arguments\nparsed from xbahn url\n\nArguments:\n\n- u (urlparse.urlparse result)\n\nReturns:\n\ndict:\n- id (str): connection index key\n- typ_str (str): string representation of zmq socket type\n- typ (int): zmq socket type (PUB, SUB, REQ, REP, PUSH, PULL)\n- topic (str): subscription topic\n- url (str): url to use with zmq's bind function", "source": "codesearchnet"} {"code": "async def on_message(message):\n \n\n \n server = message.server\n author = message.author\n channel = message.channel\n content = message.content\n\n data = datatools.get_data()\n\n if not data[\"discord\"][\"servers\"][server.id][_data.modulename][\"activated\"]:\n return\n\n \n if server is not None and author != channel.server.me:\n \n if channel.server.me in message.mentions:\n\n logger.info(\"Bot was mentioned, summoning Mitsuku\")\n await client.send_typing(channel)\n\n \n if channel.id not in data[\"discord\"][\"servers\"][server.id][_data.modulename][\"channels\"]:\n new_serverdata = data\n new_serverdata[\"discord\"][\"servers\"][server.id][_data.modulename][\"channels\"][channel.id] = \\\n api_mitsuku.get_botcust2()\n datatools.write_data(new_serverdata)\n\n \n botcust2 = data[\"discord\"][\"servers\"][server.id][_data.modulename][\"channels\"][channel.id]\n\n \n content = content.replace(\"<@{}>\".format(str(channel.server.me.id)), ' ')\n content = content.replace(\"<@!{}>\".format(str(channel.server.me.id)), ' ')\n\n \n if botcust2:\n response = api_mitsuku.query(botcust2, content)\n if response:\n await client.send_message(channel, response)\n else:\n await client.send_message(channel, \"```Couldn't get readable response from Mitsuku.```\")\n else:\n await client.send_message(channel, \"```Couldn't initialise with Mitsuku.```\")", "docstring": "The on_message event handler for this module\n\nArgs:\nmessage (discord.Message): Input message", "source": "juraj-google-style"} {"code": "def output_forecasts_csv(self, forecasts, mode, csv_path, run_date_format=\"%Y%m%d-%H%M\"):\n \n merged_forecasts = pd.merge(forecasts[\"condition\"],\n forecasts[\"dist\"],\n on=[\"Step_ID\",\"Track_ID\",\"Ensemble_Member\",\"Forecast_Hour\"])\n all_members = self.data[mode][\"combo\"][\"Ensemble_Member\"]\n members = np.unique(all_members)\n all_run_dates = pd.DatetimeIndex(self.data[mode][\"combo\"][\"Run_Date\"])\n run_dates = pd.DatetimeIndex(np.unique(all_run_dates))\n print(run_dates)\n for member in members:\n for run_date in run_dates:\n mem_run_index = (all_run_dates == run_date) & (all_members == member)\n member_forecast = merged_forecasts.loc[mem_run_index]\n member_forecast.to_csv(join(csv_path, \"hail_forecasts_{0}_{1}_{2}.csv\".format(self.ensemble_name,\n member,\n
run_date.strftime(run_date_format))))\n return", "docstring": "Output hail forecast values to csv files by run date and ensemble member.\n\nArgs:\nforecasts:\nmode:\ncsv_path:\nReturns:", "source": "juraj-google-style"} {"code": "def load_config(self, settings=None):\n \n self._load_defaults()\n if settings:\n self.update(settings)\n else:\n config_paths = _get_config_files()\n for p in config_paths:\n conf = _process_config_file([p])\n self.update(conf)\n self._loaded = True\n self._validate()", "docstring": "Load the configuration either from the config file, or from the given settings.\n\nArgs:\nsettings (dict): If given, the settings are pulled from this dictionary. Otherwise, the\nconfig file is used.", "source": "juraj-google-style"} {"code": "def pbs_for_set_no_merge(document_path, document_data):\n \n extractor = DocumentExtractor(document_data)\n\n if extractor.deleted_fields:\n raise ValueError(\n \"Cannot apply DELETE_FIELD in a set request without \"\n \"specifying 'merge=True' or 'merge=[field_paths]'.\"\n )\n\n \n \n write_pbs = [extractor.get_update_pb(document_path)]\n\n if extractor.has_transforms:\n transform_pb = extractor.get_transform_pb(document_path)\n write_pbs.append(transform_pb)\n\n return write_pbs", "docstring": "Make ``Write`` protobufs for ``set()`` methods.\n\nArgs:\ndocument_path (str): A fully-qualified document path.\ndocument_data (dict): Property names and values to use for\nreplacing a document.\n\nReturns:\nList[google.cloud.firestore_v1beta1.types.Write]: One\nor two ``Write`` protobuf instances for ``set()``.", "source": "juraj-google-style"} {"code": "async def event_wait(event: asyncio.Event, timeout=None):\n if (timeout is None):\n (await event.wait())\n return True\n try:\n (await asyncio.wait_for(event.wait(), timeout))\n except asyncio.TimeoutError:\n return False\n return True", "docstring": "Wait on an asyncio event with an optional timeout\n\nReturns:\ntrue if the event got set, False if timed out", "source": "codesearchnet"} {"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:\n residual = hidden_states\n hidden_states = self.layer_norm1(hidden_states)\n hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)\n hidden_states = residual + hidden_states\n residual = hidden_states\n hidden_states = self.layer_norm2(hidden_states)\n hidden_states = self.mlp(hidden_states)\n hidden_states = residual + hidden_states\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (attn_weights,)\n return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n`(config.encoder_attention_heads,)`.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers.
See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"} {"code": "def _check(cls, name, val, can_be_zero=False, val_type=float):\n valid_types = [val_type]\n if (val_type is float):\n valid_types.append(int)\n if (type(val) not in valid_types):\n raise TypeError(('Expect type %s for parameter %s' % (val_type.__name__, name)))\n if (val < 0):\n raise ValueError(('Value for parameter %s has to be greater than 0' % name))\n if ((not can_be_zero) and (val == 0)):\n raise ValueError(('Value for parameter %s can not be 0' % name))\n return val", "docstring": "Check init arguments.\n\nArgs:\nname: name of the argument. For logging purpose.\nval: value. Value has to be non negative number.\ncan_be_zero: whether value can be zero.\nval_type: Python type of the value.\n\nReturns:\nThe value.\n\nRaises:\nValueError: when invalid value is passed in.\nTypeError: when invalid value type is passed in.", "source": "codesearchnet"} {"code": "def DetermineType(value):\n object_type = type(value)\n if (not hasattr(object_type, '__name__')):\n return None\n type_string = getattr(object_type, '__module__', '')\n if type_string:\n type_string += '.'\n type_string += object_type.__name__\n return type_string", "docstring": "Determines the type of val, returning a \"full path\" string.\n\nFor example:\nDetermineType(5) -> __builtin__.int\nDetermineType(Foo()) -> com.google.bar.Foo\n\nArgs:\nvalue: Any value, the value is irrelevant as only the type metadata\nis checked\n\nReturns:\nType path string. None if type cannot be determined.", "source": "codesearchnet"} {"code": "def mape(y, p):\n \n\n filt = np.abs(y) > EPS\n return np.mean(np.abs(1 - p[filt] / y[filt]))", "docstring": "Mean Absolute Percentage Error (MAPE).\n\nArgs:\ny (numpy.array): target\np (numpy.array): prediction\n\nReturns:\ne (numpy.float64): MAPE", "source": "juraj-google-style"} {"code": "def _evolve(self, state, qargs=None):\n \n \n if qargs is not None:\n return SuperOp(self)._evolve(state, qargs)\n\n \n state = self._format_state(state)\n if state.shape[0] != self._input_dim:\n raise QiskitError(\n \"QuantumChannel input dimension is not equal to state dimension.\"\n )\n if state.ndim == 1 and self._data[1] is None and len(\n self._data[0]) == 1:\n \n \n return np.dot(self._data[0][0], state)\n \n state = self._format_state(state, density_matrix=True)\n kraus_l, kraus_r = self._data\n if kraus_r is None:\n kraus_r = kraus_l\n return np.einsum('AiB,BC,AjC->ij', kraus_l, state,\n np.conjugate(kraus_r))", "docstring": "Evolve a quantum state by the QuantumChannel.\n\nArgs:\nstate (QuantumState): The input statevector or density matrix.\nqargs (list): a list of QuantumState subsystem positions to apply\nthe operator on.\n\nReturns:\nQuantumState: the output quantum state.\n\nRaises:\nQiskitError: if the operator dimension does not match the\nspecified QuantumState subsystem dimensions.", "source": "juraj-google-style"} {"code": "def get_smeared_densities(self, sigma):\n \n from scipy.ndimage.filters import gaussian_filter1d\n smeared_dens = {}\n diff = [self.energies[i + 1] - self.energies[i]\n for i in range(len(self.energies) - 1)]\n avgdiff = sum(diff) / len(diff)\n for spin, dens in self.densities.items():\n smeared_dens[spin] = gaussian_filter1d(dens, sigma / avgdiff)\n return smeared_dens", "docstring": "Returns the Dict representation of the densities, {Spin: densities},\nbut with a Gaussian smearing of std dev sigma applied about the fermi\nlevel.\n\nArgs:\nsigma: Std dev of Gaussian smearing 
function.\n\nReturns:\nDict of Gaussian-smeared densities.", "source": "juraj-google-style"} {"code": "def get_mask(self, layers=None, output='vector', in_global_mask=True):\n if in_global_mask:\n output = 'vector'\n if (layers is None):\n layers = self.layers.keys()\n elif (not isinstance(layers, list)):\n layers = [layers]\n layers = map((lambda x: (x if isinstance(x, string_types) else self.stack[x])), layers)\n layers = [self.layers[l] for l in layers if (l in self.layers)]\n layers.append(self.full)\n layers = np.vstack(layers).T.astype(bool)\n mask = layers.all(axis=1)\n mask = self.get_image(mask, output)\n return (mask[self.global_mask] if in_global_mask else mask)", "docstring": "Set the current mask by taking the conjunction of all specified\nlayers.\n\nArgs:\nlayers: Which layers to include. See documentation for add() for\nformat.\ninclude_global_mask: Whether or not to automatically include the\nglobal mask (i.e., self.volume) in the conjunction.", "source": "codesearchnet"} {"code": "def nice_join(seq, sep=', ', conjuction='or'):\n seq = [str(x) for x in seq]\n if ((len(seq) <= 1) or (conjuction is None)):\n return sep.join(seq)\n else:\n return ('%s %s %s' % (sep.join(seq[:(- 1)]), conjuction, seq[(- 1)]))", "docstring": "Join together sequences of strings into English-friendly phrases using\nthe conjunction ``or`` when appropriate.\n\nArgs:\nseq (seq[str]) : a sequence of strings to nicely join\nsep (str, optional) : a sequence delimiter to use (default: \", \")\nconjunction (str or None, optional) : a conjuction to use for the last\ntwo items, or None to reproduce basic join behaviour (default: \"or\")\n\nReturns:\na joined string\n\nExamples:\n>>> nice_join([\"a\", \"b\", \"c\"])\n'a, b or c'", "source": "codesearchnet"} {"code": "def before_starting_server(self):", "docstring": "Performs the preparation steps before starting the remote server.\n\nFor example, subclass can check or modify the device settings at this\nstage.\n\nNOTE: Any error at this stage will abort the initialization without cleanup.\nSo do not acquire resources in this function, or this function should\nrelease the acquired resources if an error occurs.\n\nRaises:\nerrors.ServerStartPreCheckError: when prechecks for starting the server\nfailed.", "source": "github-repos"} {"code": "def AtMaximumDepth(self, search_depth):\n \n if self._location_segments is not None:\n if search_depth >= self._number_of_location_segments:\n return True\n\n return False", "docstring": "Determines if the find specification is at maximum depth.\n\nArgs:\nsearch_depth (int): number of location path segments to compare.\n\nReturns:\nbool: True if at maximum depth, False if not.", "source": "juraj-google-style"} {"code": "def create(self, path, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO):\n return self._path_open(path, 'wb', mime_type, compression_type)", "docstring": "Returns a write channel for the given file path.\n\nArgs:\npath: string path of the file object to be written to the system\nmime_type: MIME type to specify the type of content in the file object\ncompression_type: Type of compression to be used for this object\n\nReturns: file handle with a close function for the user to use", "source": "github-repos"} {"code": "def broadcast_to(x, shape):\n if any_symbolic_tensors((x,)):\n return BroadcastTo(shape=shape).symbolic_call(x)\n return backend.numpy.broadcast_to(x, shape)", "docstring": "Broadcast a tensor to a new shape.\n\nArgs:\nx: The tensor to broadcast.\nshape: The shape of 
the desired tensor. A single integer `i` is\ninterpreted as `(i,)`.\n\nReturns:\nA tensor with the desired shape.\n\nExamples:\n>>> x = keras.ops.array([1, 2, 3])\n>>> keras.ops.broadcast_to(x, (3, 3))\narray([[1, 2, 3],\n[1, 2, 3],\n[1, 2, 3]])", "source": "github-repos"} {"code": "def add_review(self, reviewer, product, review, date=None):\n \n if not isinstance(reviewer, self._reviewer_cls):\n raise TypeError(\n \"Type of given reviewer isn't acceptable:\", reviewer,\n \", expected:\", self._reviewer_cls)\n elif not isinstance(product, self._product_cls):\n raise TypeError(\n \"Type of given product isn't acceptable:\", product,\n \", expected:\", self._product_cls)\n r = self._review_cls(review, date=date)\n self.graph.add_edge(reviewer, product, review=r)\n return r", "docstring": "Add a new review from a given reviewer to a given product.\n\nArgs:\nreviewer: an instance of Reviewer.\nproduct: an instance of Product.\nreview: a float value.\ndate: date the review issued.\n\nReturns:\nthe added new review object.\n\nRaises:\nTypeError: when given reviewer and product aren't instance of\nspecified reviewer and product class when this graph is constructed.", "source": "juraj-google-style"} {"code": "def remote_file(self, branch='master', filename=''):\n \n LOG.info('Retrieving \"%s\" from \"%s\".', filename, self.git_short)\n\n file_contents = ''\n\n try:\n file_blob = self.project.files.get(file_path=filename, ref=branch)\n except gitlab.exceptions.GitlabGetError:\n file_blob = None\n\n LOG.debug('GitLab file response:\\n%s', file_blob)\n\n if not file_blob:\n msg = 'Project \"{0}\" is missing file \"{1}\" in \"{2}\" branch.'.format(self.git_short, filename, branch)\n LOG.warning(msg)\n raise FileNotFoundError(msg)\n else:\n file_contents = b64decode(file_blob.content).decode()\n\n LOG.debug('Remote file contents:\\n%s', file_contents)\n return file_contents", "docstring": "Read the remote file on Git Server.\n\nArgs:\nbranch (str): Git Branch to find file.\nfilename (str): Name of file to retrieve relative to root of\nrepository.\n\nReturns:\nstr: Contents of remote file.\n\nRaises:\nFileNotFoundError: Requested file missing.", "source": "juraj-google-style"} {"code": "class IncSlidingMeanTracker(IncMeanTracker):\n\n def __init__(self, window_size):\n super().__init__(window_mode=WindowMode.SLIDING, window_size=window_size)", "docstring": "Sliding window mean tracker using incremental calculation.\n\nArgs:\nwindow_size: The size of the sliding window.", "source": "github-repos"} {"code": "def get_added_vocab(self) -> dict[str, int]:\n return self._added_tokens_encoder", "docstring": "Returns the added tokens in the vocabulary as a dictionary of token to index. Results might be different from\nthe fast call because for now we always add the tokens even if they are already in the vocabulary. This is\nsomething we should change.\n\nReturns:\n`Dict[str, int]`: The added tokens.", "source": "github-repos"} {"code": "def to_query(self, fields=None):\n \n \n from . import _query\n if fields is None:\n fields = '*'\n elif isinstance(fields, list):\n fields = ','.join(fields)\n return _query.Query('SELECT %s FROM %s' % (fields, self._repr_sql_()), context=self._context)", "docstring": "Return a Query for this Table.\n\nArgs:\nfields: the fields to return. If None, all fields will be returned. 
This can be a string\nwhich will be injected into the Query after SELECT, or a list of field names.\n\nReturns:\nA Query object that will return the specified fields from the records in the Table.", "source": "juraj-google-style"} {"code": "def _add_material(self, x_bot_left, y_bot_left, x_top_right, y_top_right, n_material, angle=0):\n x_mask = np.logical_and((x_bot_left <= self.x), (self.x <= x_top_right))\n y_mask = np.logical_and((y_bot_left <= self.y), (self.y <= y_top_right))\n xy_mask = np.kron(y_mask, x_mask).reshape((y_mask.size, x_mask.size))\n self.n[xy_mask] = n_material\n if angle:\n self._add_triangular_sides(xy_mask, angle, y_top_right, y_bot_left, x_top_right, x_bot_left, n_material)\n return self.n", "docstring": "A low-level function that allows writing a rectangle refractive\nindex profile to a `Structure`.\n\nArgs:\nx_bot_left (float): The bottom-left x-coordinate of the\nrectangle.\ny_bot_left (float): The bottom-left y-coordinate of the\nrectangle.\nx_top_right (float): The top-right x-coordinate of the\nrectangle.\ny_top_right (float): The top-right y-coordinate of the\nrectangle.\nn_material (float): The refractive index of the points\nencompassed by the defined rectangle.\nangle (float): The angle in degrees of the sidewalls\nof the defined rectangle. Default is 0. This\nis useful for creating a ridge with angled\nsidewalls.", "source": "codesearchnet"} {"code": "def _ParseFileEntry(self, knowledge_base, file_entry):\n file_object = file_entry.GetFileObject()\n try:\n self._ParseFileData(knowledge_base, file_object)\n finally:\n file_object.close()", "docstring": "Parses a file entry for a preprocessing attribute.\n\nArgs:\nknowledge_base (KnowledgeBase): to fill with preprocessing information.\nfile_entry (dfvfs.FileEntry): file entry that contains the artifact\nvalue data.\n\nRaises:\nPreProcessFail: if the preprocessing fails.", "source": "codesearchnet"} {"code": "def plot(self, figsize=None, rotation=45):\n \n\n fig, ax = plt.subplots(figsize=figsize)\n\n plt.imshow(self._cm, interpolation='nearest', cmap=plt.cm.Blues, aspect='auto')\n plt.title('Confusion matrix')\n plt.colorbar()\n tick_marks = np.arange(len(self._labels))\n plt.xticks(tick_marks, self._labels, rotation=rotation)\n plt.yticks(tick_marks, self._labels)\n if isinstance(self._cm, list):\n \n thresh = max(max(self._cm)) / 2.\n for i, j in itertools.product(range(len(self._labels)), range(len(self._labels))):\n plt.text(j, i, self._cm[i][j], horizontalalignment=\"center\",\n color=\"white\" if self._cm[i][j] > thresh else \"black\")\n else:\n \n thresh = self._cm.max() / 2.\n for i, j in itertools.product(range(len(self._labels)), range(len(self._labels))):\n plt.text(j, i, self._cm[i, j], horizontalalignment=\"center\",\n color=\"white\" if self._cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "docstring": "Plot the confusion matrix.\n\nArgs:\nfigsize: tuple (x, y) of ints. 
Sets the size of the figure\nrotation: the rotation angle of the labels on the x-axis.", "source": "juraj-google-style"} {"code": "def save(self, path, compressed=True, exist_ok=False):\n \n path = os.path.expandvars(os.path.expanduser(path))\n if os.path.isfile(path) and not exist_ok:\n raise OSError(17, os.strerror(17), path)\n\n if os.path.isdir(path):\n path = os.path.join(path, \"out.gdg\")\n\n if compressed:\n bytes_written = cgaddag.gdg_save_compressed(self.gdg, path.encode(\"ascii\"))\n else:\n bytes_written = cgaddag.gdg_save(self.gdg, path.encode(\"ascii\"))\n\n if bytes_written == -1:\n errno = ctypes.c_int.in_dll(ctypes.pythonapi, \"errno\").value\n raise OSError(errno, os.strerror(errno), path)\n\n return bytes_written", "docstring": "Save the GADDAG to file.\n\nArgs:\npath: path to save the GADDAG to.\ncompressed: compress the saved GADDAG using gzip.\nexist_ok: overwrite existing file at `path`.", "source": "juraj-google-style"} {"code": "def prefix2ns(self, prefix: YangIdentifier, mid: ModuleId) -> YangIdentifier:\n try:\n mdata = self.modules[mid]\n except KeyError:\n raise ModuleNotRegistered(*mid) from None\n try:\n return mdata.prefix_map[prefix][0]\n except KeyError:\n raise UnknownPrefix(prefix, mid) from None", "docstring": "Return the namespace corresponding to a prefix.\n\nArgs:\nprefix: Prefix associated with a module and its namespace.\nmid: Identifier of the module in which the prefix is declared.\n\nRaises:\nModuleNotRegistered: If `mid` is not registered in the data model.\nUnknownPrefix: If `prefix` is not declared.", "source": "codesearchnet"} {"code": "def read32(self, offset):\n \n if not isinstance(offset, (int, long)):\n raise TypeError(\"Invalid offset type, should be integer.\")\n\n offset = self._adjust_offset(offset)\n self._validate_offset(offset, 4)\n return struct.unpack(\"=L\", self.mapping[offset:offset + 4])[0]", "docstring": "Read 32-bits from the specified `offset` in bytes, relative to the\nbase physical address of the MMIO region.\n\nArgs:\noffset (int, long): offset from base physical address, in bytes.\n\nReturns:\nint: 32-bit value read.\n\nRaises:\nTypeError: if `offset` type is invalid.\nValueError: if `offset` is out of bounds.", "source": "juraj-google-style"} {"code": "def upload_to_metta(train_features_path, train_labels_path, test_features_path, test_labels_path, train_quarter, test_quarter, num_dimensions):\n \n train_config = metta_config(train_quarter, num_dimensions)\n test_config = metta_config(test_quarter, num_dimensions)\n\n X_train = pd.read_csv(train_features_path, sep=',')\n X_train.columns = ['doc2vec_'+str(i) for i in range(X_train.shape[1])]\n \n Y_train = pd.read_csv(train_labels_path)\n Y_train.columns = ['onet_soc_code']\n train = pd.concat([X_train, Y_train], axis=1)\n\n X_test = pd.read_csv(test_features_path, sep=',')\n X_test.columns = ['doc2vec_'+str(i) for i in range(X_test.shape[1])]\n \n Y_test = pd.read_csv(test_labels_path)\n Y_test.columns = ['onet_soc_code']\n test = pd.concat([X_test, Y_test], axis=1)\n \n \n \n \n metta.archive_train_test(\n train_config,\n X_train,\n test_config,\n X_test,\n directory='wdi'\n )", "docstring": "Store train and test matrices using metta\n\nArgs:\ntrain_features_path (str) Path to matrix with train features\ntrain_labels_path (str) Path to matrix with train labels\ntest_features_path (str) Path to matrix with test features\ntest_labels_path (str) Path to matrix with test labels\ntrain_quarter (str) Quarter of train matrix\ntest_quarter (str) Quarter of test 
matrix\nnum_dimensions (int) Number of features", "source": "juraj-google-style"} {"code": "def CheckSchema(self, database):\n \n schema_match = False\n if self.SCHEMAS:\n for schema in self.SCHEMAS:\n if database and database.schema == schema:\n schema_match = True\n\n return schema_match", "docstring": "Checks the schema of a database with that defined in the plugin.\n\nArgs:\ndatabase (SQLiteDatabase): database.\n\nReturns:\nbool: True if the schema of the database matches that defined by\nthe plugin, or False if the schemas do not match or no schema\nis defined by the plugin.", "source": "juraj-google-style"} {"code": "def _ConvertValueBinaryDataToFloatingPointValue(self, value):\n \n if not value:\n return None\n\n value_length = len(value)\n if value_length not in (4, 8):\n raise errors.ParseError('Unsupported value data size: {0:d}'.format(\n value_length))\n\n if value_length == 4:\n floating_point_map = self._GetDataTypeMap('float32le')\n elif value_length == 8:\n floating_point_map = self._GetDataTypeMap('float64le')\n\n try:\n return self._ReadStructureFromByteStream(value, 0, floating_point_map)\n except (ValueError, errors.ParseError) as exception:\n raise errors.ParseError(\n 'Unable to parse floating-point value with error: {0!s}'.format(\n exception))", "docstring": "Converts a binary data value into a floating-point value.\n\nArgs:\nvalue (bytes): binary data value containing an ASCII string or None.\n\nReturns:\nfloat: floating-point representation of binary data value or None if\nvalue is not set.\n\nRaises:\nParseError: if the floating-point value data size is not supported or\nif the value cannot be parsed.", "source": "juraj-google-style"} {"code": "def do_put(endpoint, body, access_token):\n headers = {'content-type': 'application/json', 'Authorization': ('Bearer ' + access_token)}\n headers['User-Agent'] = get_user_agent()\n return requests.put(endpoint, data=body, headers=headers)", "docstring": "Do an HTTP PUT request and return JSON.\n\nArgs:\nendpoint (str): Azure Resource Manager management endpoint.\nbody (str): JSON body of information to put.\naccess_token (str): A valid Azure authentication token.\n\nReturns:\nHTTP response. JSON body.", "source": "codesearchnet"} {"code": "def from_json_value(v):\n if isinstance(v, extra_types.JsonValue):\n if v.string_value is not None:\n return v.string_value\n elif v.boolean_value is not None:\n return v.boolean_value\n elif v.integer_value is not None:\n return v.integer_value\n elif v.double_value is not None:\n return v.double_value\n elif v.array_value is not None:\n return from_json_value(v.array_value)\n elif v.object_value is not None:\n return from_json_value(v.object_value)\n elif v.is_null:\n return None\n elif isinstance(v, extra_types.JsonArray):\n return [from_json_value(e) for e in v.entries]\n elif isinstance(v, extra_types.JsonObject):\n return {p.key: from_json_value(p.value) for p in v.properties}\n raise TypeError('Cannot convert %s from a JSON value.' 
% repr(v))", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nConverts ``extra_types.JsonValue`` objects into Python objects.\n\nArgs:\nv: ``JsonValue`` object to be converted.\n\nReturns:\nA Python object structured as values, lists, and dictionaries corresponding\nto ``JsonValue``, ``JsonArray`` and ``JsonObject`` types.\n\nRaises:\nTypeError: if the ``JsonValue`` object contains a type that is\nnot supported.\n\nThe types supported are ``str``, ``bool``, ``list``, ``dict``, and ``None``.\nThe Dataflow API returns JsonValue(s) in many places and it is quite\nconvenient to be able to convert these hierarchical objects to much simpler\nPython objects.", "source": "github-repos"} {"code": "def print_level(log_function, fmt, level, *args):\n if _SILENT:\n return\n msg = (fmt % args)\n spaces = (' ' * level)\n log_function(('%s%s' % (spaces, msg)))", "docstring": "Print a formatted message to stdout prepended by spaces. Useful for\nprinting hierarchical information, like bullet lists.\n\nNote:\nIf the application is running in \"Silent Mode\"\n(i.e., ``_SILENT == True``), this function will return\nimmediately and no message will be printed.\n\nArgs:\nlog_function: The function that will be called to output the formatted\nmessage.\nfmt (str): A Python formatted string.\nlevel (int): Used to determing how many spaces to print. The formula\nis ``' ' * level ``.\n*args: Variable length list of arguments. Values are plugged into the\nformat string.\n\nExamples:\n>>> print_level(\"%s %d\", 0, \"TEST\", 0)\nTEST 0\n>>> print_level(\"%s %d\", 1, \"TEST\", 1)\nTEST 1\n>>> print_level(\"%s %d\", 2, \"TEST\", 2)\nTEST 2", "source": "codesearchnet"} {"code": "def clear_agent(self, short_name, client_id):\n \n\n if short_name not in self.services:\n raise ArgumentError(\"Unknown service name\", short_name=short_name)\n\n if short_name not in self.agents:\n raise ArgumentError(\"No agent registered for service\", short_name=short_name)\n\n if client_id != self.agents[short_name]:\n raise ArgumentError(\"Client was not registered for service\", short_name=short_name,\n client_id=client_id, current_client=self.agents[short_name])\n\n del self.agents[short_name]", "docstring": "Remove a client id from being the command handler for a service.\n\nArgs:\nshort_name (str): The name of the service to set an agent\nfor.\nclient_id (str): A globally unique id for the client that\nshould no longer receive commands for this service.", "source": "juraj-google-style"} {"code": "def get_staged_signatures(vcs):\n \n staged_path = _get_staged_history_path(vcs)\n known_signatures = []\n if os.path.exists(staged_path):\n with open(staged_path, 'r') as f:\n known_signatures = f.read().split()\n return known_signatures", "docstring": "Get the list of staged signatures\n\nArgs:\nvcs (easyci.vcs.base.Vcs)\n\nReturns:\nlist(basestring) - list of signatures", "source": "juraj-google-style"} {"code": "def combine_assignments(self, assignments):\n group_by_fn = collections.defaultdict(list)\n for a in assignments:\n if (not isinstance(a, Assign)):\n raise ValueError('ops should be instances of mtf.Assign')\n group_by_fn[a.assign_fn].append(a)\n assignments_set = set(assignments)\n self._operations = [op for op in self._operations if (op not in assignments_set)]\n ret = []\n for (fn, ops) in six.iteritems(group_by_fn):\n variables = []\n values = []\n for a in ops:\n variables.extend(a.variables)\n values.extend(a.inputs)\n ret.append(Assign(variables, values, fn))\n return ret", "docstring": "Rewrite 
the current graph to combine \"Assign\" operations.\n\nCombine similar Assign operations into grouped Assign operations.\nThis is useful when using the rewrite_stack_variables() optimization,\nsince variables can only be stacked if they are present in the same set\nof Assign operations.\n\nThis function takes a list of Assign operations and returns a possibly\nshorter list of Assign operations. The input Assignment operations\nare removed from the graph and become invalid.\n\nArgs:\nassignments: a list of Assign objects\nReturns:\na list of Assign objects", "source": "codesearchnet"} {"code": "def register_layouts(layouts, app, url=\"/api/props/\", brand=\"Pyxley\"):\n \n def props(name):\n if name not in layouts:\n \n name = list(layouts.keys())[0]\n return jsonify({\"layouts\": layouts[name][\"layout\"]})\n\n def apps():\n paths = []\n for i, k in enumerate(layouts.keys()):\n if i == 0:\n paths.append({\n \"path\": \"/\",\n \"label\": layouts[k].get(\"title\", k)\n })\n\n paths.append({\n \"path\": \"/\"+k,\n \"label\": layouts[k].get(\"title\", k)\n })\n\n return jsonify({\"brand\": brand, \"navlinks\": paths})\n\n app.add_url_rule(url+\"/\", view_func=props)\n app.add_url_rule(url, view_func=apps)", "docstring": "register UILayout with the flask app\n\ncreate a function that will send props for each UILayout\n\nArgs:\nlayouts (dict): dict of UILayout objects by name\napp (object): flask app\nurl (string): address of props; default is /api/props/", "source": "juraj-google-style"} {"code": "def when_matches_async(self, path, good_value, bad_values=None):\n when = When(good_value, bad_values)\n future = self.subscribe(path, when)\n when.set_future_context(future, weakref.proxy(self))\n return future", "docstring": "Wait for an attribute to become a given value\n\nArgs:\npath (list): The path to wait to\ngood_value: If it is a callable then expect it to return\nTrue if we are satisfied and raise on error. If it is not\ncallable then compare each value against this one and return\nif it matches.\nbad_values (list): values to raise an error on\n\nReturns:\nFuture: a single Future that will resolve when the path matches\ngood_value or bad_values", "source": "codesearchnet"} {"code": "def parse_message(message):\n error_message = []\n func_tags = []\n node_tags = []\n pos = 0\n for match in re.finditer(_INTERPOLATION_PATTERN, message):\n parsed_tag = _ParseTag(match.group('type'), match.group('name'))\n if parsed_tag.type == 'function_node':\n error_message.append(match.group('sep'))\n func_tags.append(parsed_tag)\n else:\n error_message.append(match.group())\n node_tags.append(parsed_tag)\n pos = match.end()\n error_message.append(message[pos:])\n return (''.join(error_message), func_tags, node_tags)", "docstring": "Extract function tags and node tags from a message.\n\nTags are named tuples representing the string {{type name}}. 
For example,\nin \"123{{node Foo}}456{{function_node Bar}}789\", there are two tags: a node\ntag and a function tag.\n\nArgs:\nmessage: An error message, possibly from an OpError.\n\nReturns:\nA tuple containing the original message with function nodes stripped,\nfunction tags, and node tags.\n\nFor example, if message is \"123{{node Foo}}456{{function_node Bar}}789\"\nthen this function returns (\"123{{node Foo}}456789\",\n[_ParseTag(\"function_node\", \"Bar\")], [_ParseTag(\"node\", \"Foo\")]).", "source": "github-repos"} {"code": "def batch_insert(self, records, typecast=False):\n \n return self._batch_request(self.insert, records)", "docstring": "Calls :any:`insert` repetitively, following set API Rate Limit (5/sec)\nTo change the rate limit use ``airtable.API_LIMIT = 0.2``\n(5 per second)\n\n>>> records = [{'Name': 'John'}, {'Name': 'Marc'}]\n>>> airtable.batch_insert(records)\n\nArgs:\nrecords(``list``): Records to insert\ntypecast(``boolean``): Automatic data conversion from string values.\n\nReturns:\nrecords (``list``): list of added records", "source": "juraj-google-style"} {"code": "def _pull_out_perm_lhs(lhs, rest, out_port, in_port):\n (out_inv, lhs_red) = lhs._factor_lhs(out_port)\n return (lhs_red << Feedback.create(SeriesProduct.create(*rest), out_port=out_inv, in_port=in_port))", "docstring": "Pull out a permutation from the Feedback of a SeriesProduct with itself.\n\nArgs:\nlhs (CPermutation): The permutation circuit\nrest (tuple): The other SeriesProduct operands\nout_port (int): The feedback output port index\nin_port (int): The feedback input port index\n\nReturns:\nCircuit: The simplified circuit", "source": "codesearchnet"} {"code": "def isprocess(pid, error=False):\n try:\n os.kill(pid, 0)\n return True\n except OSError:\n return False", "docstring": "Check that a process is running.\n\nArguments:\n\npid (int): Process ID to check.\n\nReturns:\n\nTrue if the process is running, else false.", "source": "codesearchnet"} {"code": "def _examples_from_path_handler(self, request):\n examples_count = int(request.args.get('max_examples'))\n examples_path = request.args.get('examples_path')\n sampling_odds = float(request.args.get('sampling_odds'))\n self.example_class = (tf.train.SequenceExample if (request.args.get('sequence_examples') == 'true') else tf.train.Example)\n try:\n platform_utils.throw_if_file_access_not_allowed(examples_path, self._logdir, self._has_auth_group)\n example_strings = platform_utils.example_protos_from_path(examples_path, examples_count, parse_examples=False, sampling_odds=sampling_odds, example_class=self.example_class)\n self.examples = [self.example_class.FromString(ex) for ex in example_strings]\n self.generate_sprite(example_strings)\n json_examples = [json_format.MessageToJson(example) for example in self.examples]\n self.updated_example_indices = set(range(len(json_examples)))\n return http_util.Respond(request, {'examples': json_examples, 'sprite': (True if self.sprite else False)}, 'application/json')\n except common_utils.InvalidUserInputError as e:\n return http_util.Respond(request, {'error': e.message}, 'application/json', code=400)", "docstring": "Returns JSON of the specified examples.\n\nArgs:\nrequest: A request that should contain 'examples_path' and 'max_examples'.\n\nReturns:\nJSON of up to max_examlpes of the examples in the path.", "source": "codesearchnet"} {"code": "def unsubscribe(self, topic):\n del self.queues[topic]\n try:\n self.client.unsubscribe(topic)\n except operationError as exc:\n raise InternalError('Could not 
unsubscribe from topic', topic=topic, message=exc.message)", "docstring": "Unsubscribe from messages on a given topic\n\nArgs:\ntopic (string): The MQTT topic to unsubscribe from", "source": "codesearchnet"} {"code": "def get_table_metadata(engine, table):\n \n metadata = MetaData()\n metadata.reflect(bind=engine, only=[table])\n table_metadata = Table(table, metadata, autoload=True)\n return table_metadata", "docstring": "Extract all useful infos from the given table\n\nArgs:\nengine: SQLAlchemy connection engine\ntable: table name\n\nReturns:\nDictionary of infos", "source": "juraj-google-style"} {"code": "def _duplicate_example(self, request):\n \n index = int(request.args.get('index'))\n if index >= len(self.examples):\n return http_util.Respond(request, {'error': 'invalid index provided'},\n 'application/json', code=400)\n new_example = self.example_class()\n new_example.CopyFrom(self.examples[index])\n self.examples.append(new_example)\n self.updated_example_indices.add(len(self.examples) - 1)\n self.generate_sprite([ex.SerializeToString() for ex in self.examples])\n return http_util.Respond(request, {}, 'application/json')", "docstring": "Duplicates the specified example.\n\nArgs:\nrequest: A request that should contain 'index'.\n\nReturns:\nAn empty response.", "source": "juraj-google-style"} {"code": "def similar_text(self, *args, **kwargs):\n return SimilarRequest(self, *args, mode='text', **kwargs).send()", "docstring": "Search for documents that are similar to directly supplied text or to the textual content of an existing document.\n\nArgs:\ntext -- Text to found something similar to.\nlen -- Number of keywords to extract from the source.\nquota -- Minimum number of keywords matching in the destination.\n\nKeyword args:\noffset -- Number of results to skip before returning the following ones.\ndocs -- Number of documents to retrieve. Default is 10.\nquery -- An optional query that all found documents have to match against. See Search().\nSee Request.__init__()\n\nReturns:\nA ListResponse object.", "source": "codesearchnet"} {"code": "def connect(self, funds: typing.TokenAmount, initial_channel_target: int=3, joinable_funds_target: float=0.4):\n token = self.raiden.chain.token(self.token_address)\n token_balance = token.balance_of(self.raiden.address)\n if (token_balance < funds):\n raise InvalidAmount(f'Insufficient balance for token {pex(self.token_address)}')\n if (funds <= 0):\n raise InvalidAmount('The funds to use in the connection need to be a positive integer')\n if ((joinable_funds_target < 0) or (joinable_funds_target > 1)):\n raise InvalidAmount(f'joinable_funds_target should be between 0 and 1. Given: {joinable_funds_target}')\n with self.lock:\n self.funds = funds\n self.initial_channel_target = initial_channel_target\n self.joinable_funds_target = joinable_funds_target\n log_open_channels(self.raiden, self.registry_address, self.token_address, funds)\n qty_network_channels = views.count_token_network_channels(views.state_from_raiden(self.raiden), self.registry_address, self.token_address)\n if (not qty_network_channels):\n log.info('Bootstrapping token network.', node=pex(self.raiden.address), network_id=pex(self.registry_address), token_id=pex(self.token_address))\n self.api.channel_open(self.registry_address, self.token_address, self.BOOTSTRAP_ADDR)\n else:\n self._open_channels()", "docstring": "Connect to the network.\n\nSubsequent calls to `connect` are allowed, but will only affect the spendable\nfunds and the connection strategy parameters for the future. 
`connect` will not\nclose any channels.\n\nNote: the ConnectionManager does not discriminate manually opened channels from\nautomatically opened ones. If the user manually opened channels, those deposit\namounts will affect the funding per channel and the number of new channels opened.\n\nArgs:\nfunds: Target amount of tokens spendable to join the network.\ninitial_channel_target: Target number of channels to open.\njoinable_funds_target: Amount of funds not initially assigned.", "source": "codesearchnet"} {"code": "def get_string(self, distance=6, velocity=8, charge=3):\n file_template = 'Generated by pymatgen.io.lammps.data.LammpsData\\n\\n{stats}\\n\\n{box}\\n\\n{body}\\n'\n box = self.box.get_string(distance)\n body_dict = OrderedDict()\n body_dict['Masses'] = self.masses\n types = OrderedDict()\n types['atom'] = len(self.masses)\n if self.force_field:\n all_ff_kws = (SECTION_KEYWORDS['ff'] + SECTION_KEYWORDS['class2'])\n ff_kws = [k for k in all_ff_kws if (k in self.force_field)]\n for kw in ff_kws:\n body_dict[kw] = self.force_field[kw]\n if (kw in SECTION_KEYWORDS['ff'][2:]):\n types[kw.lower()[:(- 7)]] = len(self.force_field[kw])\n body_dict['Atoms'] = self.atoms\n counts = OrderedDict()\n counts['atoms'] = len(self.atoms)\n if (self.velocities is not None):\n body_dict['Velocities'] = self.velocities\n if self.topology:\n for kw in SECTION_KEYWORDS['topology']:\n if (kw in self.topology):\n body_dict[kw] = self.topology[kw]\n counts[kw.lower()] = len(self.topology[kw])\n all_stats = (list(counts.values()) + list(types.values()))\n stats_template = ('{:>%d} {}' % len(str(max(all_stats))))\n count_lines = [stats_template.format(v, k) for (k, v) in counts.items()]\n type_lines = [stats_template.format(v, (k + ' types')) for (k, v) in types.items()]\n stats = '\\n'.join(((count_lines + ['']) + type_lines))\n map_coords = (lambda q: ('{:.%df}' % distance).format(q))\n map_velos = (lambda q: ('{:.%df}' % velocity).format(q))\n map_charges = (lambda q: ('{:.%df}' % charge).format(q))\n formatters = {'x': map_coords, 'y': map_coords, 'z': map_coords, 'vx': map_velos, 'vy': map_velos, 'vz': map_velos, 'q': map_charges}\n section_template = '{kw}\\n\\n{df}\\n'\n parts = []\n for (k, v) in body_dict.items():\n index = (True if (k != 'PairIJ Coeffs') else False)\n df_string = v.to_string(header=False, formatters=formatters, index_names=False, index=index)\n parts.append(section_template.format(kw=k, df=df_string))\n body = '\\n'.join(parts)\n return file_template.format(stats=stats, box=box, body=body)", "docstring": "Returns the string representation of LammpsData, essentially\nthe string to be written to a file.\n\nArgs:\ndistance (int): No. of significant figures to output for\nbox settings (bounds and tilt) and atomic coordinates.\nDefault to 6.\nvelocity (int): No. of significant figures to output for\nvelocities. Default to 8.\ncharge (int): No. of significant figures to output for\ncharges. Default to 3.\n\nReturns:\nString representation", "source": "codesearchnet"} {"code": "def ack_deadline(self):\n target = min([(self._last_histogram_size * 2), (self._last_histogram_size + 100)])\n if (len(self.ack_histogram) > target):\n self._ack_deadline = self.ack_histogram.percentile(percent=99)\n return self._ack_deadline", "docstring": "Return the current ack deadline based on historical time-to-ack.\n\nThis method is \"sticky\". 
It will only perform the computations to\ncheck on the right ack deadline if the histogram has gained a\nsignificant amount of new information.\n\nReturns:\nint: The ack deadline.", "source": "codesearchnet"} {"code": "def PrivateKeyFromNEP2(nep2_key, passphrase):\n if ((not nep2_key) or (len(nep2_key) != 58)):\n raise ValueError('Please provide a nep2_key with a length of 58 bytes (LEN: {0:d})'.format(len(nep2_key)))\n ADDRESS_HASH_SIZE = 4\n ADDRESS_HASH_OFFSET = (len(NEP_FLAG) + len(NEP_HEADER))\n try:\n decoded_key = base58.b58decode_check(nep2_key)\n except Exception as e:\n raise ValueError('Invalid nep2_key')\n address_hash = decoded_key[ADDRESS_HASH_OFFSET:(ADDRESS_HASH_OFFSET + ADDRESS_HASH_SIZE)]\n encrypted = decoded_key[(- 32):]\n pwd_normalized = bytes(unicodedata.normalize('NFC', passphrase), 'utf-8')\n derived = scrypt.hash(pwd_normalized, address_hash, N=SCRYPT_ITERATIONS, r=SCRYPT_BLOCKSIZE, p=SCRYPT_PARALLEL_FACTOR, buflen=SCRYPT_KEY_LEN_BYTES)\n derived1 = derived[:32]\n derived2 = derived[32:]\n cipher = AES.new(derived2, AES.MODE_ECB)\n decrypted = cipher.decrypt(encrypted)\n private_key = xor_bytes(decrypted, derived1)\n kp_new = KeyPair(priv_key=private_key)\n kp_new_address = kp_new.GetAddress()\n kp_new_address_hash_tmp = hashlib.sha256(kp_new_address.encode('utf-8')).digest()\n kp_new_address_hash_tmp2 = hashlib.sha256(kp_new_address_hash_tmp).digest()\n kp_new_address_hash = kp_new_address_hash_tmp2[:4]\n if (kp_new_address_hash != address_hash):\n raise ValueError('Wrong passphrase')\n return private_key", "docstring": "Gets the private key from a NEP-2 encrypted private key\n\nArgs:\nnep2_key (str): The nep-2 encrypted private key\npassphrase (str): The password to encrypt the private key with, as unicode string\n\nReturns:\nbytes: The private key", "source": "codesearchnet"} {"code": "def __init__(self, min_shard_bytes=256 << 10, max_shards=1, bytes_per_string=16):\n if min_shard_bytes < 1:\n raise ValueError(f'Argument `min_shard_bytes` must be positive. Received: {min_shard_bytes}')\n if max_shards < 1:\n raise ValueError(f'Argument `max_shards` must be positive. Received: {max_shards}')\n if bytes_per_string < 1:\n raise ValueError(f'Argument `bytes_per_string` must be positive. Received: {bytes_per_string}')\n self._min_shard_bytes = min_shard_bytes\n self._max_shards = max_shards\n self._bytes_per_string = bytes_per_string", "docstring": "Creates a new `MinSizePartitioner`.\n\nArgs:\nmin_shard_bytes: Minimum bytes of each shard. Defaults to 256K.\nmax_shards: Upper bound on the number of shards. Defaults to 1.\nbytes_per_string: If the partition value is of type string, this provides\nan estimate of how large each string is.", "source": "github-repos"} {"code": "def AddDir(self, dirpath):\n if (dirpath not in self._dirs):\n self._dirs.add(dirpath)\n return True\n return False", "docstring": "Adds a directory path as a source.\n\nArgs:\ndirpath: a string representing a path to the directory.\n\nReturns:\nTrue if the directory is not an already existing source.", "source": "codesearchnet"} {"code": "def tf_step(self, time, variables, **kwargs):\n \n fn_loss = kwargs[\"fn_loss\"]\n if variables is None:\n variables = tf.trainable_variables\n return tf.gradients(fn_loss, variables)", "docstring": "Creates the TensorFlow operations for performing an optimization step on the given variables, including\nactually changing the values of the variables.\n\nArgs:\ntime: Time tensor. 
Not used for this optimizer.\nvariables: List of variables to optimize.\n**kwargs:\nfn_loss : loss function tensor to differentiate.\n\nReturns:\nList of delta tensors corresponding to the updates for each optimized variable.", "source": "juraj-google-style"} {"code": "def _get_files_set(path, start_tag, end_tag):\n with open(path, 'r') as f:\n contents = f.read()\n start = contents.find(start_tag) + len(start_tag) + 1\n end = contents.find(end_tag)\n contents = contents[start:end]\n file_paths = [file_path.strip().strip('\"') for file_path in contents.split(',')]\n return set((file_path for file_path in file_paths if file_path))", "docstring": "Get set of file paths from the given file.\n\nArgs:\npath: Path to file. File at `path` is expected to contain a list of paths\nwhere entire list starts with `start_tag` and ends with `end_tag`. List\nmust be comma-separated and each path entry must be surrounded by double\nquotes.\nstart_tag: String that indicates start of path list.\nend_tag: String that indicates end of path list.\n\nReturns:\nList of string paths.", "source": "github-repos"} {"code": "def path_size(p: tcod.path.AStar) -> int:\n return int(lib.TCOD_path_size(p._path_c))", "docstring": "Return the current length of the computed path.\n\nArgs:\np (AStar): An AStar instance.\nReturns:\nint: Length of the path.", "source": "codesearchnet"} {"code": "def GetLineWidth(line):\n if isinstance(line, unicode):\n width = 0\n for uc in unicodedata.normalize('NFC', line):\n if (unicodedata.east_asian_width(uc) in ('W', 'F')):\n width += 2\n elif (not unicodedata.combining(uc)):\n width += 1\n return width\n else:\n return len(line)", "docstring": "Determines the width of the line in column positions.\n\nArgs:\nline: A string, which may be a Unicode string.\n\nReturns:\nThe width of the line in column positions, accounting for Unicode\ncombining characters and wide characters.", "source": "codesearchnet"} {"code": "def transpose(self, name=None):\n if (name is None):\n name = (self.module_name + '_transpose')\n return AddBias(output_shape=(lambda : self._input_shape), bias_dims=self._bias_dims, initializers=self._initializers, regularizers=self._regularizers, name=name)", "docstring": "Returns transposed `AddBias` module.\n\nArgs:\nname: Optional string assigning name of transpose module. 
The default name\nis constructed by appending \"_transpose\" to `self.module_name`.\n\nReturns:\nTransposed `AddBias` module.", "source": "codesearchnet"} {"code": "def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session):\n callable_opts = config_pb2.CallableOptions()\n for x in feed_arrays:\n callable_opts.feed.append(x.name)\n if self.feed_dict:\n for key in sorted(self.feed_dict.keys()):\n callable_opts.feed.append(key.name)\n for x, y in zip(feed_symbols, symbol_vals):\n connection = callable_opts.tensor_connection.add()\n if x.dtype != y.dtype:\n y = math_ops.cast(y, dtype=x.dtype)\n from_tensor = _as_graph_element(y)\n if from_tensor is None:\n from_tensor = y\n connection.from_tensor = from_tensor.name\n connection.to_tensor = x.name\n for x in self.outputs + self.fetches:\n callable_opts.fetch.append(x.name)\n callable_opts.target.append(self.updates_op.name)\n if self.run_options:\n callable_opts.run_options.CopyFrom(self.run_options)\n callable_fn = session._make_callable_from_options(callable_opts)\n self._callable_fn = callable_fn\n self._feed_arrays = feed_arrays\n self._feed_symbols = feed_symbols\n self._symbol_vals = symbol_vals\n self._fetches = list(self.fetches)\n self._session = session", "docstring": "Generates a callable that runs the graph.\n\nArgs:\nfeed_arrays: List of input tensors to be fed Numpy arrays at runtime.\nfeed_symbols: List of input tensors to be fed symbolic tensors at runtime.\nsymbol_vals: List of symbolic tensors to be fed to `feed_symbols`.\nsession: Session to use to generate the callable.\n\nReturns:\nFunction that runs the graph according to the above options.", "source": "github-repos"} {"code": "def apply(self, elements, *args, **kwargs):\n return self.extract_output(self.add_inputs(self.create_accumulator(*args, **kwargs), elements, *args, **kwargs), *args, **kwargs)", "docstring": "Returns result of applying this CombineFn to the input values.\n\nArgs:\nelements: the set of values to combine.\n*args: Additional arguments and side inputs.\n**kwargs: Additional arguments and side inputs.", "source": "github-repos"} {"code": "def check_valid(money):\n \n if not isinstance(money, sc_messages.Money):\n raise ValueError(u'Inputs should be of type %s' % (sc_messages.Money,))\n currency = money.currencyCode\n if not currency or len(currency) != 3:\n raise ValueError(_MSG_3_LETTERS_LONG)\n units = money.units\n nanos = money.nanos\n if ((units > 0) and (nanos < 0)) or ((units < 0) and (nanos > 0)):\n raise ValueError(_MSG_UNITS_NANOS_MISMATCH)\n if abs(nanos) > MAX_NANOS:\n raise ValueError(_MSG_NANOS_OOB)", "docstring": "Determine if an instance of `Money` is valid.\n\nArgs:\nmoney (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): the\ninstance to test\n\nRaises:\nValueError: if the money instance is invalid", "source": "juraj-google-style"} {"code": "def stop(self, **kwargs):\n path = ('%s/%s/stop' % (self.manager.path, self.get_id()))\n self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Stop the environment.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. 
sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabStopError: If the operation failed", "source": "codesearchnet"} {"code": "def bernoulli(key, mean=np.float32(0.5), shape=None):\n mean = tf_np.asarray(mean)\n if shape is None:\n shape = mean.shape\n return uniform(key, shape) < mean", "docstring": "Sample Bernoulli random values with given shape and mean.\n\nArgs:\nkey: the RNG key.\nmean: optional, an array_like broadcastable to `shape` for the mean of the\nrandom variables (default 0.5).\nshape: optional, a tuple of nonnegative integers representing the shape\n(default to `mean`'s shape).\n\nReturns:\nA random array with the specified shape and boolean dtype.", "source": "github-repos"} {"code": "def stringify(self, use_bytes=False):\n \n def _str_value(value):\n if isinstance(value, (list, tuple)):\n value = (self.EOL + '\\t').join(map(_str_value, value))\n elif callable(value):\n value = _str_value(value())\n return value\n\n s = self.EOL.join((\"{key}: {value}\".format(key=key,\n value=_str_value(value))\n for key, value in self._header_data.values()\n if value is not None))\n return s + (self.EOL * 2)", "docstring": "Returns representation of headers as a valid HTTP header string. This\nis called by __str__.\n\nArgs:\nuse_bytes (bool): Returns a bytes object instead of a str.", "source": "juraj-google-style"} {"code": "def decrypt_report(self, device_id, root, data, **kwargs):\n report_key = self._verify_derive_key(device_id, root, **kwargs)\n try:\n from Crypto.Cipher import AES\n import Crypto.Util.Counter\n except ImportError:\n raise NotFoundError\n ctr = Crypto.Util.Counter.new(128)\n encryptor = AES.new(bytes(report_key[:16]), AES.MODE_CTR, counter=ctr)\n decrypted = encryptor.decrypt(bytes(data))\n return {'data': decrypted}", "docstring": "Decrypt a buffer of report data on behalf of a device.\n\nArgs:\ndevice_id (int): The id of the device that we should encrypt for\nroot (int): The root key type that should be used to generate the report\ndata (bytearray): The data that we should decrypt\n**kwargs: There are additional specific keyword args that are required\ndepending on the root key used. Typically, you must specify\n- report_id (int): The report id\n- sent_timestamp (int): The sent timestamp of the report\n\nThese two bits of information are used to construct the per report\nsigning and encryption key from the specific root key type.\n\nReturns:\ndict: The decrypted data and any associated metadata about the data.\nThe data itself must always be a bytearray stored under the 'data'\nkey, however additional keys may be present depending on the encryption method\nused.\n\nRaises:\nNotFoundError: If the auth provider is not able to decrypt the data.", "source": "codesearchnet"} {"code": "def list_stack(list_, opts):\n assert isinstance(opts, ListStackOpts)\n if isinstance(list_, tensor_array_ops.TensorArray):\n return _tf_tensorarray_stack(list_)\n elif tensor_util.is_tf_type(list_):\n if list_.dtype == dtypes.variant:\n return _tf_tensor_list_stack(list_, opts)\n else:\n return list_\n else:\n return _py_list_stack(list_, opts)", "docstring": "The list stack function.\n\nThis does not have a direct correspondent in Python. The closest idiom to\nthis is tf.append or np.stack. It's different from those in the sense that it\naccepts a Tensor list, rather than a list of tensors. It can also accept\nTensorArray. 
When the target is anything else, the dispatcher will rely on\nctx.original_call for fallback.\n\nArgs:\nlist_: An entity that supports append semantics.\nopts: A ListStackOpts object.\n\nReturns:\nThe output of the stack operation, typically a Tensor.", "source": "github-repos"} {"code": "def get_v1_names(symbol: Any) -> Sequence[str]:\n names_v1 = []\n tensorflow_api_attr_v1 = API_ATTRS_V1[TENSORFLOW_API_NAME].names\n keras_api_attr_v1 = API_ATTRS_V1[KERAS_API_NAME].names\n if not hasattr(symbol, '__dict__'):\n return names_v1\n if tensorflow_api_attr_v1 in symbol.__dict__:\n names_v1.extend(getattr(symbol, tensorflow_api_attr_v1))\n if keras_api_attr_v1 in symbol.__dict__:\n names_v1.extend(getattr(symbol, keras_api_attr_v1))\n return names_v1", "docstring": "Get a list of TF 1.* names for this symbol.\n\nArgs:\nsymbol: symbol to get API names for.\n\nReturns:\nList of all API names for this symbol.", "source": "github-repos"} {"code": "def getVarianceComps(self, univariance=False):\n \n RV=sp.zeros((self.P,self.n_randEffs))\n for term_i in range(self.n_randEffs):\n RV[:,term_i] = self.getTraitCovar(term_i).diagonal()\n if univariance:\n RV /= RV.sum(1)[:,sp.newaxis]\n return RV", "docstring": "Return the estimated variance components\n\nArgs:\nunivariance: Boolean indicator, if True variance components are normalized to sum up to 1 for each trait\nReturns:\nvariance components of all random effects on all phenotypes [P, n_randEffs matrix]", "source": "juraj-google-style"} {"code": "def set_datetime_format(self, format):\n if (not (format in ['UNIX', 'RFC3339'])):\n return\n self.datetime_format = format\n self.set_header('Accept-Datetime-Format', self.datetime_format)", "docstring": "Set the Accept-Datetime-Format header to an acceptable\nvalue\n\nArgs:\nformat: UNIX or RFC3339", "source": "codesearchnet"} {"code": "def __init__(self, feature_set='spe+'):\n \n filename = filenames[feature_set]\n self.segments, self.seg_dict, self.names = self._read_table(filename)\n self.seg_seq = {seg[0]: i for (i, seg) in enumerate(self.segments)}\n self.weights = self._read_weights()\n self.seg_regex = self._build_seg_regex()\n self.longest_seg = max([len(x) for x in self.seg_dict.keys()])\n self.xsampa = xsampa.XSampa()", "docstring": "Construct a FeatureTable object\n\nArgs:\nfeature_set (str): the feature set that the FeatureTable will use;\ncurrently, there is only one of these (\"spe+\")", "source": "juraj-google-style"} {"code": "def is_finite_number(value):\n if (not isinstance(value, (numbers.Integral, float))):\n return False\n if isinstance(value, bool):\n return False\n if isinstance(value, float):\n if (math.isnan(value) or math.isinf(value)):\n return False\n if (abs(value) > (2 ** 53)):\n return False\n return True", "docstring": "Validates if the given value is a number, enforces\nabsolute limit of 2^53 and restricts NAN, INF, -INF.\n\nArgs:\nvalue: Value to be validated.\n\nReturns:\nBoolean: True if value is a number and not NAN, INF, -INF or\ngreater than absolute limit of 2^53 else False.", "source": "codesearchnet"} {"code": "def FindServiceByName(self, full_name):\n \n full_name = _NormalizeFullyQualifiedName(full_name)\n if full_name not in self._service_descriptors:\n self._FindFileContainingSymbolInDb(full_name)\n return self._service_descriptors[full_name]", "docstring": "Loads the named service descriptor from the pool.\n\nArgs:\nfull_name: The full name of the service descriptor to load.\n\nReturns:\nThe service descriptor for the named service.\n\nRaises:\nKeyError: if 
the service cannot be found in the pool.", "source": "juraj-google-style"} {"code": "def exists(self, uri):\n \n \n try:\n urllib.request.urlopen(uri)\n return True\n except urllib.error.HTTPError:\n return False", "docstring": "Method returns true if the entity exists in the Repository,\nfalse otherwise\n\nArgs:\nuri(str): Entity URI\n\nReturns:\nbool", "source": "juraj-google-style"} {"code": "def stripped_op_list_for_graph(graph_def):\n used_ops = ops_used_by_graph_def(graph_def)\n op_defs = []\n for op in sorted(used_ops):\n op_def = op_def_registry.get(op)\n if op_def is not None:\n op_defs.append(op_def)\n return op_def_pb2.OpList(op=op_defs)", "docstring": "Collect the stripped OpDefs for ops used by a graph.\n\nThis function computes the `stripped_op_list` field of `MetaGraphDef` and\nsimilar protos. The result can be communicated from the producer to the\nconsumer, which can then use the C++ function\n`RemoveNewDefaultAttrsFromGraphDef` to improve forwards compatibility.\n\nArgs:\ngraph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.\n\nReturns:\nAn `OpList` of ops used by the graph.", "source": "github-repos"} {"code": "def _add_sample_measure(self, measure_params, num_samples):\n \n \n measured_qubits = list({qubit for qubit, cmembit in measure_params})\n num_measured = len(measured_qubits)\n \n axis = list(range(self._number_of_qubits))\n for qubit in reversed(measured_qubits):\n \n \n axis.remove(self._number_of_qubits - 1 - qubit)\n probabilities = np.reshape(np.sum(np.abs(self._statevector) ** 2,\n axis=tuple(axis)),\n 2 ** num_measured)\n \n samples = self._local_random.choice(range(2 ** num_measured),\n num_samples, p=probabilities)\n \n memory = []\n for sample in samples:\n classical_memory = self._classical_memory\n for count, (qubit, cmembit) in enumerate(sorted(measure_params)):\n qubit_outcome = int((sample & (1 << count)) >> count)\n membit = 1 << cmembit\n classical_memory = (classical_memory & (~membit)) | (qubit_outcome << cmembit)\n value = bin(classical_memory)[2:]\n memory.append(hex(int(value, 2)))\n return memory", "docstring": "Generate memory samples from current statevector.\n\nArgs:\nmeasure_params (list): List of (qubit, cmembit) values for\nmeasure instructions to sample.\nnum_samples (int): The number of memory samples to generate.\n\nReturns:\nlist: A list of memory values in hex format.", "source": "juraj-google-style"} {"code": "def _PrintAnalysisStatusUpdateWindow(self, processing_status):\n if self._stdout_output_writer:\n self._ClearScreen()\n output_text = 'plaso - {0:s} version {1:s}\\n\\n'.format(self._tool_name, plaso.__version__)\n self._output_writer.Write(output_text)\n self._PrintAnalysisStatusHeader(processing_status)\n table_view = views.CLITabularTableView(column_names=['Identifier', 'PID', 'Status', 'Memory', 'Events', 'Tags', 'Reports'], column_sizes=[23, 7, 15, 15, 15, 15, 0])\n self._AddsAnalysisProcessStatusTableRow(processing_status.foreman_status, table_view)\n for worker_status in processing_status.workers_status:\n self._AddsAnalysisProcessStatusTableRow(worker_status, table_view)\n table_view.Write(self._output_writer)\n self._output_writer.Write('\\n')\n if processing_status.aborted:\n self._output_writer.Write('Processing aborted - waiting for clean up.\\n\\n')\n if self._stdout_output_writer:\n sys.stdout.flush()", "docstring": "Prints an analysis status update in window mode.\n\nArgs:\nprocessing_status (ProcessingStatus): processing status.", "source": "codesearchnet"} {"code": "def bel_edges(self, nanopub: 
Mapping[(str, Any)], namespace_targets: Mapping[(str, List[str])]={}, rules: List[str]=[], orthologize_target: str=None) -> List[Mapping[(str, Any)]]:\n edges = bel.edge.edges.create_edges(nanopub, self.endpoint, namespace_targets=namespace_targets, rules=rules, orthologize_target=orthologize_target)\n return edges", "docstring": "Create BEL Edges from BEL nanopub\n\nArgs:\nnanopub (Mapping[str, Any]): bel nanopub\nnamespace_targets (Mapping[str, List[str]]): what namespaces to canonicalize\nrules (List[str]): which computed edge rules to process, default is all,\nlook at BEL Specification yaml file for computed edge signature keys,\ne.g. degradation, if any rule in list is 'skip', then skip computing edges\njust return primary_edge\northologize_target (str): species to convert BEL into, e.g. TAX:10090 for mouse, default option does not orthologize\n\nReturns:\nList[Mapping[str, Any]]: edge list with edge attributes (e.g. context)", "source": "codesearchnet"} {"code": "def GetRootKey(self):\n root_registry_key = virtual.VirtualWinRegistryKey('')\n for mapped_key in self._MAPPED_KEYS:\n key_path_segments = key_paths.SplitKeyPath(mapped_key)\n if (not key_path_segments):\n continue\n registry_key = root_registry_key\n for name in key_path_segments[:(- 1)]:\n sub_registry_key = registry_key.GetSubkeyByName(name)\n if (not sub_registry_key):\n sub_registry_key = virtual.VirtualWinRegistryKey(name)\n registry_key.AddSubkey(sub_registry_key)\n registry_key = sub_registry_key\n sub_registry_key = registry_key.GetSubkeyByName(key_path_segments[(- 1)])\n if ((not sub_registry_key) and isinstance(registry_key, virtual.VirtualWinRegistryKey)):\n sub_registry_key = virtual.VirtualWinRegistryKey(key_path_segments[(- 1)], registry=self)\n registry_key.AddSubkey(sub_registry_key)\n return root_registry_key", "docstring": "Retrieves the Windows Registry root key.\n\nReturns:\nWinRegistryKey: Windows Registry root key.\n\nRaises:\nRuntimeError: if there are multiple matching mappings and\nthe correct mapping cannot be resolved.", "source": "codesearchnet"} {"code": "def update(self, force=False):\n if (self.is_404 and (not force)):\n return 0\n if self._last_modified:\n headers = {'If-Modified-Since': self._last_modified}\n else:\n headers = None\n try:\n res = self._board._requests_session.get(self._api_url, headers=headers)\n except:\n return 0\n if (res.status_code == 304):\n return 0\n elif (res.status_code == 404):\n self.is_404 = True\n self._board._thread_cache.pop(self.id, None)\n return 0\n elif (res.status_code == 200):\n if self.is_404:\n self.is_404 = False\n self._board._thread_cache[self.id] = self\n self.want_update = False\n self.omitted_images = 0\n self.omitted_posts = 0\n self._last_modified = res.headers['Last-Modified']\n posts = res.json()['posts']\n original_post_count = len(self.replies)\n self.topic = Post(self, posts[0])\n if (self.last_reply_id and (not force)):\n self.replies.extend((Post(self, p) for p in posts if (p['no'] > self.last_reply_id)))\n else:\n self.replies[:] = [Post(self, p) for p in posts[1:]]\n new_post_count = len(self.replies)\n post_count_delta = (new_post_count - original_post_count)\n if (not post_count_delta):\n return 0\n self.last_reply_id = self.replies[(- 1)].post_number\n return post_count_delta\n else:\n res.raise_for_status()", "docstring": "Fetch new posts from the server.\n\nArguments:\nforce (bool): Force a thread update, even if thread has 404'd.\n\nReturns:\nint: How many new posts have been fetched.", "source": "codesearchnet"} {"code": "def 
unwrap_aliases(data_type):\n \n unwrapped_alias = False\n while is_alias(data_type):\n unwrapped_alias = True\n data_type = data_type.data_type\n return data_type, unwrapped_alias", "docstring": "Convenience method to unwrap all Alias(es) from around a DataType.\n\nArgs:\ndata_type (DataType): The target to unwrap.\n\nReturn:\nTuple[DataType, bool]: The underlying data type and a bool indicating\nwhether the input type had at least one alias layer.", "source": "juraj-google-style"} {"code": "def Matches(self, file_entry):\n if (not self._filters):\n return True\n results = []\n for file_entry_filter in self._filters:\n result = file_entry_filter.Matches(file_entry)\n results.append(result)\n return ((True in results) or (False not in results))", "docstring": "Compares the file entry against the filter collection.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry to compare.\n\nReturns:\nbool: True if the file entry matches one of the filters. If no filters\nare provided or applicable the result will be True.", "source": "codesearchnet"} {"code": "def GetFileObjectByPathSpec(self, path_spec):\n file_entry = self.GetFileEntryByPathSpec(path_spec)\n if (not file_entry):\n return None\n return file_entry.GetFileObject()", "docstring": "Retrieves a file-like object for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nFileIO: a file-like object or None if not available.", "source": "codesearchnet"} {"code": "def resolve_variables(variables, context, provider):\n \n for variable in variables:\n variable.resolve(context, provider)", "docstring": "Given a list of variables, resolve all of them.\n\nArgs:\nvariables (list of :class:`stacker.variables.Variable`): list of\nvariables\ncontext (:class:`stacker.context.Context`): stacker context\nprovider (:class:`stacker.provider.base.BaseProvider`): subclass of the\nbase provider", "source": "juraj-google-style"} {"code": "def write(self, symbol, data):\n cursor = self._collection.find()\n for res in cursor:\n library = self._arctic_lib.arctic[res['library_name']]\n dslice = self._slice(data, to_dt(res['start'], mktz('UTC')), to_dt(res['end'], mktz('UTC')))\n if (len(dslice) != 0):\n library.write(symbol, dslice)", "docstring": "Split the tick data to the underlying collections and write the data to each low\nlevel library.\n\nArgs:\nsymbol (str): the symbol for the timeseries data\ndata (list of dicts or pandas dataframe): Tick data to write\nif a list of dicts is given the list must be in time order and the time must be stored in\nan element named 'index' the value of which must be a timezone aware datetime.\nFor a pandas dataframe the index must be a datetime", "source": "codesearchnet"} {"code": "def to_utc_datetime(self, has_tz: bool=False) -> datetime.datetime:\n epoch = self._epoch_datetime_utc()\n if not has_tz:\n epoch = epoch.replace(tzinfo=None)\n return epoch + datetime.timedelta(microseconds=self.micros)", "docstring": "Returns a ``datetime.datetime`` object of UTC for this Timestamp.\n\nNote that this method returns a ``datetime.datetime`` object without a\ntimezone info by default, as builtin `datetime.datetime.utcnow` method. 
If\nthis is used as part of the processed data, one should set has_tz=True to\navoid offset due to default timezone mismatch.\n\nArgs:\nhas_tz: whether the timezone info is attached, default to False.\n\nReturns:\na ``datetime.datetime`` object of UTC for this Timestamp.", "source": "github-repos"} {"code": "def SetKeyPathPrefix(self, key_path_prefix):\n self._key_path_prefix = key_path_prefix\n self._key_path_prefix_length = len(key_path_prefix)\n self._key_path_prefix_upper = key_path_prefix.upper()", "docstring": "Sets the Window Registry key path prefix.\n\nArgs:\nkey_path_prefix (str): Windows Registry key path prefix.", "source": "codesearchnet"} {"code": "class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):\n\n def __init__(self, force_token_map):\n force_token_map = dict(force_token_map)\n force_token_array = jnp.ones(max(force_token_map.keys()) + 1, dtype=jnp.int32) * -1\n for index, token in force_token_map.items():\n if token is not None:\n force_token_array = force_token_array.at[index].set(token)\n self.force_token_array = jnp.int32(force_token_array)\n\n def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:\n\n def _force_token(generation_idx):\n batch_size = scores.shape[0]\n current_token = self.force_token_array[generation_idx]\n new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float('inf')\n updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)\n new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))\n return new_scores\n scores = lax.cond(cur_len >= self.force_token_array.shape[0], lambda: scores, lambda: lax.cond(self.force_token_array[cur_len] >= 0, lambda: _force_token(cur_len), lambda: scores))\n return scores", "docstring": "[`FlaxLogitsProcessor`] that takes a list of pairs of integers which indicates a mapping from generation indices to\ntoken indices that will be forced before sampling. The processor will set their log probs to 0 and all other tokens\nto `-inf` so that they are sampled at their corresponding index.\n\nArgs:\nforce_token_map (`list`):\nMap giving token ids and indices where they will be forced to be sampled.", "source": "github-repos"} {"code": "def pad_image(self, image: np.ndarray, size: Dict[str, int], random_padding: bool=False, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n output_height, output_width = (size['height'], size['width'])\n input_height, input_width = get_image_size(image, channel_dim=input_data_format)\n delta_width = output_width - input_width\n delta_height = output_height - input_height\n if random_padding:\n pad_top = np.random.randint(low=0, high=delta_height + 1)\n pad_left = np.random.randint(low=0, high=delta_width + 1)\n else:\n pad_top = delta_height \n pad_left = delta_width \n pad_bottom = delta_height - pad_top\n pad_right = delta_width - pad_left\n padding = ((pad_top, pad_bottom), (pad_left, pad_right))\n return pad(image, padding, data_format=data_format, input_data_format=input_data_format)", "docstring": "Pad the image to the specified size.\n\nArgs:\nimage (`np.ndarray`):\nThe image to be padded.\nsize (`Dict[str, int]`):\nThe size `{\"height\": h, \"width\": w}` to pad the image to.\nrandom_padding (`bool`, *optional*, defaults to `False`):\nWhether to use random padding or not.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe data format of the output image. 
If unset, the same format as the input image is used.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If not provided, it will be inferred.", "source": "github-repos"} {"code": "def mcast_ip_mask(ip_addr_and_mask, return_tuple=True):\n \n regex_mcast_ip_and_mask = __re.compile(\"^(((2[2-3][4-9])|(23[0-3]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))/((3[0-2])|([1-2][0-9])|[3-9]))$\")\n if return_tuple:\n while not regex_mcast_ip_and_mask.match(ip_addr_and_mask):\n print(\"Not a good multicast IP and CIDR mask combo.\")\n print(\"Please try again.\")\n ip_addr_and_mask = input(\"Please enter a multicast IP address and mask in the following format x.x.x.x/x: \")\n ip_cidr_split = ip_addr_and_mask.split(\"/\")\n ip_addr = ip_cidr_split[0]\n cidr = ip_cidr_split[1]\n return ip_addr, cidr\n elif not return_tuple:\n if not regex_mcast_ip_and_mask.match(ip_addr_and_mask):\n return False\n else:\n return True", "docstring": "Function to check if an address is multicast and that the CIDR mask is good\nArgs:\nip_addr_and_mask: Multicast IP address and mask in the following format 239.1.1.1/24\nreturn_tuple: Set to True it returns an IP and mask in a tuple, set to False returns True or False\n\nReturns: see return_tuple for return options", "source": "juraj-google-style"} {"code": "def _ParseBooleanValue(self, byte_stream):\n if (byte_stream == b'\\x00'):\n return False\n if (byte_stream == b'\\x01'):\n return True\n raise errors.ParseError('Unsupported boolean value.')", "docstring": "Parses a boolean value.\n\nArgs:\nbyte_stream (bytes): byte stream.\n\nReturns:\nbool: boolean value.\n\nRaises:\nParseError: when the boolean value cannot be parsed.", "source": "codesearchnet"} {"code": "def threat(self, name, owner=None, **kwargs):\n \n return Threat(self.tcex, name, owner=owner, **kwargs)", "docstring": "Create the Threat TI object.\n\nArgs:\nowner:\nname:\n**kwargs:\n\nReturn:", "source": "juraj-google-style"} {"code": "def sspro_results(self):\n return ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqs(self.out_sspro)", "docstring": "Parse the SSpro output file and return a dict of secondary structure compositions.\n\nReturns:\ndict: Keys are sequence IDs, values are the lists of secondary structure predictions.\nH: helix\nE: strand\nC: the rest", "source": "codesearchnet"} {"code": "def put_path(self, url, path):\n cache_path = self._url_to_path(url)\n try:\n dir = os.path.dirname(cache_path)\n os.makedirs(dir)\n except OSError as e:\n if (e.errno != errno.EEXIST):\n raise Error(('Failed to create cache directories for %s' % cache_path))\n try:\n os.unlink(cache_path)\n except OSError:\n pass\n try:\n os.link(path, cache_path)\n except OSError:\n try:\n shutil.copyfile(path, cache_path)\n except IOError:\n raise Error(('Failed to cache %s as %s for %s' % (path, cache_path, url)))", "docstring": "Puts a resource already on disk into the disk cache.\n\nArgs:\nurl: The original url of the resource\npath: The resource already available on disk\n\nRaises:\nCacheError: If the file cannot be put in cache", "source": "codesearchnet"} {"code": "def is_apk_installed(device: AndroidDevice, package_name: str) -> bool:\n try:\n out = device.adb.shell(['pm', 'list', 'package'])\n return bool(utils.grep('^package:%s$' % package_name, out))\n except adb.AdbError as error:\n raise errors.DeviceError(device, error)", 
"docstring": "Check if the given apk is already installed.\n\nArgs:\ndevice: AndroidDevice, Mobly's Android controller object.\npackage_name: str, name of the package.\n\nReturns:\nTrue if package is installed. False otherwise.", "source": "github-repos"} {"code": "def run_inference(self, batch: Sequence[pandas.DataFrame], model: BaseEstimator, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n for dataframe in iter(batch):\n if dataframe.shape[0] != 1:\n raise ValueError('Only dataframes with single rows are supported.')\n predictions, splits = self._model_inference_fn(model, batch, inference_args)\n return utils._convert_to_result(splits, predictions, model_id=self._model_uri)", "docstring": "Runs inferences on a batch of pandas dataframes.\n\nArgs:\nbatch: A sequence of examples as numpy arrays. They should\nbe single examples.\nmodel: A dataframe model or pipeline. Must implement predict(X).\nWhere the parameter X is a pandas dataframe.\ninference_args: Any additional arguments for an inference.\n\nReturns:\nAn Iterable of type PredictionResult.", "source": "github-repos"} {"code": "def config_cmd_handler(conf, config='config'):\n if (conf[config].create or conf[config].update):\n conf.create_config_(update=conf[config].update)\n if conf[config].create_local:\n conf.create_config_(index=(- 1), update=conf[config].update)\n if conf[config].edit:\n if (not conf.config_files_[0].is_file()):\n conf.create_config_(update=conf[config].update)\n subprocess.call(shlex.split('{} {}'.format(conf[config].editor, conf.config_files_[0])))", "docstring": "Implement the behavior of a subcmd using config_conf_section\n\nArgs:\nconf (:class:`~loam.manager.ConfigurationManager`): it should contain a\nsection created with :func:`config_conf_section` function.\nconfig (str): name of the configuration section created with\n:func:`config_conf_section` function.", "source": "codesearchnet"} {"code": "def AddArguments(cls, argument_group):\n \n shared_4n6time_output.Shared4n6TimeOutputArgumentsHelper.AddArguments(\n argument_group)\n MySQL4n6TimeDatabaseArgumentsHelper.AddArguments(argument_group)", "docstring": "Adds command line arguments the helper supports to an argument group.\n\nThis function takes an argument parser or an argument group object and adds\nto it all the command line arguments this helper supports.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\nargparse group.", "source": "juraj-google-style"} {"code": "def get_kwdefaults(func, parse_source=False):\n r\n \n \n argspec = inspect.getargspec(func)\n kwdefaults = {}\n if argspec.args is None or argspec.defaults is None:\n pass\n else:\n args = argspec.args\n defaults = argspec.defaults\n \n kwpos = len(args) - len(defaults)\n kwdefaults = OrderedDict(zip(args[kwpos:], defaults))\n if parse_source and argspec.keywords:\n \n keyword_defaults = parse_func_kwarg_keys(func, with_vals=True)\n for key, val in keyword_defaults:\n assert key not in kwdefaults, 'parsing error'\n kwdefaults[key] = val\n return kwdefaults", "docstring": "r\"\"\"\nArgs:\nfunc (func):\n\nReturns:\ndict:\n\nCommandLine:\npython -m utool.util_inspect get_kwdefaults\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_inspect import * # NOQA\n>>> import utool as ut\n>>> func = dummy_func\n>>> parse_source = True\n>>> kwdefaults = get_kwdefaults(func, parse_source)\n>>> print('kwdefaults = %s' % (ut.repr4(kwdefaults),))", "source": "juraj-google-style"} {"code": "def write_config_files(self, host, hyperparameters, 
input_data_config):\n config_path = os.path.join(self.container_root, host, 'input', 'config')\n resource_config = {'current_host': host, 'hosts': self.hosts}\n json_input_data_config = {}\n for c in input_data_config:\n channel_name = c['ChannelName']\n json_input_data_config[channel_name] = {'TrainingInputMode': 'File'}\n if ('ContentType' in c):\n json_input_data_config[channel_name]['ContentType'] = c['ContentType']\n _write_json_file(os.path.join(config_path, 'hyperparameters.json'), hyperparameters)\n _write_json_file(os.path.join(config_path, 'resourceconfig.json'), resource_config)\n _write_json_file(os.path.join(config_path, 'inputdataconfig.json'), json_input_data_config)", "docstring": "Write the config files for the training containers.\n\nThis method writes the hyperparameters, resources and input data configuration files.\n\nArgs:\nhost (str): Host to write the configuration for\nhyperparameters (dict): Hyperparameters for training.\ninput_data_config (dict): Training input channels to be used for training.\n\nReturns: None", "source": "codesearchnet"} {"code": "def get_vcf_entry(variant_obj, case_id=None):\n \n if variant_obj['category'] == 'snv':\n var_type = 'TYPE'\n else:\n var_type = 'SVTYPE'\n\n info_field = ';'.join(\n [\n 'END='+str(variant_obj['end']),\n var_type+'='+variant_obj['sub_category'].upper()\n ]\n )\n\n variant_string = \"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\".format(\n variant_obj['chromosome'],\n variant_obj['position'],\n variant_obj['dbsnp_id'],\n variant_obj['reference'],\n variant_obj['alternative'],\n variant_obj['quality'],\n ';'.join(variant_obj['filters']),\n info_field\n )\n\n if case_id:\n variant_string += \"\\tGT\"\n for sample in variant_obj['samples']:\n variant_string += \"\\t\" + sample['genotype_call']\n\n return variant_string", "docstring": "Get vcf entry from variant object\n\nArgs:\nvariant_obj(dict)\nReturns:\nvariant_string(str): string representing variant in vcf format", "source": "juraj-google-style"} {"code": "def _stringify_path(path_or_buffer):\n \n\n try:\n import pathlib\n _PATHLIB_INSTALLED = True\n except ImportError:\n _PATHLIB_INSTALLED = False\n\n if hasattr(path_or_buffer, '__fspath__'):\n return path_or_buffer.__fspath__()\n\n if _PATHLIB_INSTALLED and isinstance(path_or_buffer, pathlib.Path):\n return text_type(path_or_buffer)\n\n return path_or_buffer", "docstring": "Convert path like object to string\n\nArgs:\npath_or_buffer: object to be converted\n\nReturns:\nstring_path_or_buffer: maybe string version of path_or_buffer", "source": "juraj-google-style"} {"code": "def get_max_id(cls, session):\n id_base = None\n for c in ([cls] + list(cls.__bases__)):\n for base_class in c.__bases__:\n if (base_class.__name__ == 'Base'):\n if (id_base is None):\n id_base = c\n else:\n raise RuntimeError(('Multiple base object classes for class ' + cls.__name__))\n if (id_base is None):\n raise RuntimeError(('Error searching for base class of ' + cls.__name__))\n max_id = session.query(func.max(id_base.id)).scalar()\n if (max_id is None):\n max_id = 0\n return max_id", "docstring": "Get the current max value of the ``id`` column.\n\nWhen creating and storing ORM objects in bulk, :mod:`sqlalchemy` does not automatically\ngenerate an incrementing primary key ``id``. To do this manually, one needs to know the\ncurrent max ``id``. For ORM object classes that are derived from other ORM object classes,\nthe max ``id`` of the lowest base class is returned. 
This is designed to be used with\ninheritance by joining, in which derived and base class objects have identical ``id`` values.\n\nArgs:\nsession: database session to operate in", "source": "codesearchnet"} {"code": "def property_get(self, callself: 'cfg.Variable', is_class: bool=False) -> 'BaseValue':\n del callself, is_class\n return self", "docstring": "Bind this value to the given self or cls.\n\nThis function is similar to __get__ except at the abstract level. This does\nnot trigger any code execution inside the VM. See __get__ for more details.\n\nArgs:\ncallself: The Variable that should be passed as self or cls when the call\nis made. We only need one of self or cls, so having them share a\nparameter prevents accidentally passing in both.\nis_class: Whether callself is self or cls. Should be cls only when we want\nto directly pass in a class to bind a class method to, rather than\npassing in an instance and calling get_class().\n\nReturns:\nAnother abstract value that should be returned in place of this one. The\ndefault implementation returns self, so this can always be called safely.", "source": "github-repos"} {"code": "def gather(weights, indices, dim, output_shape=None):\n \n dim = convert_to_dimension(dim)\n output_shape = convert_to_shape(output_shape)\n if weights.dtype == tf.bool:\n return cast(gather(to_float(weights), indices, dim, output_shape), tf.bool)\n return einsum([one_hot(indices, dim, dtype=weights.dtype), weights],\n reduced_dims=[dim], output_shape=output_shape)", "docstring": "Shorthand for einsum([one_hot(indices, dim)], weights, reduced_dims=[dim]).\n\nArgs:\nweights: a Tensor\nindices: a Tensor with integer type\ndim: a Dimension\noutput_shape: an optional mtf.Shape\nReturns:\na Tensor", "source": "juraj-google-style"} {"code": "def __init__(self, enum_values=None, case_sensitive=True):\n \n super(EnumParser, self).__init__()\n self.enum_values = enum_values\n self.case_sensitive = case_sensitive", "docstring": "Initialize EnumParser.\n\nArgs:\nenum_values: Array of values in the enum.\ncase_sensitive: Whether or not the enum is to be case-sensitive.", "source": "juraj-google-style"} {"code": "def GetBlockByHeight(self, height):\n \n hash = self.GetBlockHash(height)\n if hash is not None:\n return self.GetBlockByHash(hash)", "docstring": "Get a block by its height.\nArgs:\nheight(int): the height of the block to retrieve.\n\nReturns:\nneo.Core.Block: block instance.", "source": "juraj-google-style"} {"code": "def clear_executor_errors(self):\n if self._context_handle:\n pywrap_tfe.TFE_ContextClearExecutors(self._context_handle)\n else:\n raise ValueError('Context is not initialized.')", "docstring": "Clear errors in both local executors and remote workers.\n\nAfter receiving errors from remote workers, additional requests on the fly\ncould further taint the status on the remote workers due to the async nature\nof remote execution. 
Calling this method blocks on waiting for all pending\nnodes in remote executors to finish and clear their error statuses.\n\nRaises:\nValueError: if context is not initialized.", "source": "github-repos"} {"code": "def mark_flags_as_required(flag_names, flag_values=_flagvalues.FLAGS):\n for flag_name in flag_names:\n mark_flag_as_required(flag_name, flag_values)", "docstring": "Ensures that flags are not None during program execution.\n\nRecommended usage:\n\nif __name__ == '__main__':\nflags.mark_flags_as_required(['flag1', 'flag2', 'flag3'])\napp.run()\n\nArgs:\nflag_names: Sequence[str], names of the flags.\nflag_values: flags.FlagValues, optional FlagValues instance where the flags\nare defined.\nRaises:\nAttributeError: If any of the flag names has not already been defined as a flag.", "source": "codesearchnet"} {"code": "def load_model(itos_filename, classifier_filename, num_classes):\n itos = pickle.load(Path(itos_filename).open('rb'))\n stoi = collections.defaultdict((lambda : 0), {str(v): int(k) for (k, v) in enumerate(itos)})\n (bptt, em_sz, nh, nl) = (70, 400, 1150, 3)\n dps = (np.array([0.4, 0.5, 0.05, 0.3, 0.4]) * 0.5)\n vs = len(itos)\n model = get_rnn_classifer(bptt, (20 * 70), num_classes, vs, emb_sz=em_sz, n_hid=nh, n_layers=nl, pad_token=1, layers=[(em_sz * 3), 50, num_classes], drops=[dps[4], 0.1], dropouti=dps[0], wdrop=dps[1], dropoute=dps[2], dropouth=dps[3])\n model.load_state_dict(torch.load(classifier_filename, map_location=(lambda storage, loc: storage)))\n model.reset()\n model.eval()\n return (stoi, model)", "docstring": "Load the classifier and int to string mapping\n\nArgs:\nitos_filename (str): The filename of the int to string mapping file (usually called itos.pkl)\nclassifier_filename (str): The filename of the trained classifier\n\nReturns:\nstring to int mapping, trained classifier model", "source": "codesearchnet"} {"code": "def get_rel_pos(self, q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:\n max_rel_dist = int(2 * max(q_size, k_size) - 1)\n rel_pos_resized = F.interpolate(rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode='linear')\n rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)\n q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)\n k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)\n relative_coords = q_coords - k_coords + (k_size - 1) * max(q_size / k_size, 1.0)\n return rel_pos_resized[relative_coords.long()]", "docstring": "Get relative positional embeddings according to the relative positions of\nquery and key sizes.\n\nArgs:\nq_size (int):\nsize of the query.\nk_size (int):\nsize of key k.\nrel_pos (`torch.Tensor`):\nrelative position embeddings (L, channel).\n\nReturns:\nExtracted positional embeddings according to relative positions.", "source": "github-repos"} {"code": "def get_transformation(self, struct1, struct2):\n if self._primitive_cell:\n raise ValueError('get_transformation cannot be used with the primitive cell option')\n (struct1, struct2) = self._process_species((struct1, struct2))\n (s1, s2, fu, s1_supercell) = self._preprocess(struct1, struct2, False)\n ratio = (fu if s1_supercell else (1 / fu))\n if (s1_supercell and (fu > 1)):\n raise ValueError('Struct1 must be the supercell, not the other way around')\n if ((len(s1) * ratio) >= len(s2)):\n match = self._strict_match(s1, s2, fu=fu, s1_supercell=False, use_rms=True, break_on_match=False)\n if (match is None):\n return None\n mapping = [(list(match[4]).index(i) if (i in 
match[4]) else None) for i in range(len(s1))]\n return (match[2], match[3], mapping)\n else:\n match = self._strict_match(s2, s1, fu=fu, s1_supercell=True, use_rms=True, break_on_match=False)\n if (match is None):\n return None\n not_included = list(range((len(s2) * fu)))\n for i in match[4]:\n not_included.remove(i)\n mapping = (list(match[4]) + not_included)\n return (match[2], (- match[3]), mapping)", "docstring": "Returns the supercell transformation, fractional translation vector,\nand a mapping to transform struct2 to be similar to struct1.\n\nArgs:\nstruct1 (Structure): Reference structure\nstruct2 (Structure): Structure to transform.\n\nReturns:\nsupercell (numpy.ndarray(3, 3)): supercell matrix\nvector (numpy.ndarray(3)): fractional translation vector\nmapping (list(int or None)):\nThe first len(struct1) items of the mapping vector are the\nindices of struct1's corresponding sites in struct2 (or None\nif there is no corresponding site), and the other items are\nthe remaining site indices of struct2.", "source": "codesearchnet"} {"code": "def serialize_example(transformed_json_data, features, feature_indices, target_name):\n import six\n import tensorflow as tf\n from trainer import feature_transforms\n line = str(transformed_json_data[target_name][0])\n for (name, info) in feature_indices:\n if (features[name]['transform'] in [feature_transforms.IDENTITY_TRANSFORM, feature_transforms.SCALE_TRANSFORM]):\n line += (' %d:%s' % (info['index_start'], str(transformed_json_data[name][0])))\n elif (features[name]['transform'] in [feature_transforms.ONE_HOT_TRANSFORM, feature_transforms.MULTI_HOT_TRANSFORM]):\n for i in range(info['size']):\n if (i in transformed_json_data[name]):\n line += (' %d:1' % (info['index_start'] + i))\n elif (features[name]['transform'] in [feature_transforms.IMAGE_TRANSFORM]):\n for i in range(info['size']):\n line += (' %d:%s' % ((info['index_start'] + i), str(transformed_json_data[name][i])))\n return line", "docstring": "Makes an instance of data in libsvm format.\n\nArgs:\ntransformed_json_data: dict of transformed data.\nfeatures: features config.\nfeature_indices: output of feature_transforms.get_transformed_feature_indices()\n\nReturns:\nThe text line representation of an instance in libsvm format.", "source": "codesearchnet"} {"code": "def make_input_feature_spec(include_label=True):\n result = {}\n if include_label:\n result['clicked'] = tf.io.FixedLenFeature(shape=[], dtype=tf.int64)\n for name in _INTEGER_COLUMN_NAMES:\n result[name] = tf.io.VarLenFeature(dtype=tf.int64)\n for name in _CATEGORICAL_COLUMN_NAMES:\n result[name] = tf.io.VarLenFeature(dtype=tf.string)\n return result", "docstring": "Input schema definition.\n\nArgs:\ninclude_label: Indicates whether the label feature should be included.\n\nReturns:\nA `Schema` object.", "source": "github-repos"} {"code": "def _add_step(self, step):\n self._closed()\n self.has_workflow_step = (self.has_workflow_step or step.is_workflow)\n self.wf_steps[step.name_in_workflow] = step", "docstring": "Add a step to the workflow.\n\nArgs:\nstep (Step): a step from the steps library.", "source": "codesearchnet"} {"code": "def inspect_file(path):\n with open(path, 'rb') as f:\n (labels, count) = inspect((tx.decode(line) for line in f))\n return (labels, count)", "docstring": "Inspect SDFile structure\n\nReturns:\ntuple: (data label list, number of records)", "source": "codesearchnet"} {"code": "def sign_hash(private_key, hash, hash_algo):\n \n hash_algo = _hash_algorithms[hash_algo]\n return 
get_privatekey(private_key).sign(\n hash,\n padding.PKCS1v15(),\n utils.Prehashed(hash_algo),\n )", "docstring": "Sign the given hash with the given private key.\n\nArgs:\nprivate_key (str): PEM encoded private key\nhash (byte str): hash to sign\nhash_algo (str): name of hash algorithm used\n\nReturns:\nbyte string representing the signature", "source": "juraj-google-style"} {"code": "def process_one_file(options):\n log.info('Process %s => %s', options.input, options.output)\n try:\n ret = check_or_generate_pyi(options)\n except utils.UsageError:\n logging.exception('')\n return 1\n if not options.check:\n if options.pickle_output:\n pyi_output = options.verify_pickle\n else:\n pyi_output = options.output\n if pyi_output:\n _write_pyi_output(options, ret.pyi, pyi_output)\n if options.pickle_output:\n log.info('write pickle %r => %r', options.input, options.output)\n write_pickle(ret.ast, options, ret.context.loader)\n if options.unused_imports_info_files:\n if options.use_rewrite:\n pass\n else:\n cwd = os.getcwd()\n unused_paths = sorted(ret.context.loader.get_unused_imports_map_paths())\n with options.open_function(options.unused_imports_info_files, 'wt', encoding='utf-8') as f:\n for unused_path in unused_paths:\n f.write(f'{os.path.relpath(unused_path, cwd)}\\n')\n exit_status = handle_errors(ret.context.errorlog, options)\n ret.context.program = None\n if options.touch and (not exit_status):\n with options.open_function(options.touch, 'a'):\n os.utime(options.touch, None)\n return exit_status", "docstring": "Check a .py file or generate a .pyi for it, according to options.\n\nArgs:\noptions: config.Options object.\n\nReturns:\nAn error code (0 means no error).", "source": "github-repos"} {"code": "def get_errors(self):\n errors = []\n errors.extend(self._get_signature_errors())\n errors.extend(self._get_additional_errors())\n errors.extend(self._get_entry_errors())\n return (errors if errors else None)", "docstring": "Verify that this MAR file is well formed.\n\nReturns:\nA list of strings describing errors in the MAR file\nNone if this MAR file appears well formed.", "source": "codesearchnet"} {"code": "def from_dict(vpc_config, do_sanitize=False):\n if do_sanitize:\n vpc_config = sanitize(vpc_config)\n if (vpc_config is None):\n return (None, None)\n return (vpc_config[SUBNETS_KEY], vpc_config[SECURITY_GROUP_IDS_KEY])", "docstring": "Extracts subnets and security group ids as lists from a VpcConfig dict\n\nArgs:\nvpc_config (dict): a VpcConfig dict containing 'Subnets' and 'SecurityGroupIds'\ndo_sanitize (bool): whether to sanitize the VpcConfig dict before extracting values\n\nReturns:\nTuple of lists as (subnets, security_group_ids)\nIf vpc_config parameter is None, returns (None, None)\n\nRaises:\nValueError if sanitize enabled and vpc_config is invalid\nKeyError if sanitize disabled and vpc_config is missing key(s)", "source": "codesearchnet"} {"code": "def __call__(self,\n state: Sequence[tf.Tensor],\n timestep: tf.Tensor) -> Sequence[tf.Tensor]:\n \n raise NotImplementedError", "docstring": "Returns action fluents for the current `state` and `timestep`.\n\nArgs:\nstate (Sequence[tf.Tensor]): The current state fluents.\ntimestep (tf.Tensor): The current timestep.\n\nReturns:\nSequence[tf.Tensor]: A tuple of action fluents.", "source": "juraj-google-style"} {"code": "def compute_output_shape(self, input_shape):\n \n input_shape = tf.TensorShape(input_shape).as_list()\n if self.data_format == 'channels_last':\n space = input_shape[1:-1]\n new_space = []\n for i in 
range(len(space)):\n new_dim = tf_layers_util.conv_output_length(\n space[i],\n self.kernel_size[i],\n padding=self.padding,\n stride=self.strides[i],\n dilation=self.dilation_rate[i])\n new_space.append(new_dim)\n return tf.TensorShape([input_shape[0]] + new_space + [self.filters])\n else:\n space = input_shape[2:]\n new_space = []\n for i in range(len(space)):\n new_dim = tf_layers_util.conv_output_length(\n space[i],\n self.kernel_size[i],\n padding=self.padding,\n stride=self.strides[i],\n dilation=self.dilation_rate[i])\n new_space.append(new_dim)\n return tf.TensorShape([input_shape[0], self.filters] + new_space)", "docstring": "Computes the output shape of the layer.\n\nArgs:\ninput_shape: Shape tuple (tuple of integers) or list of shape tuples\n(one per output tensor of the layer). Shape tuples can include None for\nfree dimensions, instead of an integer.\n\nReturns:\noutput_shape: A tuple representing the output shape.", "source": "juraj-google-style"} {"code": "def read_data_to_asp(file: str) -> List[str]:\n \n if file.endswith(\".json\"):\n with open(file) as f:\n data = json.load(f)\n return schema2asp(data2schema(data))\n elif file.endswith(\".csv\"):\n df = pd.read_csv(file)\n df = df.where((pd.notnull(df)), None)\n data = list(df.T.to_dict().values())\n schema = data2schema(data)\n asp = schema2asp(schema)\n return asp\n else:\n raise Exception(\"invalid file type\")", "docstring": "Reads the given JSON file and generates the ASP definition.\nArgs:\nfile: the json data file\nReturns:\nthe asp definition.", "source": "juraj-google-style"} {"code": "def orient_averaged_fixed(tm):\n \n S = np.zeros((2,2), dtype=complex)\n Z = np.zeros((4,4))\n ap = np.linspace(0, 360, tm.n_alpha+1)[:-1]\n aw = 1.0/tm.n_alpha\n\n for alpha in ap:\n for (beta, w) in zip(tm.beta_p, tm.beta_w):\n (S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)\n S += w * S_ang\n Z += w * Z_ang\n\n sw = tm.beta_w.sum()\n \n S *= aw/sw\n Z *= aw/sw\n\n return (S, Z)", "docstring": "Compute the T-matrix using variable orientation scatterers.\n\nThis method uses a fast Gaussian quadrature and is suitable\nfor most use. 
Uses the set particle orientation PDF, ignoring\nthe alpha and beta attributes.\n\nArgs:\ntm: TMatrix (or descendant) instance.\n\nReturns:\nThe amplitude (S) and phase (Z) matrices.", "source": "juraj-google-style"} {"code": "def __init__(self, options):\n \n\n self.queue = Queue(options)\n self.routing = Routing(options)\n self.__options = options\n self.__should_spawn_new_requests = False\n self.__should_stop = False\n self.__stopping = False\n self.__stopped = False\n self.__threads = {}\n self.__lock = threading.Lock()\n\n signal.signal(signal.SIGINT, self.__signal_handler)\n DebugHelper.setup(self.__options)", "docstring": "Constructs a Crawler instance.\n\nArgs:\noptions (:class:`nyawc.Options`): The options to use for the current crawling runtime.", "source": "juraj-google-style"} {"code": "def MultiHeadedAttentionQKV(\n feature_depth, num_heads=8, dropout=0.0, mode='train'):\n \n return combinators.Serial(\n combinators.Parallel(\n combinators.Parallel(\n core.Dense(feature_depth),\n core.Dense(feature_depth),\n core.Dense(feature_depth),\n ),\n combinators.Identity()\n ),\n PureMultiHeadedAttention( \n feature_depth=feature_depth, num_heads=num_heads,\n dropout=dropout, mode=mode),\n core.Dense(feature_depth),\n )", "docstring": "Transformer-style multi-headed attention.\n\nAccepts inputs of the form (q, k, v), mask.\n\nArgs:\nfeature_depth: int: depth of embedding\nnum_heads: int: number of attention heads\ndropout: float: dropout rate\nmode: str: 'train' or 'eval'\n\nReturns:\nMulti-headed self-attention layer.", "source": "juraj-google-style"} {"code": "def add_update_resource_views(self, resource_views):\n \n \n if not isinstance(resource_views, list):\n raise HDXError('ResourceViews should be a list!')\n for resource_view in resource_views:\n self.add_update_resource_view(resource_view)", "docstring": "Add new or update existing resource views in resource with new metadata.\n\nArgs:\nresource_views (List[Union[ResourceView,Dict]]): A list of resource views metadata from ResourceView objects or dictionaries\n\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def FromEvent(cls, service_event):\n \n _, _, name = service_event.key_path.rpartition(\n WindowsService._REGISTRY_KEY_PATH_SEPARATOR)\n service_type = service_event.regvalue.get('Type', '')\n image_path = service_event.regvalue.get('ImagePath', '')\n start_type = service_event.regvalue.get('Start', '')\n service_dll = service_event.regvalue.get('ServiceDll', '')\n object_name = service_event.regvalue.get('ObjectName', '')\n\n if service_event.pathspec:\n source = (service_event.pathspec.location, service_event.key_path)\n else:\n source = ('Unknown', 'Unknown')\n return cls(\n name=name, service_type=service_type, image_path=image_path,\n start_type=start_type, object_name=object_name,\n source=source, service_dll=service_dll)", "docstring": "Creates a service object from an event.\n\nArgs:\nservice_event (EventObject): event to create a new service object from.\n\nReturns:\nWindowsService: service.", "source": "juraj-google-style"} {"code": "def fetch(version='bayestar2017'):\n doi = {'bayestar2015': '10.7910/DVN/40C44C', 'bayestar2017': '10.7910/DVN/LCYHJG'}\n try:\n doi = doi[version]\n except KeyError as err:\n raise ValueError('Version \"{}\" does not exist. 
Valid versions are: {}'.format(version, ', '.join(['\"{}\"'.format(k) for k in doi.keys()])))\n requirements = {'bayestar2015': {'contentType': 'application/x-hdf'}, 'bayestar2017': {'filename': 'bayestar2017.h5'}}[version]\n local_fname = os.path.join(data_dir(), 'bayestar', '{}.h5'.format(version))\n fetch_utils.dataverse_download_doi(doi, local_fname, file_requirements=requirements)", "docstring": "Downloads the specified version of the Bayestar dust map.\n\nArgs:\nversion (Optional[:obj:`str`]): The map version to download. Valid versions are\n:obj:`'bayestar2017'` (Green, Schlafly, Finkbeiner et al. 2018) and\n:obj:`'bayestar2015'` (Green, Schlafly, Finkbeiner et al. 2015). Defaults\nto :obj:`'bayestar2017'`.\n\nRaises:\n:obj:`ValueError`: The requested version of the map does not exist.\n\n:obj:`DownloadError`: Either no matching file was found under the given DOI, or\nthe MD5 sum of the file was not as expected.\n\n:obj:`requests.exceptions.HTTPError`: The given DOI does not exist, or there\nwas a problem connecting to the Dataverse.", "source": "codesearchnet"} {"code": "def _FetchServerCertificate(self):\n if self.server_certificate:\n return True\n response = self.http_manager.OpenServerEndpoint('server.pem', verify_cb=self.VerifyServerPEM)\n if response.Success():\n self.server_certificate = response.data\n return True\n self.timer.SlowPoll()\n return False", "docstring": "Attempts to fetch the server cert.\n\nReturns:\nTrue if we succeed.", "source": "codesearchnet"} {"code": "def create_team(self, name):\n request = self._get_request()\n return request.post(self.TEAM_CREATE_URL, {'name': name})", "docstring": "Creates a new Team\n\nCreates a new Team and makes you a member. You must not currently belong to a team to invoke.\n\nArgs:\n\nname (str): The name of your team\n\nReturns:\nA Team object", "source": "codesearchnet"} {"code": "def get_integer_index(miller_index: bool, round_dp: int=4, verbose: bool=True) -> Tuple[(int, int, int)]:\n miller_index = np.asarray(miller_index)\n miller_index /= min([m for m in miller_index if (m != 0)])\n miller_index /= np.max(np.abs(miller_index))\n md = [Fraction(n).limit_denominator(12).denominator for n in miller_index]\n miller_index *= reduce((lambda x, y: (x * y)), md)\n int_miller_index = np.int_(np.round(miller_index, 1))\n miller_index /= np.abs(reduce(gcd, int_miller_index))\n miller_index = np.array([round(h, round_dp) for h in miller_index])\n int_miller_index = np.int_(np.round(miller_index, 1))\n if (np.any((np.abs((miller_index - int_miller_index)) > 1e-06)) and verbose):\n warnings.warn('Non-integer encountered in Miller index')\n else:\n miller_index = int_miller_index\n miller_index += 0\n\n def n_minus(index):\n return len([h for h in index if (h < 0)])\n if (n_minus(miller_index) > n_minus((miller_index * (- 1)))):\n miller_index *= (- 1)\n if ((sum((miller_index != 0)) == 2) and (n_minus(miller_index) == 1) and (abs(min(miller_index)) > max(miller_index))):\n miller_index *= (- 1)\n return tuple(miller_index)", "docstring": "Attempt to convert a vector of floats to whole numbers.\n\nArgs:\nmiller_index (list of float): A list miller indexes.\nround_dp (int, optional): The number of decimal places to round the\nmiller index to.\nverbose (bool, optional): Whether to print warnings.\n\nReturns:\n(tuple): The Miller index.", "source": "codesearchnet"} {"code": "def extract_distribution(monitoring_info_proto):\n if not is_distribution(monitoring_info_proto):\n raise ValueError('Unsupported type %s' % 
monitoring_info_proto.type)\n return _decode_distribution(coders.VarIntCoder(), monitoring_info_proto.payload)", "docstring": "Returns a tuple of (count, sum, min, max).\n\nArgs:\nproto: The monitoring info for the distribution.", "source": "github-repos"} {"code": "def export_default_instruments(target_folder, source_folder = None, raise_errors = False, verbose=True):\n \n print('export_def_instr called')\n instruments_to_load = get_classes_in_folder(source_folder, Instrument, verbose = True)\n print('instruments to load:')\n print(instruments_to_load)\n\n if verbose:\n print(('attempt to load {:d} instruments: '.format(len(instruments_to_load))))\n loaded_instruments, failed = Instrument.load_and_append(instruments_to_load, raise_errors = raise_errors)\n print('loaded instruments:')\n print(loaded_instruments, failed)\n\n for name, value in loaded_instruments.items():\n filename = os.path.join(target_folder, '{:s}.b26'.format(name))\n\n value.save_b26(filename)\n\n if verbose:\n print('\\n================================================')\n print('================================================')\n print(('saved {:d} instruments, {:d} failed'.format(len(loaded_instruments), len(failed))))\n if failed != {}:\n for error_name, error in failed.items():\n print(('failed to create instruments: ', error_name, error))", "docstring": "tries to instantiate all the instruments that are imported in /instruments/__init__.py\nand saves instruments that could be instantiated into a .b26 file in the folder path\nArgs:\ntarget_folder: target path for .b26 files", "source": "juraj-google-style"} {"code": "def account_states(self, **kwargs):\n \n path = self._get_id_path('account_states')\n\n response = self._GET(path, kwargs)\n self._set_attrs_to_values(response)\n return response", "docstring": "This method lets users get the status of whether or not the movie has\nbeen rated or added to their favourite or watch lists. A valid session\nid is required.\n\nArgs:\nsession_id: see Authentication.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"} {"code": "def load_pos_model(lang='en', version='2'):\n src_dir = 'pos{}'.format(version)\n p = locate_resource(src_dir, lang)\n fh = _open(p)\n return dict(np.load(fh))", "docstring": "Return a part of speech tagger parameters for `lang` and of version `version`\n\nArgs:\nlang (string): language code.\nversion (string): version of the parameters to be used.", "source": "codesearchnet"} {"code": "def _parse_service(service) -> tuple[str, str]:\n if not isinstance(service, str):\n raise ValueError(f'`service` must be a string, but `service` was of type {type(service)}. service={service}')\n if not service:\n raise ValueError('`service` must not be empty')\n parts = service.split('://')\n if len(parts) == 2:\n protocol, address = parts\n elif len(parts) == 1:\n address = parts[0]\n protocol = _pywrap_utils_exp.TF_DATA_DefaultProtocol()\n else:\n raise ValueError(f\"Malformed `service` string has multiple '://': {service}\")\n return (protocol, address)", "docstring": "Converts a tf.data service string into a (protocol, address) tuple.\n\nArgs:\nservice: A string in the format \"protocol://address\" or just \"address\". 
If\nthe string is only an address, the default protocol will be used.\n\nReturns:\nThe (protocol, address) tuple", "source": "github-repos"} {"code": "def pre_fetch(self, feed):\n pass", "docstring": "Pre-fetches all required items to be update into the cache.\n\nThis increases performance for update operations.\n\nArgs:\nfeed: List of feed items to retrieve", "source": "github-repos"} {"code": "def output(self, _filename):\n \n\n for contract in self.slither.contracts_derived:\n txt = \"\\nContract %s\"%contract.name\n table = PrettyTable([\"Function\",\n \"require or assert\"])\n for function in contract.functions:\n require = function.all_slithir_operations()\n require = [ir for ir in require if isinstance(ir, SolidityCall) and ir.function in require_or_assert]\n require = [ir.node for ir in require]\n table.add_row([function.name, self._convert([str(m.expression) for m in set(require)])])\n txt += \"\\n\"+str(table)\n self.info(txt)", "docstring": "_filename is not used\nArgs:\n_filename(string)", "source": "juraj-google-style"} {"code": "def find_elb(name='', env='', region=''):\n \n LOG.info('Find %s ELB in %s [%s].', name, env, region)\n\n url = '{0}/applications/{1}/loadBalancers'.format(API_URL, name)\n response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n assert response.ok\n\n elb_dns = None\n accounts = response.json()\n for account in accounts:\n if account['account'] == env and account['region'] == region:\n elb_dns = account['dnsname']\n break\n else:\n raise SpinnakerElbNotFound('Elb for \"{0}\" in region {1} not found'.format(name, region))\n\n LOG.info('Found: %s', elb_dns)\n return elb_dns", "docstring": "Get an application's AWS elb dns name.\n\nArgs:\nname (str): ELB name\nenv (str): Environment/account of ELB\nregion (str): AWS Region\n\nReturns:\nstr: elb DNS record", "source": "juraj-google-style"} {"code": "def item(self, key):\n return _item.Item(self._name, key, context=self._context)", "docstring": "Retrieves an Item object for the specified key in this bucket.\n\nThe item need not exist.\n\nArgs:\nkey: the key of the item within the bucket.\nReturns:\nAn Item instance representing the specified key.", "source": "codesearchnet"} {"code": "def transform(self, target_type: Type[T], value: F, context: PipelineContext=None) -> T:\n pass", "docstring": "Transforms an object to a new type.\n\nArgs:\ntarget_type: The type to be converted to.\nvalue: The object to be transformed.\ncontext: The context of the transformation (mutable).", "source": "codesearchnet"} {"code": "def MultiDeleteAttributes(self, subjects, attributes, start=None, end=None, sync=True):\n for subject in subjects:\n self.DeleteAttributes(subject, attributes, start=start, end=end, sync=sync)", "docstring": "Remove all specified attributes from a list of subjects.\n\nArgs:\nsubjects: The list of subjects that will have these attributes removed.\nattributes: A list of attributes.\nstart: A timestamp, attributes older than start will not be deleted.\nend: A timestamp, attributes newer than end will not be deleted.\nsync: If true we block until the operation completes.", "source": "codesearchnet"} {"code": "def _ProcessFileEntry(self, mediator, file_entry):\n display_name = mediator.GetDisplayName()\n logger.debug('[ProcessFileEntry] processing file entry: {0:s}'.format(display_name))\n reference_count = mediator.resolver_context.GetFileObjectReferenceCount(file_entry.path_spec)\n try:\n if self._IsMetadataFile(file_entry):\n self._ProcessMetadataFile(mediator, file_entry)\n 
else:\n file_entry_processed = False\n for data_stream in file_entry.data_streams:\n if self._abort:\n break\n if self._CanSkipDataStream(file_entry, data_stream):\n logger.debug('[ProcessFileEntry] Skipping datastream {0:s} for {1:s}: {2:s}'.format(data_stream.name, file_entry.type_indicator, display_name))\n continue\n self._ProcessFileEntryDataStream(mediator, file_entry, data_stream)\n file_entry_processed = True\n if (not file_entry_processed):\n self._ProcessFileEntryDataStream(mediator, file_entry, None)\n finally:\n new_reference_count = mediator.resolver_context.GetFileObjectReferenceCount(file_entry.path_spec)\n if (reference_count != new_reference_count):\n if mediator.resolver_context.ForceRemoveFileObject(file_entry.path_spec):\n logger.warning('File-object not explicitly closed for file: {0:s}'.format(display_name))\n logger.debug('[ProcessFileEntry] done processing file entry: {0:s}'.format(display_name))", "docstring": "Processes a file entry.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\nfile_entry (dfvfs.FileEntry): file entry.", "source": "codesearchnet"} {"code": "def forward(self, spectrogram: torch.FloatTensor):\n batch_size, _, seq_length = spectrogram.shape\n hidden_states = self.input_conv(spectrogram)\n hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)\n for resblock in self.resblocks:\n hidden_states = resblock(hidden_states)\n kernel_hidden_states = self.kernel_conv(hidden_states)\n bias_hidden_states = self.bias_conv(hidden_states)\n kernels = kernel_hidden_states.view(batch_size, self.conv_layers, self.conv_in_channels, self.conv_out_channels, self.conv_kernel_size, seq_length).contiguous()\n biases = bias_hidden_states.view(batch_size, self.conv_layers, self.conv_out_channels, seq_length).contiguous()\n return (kernels, biases)", "docstring": "Maps a conditioning log-mel spectrogram to a tensor of convolutional kernels and biases, for use in location\nvariable convolutional layers. Note that the input spectrogram should have shape (batch_size, input_channels,\nseq_length).\n\nArgs:\nspectrogram (`torch.FloatTensor` of shape `(batch_size, input_channels, seq_length)`):\nTensor containing the log-mel spectrograms.\n\nReturns:\nTuple[`torch.FloatTensor, `torch.FloatTensor`]: tuple of tensors where the first element is the tensor of\nlocation variable convolution kernels of shape `(batch_size, self.conv_layers, self.conv_in_channels,\nself.conv_out_channels, self.conv_kernel_size, seq_length)` and the second element is the tensor of\nlocation variable convolution biases of shape `(batch_size, self.conv_layers. 
self.conv_out_channels,\nseq_length)`.", "source": "github-repos"} {"code": "def find_matching_symlink(path, source):\n\n def to_abs(target):\n if os.path.isabs(target):\n return target\n else:\n return os.path.normpath(os.path.join(path, target))\n abs_source = to_abs(source)\n for name in os.listdir(path):\n linkpath = os.path.join(path, name)\n if os.path.islink:\n source_ = os.readlink(linkpath)\n if (to_abs(source_) == abs_source):\n return name\n return None", "docstring": "Find a symlink under `path` that points at `source`.\n\nIf source is relative, it is considered relative to `path`.\n\nReturns:\nstr: Name of symlink found, or None.", "source": "codesearchnet"} {"code": "def extract_storm_patches(label_grid, data, x_grid, y_grid, times, dx=1, dt=1, patch_radius=16):\n storm_objects = []\n if (len(label_grid.shape) == 3):\n ij_grid = np.indices(label_grid.shape[1:])\n for (t, time) in enumerate(times):\n storm_objects.append([])\n centers = list(center_of_mass(data[t], labels=label_grid[t], index=np.arange(1, (label_grid[t].max() + 1))))\n if (len(centers) > 0):\n for (o, center) in enumerate(centers):\n int_center = np.round(center).astype(int)\n obj_slice_buff = [slice((int_center[0] - patch_radius), (int_center[0] + patch_radius)), slice((int_center[1] - patch_radius), (int_center[1] + patch_radius))]\n storm_objects[(- 1)].append(STObject(data[t][obj_slice_buff], np.where((label_grid[t][obj_slice_buff] == (o + 1)), 1, 0), x_grid[obj_slice_buff], y_grid[obj_slice_buff], ij_grid[0][obj_slice_buff], ij_grid[1][obj_slice_buff], time, time, dx=dx, step=dt))\n if (t > 0):\n dims = storm_objects[(- 1)][(- 1)].timesteps[0].shape\n storm_objects[(- 1)][(- 1)].estimate_motion(time, data[(t - 1)], dims[1], dims[0])\n else:\n ij_grid = np.indices(label_grid.shape)\n storm_objects.append([])\n centers = list(center_of_mass(data, labels=label_grid, index=np.arange(1, (label_grid.max() + 1))))\n if (len(centers) > 0):\n for (o, center) in enumerate(centers):\n int_center = np.round(center).astype(int)\n obj_slice_buff = (slice((int_center[0] - patch_radius), (int_center[0] + patch_radius)), slice((int_center[1] - patch_radius), (int_center[1] + patch_radius)))\n storm_objects[(- 1)].append(STObject(data[obj_slice_buff], np.where((label_grid[obj_slice_buff] == (o + 1)), 1, 0), x_grid[obj_slice_buff], y_grid[obj_slice_buff], ij_grid[0][obj_slice_buff], ij_grid[1][obj_slice_buff], times[0], times[0], dx=dx, step=dt))\n return storm_objects", "docstring": "After storms are labeled, this method extracts boxes of equal size centered on each storm from the grid and places\nthem into STObjects. 
The STObjects contain intensity, location, and shape information about each storm\nat each timestep.\n\nArgs:\nlabel_grid: 2D or 3D array output by label_storm_objects.\ndata: 2D or 3D array used as input to label_storm_objects.\nx_grid: 2D array of x-coordinate data, preferably on a uniform spatial grid with units of length.\ny_grid: 2D array of y-coordinate data.\ntimes: List or array of time values, preferably as integers\ndx: grid spacing in same units as x_grid and y_grid.\ndt: period elapsed between times\npatch_radius: Number of grid points from center of mass to extract\n\nReturns:\nstorm_objects: list of lists containing STObjects identified at each time.", "source": "codesearchnet"} {"code": "def GetPresetsByOperatingSystem(self, operating_system):\n \n preset_definitions = []\n for preset_definition in self._definitions.values():\n for preset_operating_system in preset_definition.operating_systems:\n if preset_operating_system.IsEquivalent(operating_system):\n preset_definitions.append(preset_definition)\n\n return preset_definitions", "docstring": "Retrieves preset definitions for a specific operating system.\n\nArgs:\noperating_system (OperatingSystemArtifact): an operating system artifact\nattribute container.\n\nReturns:\nlist[PresetDefinition]: preset definition that correspond with the\noperating system.", "source": "juraj-google-style"} {"code": "def _create_hunt(self, name, args):\n \n runner_args = self.grr_api.types.CreateHuntRunnerArgs()\n runner_args.description = self.reason\n hunt = self.grr_api.CreateHunt(\n flow_name=name, flow_args=args, hunt_runner_args=runner_args)\n print('{0!s}: Hunt created'.format(hunt.hunt_id))\n self._check_approval_wrapper(hunt, hunt.Start)\n return hunt", "docstring": "Create specified hunt.\n\nArgs:\nname: string containing hunt name.\nargs: proto (*FlowArgs) for type of hunt, as defined in GRR flow proto.\n\nReturns:\nThe newly created GRR hunt object.\n\nRaises:\nValueError: if approval is needed and approvers were not specified.", "source": "juraj-google-style"} {"code": "def _merge_nrows(nrows, static_nrows, value, dtype, validate):\n static_value_nrows = tensor_shape.dimension_at_index(value.shape, 0)\n if isinstance(value, tensor.Tensor):\n value_nrows = array_ops.shape(value, out_type=dtype)[0]\n else:\n value_nrows = value.nrows()\n if nrows is None:\n nrows = value_nrows\n elif static_value_nrows.value is not None and static_nrows.value is not None:\n if not static_value_nrows.is_compatible_with(static_nrows):\n raise ValueError('fields have incompatible nrows')\n nrows = value_nrows\n elif validate:\n nrows = control_flow_ops.with_dependencies([check_ops.assert_equal(nrows, value_nrows, message='fields have incompatible nrows')], nrows)\n return (nrows, static_nrows._merge_with(static_value_nrows))", "docstring": "Merges `nrows` with `nrows(value)`.\n\nChecks that `value` has the expected number of rows (`nrows`), and returns\n`nrows`. 
If `validate` is true, then add validation ops that check that\nthe `nrows` values match.\n\nArgs:\nnrows: scalar integer Tensor.\nstatic_nrows: tf.Dimension: static value of nrows, if known.\nvalue: Tensor or RaggedTensor or StructuredTensor\ndtype: dtype for `nrows`.\nvalidate: bool -- whether to add validation ops.\n\nReturns:\nA tuple `(nrows, static_nrows)`.", "source": "github-repos"} {"code": "def _add_logger_by_name(self, name):\n data = dict(request.forms)\n loc = data.pop('loc', '')\n port = data.pop('port', None)\n conn_type = data.pop('conn_type', None)\n if ((not port) or (not conn_type)):\n e = 'Port and/or conn_type not set'\n raise ValueError(e)\n address = [loc, int(port)]\n if ('rotate_log' in data):\n data['rotate_log'] = (True if (data == 'true') else False)\n if ('rotate_log_delta' in data):\n data['rotate_log_delta'] = int(data['rotate_log_delta'])\n self._logger_manager.add_logger(name, address, conn_type, **data)", "docstring": "Handles POST requests for adding a new logger.\n\nExpects logger configuration to be passed in the request's query string.\nThe logger name is included in the URL and the address components and\nconnection type should be included as well. The loc attribute is\ndefaulted to \"localhost\" when making the socket connection if not\ndefined.\n\nloc = IP / interface\nport = port / protocol\nconn_type = udp or ethernet\n\nRaises:\nValueError:\nif the port or connection type are not supplied.", "source": "codesearchnet"} {"code": "def removeMapIdentity(self, subject, vendorSpecific=None):\n \n response = self.removeMapIdentityResponse(subject, vendorSpecific)\n return self._read_boolean_response(response)", "docstring": "See Also: removeMapIdentityResponse()\n\nArgs:\nsubject:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"} {"code": "def fill_list(self, list, input_list):\n \n for name in input_list:\n \n item = QtGui.QStandardItem(name)\n item.setSelectable(True)\n item.setEditable(False)\n\n list.model().appendRow(item)", "docstring": "fills a tree with nested parameters\nArgs:\ntree: QtGui.QTreeView to fill\nparameters: dictionary or Parameter object which contains the information to use to fill", "source": "juraj-google-style"} {"code": "class Phi4MultimodalVisionEncoder(nn.Module):\n\n def __init__(self, config: Phi4MultimodalVisionConfig):\n super().__init__()\n self.config = config\n self.layers = nn.ModuleList([Phi4MultimodalVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n @can_return_tuple\n def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutput:\n \n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n hidden_states = inputs_embeds\n for encoder_layer in self.layers:\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n return 
BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`Phi4MultimodalVisionEncoderLayer`].\n\nArgs:\nconfig: Phi4MultimodalVisionConfig", "source": "github-repos"} {"code": "def need(self, folder, page=None, perpage=None):\n \n assert isinstance(page, int) or page is None\n assert isinstance(perpage, int) or perpage is None\n return self.get('need', params={'folder': folder,\n 'page': page,\n 'perpage': perpage})", "docstring": "Returns lists of files which are needed by this device in order\nfor it to become in sync.\n\nArgs:\nfolder (str):\npage (int): If defined applies pagination accross the\ncollection of results.\nperpage (int): If defined applies pagination across the\ncollection of results.\n\nReturns:\ndict", "source": "juraj-google-style"} {"code": "def l1_regularizer(weight=1.0, scope=None):\n\n def regularizer(tensor):\n with tf.name_scope(scope, 'L1Regularizer', [tensor]):\n l1_weight = tf.convert_to_tensor(weight, dtype=tensor.dtype.base_dtype, name='weight')\n return tf.multiply(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')\n return regularizer", "docstring": "Define a L1 regularizer.\n\nArgs:\nweight: scale the loss by this factor.\nscope: Optional scope for name_scope.\n\nReturns:\na regularizer function.", "source": "codesearchnet"} {"code": "def size(self, path):\n try:\n return s3io.S3IO(options=self._options).size(path)\n except Exception as e:\n raise BeamIOError('size() operation failed', {path: e})", "docstring": "Get size of path on the FileSystem.\n\nArgs:\npath: string path in question.\n\nReturns: int size of path according to the FileSystem.\n\nRaises:\n``BeamIOError``: if path doesn't exist.", "source": "github-repos"} {"code": "def issued_at(self):\n issued_at = self._issued_at\n if (issued_at is None):\n self._issued_at = int(time.time())\n return self._issued_at", "docstring": "Time when access token was requested, as seconds since epoch.\n\nNote:\nAccessing this property when there wasn't any request attempts\nwill return current time.\n\nReturns:\nint", "source": "codesearchnet"} {"code": "def __init__(self, editor):\n \n self.editor = editor\n self.timer = QTimer(self.editor)\n self.timer.setSingleShot(True)\n self.timer.timeout.connect(self.do_autosave)\n self._enabled = False \n self._interval = self.DEFAULT_AUTOSAVE_INTERVAL", "docstring": "Constructor.\n\nAutosave is disabled after construction and needs to be enabled\nexplicitly if required.\n\nArgs:\neditor (Editor): editor plugin.", "source": "juraj-google-style"} {"code": "def set_learning_phase(value):\n warnings.warn('`tf.keras.backend.set_learning_phase` is deprecated and will be removed after 2020-10-11. 
To update it, simply pass a True/False value to the `training` argument of the `__call__` method of your layer or model.')\n deprecated_internal_set_learning_phase(value)", "docstring": "Sets the learning phase to a fixed value.\n\nThe backend learning phase affects any code that calls\n`backend.learning_phase()`\nIn particular, all Keras built-in layers use the learning phase as the default\nfor the `training` arg to `Layer.__call__`.\n\nUser-written layers and models can achieve the same behavior with code that\nlooks like:\n\n```python\ndef call(self, inputs, training=None):\nif training is None:\ntraining = backend.learning_phase()\n```\n\nArgs:\nvalue: Learning phase value, either 0 or 1 (integers).\n0 = test, 1 = train\n\nRaises:\nValueError: if `value` is neither `0` nor `1`.", "source": "github-repos"} {"code": "def get_all_counters(obj, instance_list=None):\n (counters, instances_avail) = win32pdh.EnumObjectItems(None, None, obj, (- 1), 0)\n if (instance_list is None):\n instance_list = instances_avail\n if (not isinstance(instance_list, list)):\n instance_list = [instance_list]\n counter_list = []\n for counter in counters:\n for instance in instance_list:\n instance = ('*' if (instance.lower() == '_total') else instance)\n counter_list.append((obj, instance, counter))\n else:\n counter_list.append((obj, None, counter))\n return (get_counters(counter_list) if counter_list else {})", "docstring": "Get the values for all counters available to a Counter object\n\nArgs:\n\nobj (str):\nThe name of the counter object. You can get a list of valid names\nusing the ``list_objects`` function\n\ninstance_list (list):\nA list of instances to return. Use this to narrow down the counters\nthat are returned.\n\n.. note::\n``_Total`` is returned as ``*``", "source": "codesearchnet"} {"code": "def from_yaml(cls, yaml_path, filename=None):\n \n if filename:\n \n yaml_path = os.path.join(os.path.dirname(yaml_path), filename)\n assert yaml_path.endswith(\".yaml\"), \\\n \"Expected a/path/to/.yaml, got %r\" % yaml_path\n yamlname = os.path.basename(yaml_path)[:-5]\n log.debug(\"Parsing %s\", yaml_path)\n with open(yaml_path) as f:\n text = f.read()\n \n ds = yaml.load(text, Loader=yaml.RoundTripLoader)\n docstring = None\n sections = []\n for d in ds:\n assert len(d) == 1, \\\n \"Expected section length 1, got %d\" % len(d)\n lineno = d._yaml_line_col.line + 1\n name = list(d)[0]\n sections.append(cls(\n yaml_path, lineno, name, d[name]))\n if name == \"builtin.defines.docstring\":\n docstring = d[name][\"value\"]\n\n return sections, yamlname, docstring", "docstring": "Split a dictionary into parameters controllers parts blocks defines\n\nArgs:\nyaml_path (str): File path to YAML file, or a file in the same dir\nfilename (str): If give, use this filename as the last element in\nthe yaml_path (so yaml_path can be __file__)\n\nReturns:\ntuple: (sections, yamlname, docstring) where sections is a\nlist of created sections", "source": "juraj-google-style"} {"code": "def chebyshev(coefs, time, domain):\n return (Chebyshev(coefs, domain=domain)(time) - (0.5 * coefs[0]))", "docstring": "Evaluate a Chebyshev Polynomial\n\nArgs:\ncoefs (list, np.array): Coefficients defining the polynomial\ntime (int, float): Time where to evaluate the polynomial\ndomain (list, tuple): Domain (or time interval) for which the polynomial is defined: [left, right]\n\nReference: Appendix A in the MSG Level 1.5 Image Data Format Description.", "source": "codesearchnet"} {"code": "def get_num_bytes(self, batch: 
Sequence[torch.Tensor]) -> int:\n return sum((el.element_size() for tensor in batch for el in tensor.values()))", "docstring": "Returns:\nThe number of bytes of data for a batch of dict of Tensors.", "source": "github-repos"} {"code": "def convert_coco_poly_to_mask(segmentations, height: int, width: int, device: torch.device) -> torch.Tensor:\n try:\n from pycocotools import mask as coco_mask\n except ImportError:\n raise ImportError('Pycocotools is not installed in your environment.')\n masks = []\n for polygons in segmentations:\n rles = coco_mask.frPyObjects(polygons, height, width)\n mask = coco_mask.decode(rles)\n if len(mask.shape) < 3:\n mask = mask[..., None]\n mask = torch.as_tensor(mask, dtype=torch.uint8, device=device)\n mask = torch.any(mask, axis=2)\n masks.append(mask)\n if masks:\n masks = torch.stack(masks, axis=0)\n else:\n masks = torch.zeros((0, height, width), dtype=torch.uint8, device=device)\n return masks", "docstring": "Convert a COCO polygon annotation to a mask.\n\nArgs:\nsegmentations (`List[List[float]]`):\nList of polygons, each polygon represented by a list of x-y coordinates.\nheight (`int`):\nHeight of the mask.\nwidth (`int`):\nWidth of the mask.", "source": "github-repos"} {"code": "def __init__(self, path_map: Mapping[str, os.PathLike[str]], expected_input_key_map: Optional[Mapping[str, Collection[str]]]=None):\n self.path_map: Mapping[str, os.PathLike[str]] = path_map\n self.expected_input_key_map: Mapping[str, Collection[str]] = {}\n if expected_input_key_map is not None:\n if set(path_map.keys()) != set(expected_input_key_map.keys()):\n raise KeyError('The `path_map` and `expected_input_key_map` should have the same set of keys.')\n self.expected_input_key_map = expected_input_key_map", "docstring": "Initializes TFRecord representative dataset saver.\n\nArgs:\npath_map: Signature def key -> path mapping. Each path is a TFRecord file\nto which a `RepresentativeDataset` is saved. The signature def keys\nshould be a subset of the `SignatureDef` keys of the\n`representative_dataset` argument of the `save()` call.\nexpected_input_key_map: Signature def key -> expected input keys. 
If set,\nvalidate that the sample has same set of input keys before saving.\n\nRaises:\nKeyError: If path_map and expected_input_key_map have different keys.", "source": "github-repos"} {"code": "def collections(self):\n iterator = self._firestore_api.list_collection_ids(self._database_string, metadata=self._rpc_metadata)\n iterator.client = self\n iterator.item_to_value = _item_to_collection_ref\n return iterator", "docstring": "List top-level collections of the client's database.\n\nReturns:\nSequence[~.firestore_v1beta1.collection.CollectionReference]:\niterator of subcollections of the current document.", "source": "codesearchnet"} {"code": "def add_sample_file(self, sample_path, source, reference, method='', file_format='raw', file_password='', sample_name='', campaign='', confidence='', description='', bucket_list=[]):\n if os.path.isfile(sample_path):\n data = {'api_key': self.api_key, 'username': self.username, 'source': source, 'reference': reference, 'method': method, 'filetype': file_format, 'upload_type': 'file', 'campaign': campaign, 'confidence': confidence, 'description': description, 'bucket_list': ','.join(bucket_list)}\n if (sample_name != ''):\n data['filename'] = sample_name\n with open(sample_path, 'rb') as fdata:\n if file_password:\n data['password'] = file_password\n r = requests.post('{0}/samples/'.format(self.url), data=data, files={'filedata': fdata}, verify=self.verify, proxies=self.proxies)\n if (r.status_code == 200):\n result_data = json.loads(r.text)\n return result_data\n else:\n log.error('Error with status code {0} and message {1}'.format(r.status_code, r.text))\n return None", "docstring": "Adds a file sample. For meta data only use add_sample_meta.\n\nArgs:\nsample_path: The path on disk of the sample to upload\nsource: Source of the information\nreference: A reference where more information can be found\nmethod: The method for obtaining the sample.\nfile_format: Must be raw, zip, or rar.\nfile_password: The password of a zip or rar archived sample\nsample_name: Specify a filename for the sample rather than using\nthe name on disk\ncampaign: An associated campaign\nconfidence: The campaign confidence\ndescription: A text description of the sample\nbucket_list: A list of bucket list items to add\nReturns:\nA JSON sample object or None if there was an error.", "source": "codesearchnet"} {"code": "def find_eq_stress(strains, stresses, tol=1e-10):\n stress_array = np.array(stresses)\n strain_array = np.array(strains)\n eq_stress = stress_array[np.all((abs(strain_array) < tol), axis=(1, 2))]\n if (eq_stress.size != 0):\n all_same = (abs((eq_stress - eq_stress[0])) < 1e-08).all()\n if ((len(eq_stress) > 1) and (not all_same)):\n raise ValueError('Multiple stresses found for equilibrium strain state, please specify equilibrium stress or remove extraneous stresses.')\n eq_stress = eq_stress[0]\n else:\n warnings.warn('No eq state found, returning zero voigt stress')\n eq_stress = Stress(np.zeros((3, 3)))\n return eq_stress", "docstring": "Finds stress corresponding to zero strain state in stress-strain list\n\nArgs:\nstrains (Nx3x3 array-like): array corresponding to strains\nstresses (Nx3x3 array-like): array corresponding to stresses\ntol (float): tolerance to find zero strain state", "source": "codesearchnet"} {"code": "def parse_env(config_schema, env):\n try:\n return {key: item_schema.parse(key, env.get(key)) for (key, item_schema) in config_schema.items()}\n except KeyError as error:\n raise MissingConfigError('Required config not set: 
{}'.format(error.args[0]))", "docstring": "Parse the values from a given environment against a given config schema\n\nArgs:\nconfig_schema: A dict which maps the variable name to a Schema object\nthat describes the requested value.\nenv: A dict which represents the value of each variable in the\nenvironment.", "source": "codesearchnet"} {"code": "def PluginAssets(self, plugin_name):\n \n with self._accumulators_mutex:\n \n items = list(six.iteritems(self._accumulators))\n\n return {run: accum.PluginAssets(plugin_name) for run, accum in items}", "docstring": "Get index of runs and assets for a given plugin.\n\nArgs:\nplugin_name: Name of the plugin we are checking for.\n\nReturns:\nA dictionary that maps from run_name to a list of plugin\nassets for that run.", "source": "juraj-google-style"} {"code": "def verify_signature(amazon_cert: crypto.X509, signature: str, request_body: bytes) -> bool:\n \n signature = base64.b64decode(signature)\n\n try:\n crypto.verify(amazon_cert, signature, request_body, 'sha1')\n result = True\n except crypto.Error:\n result = False\n\n return result", "docstring": "Verifies Alexa request signature.\n\nArgs:\namazon_cert: Pycrypto X509 Amazon certificate.\nsignature: Base64 decoded Alexa request signature from Signature HTTP header.\nrequest_body: full HTTPS request body\nReturns:\nresult: True if verification was successful, False if not.", "source": "juraj-google-style"} {"code": "def trace_region_count(self):\n \n cmd = enums.JLinkTraceCommand.GET_NUM_REGIONS\n data = ctypes.c_uint32(0)\n res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n if (res == 1):\n raise errors.JLinkException('Failed to get trace region count.')\n return data.value", "docstring": "Retrieves a count of the number of available trace regions.\n\nArgs:\nself (JLink): the ``JLink`` instance.\n\nReturns:\nCount of the number of available trace regions.", "source": "juraj-google-style"} {"code": "def ProcessPathSpec(self, mediator, path_spec):\n \n self.last_activity_timestamp = time.time()\n self.processing_status = definitions.STATUS_INDICATOR_RUNNING\n\n file_entry = path_spec_resolver.Resolver.OpenFileEntry(\n path_spec, resolver_context=mediator.resolver_context)\n\n if file_entry is None:\n display_name = mediator.GetDisplayNameForPathSpec(path_spec)\n logger.warning(\n 'Unable to open file entry with path spec: {0:s}'.format(\n display_name))\n self.processing_status = definitions.STATUS_INDICATOR_IDLE\n return\n\n mediator.SetFileEntry(file_entry)\n\n try:\n if file_entry.IsDirectory():\n self._ProcessDirectory(mediator, file_entry)\n self._ProcessFileEntry(mediator, file_entry)\n\n finally:\n mediator.ResetFileEntry()\n\n self.last_activity_timestamp = time.time()\n self.processing_status = definitions.STATUS_INDICATOR_IDLE", "docstring": "Processes a path specification.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\npath_spec (dfvfs.PathSpec): path specification.", "source": "juraj-google-style"} {"code": "def Serialize(self, writer):\n \n super(Block, self).Serialize(writer)\n writer.WriteSerializableArray(self.Transactions)", "docstring": "Serialize full object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"} {"code": "def __init__(self, initialization_list=None):\n \n if initialization_list:\n \n self._trackers = {}\n for value_category_key, description_list in initialization_list.items():\n description = 
EventTrackerDescription._make(description_list)\n self._trackers[value_category_key] = _EventTracker(\n event_count=description.event_count,\n first_timestamp=description.first_timestamp,\n last_timestamp=description.last_timestamp)\n else:\n \n self._trackers = {\n constants.NAN_KEY: _EventTracker(),\n constants.NEG_INF_KEY: _EventTracker(),\n constants.POS_INF_KEY: _EventTracker(),\n }", "docstring": "Stores alert history for a single device, tensor pair.\n\nArgs:\ninitialization_list: (`list`) An optional list parsed from JSON read\nfrom disk. That entity is used to initialize this NumericsAlertHistory.\nUse the create_jsonable_object method of this class to create such an\nobject.", "source": "juraj-google-style"} {"code": "def is_remote_path(filepath):\n if re.match('^(/cns|/cfs|/gcs|/hdfs|/readahead|/placer|/tfhub|.*:\n return True\n return False", "docstring": "Determines if a given filepath indicates a remote location.\n\nThis function checks if the filepath represents a known remote pattern\nsuch as GCS (`/gcs`), CNS (`/cns`), CFS (`/cfs`), HDFS (`/hdfs`), Placer\n(`/placer`), TFHub (`/tfhub`), or a URL (`.*://`).\n\nArgs:\nfilepath (str): The path to be checked.\n\nReturns:\nbool: True if the filepath is a recognized remote path, otherwise False", "source": "github-repos"} {"code": "def _set_median_session_metrics(session_group, aggregation_metric):\n measurements = sorted(_measurements(session_group, aggregation_metric), key=operator.attrgetter('metric_value.value'))\n median_session = measurements[((len(measurements) - 1) \n del session_group.metric_values[:]\n session_group.metric_values.MergeFrom(session_group.sessions[median_session].metric_values)", "docstring": "Sets the metrics for session_group to those of its \"median session\".\n\nThe median session is the session in session_group with the median value\nof the metric given by 'aggregation_metric'. The median is taken over the\nsubset of sessions in the group whose 'aggregation_metric' was measured\nat the largest training step among the sessions in the group.\n\nArgs:\nsession_group: A SessionGroup protobuffer.\naggregation_metric: A MetricName protobuffer.", "source": "codesearchnet"} {"code": "def start_scan(self, active):\n \n try:\n self.bable.start_scan(self._on_device_found, active_scan=active, sync=True)\n except bable_interface.BaBLEException as err:\n \n if self._active_scan != active:\n raise err\n\n self._active_scan = active\n self.scanning = True", "docstring": "Start a scan. Will call self._on_device_found for each device scanned.\nArgs:\nactive (bool): Indicate if it is an active scan (probing for scan response) or not.", "source": "juraj-google-style"} {"code": "def register_type_spec_from_value_converter(type_object, converter_fn, allow_subclass=False):\n _, type_object = tf_decorator.unwrap(type_object)\n _TYPE_CONVERSION_FUNCTION_REGISTRY.append((type_object, converter_fn, allow_subclass))", "docstring": "Registers a function for converting values with a given type to TypeSpecs.\n\nIf multiple registered `type_object`s match a value, then the most recent\nregistration takes precedence. 
Custom converters should not be defined for\n`CompositeTensor`s; use `CompositeTensor._type_spec` instead.\n\nArgs:\ntype_object: A Python `type` object representing the type of values accepted\nby `converter_fn`.\nconverter_fn: A function that takes one argument (an instance of the type\nrepresented by `type_object`) and returns a `TypeSpec`.\nallow_subclass: If true, then use `isinstance(value, type_object)` to check\nfor matches. If false, then use `type(value) is type_object`.", "source": "github-repos"} {"code": "def decode(self, probs, sizes=None):\n \n _, max_probs = torch.max(probs.transpose(0, 1), 2)\n strings = self.convert_to_strings(max_probs.view(max_probs.size(0), max_probs.size(1)), sizes)\n return self.process_strings(strings, remove_repetitions=True)", "docstring": "Returns the argmax decoding given the probability matrix. Removes\nrepeated elements in the sequence, as well as blanks.\n\nArguments:\nprobs: Tensor of character probabilities from the network. Expected shape of seq_length x batch x output_dim\nsizes(optional): Size of each sequence in the mini-batch\nReturns:\nstrings: sequences of the model's best guess for the transcription on inputs", "source": "juraj-google-style"} {"code": "def get_instance_status(self):\n status_url = self._get_url('status_path')\n res = self.rest_client.session.get(status_url)\n _handle_http_errors(res)\n return res.json()", "docstring": "Get the status the instance for this Streaming Analytics service.\n\nReturns:\ndict: JSON response for the instance status operation.", "source": "codesearchnet"} {"code": "def hpo_diseases(username, password, hpo_ids, p_value_treshold=1):\n \n \n try:\n results = query_phenomizer.query(username, password, *hpo_ids)\n diseases = [result for result in results\n if result['p_value'] <= p_value_treshold]\n return diseases\n except SystemExit:\n return None", "docstring": "Return the list of HGNC symbols that match annotated HPO terms.\n\nArgs:\nusername (str): username to use for phenomizer connection\npassword (str): password to use for phenomizer connection\n\nReturns:\nquery_result: a generator of dictionaries on the form\n{\n'p_value': float,\n'disease_source': str,\n'disease_nr': int,\n'gene_symbols': list(str),\n'description': str,\n'raw_line': str\n}", "source": "juraj-google-style"} {"code": "def _build(self, x, prev_state):\n x.get_shape().with_rank(2)\n self._batch_size = x.get_shape().as_list()[0]\n self._dtype = x.dtype\n x_zeros = tf.concat([x, tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)], 1)\n x_ones = tf.concat([x, tf.ones(shape=(self._batch_size, 1), dtype=self._dtype)], 1)\n halting_linear = basic.Linear(name='halting_linear', output_size=1)\n body = functools.partial(self._body, halting_linear=halting_linear, x_ones=x_ones)\n cumul_halting_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)\n iteration_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)\n core_output_size = [x.value for x in self._core.output_size]\n out_init = tf.zeros(shape=((self._batch_size,) + tuple(core_output_size)), dtype=self._dtype)\n cumul_state_init = _nested_zeros_like(prev_state)\n remainder_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)\n (unused_final_x, final_out, unused_final_state, final_cumul_state, unused_final_halting, final_iteration, final_remainder) = tf.while_loop(self._cond, body, [x_zeros, out_init, prev_state, cumul_state_init, cumul_halting_init, iteration_init, remainder_init])\n act_output = basic.Linear(name='act_output_linear', 
output_size=self._output_size)(final_out)\n return ((act_output, (final_iteration, final_remainder)), final_cumul_state)", "docstring": "Connects the core to the graph.\n\nArgs:\nx: Input `Tensor` of shape `(batch_size, input_size)`.\nprev_state: Previous state. This could be a `Tensor`, or a tuple of\n`Tensor`s.\n\nReturns:\nThe tuple `(output, state)` for this core.\n\nRaises:\nValueError: if the `Tensor` `x` does not have rank 2.", "source": "codesearchnet"} {"code": "def flowshow(flow, win_name='', wait_time=0):\n flow = flowread(flow)\n flow_img = flow2rgb(flow)\n imshow(rgb2bgr(flow_img), win_name, wait_time)", "docstring": "Show optical flow.\n\nArgs:\nflow (ndarray or str): The optical flow to be displayed.\nwin_name (str): The window name.\nwait_time (int): Value of waitKey param.", "source": "codesearchnet"} {"code": "def recall_at_k(y_true: List[int], y_pred: List[List[np.ndarray]], k: int):\n num_examples = float(len(y_pred))\n predictions = np.array(y_pred)\n predictions = np.flip(np.argsort(predictions, (- 1)), (- 1))[(:, :k)]\n num_correct = 0\n for el in predictions:\n if (0 in el):\n num_correct += 1\n return (float(num_correct) / num_examples)", "docstring": "Calculates recall at k ranking metric.\n\nArgs:\ny_true: Labels. Not used in the calculation of the metric.\ny_predicted: Predictions.\nEach prediction contains ranking score of all ranking candidates for the particular data sample.\nIt is supposed that the ranking score for the true candidate goes first in the prediction.\n\nReturns:\nRecall at k", "source": "codesearchnet"} {"code": "def today(boo):\n tod = datetime.strptime(datetime.today().date().isoformat().replace('-', ' '), '%Y %m %d')\n if boo:\n return int(str(tod).replace('-', '')[:8])\n else:\n return str(tod)[:10]", "docstring": "Return today's date as either a String or a Number, as specified by the User.\n\nArgs:\nboo: if true, function returns Number (20151230); if false, returns String (\"2015-12-30\")\nReturns:\neither a Number or a string, dependent upon the user's input", "source": "codesearchnet"} {"code": "def builder(structdef_url: str, fhir_context: context.FhirPathContext) -> expressions.Builder:\n structdef = fhir_context.get_structure_definition(structdef_url)\n struct_type = _fhir_path_data_types.StructureDataType.from_proto(structdef)\n return expressions.Builder(_evaluation.RootMessageNode(fhir_context, struct_type), _PRIMITIVE_HANDLER)", "docstring": "Returns a FHIRPath expression builder.\n\nThis gives the caller tab suggestions and early error detection when\nbuilding FHIRPath expressions. 
See the documentation on the returned\nexpressions.Builder for more details.\n\nArgs:\nstructdef_url: the URL of the FHIR StructureDefinition to use.\nfhir_context: a DefinitionLoader used to load FHIR structure definitions and\ndependencies.\nReturns: a builder object to create FHIRPath expressions.", "source": "github-repos"} {"code": "def python_executable(check=True, short=False):\n r\n if not check:\n python_exe = 'python'\n else:\n from os.path import isdir\n python_exe_long = unixpath(sys.executable)\n python_exe = python_exe_long\n if short:\n python_exe_short = basename(python_exe_long)\n found = search_env_paths(python_exe_short, key_list=['PATH'],\n verbose=False)\n found = [f for f in found if not isdir(f)]\n if len(found) > 0:\n if found[0] == python_exe_long:\n \n python_exe = python_exe_short\n return python_exe", "docstring": "r\"\"\"\nArgs:\nshort (bool): (default = False)\n\nReturns:\nstr:\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_cplat import * # NOQA\n>>> short = False\n>>> result = python_executable(short)\n>>> print(result)", "source": "juraj-google-style"} {"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n local_stream = utils.BytearrayStream()\n if self._unique_identifier:\n self._unique_identifier.write(local_stream, kmip_version=kmip_version)\n if self._usage_limits_count:\n self._usage_limits_count.write(local_stream, kmip_version=kmip_version)\n if self._cryptographic_usage_mask:\n self._cryptographic_usage_mask.write(local_stream, kmip_version=kmip_version)\n if self._lease_time:\n self._lease_time.write(local_stream, kmip_version=kmip_version)\n self.length = local_stream.length()\n super(CheckResponsePayload, self).write(output_stream, kmip_version=kmip_version)\n output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the Check response payload to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. 
Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is not defined.", "source": "codesearchnet"} {"code": "def set_role(self, name, value=None, default=False, disable=False):\n \n cmd = self.command_builder('username %s role' % name, value=value,\n default=default, disable=disable)\n return self.configure(cmd)", "docstring": "Configures the user role vale in EOS\n\nArgs:\nname (str): The name of the user to create\n\nvalue (str): The value to configure for the user role\n\ndefault (bool): Configure the user role using the EOS CLI\ndefault command\n\ndisable (bool): Negate the user role using the EOS CLI no command\n\nReturns:\nTrue if the operation was successful otherwise False", "source": "juraj-google-style"} {"code": "def set_forced_variation(self, experiment_key, user_id, variation_key):\n if (not self.is_valid):\n self.logger.error(enums.Errors.INVALID_DATAFILE.format('set_forced_variation'))\n return False\n if (not validator.is_non_empty_string(experiment_key)):\n self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))\n return False\n if (not isinstance(user_id, string_types)):\n self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))\n return False\n return self.config.set_forced_variation(experiment_key, user_id, variation_key)", "docstring": "Force a user into a variation for a given experiment.\n\nArgs:\nexperiment_key: A string key identifying the experiment.\nuser_id: The user ID.\nvariation_key: A string variation key that specifies the variation which the user.\nwill be forced into. If null, then clear the existing experiment-to-variation mapping.\n\nReturns:\nA boolean value that indicates if the set completed successfully.", "source": "codesearchnet"} {"code": "def get_inter_op_parallelism_threads():\n return context.context().inter_op_parallelism_threads", "docstring": "Get number of threads used for parallelism between independent operations.\n\nDetermines the number of threads used by independent non-blocking operations.\n0 means the system picks an appropriate number.\n\nReturns:\nNumber of parallel threads", "source": "github-repos"} {"code": "def read_data_event(self, whence, complete=False, can_flush=False):\n \n return Transition(None, _read_data_handler(whence, self, complete, can_flush))", "docstring": "Creates a transition to a co-routine for retrieving data as bytes.\n\nArgs:\nwhence (Coroutine): The co-routine to return to after the data is satisfied.\ncomplete (Optional[bool]): True if STREAM_END should be emitted if no bytes are read or\navailable; False if INCOMPLETE should be emitted in that case.\ncan_flush (Optional[bool]): True if NEXT may be requested after INCOMPLETE is emitted as a result of this\ndata request.", "source": "juraj-google-style"} {"code": "def _GetIdentifierFromPath(self, parser_mediator):\n \n file_entry = parser_mediator.GetFileEntry()\n path = file_entry.path_spec.location\n file_system = file_entry.GetFileSystem()\n path_segments = file_system.SplitPath(path)\n return path_segments[-2]", "docstring": "Extracts a container or a graph ID from a JSON file's path.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\n\nReturns:\nstr: container or graph identifier.", "source": "juraj-google-style"} {"code": "def if_sqlserver_disable_constraints_triggers(session: SqlASession, tablename: str) -> None:\n with if_sqlserver_disable_constraints(session, tablename):\n with 
if_sqlserver_disable_triggers(session, tablename):\n (yield)", "docstring": "If we're running under SQL Server, disable triggers AND constraints for the\nspecified table while the resource is held.\n\nArgs:\nsession: SQLAlchemy :class:`Session`\ntablename: table name", "source": "codesearchnet"} {"code": "def json(self, include_id=False, date_fmt=None, object_id_fmt=str):\n (has_slots, d) = _get_dict(self)\n _id = self._id\n if (not include_id):\n self._id = None\n object_ids = {k: v for (k, v) in d.items() if isinstance(v, bson.ObjectId)}\n for (k, v) in object_ids.items():\n if (object_id_fmt is None):\n setattr(self, k, None)\n else:\n setattr(self, k, object_id_fmt(v))\n datetimes = {k: v for (k, v) in d.items() if isinstance(v, datetime.datetime)}\n for (k, v) in datetimes.items():\n if (date_fmt is None):\n ts = (time.mktime(v.timetuple()) + (v.microsecond / 1000000.0))\n setattr(self, k, ts)\n else:\n setattr(self, k, v.strftime(date_fmt))\n j = marshal_dict(self, JSON_TYPES, 'json', include_id=include_id, date_fmt=date_fmt, object_id_fmt=object_id_fmt)\n self._id = _id\n for (k, v) in object_ids.items():\n setattr(self, k, v)\n for (k, v) in datetimes.items():\n setattr(self, k, v)\n return j", "docstring": "Helper method to convert to MongoDB documents to JSON\n\nThis includes helpers to convert non-JSON compatible types\nto valid JSON types. HOWEVER, it cannot recurse into nested\nclasses.\n\nArgs:\ninclude_id: bool, True to cast _id to a str,\nFalse to omit from the result\ndate_fmt: str-or-None: None to cast to UNIX timestamp,\nstr (strftime format) to convert to string,\nfor example: '%Y-%m-%d_%H:%M:%S'\nobject_id_fmt: type, Cast the bson.ObjectId's to this format,\nor None to exclude. This only applies to\nObjectId variables other than _id.\nReturns:\ndict", "source": "codesearchnet"} {"code": "def get_minimum_indentation(text):\n lines = text.split('\\n')\n indentations = [get_indentation(line_) for line_ in lines if (len(line_.strip()) > 0)]\n if (len(indentations) == 0):\n return 0\n return min(indentations)", "docstring": "r\"\"\"\nreturns the number of preceding spaces\n\nArgs:\ntext (str): unicode text\n\nReturns:\nint: indentation\n\nCommandLine:\npython -m utool.util_str --exec-get_minimum_indentation --show\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_str import * # NOQA\n>>> import utool as ut\n>>> text = ' foo\\n bar'\n>>> result = get_minimum_indentation(text)\n>>> print(result)\n3", "source": "codesearchnet"} {"code": "def parse(self, text, layers=None):\n \n params = {\n \"text\": text,\n \"key\": self.key,\n }\n\n if layers is not None:\n \n if isinstance(layers, six.string_types):\n params[\"layers\"] = layers\n\n \n elif isinstance(layers, collections.Iterable):\n params[\"layers\"] = \",\".join(layers)\n\n req = requests.get(self.NLU_URL, params=params)\n return req.json()", "docstring": "Parsing passed text to json.\n\nArgs:\ntext: Text to parse.\nlayers (optional): Special fields. 
Only one string\nor iterable object (e.g \"Data\", (\"Data\", \"Fio\")).\nOnly these fields will be returned.\n\n\nReturns:\nThe parsed text into a json object.", "source": "juraj-google-style"} {"code": "def update_panel(self, panel_obj, version=None, date_obj=None):\n \n LOG.info(\"Updating panel %s\", panel_obj['panel_name'])\n \n date = panel_obj['date']\n if version:\n LOG.info(\"Updating version from {0} to version {1}\".format(\n panel_obj['version'], version))\n panel_obj['version'] = version\n \n if date_obj:\n date = date_obj\n else:\n date = date_obj or dt.datetime.now()\n panel_obj['date'] = date\n\n updated_panel = self.panel_collection.find_one_and_replace(\n {'_id': panel_obj['_id']},\n panel_obj,\n return_document=pymongo.ReturnDocument.AFTER\n )\n\n return updated_panel", "docstring": "Replace a existing gene panel with a new one\n\nKeeps the object id\n\nArgs:\npanel_obj(dict)\nversion(float)\ndate_obj(datetime.datetime)\n\nReturns:\nupdated_panel(dict)", "source": "juraj-google-style"} {"code": "def _retrieve_info(self, request):\n info = _metadata.get_service_account_info(request, service_account=self._service_account_email)\n self._service_account_email = info['email']\n self._scopes = info['scopes']", "docstring": "Retrieve information about the service account.\n\nUpdates the scopes and retrieves the full service account email.\n\nArgs:\nrequest (google.auth.transport.Request): The object used to make\nHTTP requests.", "source": "codesearchnet"} {"code": "def _fix_unknown_dimension(self, input_shape, output_shape):\n output_shape = list(output_shape)\n msg = 'total size of new array must be unchanged, input_shape = {}, output_shape = {}'.format(input_shape, output_shape)\n known, unknown = (1, None)\n for index, dim in enumerate(output_shape):\n if dim < 0:\n if unknown is None:\n unknown = index\n else:\n raise ValueError('Can only specify one unknown dimension.')\n else:\n known *= dim\n original = np.prod(input_shape, dtype=int)\n if unknown is not None:\n if known == 0 or original % known != 0:\n raise ValueError(msg)\n output_shape[unknown] = original \n elif original != known:\n raise ValueError(msg)\n return output_shape", "docstring": "Find and replace a missing dimension in an output shape.\n\nThis is a near direct port of the internal Numpy function\n`_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`\n\nArgs:\ninput_shape: Shape of array being reshaped\noutput_shape: Desired shape of the array with at most\na single -1 which indicates a dimension that should be\nderived from the input shape.\n\nReturns:\nThe new output shape with a -1 replaced with its computed value.\n\nRaises:\nValueError: If the total array size of the output_shape is\ndifferent than the input_shape, or more than one unknown dimension\nis specified.", "source": "github-repos"} {"code": "def to_json(self):\n for pool in self._pools:\n if (pool is not None):\n pool.flush(True)\n return {'filehandles': pickle.dumps(self._filehandles)}", "docstring": "Returns writer state to serialize in json.\n\nReturns:\nA json-izable version of the OutputWriter state.", "source": "codesearchnet"} {"code": "def find_elb(name='', env='', region=''):\n LOG.info('Find %s ELB in %s [%s].', name, env, region)\n url = '{0}/applications/{1}/loadBalancers'.format(API_URL, name)\n response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n assert response.ok\n elb_dns = None\n accounts = response.json()\n for account in accounts:\n if ((account['account'] == env) and (account['region'] 
== region)):\n elb_dns = account['dnsname']\n break\n else:\n raise SpinnakerElbNotFound('Elb for \"{0}\" in region {1} not found'.format(name, region))\n LOG.info('Found: %s', elb_dns)\n return elb_dns", "docstring": "Get an application's AWS elb dns name.\n\nArgs:\nname (str): ELB name\nenv (str): Environment/account of ELB\nregion (str): AWS Region\n\nReturns:\nstr: elb DNS record", "source": "codesearchnet"} {"code": "def add(name, beacon_data, **kwargs):\n \n ret = {'comment': 'Failed to add beacon {0}.'.format(name),\n 'result': False}\n\n if name in list_(return_yaml=False, **kwargs):\n ret['comment'] = 'Beacon {0} is already configured.'.format(name)\n return ret\n\n \n \n if any('beacon_module' in key for key in beacon_data):\n res = next(value for value in beacon_data if 'beacon_module' in value)\n beacon_name = res['beacon_module']\n else:\n beacon_name = name\n\n if beacon_name not in list_available(return_yaml=False, **kwargs):\n ret['comment'] = 'Beacon \"{0}\" is not available.'.format(beacon_name)\n return ret\n\n if 'test' in kwargs and kwargs['test']:\n ret['result'] = True\n ret['comment'] = 'Beacon: {0} would be added.'.format(name)\n else:\n try:\n \n \n eventer = salt.utils.event.get_event('minion', opts=__opts__)\n res = __salt__['event.fire']({'name': name,\n 'beacon_data': beacon_data,\n 'func': 'validate_beacon'},\n 'manage_beacons')\n if res:\n event_ret = eventer.get_event(\n tag='/salt/minion/minion_beacon_validation_complete',\n wait=kwargs.get('timeout', 30))\n valid = event_ret['valid']\n vcomment = event_ret['vcomment']\n\n if not valid:\n ret['result'] = False\n ret['comment'] = ('Beacon {0} configuration invalid, '\n 'not adding.\\n{1}'.format(name, vcomment))\n return ret\n\n except KeyError:\n \n \n ret['result'] = False\n ret['comment'] = 'Event module not available. Beacon add failed.'\n return ret\n\n try:\n res = __salt__['event.fire']({'name': name,\n 'beacon_data': beacon_data,\n 'func': 'add'}, 'manage_beacons')\n if res:\n event_ret = eventer.get_event(\n tag='/salt/minion/minion_beacon_add_complete',\n wait=kwargs.get('timeout', 30))\n if event_ret and event_ret['complete']:\n beacons = event_ret['beacons']\n if name in beacons and beacons[name] == beacon_data:\n ret['result'] = True\n ret['comment'] = 'Added beacon: {0}.'.format(name)\n elif event_ret:\n ret['result'] = False\n ret['comment'] = event_ret['comment']\n else:\n ret['result'] = False\n ret['comment'] = 'Did not receive the manage event ' \\\n 'before the timeout of {0}s' \\\n ''.format(kwargs.get('timeout', 30))\n return ret\n except KeyError:\n \n \n ret['result'] = False\n ret['comment'] = 'Event module not available. Beacon add failed.'\n return ret", "docstring": "Add a beacon on the minion\n\nArgs:\n\nname (str):\nName of the beacon to configure\n\nbeacon_data (dict):\nDictionary or list containing configuration for beacon.\n\nReturns:\ndict: Boolean and status message on success or failure of add.\n\nCLI Example:\n\n.. 
code-block:: bash\n\nsalt '*' beacons.add ps \"[{'processes': {'salt-master': 'stopped', 'apache2': 'stopped'}}]\"", "source": "juraj-google-style"} {"code": "def download_to_directory(self, directory, url, basename=None, overwrite=False, subdir=None):\n log = getLogger('ocrd.resolver.download_to_directory')\n log.debug('directory=|%s| url=|%s| basename=|%s| overwrite=|%s| subdir=|%s|', directory, url, basename, overwrite, subdir)\n if (url is None):\n raise Exception(\"'url' must be a string\")\n if (directory is None):\n raise Exception(\"'directory' must be a string\")\n if (basename is None):\n if ((subdir is not None) or (directory and url.startswith(('file:\n basename = url.rsplit('/', 1)[(- 1)]\n else:\n basename = safe_filename(url)\n if (subdir is not None):\n basename = join(subdir, basename)\n outfilename = join(directory, basename)\n if (exists(outfilename) and (not overwrite)):\n log.debug('File already exists and overwrite=False: %s', outfilename)\n return outfilename\n outfiledir = outfilename.rsplit('/', 1)[0]\n if (not isdir(outfiledir)):\n makedirs(outfiledir)\n log.debug(\"Downloading <%s> to '%s'\", url, outfilename)\n if url.startswith('file:\n url = url[len('file:\n if (':\n copyfile(url, outfilename)\n else:\n response = requests.get(url)\n if (response.status_code != 200):\n raise Exception(('Not found: %s (HTTP %d)' % (url, response.status_code)))\n with open(outfilename, 'wb') as outfile:\n outfile.write(response.content)\n return outfilename", "docstring": "Download a file to the workspace.\n\nEarly Shortcut: If url is a file://-URL and that file is already in the directory, keep it there.\n\nIf basename is not given but subdir is, assume user knows what she's doing and use last URL segment as the basename.\nIf basename is not given and no subdir is given, use the alnum characters in the URL as the basename.\n\nArgs:\ndirectory (string): Directory to download files to\nbasename (string, None): basename part of the filename on disk.\nurl (string): URL to download from\noverwrite (boolean): Whether to overwrite existing files with that name\nsubdir (string, None): Subdirectory to create within the directory. 
Think fileGrp.\n\nReturns:\nLocal filename", "source": "codesearchnet"} {"code": "def _create_tpu_topology(core_locations: List[_CoreLocation], num_tasks: int, num_devices_per_task: int) -> topology.Topology:\n assert min([l.x for l in core_locations]) == 0\n assert min([l.y for l in core_locations]) == 0\n assert min([l.z for l in core_locations]) == 0\n assert min([l.core for l in core_locations]) == 0\n x_max = max([l.x for l in core_locations])\n y_max = max([l.y for l in core_locations])\n z_max = max([l.z for l in core_locations])\n core_max = max([l.core for l in core_locations])\n mesh_shape = [x_max + 1, y_max + 1, z_max + 1, core_max + 1]\n device_coordinates = [[l.x, l.y, l.z, l.core] for l in core_locations]\n device_coordinates = numpy_compat.np_asarray(device_coordinates).reshape(num_tasks, num_devices_per_task, 4)\n return topology.Topology(mesh_shape=mesh_shape, device_coordinates=device_coordinates)", "docstring": "Returns a Topology object build from a _CoreLocation list.\n\nArgs:\ncore_locations: A list of _CoreLocation objects sorted first by TF task ID\nand then by per-task device ordinals.\nnum_tasks: The number of TF tasks in the cluster.\nnum_devices_per_task: The number of TPU devices local to each task.", "source": "github-repos"} {"code": "def export_to_tf_tensor(self, x, laid_out_x):\n \n return self.combine_slices(laid_out_x.all_slices, x.shape)", "docstring": "Turn a Tensor into a tf.Tensor.\n\nArgs:\nx: a Tensor\nlaid_out_x: a LaidOutTensor\nReturns:\na tf.Tensor", "source": "juraj-google-style"} {"code": "def _get_record(self, model_class, record_id):\n url = '{host}/{namespace}/{model}/{id}'.format(host=self._host, namespace=self._namespace, model=self._translate_name(model_class.__name__), id=record_id)\n data = self._get_json(url)['data']\n fresh_model = model_class(data['attributes'])\n fresh_model.id = data['id']\n fresh_model.validate()\n if (self._cache is not None):\n self._cache.set_record(model_class.__name__, fresh_model.id, fresh_model)\n return fresh_model", "docstring": "Get a single record from the API.\n\nArgs:\nmodel_class (:class:`cinder_data.model.CinderModel`): A subclass of\n:class:`cinder_data.model.CinderModel` of your chosen model.\nrecord_id (int): The id of the record requested.\n\nReturns:\n:class:`cinder_data.model.CinderModel`: An instance of model_class or None.", "source": "codesearchnet"} {"code": "def _VerifyMethodCall(self):\n expected = self._PopNextMethod()\n while isinstance(expected, MethodGroup):\n (expected, method) = expected.MethodCalled(self)\n if (method is not None):\n return method\n if (expected != self):\n raise UnexpectedMethodCallError(self, expected)\n return expected", "docstring": "Verify the called method is expected.\n\nThis can be an ordered method, or part of an unordered set.\n\nReturns:\nThe expected mock method.\n\nRaises:\nUnexpectedMethodCall if the method called was not expected.", "source": "codesearchnet"} {"code": "def migrate(connection, dsn):\n all_migrations = _get_all_migrations()\n logger.debug('Collected migrations: {}'.format(all_migrations))\n for (version, modname) in all_migrations:\n if (_is_missed(connection, version) and (version <= SCHEMA_VERSION)):\n logger.info('Missed migration: {} migration is missed. 
Migrating...'.format(version))\n module = __import__(modname, fromlist='dummy')\n trans = connection.begin()\n try:\n module.Migration().migrate(connection)\n _update_version(connection, version)\n trans.commit()\n except:\n trans.rollback()\n logger.error(\"Failed to migrate '{}' on {} \".format(version, dsn))\n raise", "docstring": "Collects all migrations and applies missed.\n\nArgs:\nconnection (sqlalchemy connection):", "source": "codesearchnet"} {"code": "def decode_conjure_enum_type(cls, obj, conjure_type):\n if (not (isinstance(obj, str) or (str(type(obj)) == \"\"))):\n raise Exception('Expected to find str type but found {} instead'.format(type(obj)))\n if (obj in conjure_type.__members__):\n return conjure_type[obj]\n else:\n return conjure_type['UNKNOWN']", "docstring": "Decodes json into a conjure enum type.\n\nArgs:\nobj: the json object to decode\nconjure_type: a class object which is the enum type\nwe're decoding into.\nReturns:\nAn instance of enum of type conjure_type.", "source": "codesearchnet"} {"code": "def get_privkey(self, address: AddressHex, password: str) -> PrivateKey:\n address = add_0x_prefix(address).lower()\n if (not self.address_in_keystore(address)):\n raise ValueError(('Keystore file not found for %s' % address))\n with open(self.accounts[address]) as data_file:\n data = json.load(data_file)\n acc = Account(data, password, self.accounts[address])\n return acc.privkey", "docstring": "Find the keystore file for an account, unlock it and get the private key\n\nArgs:\naddress: The Ethereum address for which to find the keyfile in the system\npassword: Mostly for testing purposes. A password can be provided\nas the function argument here. If it's not then the\nuser is interactively queried for one.\nReturns\nThe private key associated with the address", "source": "codesearchnet"} {"code": "def List(self, request, global_params=None):\n config = self.GetMethodConfig('List')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Lists `WorkerPool`s.\n\nArgs:\nrequest: (CloudbuildProjectsLocationsWorkerPoolsListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListWorkerPoolsResponse) The response message.", "source": "github-repos"} {"code": "def create_workspace(self, did, name, version_id=None):\n payload = {'isPublic': True, 'name': name}\n if version_id:\n payload['versionId'] = version_id\n return self._api.request('post', (('/api/documents/d/' + did) + '/workspaces'), body=payload)", "docstring": "Create a workspace in the specified document.\n\nArgs:\n- did (str): the document id of where to create the new workspace\n- name (str): the new name of the copied workspace.\n- version_id (str): the ID of the version to be copied into a new workspace\n\nReturns:\n- requests.Response: Onshape response data", "source": "codesearchnet"} {"code": "def get_resource_from_handle(self, resource_handle, verify_repo=True):\n \n if verify_repo:\n \n \n \n \n if resource_handle.variables.get(\"repository_type\") != self.name():\n raise ResourceError(\"repository_type mismatch - requested %r, \"\n \"repository_type is %r\"\n % (resource_handle.variables[\"repository_type\"],\n self.name()))\n\n if resource_handle.variables.get(\"location\") != self.location:\n raise ResourceError(\"location mismatch - requested %r, \"\n \"repository location is %r \"\n % (resource_handle.variables[\"location\"],\n self.location))\n\n resource = self.pool.get_resource_from_handle(resource_handle)\n 
resource._repository = self\n return resource", "docstring": "Get a resource.\n\nArgs:\nresource_handle (`ResourceHandle`): Handle of the resource.\n\nReturns:\n`PackageRepositoryResource` instance.", "source": "juraj-google-style"} {"code": "def RestrictFeedItemToAdGroup(client, feed_item, adgroup_id):\n feed_item_target_service = client.GetService('FeedItemTargetService', 'v201809')\n ad_group_target = {'xsi_type': 'FeedItemAdGroupTarget', 'feedId': feed_item['feedId'], 'feedItemId': feed_item['feedItemId'], 'adGroupId': adgroup_id}\n operation = {'operator': 'ADD', 'operand': ad_group_target}\n response = feed_item_target_service.mutate([operation])\n new_ad_group_target = response['value'][0]\n print(('Feed item target for feed ID %s and feed item ID %s was created to restrict serving to ad group ID %s' % (new_ad_group_target['feedId'], new_ad_group_target['feedItemId'], new_ad_group_target['adGroupId'])))", "docstring": "Restricts the feed item to an ad group.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed_item: The feed item.\nadgroup_id: The ad group ID.", "source": "codesearchnet"} {"code": "def RegisterOutputs(cls, output_classes, disabled=False):\n \n for output_class in output_classes:\n cls.RegisterOutput(output_class, disabled)", "docstring": "Registers output classes.\n\nThe output classes are identified based on their NAME attribute.\n\nArgs:\noutput_classes (list[type]): output module classes.\ndisabled (Optional[bool]): True if the output module is disabled due to\nthe module not loading correctly or not.\n\nRaises:\nKeyError: if output class is already set for the corresponding name.", "source": "juraj-google-style"} {"code": "def Git(repository, directory, rev=None, prefix=None, shallow_clone=True):\n \n repository_loc = str(prefix)\n if prefix is None:\n repository_loc = str(CFG[\"tmp_dir\"])\n\n from benchbuild.utils.cmd import git\n\n src_dir = local.path(repository_loc) / directory\n if not source_required(src_dir):\n Copy(src_dir, \".\")\n return\n\n extra_param = []\n if shallow_clone:\n extra_param.append(\"--depth\")\n extra_param.append(\"1\")\n\n git(\"clone\", extra_param, repository, src_dir)\n if rev:\n with local.cwd(src_dir):\n git(\"checkout\", rev)\n\n update_hash(src_dir)\n Copy(src_dir, \".\")\n return repository_loc", "docstring": "Get a clone of the given repo\n\nArgs:\nrepository (str): Git URL of the SOURCE repo.\ndirectory (str): Name of the repo folder on disk.\ntgt_root (str): TARGET folder for the git repo.\nDefaults to ``CFG[\"tmpdir\"]``\nshallow_clone (bool): Only clone the repository shallow\nDefaults to true", "source": "juraj-google-style"} {"code": "def GetAttribute(self, identifier):\n \n if not self._is_parsed:\n self._Parse()\n self._is_parsed = True\n\n if identifier not in self._attributes:\n return None\n\n return self._attributes[identifier]", "docstring": "Retrieves a specific attribute.\n\nArgs:\nidentifier (str): identifier of the attribute within the volume.\n\nReturns:\nVolumeAttribute: volume attribute or None if not available.", "source": "juraj-google-style"} {"code": "def RefreshResumableUploadState(self):\n if (self.strategy != RESUMABLE_UPLOAD):\n return\n self.EnsureInitialized()\n refresh_request = http_wrapper.Request(url=self.url, http_method='PUT', headers={'Content-Range': 'bytes */*'})\n refresh_response = http_wrapper.MakeRequest(self.http, refresh_request, redirections=0, retries=self.num_retries)\n range_header = self._GetRangeHeaderFromResponse(refresh_response)\n if (refresh_response.status_code in 
(http_client.OK, http_client.CREATED)):\n self.__complete = True\n self.__progress = self.total_size\n self.stream.seek(self.progress)\n self.__final_response = refresh_response\n elif (refresh_response.status_code == http_wrapper.RESUME_INCOMPLETE):\n if (range_header is None):\n self.__progress = 0\n else:\n self.__progress = (self.__GetLastByte(range_header) + 1)\n self.stream.seek(self.progress)\n else:\n raise exceptions.HttpError.FromResponse(refresh_response)", "docstring": "Talk to the server and refresh the state of this resumable upload.\n\nReturns:\nResponse if the upload is complete.", "source": "codesearchnet"} {"code": "def copy_table(self, src, dst):\n self.create_table_from(dst, src)\n self.execute('INSERT INTO {dst} SELECT * FROM {src}'.format(dst=dst, src=src))\n self.commit()", "docstring": "Create a carbon copy of the source table.\n\nArguments:\n\nsrc (str): The name of the table to copy.\ndst (str): The name of the target duplicate table.\n\nRaises:\n\nsql.OperationalError: If source table does not exist.", "source": "codesearchnet"} {"code": "def get_full_path(path):\n if path_utils.isabs(path):\n return path\n else:\n return path_utils.join(_pytype_source_dir(), path)", "docstring": "Full path to a file or directory within the pytype source tree.\n\nArguments:\npath: An absolute or relative path.\n\nReturns:\npath for absolute paths.\nfull path resolved relative to pytype/ for relative paths.", "source": "github-repos"} {"code": "def decode_schedule(string):\n \n splits = string.split()\n steps = [int(x[1:]) for x in splits[1:] if x[0] == '@']\n pmfs = np.reshape(\n [float(x) for x in splits[1:] if x[0] != '@'], [len(steps), -1])\n return splits[0], tuplize(steps), tuplize(pmfs)", "docstring": "Decodes a string into a schedule tuple.\n\nArgs:\nstring: The string encoding of a schedule tuple.\n\nReturns:\nA schedule tuple, see encode_schedule for details.", "source": "juraj-google-style"} {"code": "def get_representations_of_kind(kind, start=None, end=None):\n \n q = Property.query(ancestor=Property.key_for_kind(kind))\n if start is not None and start != '':\n q = q.filter(Property.key >= Property.key_for_property(kind, start))\n if end is not None:\n if end == '':\n return {}\n q = q.filter(Property.key < Property.key_for_property(kind, end))\n\n result = {}\n for property in q:\n result[property.property_name] = property.property_representation\n\n return result", "docstring": "Return all representations of properties of kind in the specified range.\n\nNOTE: This function does not return unindexed properties.\n\nArgs:\nkind: name of kind whose properties you want.\nstart: only return properties >= start if start is not None.\nend: only return properties < end if end is not None.\n\nReturns:\nA dictionary mapping property names to its list of representations.", "source": "juraj-google-style"} {"code": "def _update_version(connection, version):\n if (connection.engine.name == 'sqlite'):\n connection.execute('PRAGMA user_version = {}'.format(version))\n elif (connection.engine.name == 'postgresql'):\n connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_SCHEMA_NAME)))\n connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_PARTITION_SCHEMA_NAME)))\n connection.execute('CREATE TABLE IF NOT EXISTS {}.user_version(version INTEGER NOT NULL);'.format(POSTGRES_SCHEMA_NAME))\n if connection.execute('SELECT * FROM {}.user_version;'.format(POSTGRES_SCHEMA_NAME)).fetchone():\n connection.execute('UPDATE {}.user_version SET version = 
{};'.format(POSTGRES_SCHEMA_NAME, version))\n else:\n connection.execute('INSERT INTO {}.user_version (version) VALUES ({})'.format(POSTGRES_SCHEMA_NAME, version))\n else:\n raise DatabaseMissingError('Do not know how to migrate {} engine.'.format(connection.engine.driver))", "docstring": "Updates version in the db to the given version.\n\nArgs:\nconnection (sqlalchemy connection): sqlalchemy session where to update version.\nversion (int): version of the migration.", "source": "codesearchnet"} {"code": "def AddScanNode(self, path_spec, parent_scan_node):\n scan_node = self._scan_nodes.get(path_spec, None)\n if scan_node:\n raise KeyError('Scan node already exists.')\n scan_node = SourceScanNode(path_spec)\n if parent_scan_node:\n if (parent_scan_node.path_spec not in self._scan_nodes):\n raise RuntimeError('Parent scan node not present.')\n scan_node.parent_node = parent_scan_node\n parent_scan_node.sub_nodes.append(scan_node)\n if (not self._root_path_spec):\n self._root_path_spec = path_spec\n self._scan_nodes[path_spec] = scan_node\n if path_spec.IsFileSystem():\n self._file_system_scan_nodes[path_spec] = scan_node\n self.updated = True\n return scan_node", "docstring": "Adds a scan node for a certain path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nparent_scan_node (SourceScanNode): parent scan node or None.\n\nReturns:\nSourceScanNode: scan node.\n\nRaises:\nKeyError: if the scan node already exists.\nRuntimeError: if the parent scan node is not present.", "source": "codesearchnet"} {"code": "def _combine_multiple_returns(self, signatures: 'list[tuple[PyTDSignature, dict[str, cfg.Variable], matcher.GoodMatch]]'):\n options = []\n for sig, _, _ in signatures:\n t = sig.pytd_sig.return_type\n params = pytd_utils.GetTypeParameters(t)\n if params:\n replacement = {}\n for param_type in params:\n replacement[param_type] = pytd.AnythingType()\n replace_visitor = visitors.ReplaceTypeParameters(replacement)\n t = t.Visit(replace_visitor)\n options.append(t)\n if len(set(options)) == 1:\n return options[0]\n ret_type = optimize.Optimize(pytd_utils.JoinTypes(options))\n return ret_type.Visit(visitors.ReplaceUnionsWithAny())", "docstring": "Combines multiple return types.\n\nArgs:\nsignatures: The candidate signatures.\n\nReturns:\nThe combined return type.", "source": "github-repos"} {"code": "def remove_time_limit_wrapper(env):\n \n if isinstance(env, gym.wrappers.TimeLimit):\n env = env.env\n env_ = env\n while isinstance(env_, gym.Wrapper):\n if isinstance(env_, gym.wrappers.TimeLimit):\n raise ValueError(\"Can remove only top-level TimeLimit gym.Wrapper.\")\n env_ = env_.env\n return env", "docstring": "Removes top level TimeLimit Wrapper.\n\nRemoves TimeLimit Wrapper from top level if exists, throws error if any other\nTimeLimit Wrapper is present in stack.\n\nArgs:\nenv: environment\n\nReturns:\nthe env with removed time limit wrapper.", "source": "juraj-google-style"} {"code": "def has_overlap(self, interval: 'Interval') -> bool:\n \n if self.begin < interval.end and interval.begin < self.end:\n return True\n return False", "docstring": "Check if self has overlap with `interval`.\n\nArgs:\ninterval: interval to be examined\n\nReturns:\nbool: True if self has overlap with `interval` otherwise False", "source": "juraj-google-style"} {"code": "def _Open(self, path_spec=None, mode='rb'):\n \n if not path_spec:\n raise ValueError('Missing path specification.')\n\n if path_spec.HasParent():\n raise errors.PathSpecError('Unsupported path specification with parent.')\n\n 
location = getattr(path_spec, 'location', None)\n\n if location is None:\n raise errors.PathSpecError('Path specification missing location.')\n\n \n \n try:\n is_device = pysmdev.check_device(location)\n except IOError as exception:\n \n \n \n \n\n \n exception_string = str(exception)\n if not isinstance(exception_string, py2to3.UNICODE_TYPE):\n exception_string = py2to3.UNICODE_TYPE(\n exception_string, errors='replace')\n\n if ' access denied ' in exception_string:\n raise errors.AccessError(\n 'Access denied to file: {0:s} with error: {1!s}'.format(\n location, exception_string))\n is_device = False\n\n if not is_device:\n try:\n stat_info = os.stat(location)\n except OSError as exception:\n raise IOError('Unable to open file with error: {0!s}.'.format(\n exception))\n\n \n \n if stat.S_ISCHR(stat_info.st_mode) or stat.S_ISBLK(stat_info.st_mode):\n is_device = True\n\n if is_device:\n self._file_object = pysmdev.handle()\n self._file_object.open(location, mode=mode)\n self._size = self._file_object.media_size\n\n else:\n self._file_object = open(location, mode=mode)\n self._size = stat_info.st_size", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nmode (Optional[str]): file access mode.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file-like object could not be opened.\nOSError: if the file-like object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"} {"code": "def get_reaction(self, reactants, products):\n return self._make_request('/reaction', payload={'reactants[]': reactants, 'products[]': products}, mp_decode=False)", "docstring": "Gets a reaction from the Materials Project.\n\nArgs:\nreactants ([str]): List of formulas\nproducts ([str]): List of formulas\n\nReturns:\nrxn", "source": "codesearchnet"} {"code": "def get_placeholders(arg, check_duplicates=False):\n \n placeholders = []\n last_match = None\n arg = normalize_placeholders(arg)\n for cur_match in re.finditer(r'\\s*{{|}}\\s*', arg):\n matched_text = cur_match.group().strip()\n if not last_match and matched_text == '{{':\n last_match = cur_match\n continue\n\n last_matched_text = '' if not last_match else last_match.group().strip()\n \n if (not last_matched_text and matched_text == '}}') or (last_matched_text == '{{' and matched_text != '}}'):\n raise CLIError(PLACEHOLDER_BRACKETS_ERROR.format(arg))\n elif last_matched_text == '{{' and matched_text == '}}':\n \n start_index, end_index = last_match.span()[1], cur_match.span()[0]\n placeholders.append(arg[start_index: end_index].strip())\n last_match = None\n\n \n if last_match:\n raise CLIError(PLACEHOLDER_BRACKETS_ERROR.format(arg))\n\n \n if check_duplicates and len(placeholders) != len(set(placeholders)):\n raise CLIError(DUPLICATED_PLACEHOLDER_ERROR.format(arg))\n\n return placeholders", "docstring": "Get all the placeholders' names in order.\nUse the regex below to locate all the opening ({{) and closing brackets (}}).\nAfter that, extract \"stuff\" inside the brackets.\n\nArgs:\narg: The word which this function performs searching on.\ncheck_duplicates: True if we want to check for duplicated positional arguments.\n\nReturns:\nA list of positional arguments in order.", "source": "juraj-google-style"} {"code": "def put(self, key, value):\n key = self._service_key(key)\n self._service_ops['put'](key, value)", "docstring": "Stores the object 
`value` named by `key` in `service`.\n\nArgs:\nkey: Key naming `value`.\nvalue: the object to store.", "source": "codesearchnet"} {"code": "def take_profit_replace(self, accountID, orderID, **kwargs):\n return self.replace(accountID, orderID, order=TakeProfitOrderRequest(**kwargs))", "docstring": "Shortcut to replace a pending Take Profit Order in an Account\n\nArgs:\naccountID : The ID of the Account\norderID : The ID of the Take Profit Order to replace\nkwargs : The arguments to create a TakeProfitOrderRequest\n\nReturns:\nv20.response.Response containing the results from submitting\nthe request", "source": "codesearchnet"} {"code": "def create_position_ids_from_inputs_embeds(self, inputs_embeds):\n input_shape = inputs_embeds.size()[:-1]\n sequence_length = input_shape[1]\n position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)\n return position_ids.unsqueeze(0).expand(input_shape)", "docstring": "We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.\n\nArgs:\ninputs_embeds: torch.Tensor\n\nReturns: torch.Tensor", "source": "github-repos"} {"code": "def __init__(self, source, strict=True):\n \n self._visited_top_module = False\n if not source:\n raise ValueError('The source code of the tree is required.')\n self._source = source\n self._strict = strict\n\n \n \n self._current_lineno = None \n self._current_offset = None \n\n super(LanguageFence, self).__init__()", "docstring": "Creates a LanguageFence.\n\nArgs:\nsource: String, the source code of the AST that will be verified.\nstrict: Boolean, set to False to allow unsafe constructs.\nRaises:\nValueError: if source code has not been supplied.", "source": "juraj-google-style"} {"code": "class PhrasalConstraint(Constraint):\n\n def __init__(self, token_ids: List[int]):\n super(Constraint, self).__init__()\n if not isinstance(token_ids, list) or len(token_ids) == 0:\n raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.')\n if any((not isinstance(token_id, int) or token_id < 0 for token_id in token_ids)):\n raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.')\n self.token_ids = token_ids\n self.seqlen = len(self.token_ids)\n self.fulfilled_idx = -1\n self.completed = False\n\n def advance(self):\n if self.completed:\n return None\n return self.token_ids[self.fulfilled_idx + 1]\n\n def does_advance(self, token_id: int):\n if not isinstance(token_id, int):\n raise TypeError(f'`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}')\n if self.completed:\n return False\n return token_id == self.token_ids[self.fulfilled_idx + 1]\n\n def update(self, token_id: int):\n if not isinstance(token_id, int):\n raise TypeError(f'`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}')\n stepped = False\n completed = False\n reset = False\n if self.does_advance(token_id):\n self.fulfilled_idx += 1\n stepped = True\n if self.fulfilled_idx == self.seqlen - 1:\n completed = True\n self.completed = completed\n else:\n reset = True\n self.reset()\n return (stepped, completed, reset)\n\n def reset(self):\n self.completed = False\n self.fulfilled_idx = 0\n\n def remaining(self):\n return self.seqlen - (self.fulfilled_idx + 1)\n\n def copy(self, stateful=False):\n new_constraint = PhrasalConstraint(self.token_ids)\n if stateful:\n new_constraint.seq_len = self.seqlen\n new_constraint.fulfilled_idx = 
self.fulfilled_idx\n new_constraint.completed = self.completed\n return new_constraint", "docstring": "[`Constraint`] enforcing that an ordered sequence of tokens is included in the output.\n\nArgs:\ntoken_ids (`List[int]`):\nThe id of the token that must be generated by the output.", "source": "github-repos"} {"code": "def create_lock_key(self, device, new_device_json, id_override=None, type_override=None):\n object_id = (id_override or device.object_id())\n object_type = (type_override or device.object_type())\n url_string = '{}/{}s/{}/keys'.format(self.BASE_URL, object_type, object_id)\n try:\n arequest = requests.post(url_string, data=json.dumps(new_device_json), headers=API_HEADERS)\n response_json = arequest.json()\n return response_json\n except requests.exceptions.RequestException:\n return None", "docstring": "Create a new lock key code.\n\nArgs:\ndevice (WinkDevice): The device the change is being requested for.\nnew_device_json (String): The JSON string required to create the device.\nid_override (String, optional): A device ID used to override the\npassed in device's ID. Used to make changes on sub-devices.\ni.e. Outlet in a Powerstrip. The Parent device's ID.\ntype_override (String, optional): Used to override the device type\nwhen a device inherits from a device other than WinkDevice.\nReturns:\nresponse_json (Dict): The API's response in dictionary format", "source": "codesearchnet"} {"code": "def recover_cfg(self, start=None, end=None, symbols=None, callback=None, arch_mode=None):\n \n \n if arch_mode is None:\n arch_mode = self.binary.architecture_mode\n\n \n self._load(arch_mode=arch_mode)\n\n \n start = start if start else self.binary.entry_point\n\n cfg, _ = self._recover_cfg(start=start, end=end, symbols=symbols, callback=callback)\n\n return cfg", "docstring": "Recover CFG.\n\nArgs:\nstart (int): Start address.\nend (int): End address.\nsymbols (dict): Symbol table.\ncallback (function): A callback function which is called after each successfully recovered CFG.\narch_mode (int): Architecture mode.\n\nReturns:\nControlFlowGraph: A CFG.", "source": "juraj-google-style"} {"code": "async def join(\n self,\n *,\n remote_addrs: Iterable[str],\n listen_addr: str = \"0.0.0.0:2377\",\n join_token: str,\n advertise_addr: str = None,\n data_path_addr: str = None\n ) -> bool:\n \n\n data = {\n \"RemoteAddrs\": list(remote_addrs),\n \"JoinToken\": join_token,\n \"ListenAddr\": listen_addr,\n \"AdvertiseAddr\": advertise_addr,\n \"DataPathAddr\": data_path_addr,\n }\n\n await self.docker._query(\"swarm/join\", method=\"POST\", data=clean_map(data))\n\n return True", "docstring": "Join a swarm.\n\nArgs:\nlisten_addr\nUsed for inter-manager communication\n\nadvertise_addr\nExternally reachable address advertised to other nodes.\n\ndata_path_addr\nAddress or interface to use for data path traffic.\n\nremote_addrs\nAddresses of manager nodes already participating in the swarm.\n\njoin_token\nSecret token for joining this swarm.", "source": "juraj-google-style"} {"code": "def deleted(self, deleted_since, filters=None, params=None):\n return self.tc_requests.deleted(self.api_type, self.api_sub_type, deleted_since, owner=self.owner, filters=filters, params=params)", "docstring": "Gets the indicators deleted.\n\nArgs:\nparams:\nfilters:\ndeleted_since: Date since its been deleted", "source": "codesearchnet"} {"code": "def _get_js_files(cls, extra_files):\n \n return cls._get_media_files(\n packager=Packager(),\n media_packages=getattr(cls, 'js_packages', {}),\n media_type='js',\n 
extra_files=extra_files)", "docstring": "Return all JavaScript files from the Media class.\n\nArgs:\nextra_files (list):\nThe contents of the Media class's original :py:attr:`js`\nattribute, if one was provided.\n\nReturns:\nlist:\nThe JavaScript files to return for the :py:attr:`js` attribute.", "source": "juraj-google-style"} {"code": "def _decode_quadratic_biases(quadratic_string, edgelist):\n quadratic_bytes = base64.b64decode(quadratic_string)\n return {tuple(edge): bias for (edge, bias) in zip(edgelist, struct.unpack(('<' + ('d' * (len(quadratic_bytes) // 8))), quadratic_bytes))}", "docstring": "Inverse of _serialize_quadratic_biases\n\nArgs:\nquadratic_string (str) : base 64 encoded string of little\nendian 8 byte floats, one for each of the edges.\nedgelist (list): a list of edges of the form [(node1, node2), ...].\n\nReturns:\ndict: J. A dict of the form {edge1: bias1, ...} where each\nedge is of the form (node1, node2).\n\nExample:\n>>> _decode_quadratic_biases('AAAAAAAA8L8AAAAAAADwP5qZmZmZmdk/',\n... [(0, 1), (1, 2), (0, 2)])\n{(0, 1): -1.0, (0, 2): 0.4, (1, 2): 1.0}", "source": "codesearchnet"} {"code": "def convert_to_rgb(image: ImageInput) -> ImageInput:\n requires_backends(convert_to_rgb, ['vision'])\n if not isinstance(image, PIL.Image.Image):\n return image\n if image.mode == 'RGB':\n return image\n image = image.convert('RGBA')\n new_image = PIL.Image.new('RGBA', image.size, 'WHITE')\n new_image.paste(image, (0, 0), image)\n new_image = new_image.convert('RGB')\n return new_image", "docstring": "Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image\nas is.\nArgs:\nimage (Image):\nThe image to convert.", "source": "github-repos"} {"code": "def use_external_data_format(num_parameters: int) -> bool:\n return compute_serialized_parameters_size(num_parameters, ParameterFormat.Float) >= EXTERNAL_DATA_FORMAT_SIZE_LIMIT", "docstring": "Flag indicating if the model requires using external data format\n\nArgs:\nnum_parameters: Number of parameter on the model\n\nReturns:\nTrue if model.num_parameters() * size_of(float32) >= 2Gb False otherwise", "source": "github-repos"} {"code": "def belspec_yaml2json(yaml_fn: str, json_fn: str) -> str:\n try:\n spec_dict = yaml.load(open(yaml_fn, 'r').read(), Loader=yaml.SafeLoader)\n spec_dict['admin'] = {}\n spec_dict['admin']['version_underscored'] = spec_dict['version'].replace('.', '_')\n spec_dict['admin']['parser_fn'] = yaml_fn.replace('.yaml', '_parser.py')\n add_relations(spec_dict)\n add_functions(spec_dict)\n add_namespaces(spec_dict)\n enhance_function_signatures(spec_dict)\n add_function_signature_help(spec_dict)\n with open(json_fn, 'w') as f:\n json.dump(spec_dict, f)\n except Exception as e:\n log.error('Warning: BEL Specification {yaml_fn} could not be read. Cannot proceed.'.format(yaml_fn))\n sys.exit()\n return spec_dict['version']", "docstring": "Enhance BEL specification and save as JSON file\n\nLoad all BEL Specification YAML files and convert to JSON files\nafter enhancing them. 
Also create a bel_versions.json file with\nall available BEL versions for fast loading.\n\nArgs:\nyaml_fn: original YAML version of BEL Spec\njson_fn: enhanced JSON version of BEL Spec\nReturns:\nstr: version of BEL Spec", "source": "codesearchnet"} {"code": "def GetMessages(self, formatter_mediator, event):\n \n if self.DATA_TYPE != event.data_type:\n raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n event.data_type))\n\n event_values = event.CopyToDict()\n\n trigger_type = event_values.get('trigger_type', None)\n if trigger_type is not None:\n event_values['trigger_type'] = self._TRIGGER_TYPES.get(\n trigger_type, '0x{0:04x}'.format(trigger_type))\n\n return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"} {"code": "def etherscan_verify_contract(\n chain_id: int,\n apikey: str,\n source_module: DeploymentModule,\n contract_name: str,\n):\n \n etherscan_api = api_of_chain_id[chain_id]\n deployment_info = get_contracts_deployment_info(\n chain_id=chain_id,\n module=source_module,\n )\n if deployment_info is None:\n raise FileNotFoundError(\n f'Deployment file not found for chain_id={chain_id} and module={source_module}',\n )\n contract_manager = ContractManager(contracts_precompiled_path())\n\n data = post_data_for_etherscan_verification(\n apikey=apikey,\n deployment_info=deployment_info['contracts'][contract_name],\n source=join_sources(source_module=source_module, contract_name=contract_name),\n contract_name=contract_name,\n metadata=json.loads(contract_manager.contracts[contract_name]['metadata']),\n constructor_args=get_constructor_args(\n deployment_info=deployment_info,\n contract_name=contract_name,\n contract_manager=contract_manager,\n ),\n )\n response = requests.post(etherscan_api, data=data)\n content = json.loads(response.content.decode())\n print(content)\n print(f'Status: {content[\"status\"]}; {content[\"message\"]} ; GUID = {content[\"result\"]}')\n\n etherscan_url = etherscan_api.replace('api-', '').replace('api', '')\n etherscan_url += '/verifyContract2?a=' + data['contractaddress']\n manual_submission_guide = f\n\n if content['status'] != '1':\n if content['result'] == 'Contract source code already verified':\n return\n else:\n raise ValueError(\n 'Etherscan submission failed for an unknown reason\\n' +\n manual_submission_guide,\n )\n\n \n guid = content['result']\n status = '0'\n retries = 10\n while status == '0' and retries > 0:\n retries -= 1\n r = guid_status(etherscan_api=etherscan_api, guid=guid)\n status = r['status']\n if r['result'] == 'Fail - Unable to verify':\n raise ValueError(manual_submission_guide)\n if r['result'] == 'Pass - Verified':\n return\n print('Retrying...')\n sleep(5)\n raise TimeoutError(manual_submission_guide)", "docstring": "Calls Etherscan API for verifying the Solidity source of a contract.\n\nArgs:\nchain_id: EIP-155 chain id of the Ethereum chain\napikey: key for calling Etherscan API\nsource_module: a module name to look up contracts_source_path()\ncontract_name: 'TokenNetworkRegistry', 'SecretRegistry' etc.", "source": "juraj-google-style"} 
{"code": "def delete(self, customer_id, token_id, data={}, **kwargs):\n url = '{}/{}/tokens/{}'.format(self.base_url, customer_id, token_id)\n return self.delete_url(url, data, **kwargs)", "docstring": "Delete Given Token For a Customer\n\nArgs:\ncustomer_id : Customer Id for which tokens have to be deleted\ntoken_id : Id for which TOken object has to be deleted\nReturns:\nDict for deleted token", "source": "codesearchnet"} {"code": "def add_rect(self, width, height, rid=None): \n \n assert(width > 0 and height >0)\n\n \n section, rotated = self._select_fittest_section(width, height)\n if not section:\n return None\n \n if rotated:\n width, height = height, width\n \n \n self._sections.remove(section)\n self._split(section, width, height)\n \n \n rect = Rectangle(section.x, section.y, width, height, rid)\n self.rectangles.append(rect)\n return rect", "docstring": "Add rectangle of widthxheight dimensions.\n\nArguments:\nwidth (int, float): Rectangle width\nheight (int, float): Rectangle height\nrid: Optional rectangle user id\n\nReturns:\nRectangle: Rectangle with placemente coordinates\nNone: If the rectangle couldn be placed.", "source": "juraj-google-style"} {"code": "def make_usage_key_from_deprecated_string(self, location_url):\n warnings.warn('make_usage_key_from_deprecated_string is deprecated! Please use make_usage_key', DeprecationWarning, stacklevel=2)\n return BlockUsageLocator.from_string(location_url).replace(run=self.run)", "docstring": "Deprecated mechanism for creating a UsageKey given a CourseKey and a serialized Location.\n\nNOTE: this prejudicially takes the tag, org, and course from the url not self.\n\nRaises:\nInvalidKeyError: if the url does not parse", "source": "codesearchnet"} {"code": "def _load_generic(packname, package, section, target):\n \n from acorn.config import settings\n spack = settings(packname)\n if spack.has_section(section):\n secitems = dict(spack.items(section))\n for fqdn, active in secitems.items():\n target[fqdn] = active == \"1\"", "docstring": "Loads the settings for generic options that take FQDN and a boolean value\n(1 or 0).\n\nArgs:\npackname (str): name of the package to get config settings for.\npackage: actual package object.", "source": "juraj-google-style"} {"code": "def init_logger(logger_name='sip', log_level=None, p3_mode: bool=True, show_thread: bool=False, propagate: bool=False, show_log_origin=False):\n log = logging.getLogger(logger_name)\n log.propagate = propagate\n for handler in log.handlers:\n log.removeHandler(handler)\n _debug = ('%(filename)s:%(lineno)d | ' if show_log_origin else '')\n if p3_mode:\n _prefix = '%(asctime)s - %(name)s - %(levelname)s'\n if show_thread:\n _format = '{} - %(threadName)s - {}%(message)s'.format(_prefix, _debug)\n else:\n _format = '{} - {}%(message)s'.format(_prefix, _debug)\n formatter = logging.Formatter(_format)\n formatter.converter = time.gmtime\n else:\n _prefix = '%(asctime)s | %(name)s | %(levelname)s'\n if show_thread:\n _format = '{} | %(threadName)s | {}%(message)s'.format(_prefix, _debug)\n else:\n _format = '{} | {}%(message)s'.format(_prefix, _debug)\n formatter = SIPFormatter(_format, datefmt='%Y-%m-%dT%H:%M:%S.%fZ')\n handler = logging.StreamHandler(stream=sys.stdout)\n handler.setFormatter(formatter)\n log.addHandler(handler)\n if log_level:\n log.setLevel(log_level)\n else:\n log.setLevel(os.getenv('SIP_LOG_LEVEL', 'DEBUG'))", "docstring": "Initialise the SIP logger.\n\nAttaches a stdout stream handler to the 'sip' logger. 
This will\napply to all logger objects with a name prefixed by 'sip.'\n\nThis function respects the 'SIP_LOG_LEVEL' environment variable to\nset the logging level.\n\nArgs:\nlogger_name (str, optional): Name of the logger object.\nlog_level (str or int, optional): Logging level for the SIP logger.\np3_mode (bool, optional): Print logging statements in a format that\nP3 can support.\nshow_thread (bool, optional): Display the thread in the log message.\npropagate (bool, optional): Propagate settings to parent loggers.\nshow_log_origin (boo, optional): If true show the origin\n(file, line no.) of log messages.", "source": "codesearchnet"} {"code": "def create_image_table(self, r=None):\n logger.info('Creating image table...')\n if (r is not None):\n self.r = r\n self.image_table = ImageTable(self)", "docstring": "Create and store a new ImageTable instance based on the current\nDataset. Will generally be called privately, but may be useful as a\nconvenience method in cases where the user wants to re-generate the\ntable with a new smoothing kernel of different radius.\n\nArgs:\nr (int): An optional integer indicating the radius of the smoothing\nkernel. By default, this is None, which will keep whatever\nvalue is currently set in the Dataset instance.", "source": "codesearchnet"} {"code": "def delete_lines(self, lines):\n for (k, i) in enumerate(lines):\n del self[(i - k)]", "docstring": "Delete all lines with given line numbers.\n\nArgs:\nlines (list): List of integers corresponding to line numbers to delete", "source": "codesearchnet"} {"code": "def with_embedding_spec(self, column_name: str='embedding', convert_fn: Optional[Callable[[List[float]], Any]]=None) -> 'ColumnSpecsBuilder':\n\n def value_fn(chunk: Chunk) -> Any:\n if chunk.embedding is None or chunk.embedding.dense_embedding is None:\n raise ValueError(f'Expected chunk to contain embedding. {chunk}')\n values = chunk.embedding.dense_embedding\n if convert_fn:\n return convert_fn(values)\n return '{' + ','.join((str(x) for x in values)) + '}'\n self._specs.append(ColumnSpec.vector(column_name=column_name, value_fn=value_fn))\n return self", "docstring": "Add embedding :class:`.ColumnSpec` with optional conversion.\n\nArgs:\ncolumn_name: Name for the embedding column (defaults to \"embedding\")\nconvert_fn: Optional function to convert the dense embedding values\nIf None, uses default PostgreSQL array format\n\nReturns:\nSelf for method chaining\n\nExample:\n>>> builder.with_embedding_spec(\n... column_name=\"embedding_vector\",\n... convert_fn=lambda values: '{' + ','.join(f\"{x:.4f}\"\n... for x in values) + '}'\n... 
)", "source": "github-repos"} {"code": "def flatten(inputs, scope=None):\n \n if len(inputs.get_shape()) < 2:\n raise ValueError('Inputs must be have a least 2 dimensions')\n dims = inputs.get_shape()[1:]\n k = dims.num_elements()\n with tf.name_scope(scope, 'Flatten', [inputs]):\n return tf.reshape(inputs, [-1, k])", "docstring": "Flattens the input while maintaining the batch_size.\n\nAssumes that the first dimension represents the batch.\n\nArgs:\ninputs: a tensor of size [batch_size, ...].\nscope: Optional scope for name_scope.\n\nReturns:\na flattened tensor with shape [batch_size, k].\nRaises:\nValueError: if inputs.shape is wrong.", "source": "juraj-google-style"} {"code": "def json_dumps(self, data):\n return json.dumps(data, separators=(',', ':'), sort_keys=True, cls=self.json_encoder, ensure_ascii=False).encode('utf8')", "docstring": "Standardized json.dumps function with separators and sorted keys set\n\nArgs:\ndata (dict or list): data to be dumped\n\nReturns:\nstring: json", "source": "codesearchnet"} {"code": "def guess_depth(packages):\n if (len(packages) == 1):\n return (packages[0].count('.') + 2)\n return (min((p.count('.') for p in packages)) + 1)", "docstring": "Guess the optimal depth to use for the given list of arguments.\n\nArgs:\npackages (list of str): list of packages.\n\nReturns:\nint: guessed depth to use.", "source": "codesearchnet"} {"code": "def create_mock_system_install_device(self, api_level, code_name='REL'):\n self.mock_device.build_info = {'build_version_sdk': bytearray(api_level, 'utf8'), 'build_version_codename': code_name}\n return self.mock_device", "docstring": "Create a mock device with a particular API level.\n\nArgs:\napi_level: A string reflecting the value of the ro.build.version.sdk\nproperty.\ncode_name: The codename of the device's build, defaults to 'REL'\n\nReturns:\nA mock object for the AndroidDevice.", "source": "github-repos"} {"code": "def _use_prototype(self, spec, prototypes):\n \n prototype = spec['based-on']\n del spec['based-on']\n for attr in prototype:\n if attr not in spec:\n spec[attr] = copy.deepcopy(prototype[attr])\n\n return spec", "docstring": "Populates the given spec with the values of it's declared prototype\n\nArgs:\nspec (dict): spec to update\nprototypes (dict): Configuration spec containing the prototypes\n\nReturns:\ndict: updated spec", "source": "juraj-google-style"} {"code": "def _batch_prepare_for_model(self, batch_ids_pairs: List[Tuple[List[int], None]], batch_entity_ids_pairs: List[Tuple[Optional[List[int]], Optional[List[int]]]], batch_entity_token_spans_pairs: List[Tuple[Optional[List[Tuple[int, int]]], Optional[List[Tuple[int, int]]]]], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:\n batch_outputs = {}\n for input_ids, entity_ids, entity_token_span_pairs in zip(batch_ids_pairs, batch_entity_ids_pairs, batch_entity_token_spans_pairs):\n first_ids, second_ids = input_ids\n first_entity_ids, second_entity_ids = entity_ids\n first_entity_token_spans, 
second_entity_token_spans = entity_token_span_pairs\n outputs = self.prepare_for_model(first_ids, second_ids, entity_ids=first_entity_ids, pair_entity_ids=second_entity_ids, entity_token_spans=first_entity_token_spans, pair_entity_token_spans=second_entity_token_spans, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, max_entity_length=max_entity_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)\n for key, value in outputs.items():\n if key not in batch_outputs:\n batch_outputs[key] = []\n batch_outputs[key].append(value)\n batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)\n batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)\n return batch_outputs", "docstring": "Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It\nadds special tokens, truncates sequences if overflowing while taking into account the special tokens and\nmanages a moving window (with user defined stride) for overflowing tokens\n\n\nArgs:\nbatch_ids_pairs: list of tokenized input ids or input ids pairs\nbatch_entity_ids_pairs: list of entity ids or entity ids pairs\nbatch_entity_token_spans_pairs: list of entity spans or entity spans pairs\nmax_entity_length: The maximum length of the entity sequence.", "source": "github-repos"} {"code": "def _html_tree_view_config(cls) -> Dict[str, Any]:\n return {}", "docstring": "Returns the config (rendering arguments) of current extension.\n\nReturns:\nA dictionary of rendering arguments for the subtree. These arguments\nwill override the arguments passed to `view.render()`. 
See the\n`render()` method for the full list of arguments.", "source": "github-repos"} {"code": "def bind(self, extension: Extension) -> 'DictMentor':\n if (not Extension.is_valid_extension(extension)):\n raise ValueError('Cannot bind extension due to missing interface requirements')\n self._extensions.append(extension)\n return self", "docstring": "Add any predefined or custom extension.\n\nArgs:\nextension: Extension to add to the processor.\n\nReturns:\nThe DictMentor itself for chaining.", "source": "codesearchnet"} {"code": "def _CreateImage(media_service, opener, url):\n \n \n image_data = opener.open(url).read().decode('utf-8')\n image = {\n 'type': 'IMAGE',\n 'data': image_data,\n 'xsi_type': 'Image'\n }\n\n return media_service.upload(image)[0]", "docstring": "Creates an image and uploads it to the server.\n\nArgs:\nmedia_service: a SudsServiceProxy instance for AdWords's MediaService.\nopener: an OpenerDirector instance.\nurl: a str URL used to load image data.\n\nReturns:\nThe image that was successfully uploaded.", "source": "juraj-google-style"} {"code": "def check(self):\n errors = []\n results = []\n for fn in self._files:\n if (not os.path.isdir(fn)):\n try:\n with open(fn, 'r') as f:\n line_ct = 1\n for line in f:\n for word in split_words(line):\n if ((word in self._misspelling_dict) or (word.lower() in self._misspelling_dict)):\n results.append([fn, line_ct, word])\n line_ct += 1\n except UnicodeDecodeError:\n pass\n except IOError:\n errors.append(('%s' % sys.exc_info()[1]))\n return (errors, results)", "docstring": "Checks the files for misspellings.\n\nReturns:\n(errors, results)\nerrors: List of system errors, usually file access errors.\nresults: List of spelling errors - each tuple is filename,\nline number and misspelled word.", "source": "codesearchnet"} {"code": "def get_residue_annotations(self, seq_resnum, seqprop=None, structprop=None, chain_id=None, use_representatives=False):\n if use_representatives:\n if (seqprop and structprop and chain_id):\n raise ValueError('Overriding sequence, structure, and chain IDs with representatives. 
Set use_representatives to False if custom IDs are to be used.')\n elif ((not seqprop) or (not structprop) or (not chain_id)):\n raise ValueError('Input sequence, structure, and chain to map between, or set use_representatives to True.')\n if use_representatives:\n seqprop = self.representative_sequence\n structprop = self.representative_structure\n chain_id = self.representative_chain\n f = SeqFeature(FeatureLocation((seq_resnum - 1), seq_resnum))\n seq_features = f.extract(seqprop)\n all_info = ssbio.utils.clean_single_dict(indict=seq_features.letter_annotations, prepend_to_keys='seq_', remove_keys_containing='_chain_index')\n all_info['seq_resnum'] = seq_resnum\n all_info['seq_residue'] = str(seq_features.seq)\n if structprop:\n chain = structprop.chains.get_by_id(chain_id)\n mapping_to_structure_resnum = self.map_seqprop_resnums_to_structprop_resnums(resnums=seq_resnum, seqprop=seqprop, structprop=structprop, chain_id=chain_id, use_representatives=use_representatives)\n if (f.location.end.position in mapping_to_structure_resnum):\n struct_resnum = mapping_to_structure_resnum[f.location.end.position]\n struct_f = SeqFeature(FeatureLocation((struct_resnum - 1), struct_resnum))\n struct_seq_features = struct_f.extract(chain.seq_record)\n struct_info = ssbio.utils.clean_single_dict(indict=struct_seq_features.letter_annotations, prepend_to_keys='struct_', remove_keys_containing='structure_resnums')\n struct_info['struct_resnum'] = struct_resnum\n struct_info['struct_residue'] = str(struct_seq_features.seq)\n all_info.update(struct_info)\n if (seq_features.seq != struct_seq_features.seq):\n log.warning('Sequence residue ({}{}) does not match structure residue ({}{}). This may simply be due to differences in the structure'.format(seq_features.seq, seq_resnum, struct_seq_features.seq, struct_resnum))\n return all_info", "docstring": "Get all residue-level annotations stored in the SeqProp ``letter_annotations`` field for a given residue number.\n\nUses the representative sequence, structure, and chain ID stored by default. If other properties from other\nstructures are desired, input the proper IDs. 
An alignment for the given sequence to the structure must\nbe present in the sequence_alignments list.\n\nArgs:\nseq_resnum (int): Residue number in the sequence\nseqprop (SeqProp): SeqProp object\nstructprop (StructProp): StructProp object\nchain_id (str): ID of the structure's chain to get annotation from\nuse_representatives (bool): If the representative sequence/structure/chain IDs should be used\n\nReturns:\ndict: All available letter_annotations for this residue number", "source": "codesearchnet"} {"code": "def update_panel(store, panel_name, csv_lines, option):\n \n new_genes= []\n panel_obj = store.gene_panel(panel_name)\n if panel_obj is None:\n return None\n try:\n new_genes = parse_genes(csv_lines) \n except SyntaxError as error:\n flash(error.args[0], 'danger')\n return None\n\n \n if option == 'replace':\n \n for gene in panel_obj['genes']:\n \n gene['hgnc_symbol'] = gene['symbol']\n store.add_pending(panel_obj, gene, action='delete', info=None)\n\n for new_gene in new_genes:\n if not new_gene['hgnc_id']:\n flash(\"gene missing hgnc id: {}\".format(new_gene['hgnc_symbol']),'danger')\n continue\n gene_obj = store.hgnc_gene(new_gene['hgnc_id'])\n if gene_obj is None:\n flash(\"gene not found: {} - {}\".format(new_gene['hgnc_id'], new_gene['hgnc_symbol']),'danger')\n continue\n if new_gene['hgnc_symbol'] and gene_obj['hgnc_symbol'] != new_gene['hgnc_symbol']:\n flash(\"symbol mis-match: {0} | {1}\".format(\n gene_obj['hgnc_symbol'], new_gene['hgnc_symbol']), 'warning')\n\n info_data = {\n 'disease_associated_transcripts': new_gene['transcripts'],\n 'reduced_penetrance': new_gene['reduced_penetrance'],\n 'mosaicism': new_gene['mosaicism'],\n 'inheritance_models': new_gene['inheritance_models'],\n 'database_entry_version': new_gene['database_entry_version'],\n }\n if option == 'replace': \n action = 'add'\n else: \n existing_genes = {gene['hgnc_id'] for gene in panel_obj['genes']}\n action = 'edit' if gene_obj['hgnc_id'] in existing_genes else 'add'\n store.add_pending(panel_obj, gene_obj, action=action, info=info_data)\n\n return panel_obj", "docstring": "Update an existing gene panel with genes.\n\nArgs:\nstore(scout.adapter.MongoAdapter)\npanel_name(str)\ncsv_lines(iterable(str)): Stream with genes\noption(str): 'add' or 'replace'\n\nReturns:\npanel_obj(dict)", "source": "juraj-google-style"} {"code": "def SetIndexName(self, index_name):\n self._index_name = index_name\n logger.debug('Elasticsearch index name: {0:s}'.format(index_name))", "docstring": "Set the index name.\n\nArgs:\nindex_name (str): name of the index.", "source": "codesearchnet"} {"code": "class Reshape(Layer):\n\n def __init__(self, target_shape, **kwargs):\n super().__init__(**kwargs)\n self.target_shape = tuple(target_shape)\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], *operation_utils.compute_reshape_output_shape(input_shape[1:], self.target_shape, 'target_shape'))\n\n def compute_output_spec(self, inputs):\n output_shape = self.compute_output_shape(inputs.shape)\n return KerasTensor(shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse)\n\n def build(self, input_shape):\n sample_output_shape = operation_utils.compute_reshape_output_shape(input_shape[1:], self.target_shape, 'target_shape')\n self._resolved_target_shape = tuple((-1 if d is None else d for d in sample_output_shape))\n\n def call(self, inputs):\n return ops.reshape(inputs, (ops.shape(inputs)[0],) + self._resolved_target_shape)\n\n def get_config(self):\n config = {'target_shape': self.target_shape}\n 
base_config = super().get_config()\n return {**base_config, **config}", "docstring": "Layer that reshapes inputs into the given shape.\n\nArgs:\ntarget_shape: Target shape. Tuple of integers, does not include the\nsamples dimension (batch size).\n\nInput shape:\nArbitrary, although all dimensions in the input shape must be\nknown/fixed. Use the keyword argument `input_shape` (tuple of integers,\ndoes not include the samples/batch size axis) when using this layer as\nthe first layer in a model.\n\nOutput shape:\n`(batch_size, *target_shape)`\n\nExample:\n\n>>> x = keras.Input(shape=(12,))\n>>> y = keras.layers.Reshape((3, 4))(x)\n>>> y.shape\n(None, 3, 4)\n\n>>> # also supports shape inference using `-1` as dimension\n>>> y = keras.layers.Reshape((-1, 2, 2))(x)\n>>> y.shape\n(None, 3, 2, 2)", "source": "github-repos"} {"code": "def delete(self, file_path, branch, commit_message, **kwargs):\n \n path = '%s/%s' % (self.path, file_path.replace('/', '%2F'))\n data = {'branch': branch, 'commit_message': commit_message}\n self.gitlab.http_delete(path, query_data=data, **kwargs)", "docstring": "Delete a file on the server.\n\nArgs:\nfile_path (str): Path of the file to remove\nbranch (str): Branch from which the file will be removed\ncommit_message (str): Commit message for the deletion\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabDeleteError: If the server cannot perform the request", "source": "juraj-google-style"} {"code": "def set_package_releases(self, project_name, versions):\n self.packages[project_name] = sorted(versions, reverse=True)", "docstring": "Storage package information in ``self.packages``\n\nArgs:\nproject_name (str): This will be used as a the key in the\ndictionary.\nversions (list): List of ``str`` representing the available\nversions of a project.", "source": "codesearchnet"} {"code": "def l2_normalize(x, axis=None):\n return nn.l2_normalize(x, axis=axis)", "docstring": "Normalizes a tensor wrt the L2 norm alongside the specified axis.\n\nArgs:\nx: Tensor or variable.\naxis: axis along which to perform normalization.\n\nReturns:\nA tensor.", "source": "github-repos"} {"code": "def set_control_scheme(self, index):\n \n self._current_control_scheme = index % self._num_control_schemes\n self._control_scheme_buffer[0] = self._current_control_scheme", "docstring": "Sets the control scheme for the agent. See :obj:`ControlSchemes`.\n\nArgs:\nindex (int): The control scheme to use. 
Should be set with an enum from :obj:`ControlSchemes`.", "source": "juraj-google-style"} {"code": "def get(self, key):\n \n self._create_file_if_none_exists()\n with open(self.filename, 'rb') as file_object:\n cache_pickle = pickle.load(file_object)\n val = cache_pickle.get(key, None)\n return val", "docstring": "Gets a value by a key.\n\nArgs:\nkey (str): Key to retrieve the value.\n\nReturns: Retrieved value.", "source": "juraj-google-style"} {"code": "def show(self, displayAll = False):\n\t\t\n\t\tfrom pprint import pprint\n\t\tif displayAll:\n\t\t\tpprint(self.attributes)\n\t\telse:\n\t\t\tdisp_attr = {}\n\t\t\tfor key in self.disp_attr_keys:\n\t\t\t\ttry:\n\t\t\t\t\tdisp_attr[key] = self.attributes[key]\n\t\t\t\texcept KeyError:\n\t\t\t\t\tif key == 'lowercaseEmail':\n\t\t\t\t\t\tdisp_attr[key] = disp_attr['email'].lower()\n\t\t\t\t\telse:\n\t\t\t\t\t\tdisp_attr[key] = None\n\t\t\tpprint(disp_attr)\n\t\tdel pprint", "docstring": "Prints relevant attributes of an object\nArgs:\ndisplayAll\t\tif True displays ALL class attributes.", "source": "juraj-google-style"} {"code": "def find_and_replace_userids(self, text):\n \n\n match = True\n pattern = re.compile('<@([A-Z0-9]{9})>')\n while match:\n match = pattern.search(text)\n if match:\n name = self.get_user_display_name(match.group(1))\n text = re.sub(re.compile(match.group(0)), '@' + name, text)\n\n return text", "docstring": "Finds occurrences of Slack userids and attempts to replace them with\ndisplay names.\n\nArgs:\ntext (string): The message text\nReturns:\nstring: The message text with userids replaced.", "source": "juraj-google-style"} {"code": "def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:\n \n df = pd.DataFrame(index=table.index)\n\n for column in self.columns:\n df = column.update_dataframe(df, table=table, validate=validate)\n\n return df", "docstring": "Return a fully recoded dataframe.\n\nArgs:\ntable (pd.DataFrame): A dataframe on which to apply recoding logic.\nvalidate (bool): If ``True``, recoded table must pass validation tests.", "source": "juraj-google-style"} {"code": "def ResetConsoleAttr(encoding=None):\n return GetConsoleAttr(encoding=encoding, reset=True)", "docstring": "Resets the console attribute state to the console default.\n\nArgs:\nencoding: Reset to this encoding instead of the default.\nascii -- ASCII. 
This is the default.\nutf8 -- UTF-8 unicode.\nwin -- Windows code page 437.\n\nReturns:\nThe global ConsoleAttr state object.", "source": "github-repos"} {"code": "def match_files(self, file_metas: List[FileMetadata], pattern: str) -> Iterator[FileMetadata]:\n re_pattern = re.compile(self.translate_pattern(pattern))\n match = re_pattern.match\n for file_metadata in file_metas:\n if match(file_metadata.path):\n yield file_metadata", "docstring": "Filter :class:`FileMetadata` objects by *pattern*\n\nArgs:\nfile_metas (list of :class:`FileMetadata`):\nFiles to consider when matching\npattern (str): File pattern\n\nSee Also:\n:meth:`translate_pattern`\n\nReturns:\nGenerator of matching :class:`FileMetadata`", "source": "github-repos"} {"code": "def __init__(self, output, start_height, end_height):\n \n self.Output = output\n self.StartHeight = start_height\n self.EndHeight = end_height", "docstring": "Create instance.\n\nArgs:\noutput (int): the index of the previous output.\nstart_height (int): start block number.\nend_height (int): end block number.", "source": "juraj-google-style"} {"code": "def xcompile(source_code, args=0, optimize=True):\n code = crianza.compile(crianza.parse(source_code), optimize=optimize)\n return crianza.native.compile(code, args=args)", "docstring": "Parses Crianza source code and returns a native Python function.\n\nArgs:\nargs: The resulting function's number of input parameters.\n\nReturns:\nA callable Python function.", "source": "codesearchnet"} {"code": "def load(self):\n df = pd.read_csv(self.input_file, sep=',', quotechar='\"', encoding='utf-8', dtype=object)\n df = df[['NUTS-Code', 'Description']]\n df.columns = ['key', 'name']\n df = df[(df['key'].str.len() == 4)]\n df = df[(df['key'].str[2:] != 'ZZ')]\n return df", "docstring": "Load data, from default location\n\nReturns:\npandas.DataFrame: columns 'key' (NUTS2 code), 'name'", "source": "codesearchnet"} {"code": "def solve_for_fermi_energy(self, temperature, chemical_potentials, bulk_dos):\n \n\n fdos = FermiDos(bulk_dos, bandgap=self.band_gap)\n\n def _get_total_q(ef):\n\n qd_tot = sum([\n d['charge'] * d['conc']\n for d in self.defect_concentrations(\n chemical_potentials=chemical_potentials, temperature=temperature, fermi_level=ef)\n ])\n qd_tot += fdos.get_doping(fermi=ef + self.vbm, T=temperature)\n return qd_tot\n\n return bisect(_get_total_q, -1., self.band_gap + 1.)", "docstring": "Solve for the Fermi energy self-consistently as a function of T\nand p_O2\nObservations are Defect concentrations, electron and hole conc\nArgs:\nbulk_dos: bulk system dos (pymatgen Dos object)\ngap: Can be used to specify experimental gap.\nWill be useful if the self consistent Fermi level\nis > DFT gap\nReturns:\nFermi energy", "source": "juraj-google-style"} {"code": "def _FormatReturnOrExitToken(self, token_data):\n \n error_string = bsmtoken.BSM_ERRORS.get(token_data.status, 'UNKNOWN')\n return {\n 'error': error_string,\n 'token_status': token_data.status,\n 'call_status': token_data.return_value}", "docstring": "Formats a return or exit token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_exit|bsm_token_data_return32|\nbsm_token_data_return64): AUT_EXIT, AUT_RETURN32 or\nAUT_RETURN64 token data.\n\nReturns:\ndict[str, str]: token values.", "source": "juraj-google-style"} {"code": "def process(self, rpc_executor, mark_streamer=None):\n if (self.func is None):\n raise ProcessingFunctionError('No processing function set for node', stream=self.stream)\n results = self.func(*[x[0] for x in 
self.inputs], rpc_executor=rpc_executor, mark_streamer=mark_streamer)\n if (results is None):\n results = []\n return results", "docstring": "Run this node's processing function.\n\nArgs:\nrpc_executor (RPCExecutor): An object capable of executing RPCs\nin case we need to do that.\nmark_streamer (callable): Function that can be called to manually\nmark a streamer as triggered by index.\n\nReturns:\nlist(IOTileReading): A list of IOTileReadings with the results of\nthe processing function or an empty list if no results were\nproduced", "source": "codesearchnet"} {"code": "def run(self, group_x=1, group_y=1, group_z=1) -> None:\n return self.mglo.run(group_x, group_y, group_z)", "docstring": "Run the compute shader.\n\nArgs:\ngroup_x (int): The number of work groups to be launched in the X dimension.\ngroup_y (int): The number of work groups to be launched in the Y dimension.\ngroup_z (int): The number of work groups to be launched in the Z dimension.", "source": "codesearchnet"} {"code": "def remove(self, *l):\n for a in flatten(l):\n self._remove([self.Inner(a)], self.l)", "docstring": "remove inner from outer\n\nArgs:\n*l element that is passes into Inner init", "source": "codesearchnet"} {"code": "def __add_action(self, relative_directory, action):\n \n generator_action_container = self.__actions.retrieve_element_or_default(relative_directory, None)\n\n if generator_action_container is None:\n generator_action_container = GeneratorActionContainer()\n generator_action_container.add_generator_action(action)\n self.__actions.add_element(location=relative_directory, element=generator_action_container)\n else:\n generator_action_container.add_generator_action(action)", "docstring": "Add action into the dictionary of actions.\n\nArgs:\nrelative_directory:\naction:", "source": "juraj-google-style"} {"code": "def find_test_functions(tree: ast.AST, skip_noqa: bool = False) -> List[ast.FunctionDef]:\n \n function_finder = TestFuncLister(skip_noqa)\n function_finder.visit(tree)\n return function_finder.get_found_funcs()", "docstring": "Collect functions that look like tests.\n\nArgs:\ntree\nskip_noqa: Flag used by command line debugger to skip functions that\nare marked with \"# noqa\". Defaults to ``False``.", "source": "juraj-google-style"} {"code": "def _FormatMessage(template, parameters):\n \n def GetParameter(m):\n try:\n return parameters[int(m.group(0)[1:])]\n except IndexError:\n return INVALID_EXPRESSION_INDEX\n\n parts = template.split('$$')\n return '$'.join(re.sub(r'\\$\\d+', GetParameter, part) for part in parts)", "docstring": "Formats the message. Unescapes '$$' with '$'.\n\nArgs:\ntemplate: message template (e.g. 
'a = $0, b = $1').\nparameters: substitution parameters for the format.\n\nReturns:\nFormatted message with parameters embedded in template placeholders.", "source": "juraj-google-style"} {"code": "def get_package_for_module(module):\n \n if isinstance(module, six.string_types):\n try:\n module = sys.modules[module]\n except KeyError:\n return None\n\n try:\n return six.text_type(module.package)\n except AttributeError:\n if module.__name__ == '__main__':\n try:\n file_name = module.__file__\n except AttributeError:\n pass\n else:\n base_name = os.path.basename(file_name)\n split_name = os.path.splitext(base_name)\n if len(split_name) == 1:\n return six.text_type(base_name)\n return u'.'.join(split_name[:-1])\n\n return six.text_type(module.__name__)", "docstring": "Get package name for a module.\n\nHelper calculates the package name of a module.\n\nArgs:\nmodule: Module to get name for. If module is a string, try to find\nmodule in sys.modules.\n\nReturns:\nIf module contains 'package' attribute, uses that as package name.\nElse, if module is not the '__main__' module, the module __name__.\nElse, the base name of the module file name. Else None.", "source": "juraj-google-style"} {"code": "def mkdir_interactive(dirpath):\n \n from benchbuild.utils.cmd import mkdir\n if os.path.exists(dirpath):\n return\n\n response = ui.ask(\n \"The directory {dirname} does not exist yet. \"\n \"Should I create it?\".format(dirname=dirpath),\n default_answer=True,\n default_answer_str=\"yes\")\n\n if response:\n mkdir(\"-p\", dirpath)\n print(\"Created directory {0}.\".format(dirpath))", "docstring": "Create a directory if required.\n\nThis will query the user for a confirmation.\n\nArgs:\ndirname: The path to create.", "source": "juraj-google-style"} {"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. 
A BERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"} {"code": "def __process_instr(self, instr, avoid, next_addr, initial_state, execution_state, trace_current):\n if (instr.mnemonic == ReilMnemonic.JCC):\n not_taken_addr = next_addr\n (address, index) = split_address(instr.address)\n logger.debug('[+] Processing branch: {:\n if isinstance(instr.operands[0], ReilRegisterOperand):\n next_ip = self.__process_branch_cond(instr, avoid, initial_state, execution_state, trace_current, not_taken_addr)\n else:\n next_ip = self.__process_branch_uncond(instr, trace_current, not_taken_addr)\n else:\n trace_current += [(instr, None)]\n self.__cpu.execute(instr)\n next_ip = next_addr\n return next_ip", "docstring": "Process a REIL instruction.\n\nArgs:\ninstr (ReilInstruction): Instruction to process.\navoid (list): List of addresses to avoid while executing the code.\nnext_addr (int): Address of the following instruction.\ninitial_state (State): Initial execution state.\nexecution_state (Queue): Queue of execution states.\ntrace_current (list): Current trace.\n\nReturns:\nint: Returns the next address to execute.", "source": "codesearchnet"} {"code": "def _GetSources(self, event_object):\n \n try:\n source_short, source_long = (\n formatters_manager.FormattersManager.GetSourceStrings(event_object))\n except KeyError as exception:\n logging.warning(\n 'Unable to correctly assemble event with error: {0!s}'.format(\n exception))\n\n return source_short, source_long", "docstring": "Returns properly formatted source strings.\n\nArgs:\nevent_object: the event object (instance od EventObject).", "source": "juraj-google-style"} {"code": "def bespoke_md5(self, md5):\n r = requests.post('http:\n self._output(r.text)", "docstring": "Performs Bespoke MD5 lookup on an MD5.\n\nArgs:\nmd5 - A hash.", "source": "codesearchnet"} {"code": "def map_vals(func, dict_):\n if (not hasattr(func, '__call__')):\n func = func.__getitem__\n keyval_list = [(key, func(val)) for (key, val) in six.iteritems(dict_)]\n dictclass = (OrderedDict if isinstance(dict_, OrderedDict) else dict)\n newdict = dictclass(keyval_list)\n return newdict", "docstring": "applies a function to each of the keys in a dictionary\n\nArgs:\nfunc (callable): a function or indexable object\ndict_ (dict): a dictionary\n\nReturns:\nnewdict: transformed dictionary\n\nCommandLine:\npython -m ubelt.util_dict map_vals\n\nExample:\n>>> import ubelt as ub\n>>> dict_ = {'a': [1, 2, 3], 'b': []}\n>>> func = len\n>>> newdict = ub.map_vals(func, dict_)\n>>> assert newdict == {'a': 3, 'b': 0}\n>>> print(newdict)\n>>> # Can also use indexables as `func`\n>>> dict_ = {'a': 0, 'b': 1}\n>>> func = [42, 21]\n>>> newdict = ub.map_vals(func, dict_)\n>>> assert newdict == {'a': 42, 'b': 21}\n>>> print(newdict)", "source": "codesearchnet"} {"code": "def SetSerializersProfiler(self, serializers_profiler):\n self._serializers_profiler = serializers_profiler\n if self._storage_file:\n self._storage_file.SetSerializersProfiler(serializers_profiler)", "docstring": "Sets the serializers profiler.\n\nArgs:\nserializers_profiler (SerializersProfiler): serializers profiler.", "source": "codesearchnet"} 
{"code": "def tanh(x):\n if any_symbolic_tensors((x,)):\n return Tanh().symbolic_call(x)\n return backend.numpy.tanh(x)", "docstring": "Hyperbolic tangent, element-wise.\n\nArguments:\nx: Input tensor.\n\nReturns:\nOutput tensor of same shape as `x`.", "source": "github-repos"} {"code": "def __init__(self, sync_frequency=1, update_weight=1.0, scope='synchronization', summary_labels=()):\n \n assert isinstance(sync_frequency, int) and sync_frequency > 0\n self.sync_frequency = sync_frequency\n\n assert isinstance(update_weight, float) and update_weight > 0.0\n self.update_weight = update_weight\n\n super(Synchronization, self).__init__(scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new synchronization optimizer instance.\n\nArgs:\nsync_frequency: The interval between optimization calls actually performing a\nsynchronization step.\nupdate_weight: The update weight, 1.0 meaning a full assignment of the source\nvariables values.", "source": "juraj-google-style"} {"code": "def create_card(self, card_json):\n return trolly.card.Card(trello_client=self, card_id=card_json['id'], name=card_json['name'], data=card_json)", "docstring": "Create a Card object from JSON object\n\nReturns:\nCard: The card from the given `card_json`.", "source": "codesearchnet"} {"code": "def write_gtiff_file(f_name, n_rows, n_cols, data, geotransform, srs, nodata_value, gdal_type=GDT_Float32):\n UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(f_name)))\n driver = gdal_GetDriverByName(str('GTiff'))\n try:\n ds = driver.Create(f_name, n_cols, n_rows, 1, gdal_type)\n except Exception:\n print(('Cannot create output file %s' % f_name))\n return\n ds.SetGeoTransform(geotransform)\n try:\n ds.SetProjection(srs.ExportToWkt())\n except (AttributeError or Exception):\n ds.SetProjection(srs)\n ds.GetRasterBand(1).SetNoDataValue(nodata_value)\n if (isinstance(data, numpy.ndarray) and (data.dtype in [numpy.dtype('int'), numpy.dtype('float')])):\n data = numpy.where(numpy.isnan(data), nodata_value, data)\n ds.GetRasterBand(1).WriteArray(data)\n ds = None", "docstring": "Output Raster to GeoTiff format file.\n\nArgs:\nf_name: output gtiff file name.\nn_rows: Row count.\nn_cols: Col count.\ndata: 2D array data.\ngeotransform: geographic transformation.\nsrs: coordinate system.\nnodata_value: nodata value.\ngdal_type (:obj:`pygeoc.raster.GDALDataType`): output raster data type,\nGDT_Float32 as default.", "source": "codesearchnet"} {"code": "def __init__(self, datastore_client):\n \n super(AversarialBatches, self).__init__(\n datastore_client=datastore_client,\n entity_kind_batches=KIND_ADVERSARIAL_BATCH,\n entity_kind_images=KIND_ADVERSARIAL_IMAGE)", "docstring": "Initializes AversarialBatches.\n\nArgs:\ndatastore_client: instance of CompetitionDatastoreClient", "source": "juraj-google-style"} {"code": "def has_mixture_channel(val: Any) -> bool:\n mixture_getter = getattr(val, '_has_mixture_', None)\n result = (NotImplemented if (mixture_getter is None) else mixture_getter())\n if (result is not NotImplemented):\n return result\n result = has_unitary(val)\n if ((result is not NotImplemented) and result):\n return result\n return (mixture_channel(val, None) is not None)", "docstring": "Returns whether the value has a mixture channel representation.\n\nIn contrast to `has_mixture` this method falls back to checking whether\nthe value has a unitary representation via `has_channel`.\n\nReturns:\nIf `val` has a `_has_mixture_` method and its result is not\nNotImplemented, that result is returned. 
Otherwise, if `val` has a\n`_has_unitary_` method and its results is not NotImplemented, that\nresult is returned. Otherwise, if the value has a `_mixture_` method\nthat is not a non-default value, True is returned. Returns False if none\nof these functions.", "source": "codesearchnet"} {"code": "def __init__(self, *value):\n \n tag = self.__class__.__name__.replace('Single', '').lower()\n super().__init__(tag, value)", "docstring": "init\n\nset self.tag to classname. e.g.::\n\nArraySingle.tag -> 'array'\n\nArgs:\n*value: the elements you want to put into single's value(list), can be one element or several seperate by comma, or put into a list or combination of those. *value will be flattend to a single one deminision list. In subclasses' init, raw data should be converted to single if needed according to specific subclass.", "source": "juraj-google-style"} {"code": "def check_loss_and_target_compatibility(targets, loss_fns, output_shapes):\n key_loss_fns = {losses.mean_squared_error, losses.binary_crossentropy, losses.categorical_crossentropy}\n key_loss_classes = (losses.MeanSquaredError, losses.BinaryCrossentropy, losses.CategoricalCrossentropy)\n for y, loss, shape in zip(targets, loss_fns, output_shapes):\n if y is None or loss is None or tensor_util.is_tf_type(y):\n continue\n if losses.is_categorical_crossentropy(loss):\n if y.shape[-1] == 1:\n raise ValueError('You are passing a target array of shape ' + str(y.shape) + ' while using as loss `categorical_crossentropy`. `categorical_crossentropy` expects targets to be binary matrices (1s and 0s) of shape (samples, classes). If your targets are integer classes, you can convert them to the expected format via:\\n```\\nfrom keras.utils import to_categorical\\ny_binary = to_categorical(y_int)\\n```\\n\\nAlternatively, you can use the loss function `sparse_categorical_crossentropy` instead, which does expect integer targets.')\n is_loss_wrapper = isinstance(loss, losses.LossFunctionWrapper)\n if isinstance(loss, key_loss_classes) or (is_loss_wrapper and loss.fn in key_loss_fns):\n for target_dim, out_dim in zip(y.shape[1:], shape[1:]):\n if out_dim is not None and target_dim != out_dim:\n loss_name = loss.name\n if loss_name is None:\n loss_type = loss.fn if is_loss_wrapper else type(loss)\n loss_name = loss_type.__name__\n raise ValueError('A target array with shape ' + str(y.shape) + ' was passed for an output of shape ' + str(shape) + ' while using as loss `' + loss_name + '`. This loss expects targets to have the same shape as the output.')", "docstring": "Does validation on the compatibility of targets and loss functions.\n\nThis helps prevent users from using loss functions incorrectly. 
This check\nis purely for UX purposes.\n\nArgs:\ntargets: list of Numpy arrays of targets.\nloss_fns: list of loss functions.\noutput_shapes: list of shapes of model outputs.\n\nRaises:\nValueError: if a loss function or target array\nis incompatible with an output.", "source": "github-repos"} {"code": "def check_hardware(self, expected):\n \n\n if len(expected) < 10:\n expected += '\\0'*(10 - len(expected))\n\n err, = self.rpc(0x00, 0x03, expected, result_format=\"L\")\n if err == 0:\n return True\n\n return False", "docstring": "Make sure the hardware version is what we expect.\n\nThis convenience function is meant for ensuring that we are talking to\na tile that has the correct hardware version.\n\nArgs:\nexpected (str): The expected hardware string that is compared\nagainst what is reported by the hardware_version RPC.\n\nReturns:\nbool: true if the hardware is the expected version, false otherwise", "source": "juraj-google-style"} {"code": "def wake_up(func):\n\n @wraps(func)\n def wrapped(*args, **kwargs):\n\n def valid_result(result):\n \"Check if TeslaAPI result succesful.\\n\\n Parameters\\n ----------\\n result : tesla API result\\n This is the result of a Tesla Rest API call.\\n\\n Returns\\n -------\\n bool\\n Tesla API failure can be checked in a dict with a bool in\\n ['response']['result'], a bool, or None or\\n ['response']['reason'] == 'could_not_wake_buses'\\n Returns true when a failure state not detected.\\n\\n \"\n try:\n return ((result is not None) and (result is not False) and ((result is True) or ((isinstance(result, dict) and isinstance(result['response'], dict) and (('result' in result['response']) and (result['response']['result'] is True))) or (('reason' in result['response']) and (result['response']['reason'] != 'could_not_wake_buses')) or ('result' not in result['response']))))\n except TypeError as exception:\n _LOGGER.error('Result: %s, %s', result, exception)\n retries = 0\n sleep_delay = 2\n inst = args[0]\n vehicle_id = args[1]\n result = None\n if ((vehicle_id is not None) and (vehicle_id in inst.car_online) and inst.car_online[vehicle_id]):\n try:\n result = func(*args, **kwargs)\n except TeslaException:\n pass\n if valid_result(result):\n return result\n _LOGGER.debug('wake_up needed for %s -> %s \\nInfo: args:%s, kwargs:%s, vehicle_id:%s, car_online:%s', func.__name__, result, args, kwargs, vehicle_id, inst.car_online)\n inst.car_online[vehicle_id] = False\n while (('wake_if_asleep' in kwargs) and kwargs['wake_if_asleep'] and ((vehicle_id is None) or ((vehicle_id is not None) and (vehicle_id in inst.car_online) and (not inst.car_online[vehicle_id])))):\n result = inst._wake_up(vehicle_id)\n _LOGGER.debug('%s(%s): Wake Attempt(%s): %s', func.__name__, vehicle_id, retries, result)\n if (not result):\n if (retries < 5):\n time.sleep((sleep_delay ** (retries + 2)))\n retries += 1\n continue\n else:\n inst.car_online[vehicle_id] = False\n raise RetryLimitError\n else:\n break\n retries = 0\n while True:\n try:\n result = func(*args, **kwargs)\n _LOGGER.debug('%s(%s): Retry Attempt(%s): %s', func.__name__, vehicle_id, retries, result)\n except TeslaException:\n pass\n finally:\n retries += 1\n time.sleep((sleep_delay ** (retries + 1)))\n if valid_result(result):\n return result\n if (retries >= 5):\n raise RetryLimitError\n return wrapped", "docstring": "Wrap a API f so it will attempt to wake the vehicle if asleep.\n\nThe command f is run once if the vehicle_id was last reported\nonline. 
Assuming f returns None and wake_if_asleep is True, 5 attempts\nwill be made to wake the vehicle to reissue the command. In addition,\nif there is a `could_not_wake_buses` error, it will retry the command\n\nArgs:\ninst (Controller): The instance of a controller\nvehicle_id (string): The vehicle to attempt to wake.\nTODO: This currently requires a vehicle_id, but update() does not; This\nshould also be updated to allow that case\nwake_if_asleep (bool): Keyword arg to force a vehicle awake. Must be\nset in the wrapped function f\nThrows:\nRetryLimitError", "source": "codesearchnet"} {"code": "def rematerialized_call(self, layer_call, *args, **kwargs):\n\n def compute_size(x):\n return math.prod([d or 1 for d in x.shape]) if isinstance(x, KerasTensor) else 0\n if self._remat_mode.mode == 'full':\n return remat.remat(layer_call)\n elif self._remat_mode.mode == 'list_of_layers' and self.name in self._remat_mode.layer_names:\n return remat.remat(layer_call)\n elif self._remat_mode.mode == 'larger_than':\n output_spec = self.compute_output_spec(*args, **kwargs)\n output_size = sum(tree.flatten(tree.map_structure(compute_size, output_spec)))\n if output_size and output_size > self._remat_mode.output_size_threshold:\n return remat.remat(layer_call)\n elif self._remat_mode.mode == 'activations':\n has_activation = hasattr(self, 'activation') and self.activation is not None\n if has_activation:\n\n @functools.wraps(layer_call)\n def rematerialized_activation_call_wrapper(*args, **kwargs):\n original_activation = self.activation\n self.activation = remat.remat(original_activation)\n try:\n return layer_call(*args, **kwargs)\n finally:\n self.activation = original_activation\n return rematerialized_activation_call_wrapper\n return layer_call", "docstring": "Enable rematerialization dynamically for layer's call method.\n\nArgs:\nlayer_call: The original `call` method of a layer.\n\nReturns:\nRematerialized layer's `call` method.", "source": "github-repos"} {"code": "def update_additional_charge(self, *, recurring_billing_id, description, plan_value, plan_tax, plan_tax_return_base,\n currency):\n \n payload = {\n \"description\": description,\n \"additionalValues\": [\n {\n \"name\": \"ITEM_VALUE\",\n \"value\": plan_value,\n \"currency\": currency\n },\n {\n \"name\": \"ITEM_TAX\",\n \"value\": plan_tax,\n \"currency\": currency\n },\n {\n \"name\": \"ITEM_TAX_RETURN_BASE\",\n \"value\": plan_tax_return_base,\n \"currency\": currency\n }\n ]\n }\n fmt = 'recurringBillItems/{}'.format(recurring_billing_id)\n return self.client._put(self.url + fmt, payload=payload, headers=self.get_headers())", "docstring": "Updates the information from an additional charge in an invoice.\n\nArgs:\nrecurring_billing_id: Identifier of the additional charge.\ndescription:\nplan_value:\nplan_tax:\nplan_tax_return_base:\ncurrency:\n\nReturns:", "source": "juraj-google-style"} {"code": "def cdnode(self, astr_path):\n \n\n \n \n l_absPath = []\n b_valid, l_absPath = self.b_pathInTree(astr_path)\n if b_valid:\n \n self.l_cwd = l_absPath[:]\n self.snode_current = self.snode_root\n self.sbranch_current = self.sbranch_root\n \n for node in l_absPath[1:]:\n self.snode_current = self.snode_current.d_nodes[node]\n self.sbranch_current.dict_branch = self.snode_current.snode_parent.d_nodes\n return {\"status\": True, \"path\": self.l_cwd}\n return {\"status\": False, \"path\": []}", "docstring": "Change working node to astr_path.\n\nThe path is converted to a list, split on '/'. 
By performing a 'cd'\nall parent and derived nodes need to be updated relative to\nnew location.\n\nArgs:\nastr_path (string): The path to cd to.\n\nReturns:\n{\"status\" : True/False , \"path\": l_cwd -- the path as list}", "source": "juraj-google-style"} {"code": "def with_options(self, options, name=None) -> 'DatasetV2':\n return _OptionsDataset(self, options, name=name)", "docstring": "Returns a new `tf.data.Dataset` with the given options set.\n\nThe options are \"global\" in the sense they apply to the entire dataset.\nIf options are set multiple times, they are merged as long as different\noptions do not use different non-default values.\n\n>>> ds = tf.data.Dataset.range(5)\n>>> ds = ds.interleave(lambda x: tf.data.Dataset.range(5),\n... cycle_length=3,\n... num_parallel_calls=3)\n>>> options = tf.data.Options()\n>>> # This will make the interleave order non-deterministic.\n>>> options.deterministic = False\n>>> ds = ds.with_options(options)\n\nArgs:\noptions: A `tf.data.Options` that identifies the options the use.\nname: (Optional.) A name for the tf.data operation.\n\nReturns:\nA new `Dataset` with the transformation applied as described above.\n\nRaises:\nValueError: when an option is set more than once to a non-default value", "source": "github-repos"} {"code": "def form_to_params(fn=None, return_json=True):\n \n def forms_to_params_decorator(fn):\n @handle_type_error\n @wraps(fn)\n def forms_to_params_wrapper(*args, **kwargs):\n kwargs.update(\n dict(request.forms)\n )\n\n if not return_json:\n return fn(*args, **kwargs)\n\n return encode_json_body(\n fn(*args, **kwargs)\n )\n\n return forms_to_params_wrapper\n\n if fn: \n return forms_to_params_decorator(fn)\n\n return forms_to_params_decorator", "docstring": "Convert bottle forms request to parameters for the wrapped function.\n\nArgs:\nreturn_json (bool, default True): Should the decorator automatically\nconvert returned value to JSON?", "source": "juraj-google-style"} {"code": "def request(\n self, main_type, sub_type, result_limit, result_start, owner=None, filters=None, params=None\n ):\n \n params = params or {}\n\n if owner:\n params['owner'] = owner\n if filters and filters.filters:\n params['filters'] = filters.filters_string\n params['resultLimit'] = result_limit or params.get('result_limit', self.result_limit)\n params['resultStart'] = result_start or params.get('result_start', 0)\n if not sub_type:\n url = '/v2/{}'.format(main_type)\n else:\n url = '/v2/{}/{}'.format(main_type, sub_type)\n\n return self.tcex.session.get(url, params=params)", "docstring": "Args:\nmain_type:\nsub_type:\nresult_limit:\nresult_start:\nowner:\nfilters:\nparams:\n\nReturn:", "source": "juraj-google-style"} {"code": "def assign(self, droplet_id):\n return self.get_data(('floating_ips/%s/actions/' % self.ip), type=POST, params={'type': 'assign', 'droplet_id': droplet_id})", "docstring": "Assign a FloatingIP to a Droplet.\n\nArgs:\ndroplet_id: int - droplet id", "source": "codesearchnet"} {"code": "def __main():\n \n if len(sys.argv) < 3:\n sys.stderr.write('usage: {0} \\n'.format(sys.argv[0]))\n sys.exit(64)\n user_pkgs = False\n version_only = False\n if six.text_type(sys.argv[1]) == 'list':\n version_only = True\n if six.text_type(sys.argv[2]) == 'system+user':\n user_pkgs = True\n import salt.utils.json\n import timeit\n\n def run():\n \n pkg_list = WinSoftware(user_pkgs=user_pkgs, version_only=version_only)\n print(salt.utils.json.dumps(pkg_list.data, sort_keys=True, indent=4)) \n print('Total: {}'.format(len(pkg_list))) \n\n 
print('Time Taken: {}'.format(timeit.timeit(run, number=1)))", "docstring": "This module can also be run directly for testing\nArgs:\ndetail|list : Provide ``detail`` or version ``list``.\nsystem|system+user: System installed and System and User installs.", "source": "juraj-google-style"} {"code": "def diffuse_horizontal_illuminance(self, value=999999.0):\n if (value is not None):\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float for field `diffuse_horizontal_illuminance`'.format(value))\n if (value < 0.0):\n raise ValueError('value need to be greater or equal 0.0 for field `diffuse_horizontal_illuminance`')\n self._diffuse_horizontal_illuminance = value", "docstring": "Corresponds to IDD Field `diffuse_horizontal_illuminance`\nwill be missing if >= 999900\n\nArgs:\nvalue (float): value for IDD Field `diffuse_horizontal_illuminance`\nUnit: lux\nvalue >= 0.0\nMissing value: 999999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"} {"code": "def noisy_operation(self, operation: 'cirq.Operation') -> 'cirq.OP_TREE':\n if (not hasattr(self.noisy_moments, '_not_overridden')):\n return self.noisy_moments([ops.Moment([operation])], operation.qubits)\n if (not hasattr(self.noisy_moment, '_not_overridden')):\n return self.noisy_moment(ops.Moment([operation]), operation.qubits)\n assert False, 'Should be unreachable.'", "docstring": "Adds noise to an individual operation.\n\nArgs:\noperation: The operation to make noisy.\n\nReturns:\nAn OP_TREE corresponding to the noisy operations implementing the\nnoisy version of the given operation.", "source": "codesearchnet"} {"code": "def _is_subscribed_identity(tensor):\n if tensor.op.type != 'Identity':\n return False\n match = re.match('(?P^.*?)/subscription/Identity[^/]+', tensor.name)\n if match is None or len(match.groups()) != 1:\n return False\n prefix_name = match.group('prefix_name')\n assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(tensor.op.name)\n source_tensor = tensor.op.inputs[0]\n if prefix_name != source_tensor.op.name:\n return False\n return True", "docstring": "Checks if the given tensor is an identity op returned by `subscribe()`.\n\nArgs:\ntensor: A `tf.Tensor` to check.\n\nReturns:\nTrue if the given tensor matches the criteria for subscription identities:\nits op type is `Identity`, its name matches the name of its input and\nconforms to the convention for subscribed nodes.\nFalse otherwise.", "source": "github-repos"} {"code": "def update(self, resource, timeout=(- 1)):\n return self._client.update(resource, timeout=timeout, default_values=self.DEFAULT_VALUES)", "docstring": "Updates only name for the Artifact Bundle.\n\nArgs:\nresource (dict): Object to update.\ntimeout:\nTimeout in seconds. Waits for task completion by default. 
The timeout does not abort the operation\nin OneView, it just stops waiting for its completion.\n\nReturns:\ndict: Updated resource.", "source": "codesearchnet"} {"code": "def get_matching_text(string_list, match_min_size=30, ignore='', end_characters='.!\\r\\n'):\n \n \n a = string_list[0]\n for i in range(1, len(string_list)):\n b = string_list[i]\n result = get_matching_text_in_strs(a, b, match_min_size=match_min_size, ignore=ignore,\n end_characters=end_characters)\n a = ''.join(result)\n return a", "docstring": "Returns a string containing matching blocks of text in a list of strings followed by non-matching.\n\nArgs:\nstring_list (List[str]): List of strings to match\nmatch_min_size (int): Minimum block size to match on. Defaults to 30.\nignore (str): Any characters to ignore in matching. Defaults to ''.\nend_characters (str): End characters to look for. Defaults to '.\\r\\n'.\n\nReturns:\nstr: String containing matching blocks of text followed by non-matching", "source": "juraj-google-style"} {"code": "def capture_insert_from_model(cls, table_name, record_id, *, exclude_fields=()):\n exclude_cols = ()\n if exclude_fields:\n model_cls = get_connected_model_for_table_name(table_name)\n exclude_cols = cls._fieldnames_to_colnames(model_cls, exclude_fields)\n raw_query = sql.SQL('\\n SELECT {schema}.hc_capture_insert_from_row(\\n hstore({schema}.{table_name}.*),\\n %(table_name)s,\\n ARRAY[{exclude_cols}]::text[] -- cast to type expected by stored procedure\\n ) AS id\\n FROM {schema}.{table_name}\\n WHERE id = %(record_id)s\\n ').format(schema=sql.Identifier(settings.HEROKU_CONNECT_SCHEMA), table_name=sql.Identifier(table_name), exclude_cols=sql.SQL(', ').join((sql.Identifier(col) for col in exclude_cols)))\n params = {'record_id': record_id, 'table_name': table_name}\n result_qs = TriggerLog.objects.raw(raw_query, params)\n return list(result_qs)", "docstring": "Create a fresh insert record from the current model state in the database.\n\nFor read-write connected models, this will lead to the attempted creation of a\ncorresponding object in Salesforce.\n\nArgs:\ntable_name (str): The name of the table backing the connected model (without schema)\nrecord_id (int): The primary id of the connected model\nexclude_fields (Iterable[str]): The names of fields that will not be included in the\nwrite record\n\nReturns:\nA list of the created TriggerLog entries (usually one).\n\nRaises:\nLookupError: if ``table_name`` does not belong to a connected model", "source": "codesearchnet"} {"code": "def __add__(self, other: Any) -> 'KeyPath':\n if other is None:\n return self\n if isinstance(other, str):\n other = KeyPath.parse(other)\n elif isinstance(other, KeyPathSet):\n other = other.copy()\n other.rebase(self)\n return other\n elif not isinstance(other, KeyPath):\n other = KeyPath(other)\n assert isinstance(other, KeyPath)\n return KeyPath(other.keys, self)", "docstring": "Concatenates a KeyPath equivalent object.\n\nArgs:\nother: Object to add, which can be None, int (as a 1-level KeyPath),\nstring (parsed as a KeyPath), a KeyPath object, or any other object as\na single key.\n\nReturns:\nNewly concatenated KeyPath.\n\nRaises:\nValueError: If other is a string that cannot be parsed into a KeyPath.", "source": "github-repos"} {"code": "def get(self, id):\n request_url = (self._client.base_api_url + self.detail_url.format(id=id))\n response = self._client.session.get(request_url)\n self.validate_request_success(response_text=response.text, request_url=request_url, status_code=response.status_code, 
expected_status_code=HTTP_200_OK)\n return self.response_data_to_model_instance(response.json())", "docstring": "Get the model instance with a given id.\n\nArgs:\nid (int or str): The primary identifier (e.g., pk or UUID)\nfor the task instance to get.\n\nReturns:\n:class:`saltant.models.resource.Model`:\nA :class:`saltant.models.resource.Model` subclass\ninstance representing the resource requested.", "source": "codesearchnet"} {"code": "def imatch_any(patterns, name):\n \n \n if not patterns:\n return True\n return any(imatch(pattern, name) for pattern in patterns)", "docstring": "Test if a name matches any of a list of patterns (case insensitive).\n\nWill return `True` if ``patterns`` is an empty list.\n\nArguments:\npatterns (list): A list of wildcard pattern, e.g ``[\"*.py\",\n\"*.pyc\"]``\nname (str): A filename.\n\nReturns:\nbool: `True` if the name matches at least one of the patterns.", "source": "juraj-google-style"} {"code": "def decode_event(self, log_topics, log_data):\n \n \n\n \n \n if not len(log_topics) or log_topics[0] not in self.event_data:\n raise ValueError('Unknown log type')\n\n event_id_ = log_topics[0]\n\n event = self.event_data[event_id_]\n\n \n \n \n \n unindexed_types = [\n type_\n for type_, indexed in zip(event['types'], event['indexed'])\n if not indexed\n ]\n unindexed_args = decode_abi(unindexed_types, log_data)\n\n \n \n indexed_count = 1 \n\n result = {}\n for name, type_, indexed in zip(\n event['names'], event['types'], event['indexed']):\n if indexed:\n topic_bytes = utils.zpad(\n utils.encode_int(log_topics[indexed_count]),\n 32,\n )\n indexed_count += 1\n value = decode_single(process_type(type_), topic_bytes)\n else:\n value = unindexed_args.pop(0)\n\n result[name] = value\n result['_event_type'] = utils.to_string(event['name'])\n\n return result", "docstring": "Return a dictionary representation the log.\n\nNote:\nThis function won't work with anonymous events.\n\nArgs:\nlog_topics (List[bin]): The log's indexed arguments.\nlog_data (bin): The encoded non-indexed arguments.", "source": "juraj-google-style"} {"code": "def _get_subcommand(name):\n \n \n _LOGGER.debug('Accessing subcommand \"%s\".', name)\n if name not in settings.subcommands:\n raise ValueError(\n '\"{subcommand}\" is not a {command} command. 
\\'{command} help -a\\' '\n 'lists all available subcommands.'.format(\n command=settings.command, subcommand=name)\n )\n return settings.subcommands[name]", "docstring": "Return the function for the specified subcommand.\n\nArgs:\nname: The name of a subcommand.\n\nReturns:\nThe loadable object from the entry point represented by the subcommand.", "source": "juraj-google-style"} {"code": "def GetDataByPath(self, path):\n \n _, path_data = self._paths.get(path, (None, None))\n return path_data", "docstring": "Retrieves the data associated to a path.\n\nArgs:\npath (str): path of the file entry.\n\nReturns:\nbytes: data or None if not available.", "source": "juraj-google-style"} {"code": "def dict2str(self, d: Dict, joiner: str) -> str:\n result = str()\n for key in d:\n result = ((result + str(key)) + ' : ')\n if isinstance(d[key], list):\n result = ((result + self.list2str(d[key], joiner)) + joiner)\n elif isinstance(d[key], dict):\n result = ((result + self.dict2str(d[key], joiner)) + joiner)\n elif d[key]:\n result = ((result + str(d[key])) + joiner)\n return result", "docstring": "Convert dict to str as input for tokenizer\n\nArgs:\nd (dict): dict for converting\njoiner (str): join the elements using this string to separate them.\n\nReturns: the value of the dict as a string", "source": "codesearchnet"} {"code": "def read_until(self, expected_commands, timeout):\n msg = timeouts.loop_until_timeout_or_valid(timeout, (lambda : self.read_message(timeout)), (lambda m: (m.command in expected_commands)), 0)\n if (msg.command not in expected_commands):\n raise usb_exceptions.AdbTimeoutError('Timed out establishing connection, waiting for: %s', expected_commands)\n return msg", "docstring": "Read AdbMessages from this transport until we get an expected command.\n\nThe ADB protocol specifies that before a successful CNXN handshake, any\nother packets must be ignored, so this method provides the ability to\nignore unwanted commands. It's primarily used during the initial\nconnection to the device. See Read() for more details, including more\nexceptions that may be raised.\n\nArgs:\nexpected_commands: Iterable of expected command responses, like\n('CNXN', 'AUTH').\ntimeout: timeouts.PolledTimeout object to use for timeout.\n\nReturns:\nThe ADB message received that matched one of expected_commands.\n\nRaises:\nAdbProtocolError: If timeout expires between reads, this can happen\nif we are getting spammed with unexpected commands.", "source": "codesearchnet"} {"code": "def forall(self, vars_list: List[str]) -> 'TensorFluent':\n \n return self._aggregation_op(tf.reduce_all, self, vars_list)", "docstring": "Returns the TensorFluent for the forall aggregation function.\n\nArgs:\nvars_list: The list of variables to be aggregated over.\n\nReturns:\nA TensorFluent wrapping the forall aggregation function.", "source": "juraj-google-style"} {"code": "def download_file(url, file_path, mkdir=False):\n \n folder, fname = os.path.split(file_path)\n return download_file_by_name(url, folder, fname, mkdir)", "docstring": "Write a string of data to file.\n\nArgs:\nurl: A string to a valid URL.\nfile_path: Full path to intended download location (e.g. 
c:/ladybug/testPts.pts)\nmkdir: Set to True to create the directory if doesn't exist (Default: False)", "source": "juraj-google-style"} {"code": "def preprocess_input(features, target, train_config, preprocess_output_dir, model_type):\n target_name = train_config['target_column']\n key_name = train_config['key_column']\n with tf.name_scope('numerical_feature_preprocess'):\n if train_config['numerical_columns']:\n numerical_analysis_file = os.path.join(preprocess_output_dir, NUMERICAL_ANALYSIS)\n if (not file_io.file_exists(numerical_analysis_file)):\n raise ValueError(('File %s not found in %s' % (NUMERICAL_ANALYSIS, preprocess_output_dir)))\n numerical_anlysis = json.loads(python_portable_string(file_io.read_file_to_string(numerical_analysis_file)))\n for name in train_config['numerical_columns']:\n if ((name == target_name) or (name == key_name)):\n continue\n transform_config = train_config['transforms'].get(name, {})\n transform_name = transform_config.get('transform', None)\n if (transform_name == 'scale'):\n value = float(transform_config.get('value', 1.0))\n features[name] = _scale_tensor(features[name], range_min=numerical_anlysis[name]['min'], range_max=numerical_anlysis[name]['max'], scale_min=(- value), scale_max=value)\n elif ((transform_name == 'identity') or (transform_name is None)):\n pass\n else:\n raise ValueError(('For numerical variables, only scale and identity are supported: Error for %s' % name))\n if (target is not None):\n with tf.name_scope('target_feature_preprocess'):\n if (target_name in train_config['categorical_columns']):\n labels = train_config['vocab_stats'][target_name]['labels']\n table = tf.contrib.lookup.string_to_index_table_from_tensor(labels)\n target = table.lookup(target)\n with tf.name_scope('categorical_feature_preprocess'):\n for name in train_config['categorical_columns']:\n if ((name == key_name) or (name == target_name)):\n continue\n transform_config = train_config['transforms'].get(name, {})\n transform_name = transform_config.get('transform', None)\n if is_dnn_model(model_type):\n if ((transform_name == 'embedding') or (transform_name == 'one_hot') or (transform_name is None)):\n map_vocab = True\n else:\n raise ValueError(('Unknown transform %s' % transform_name))\n elif is_linear_model(model_type):\n if ((transform_name == 'one_hot') or (transform_name is None)):\n map_vocab = True\n elif (transform_name == 'embedding'):\n map_vocab = False\n else:\n raise ValueError(('Unknown transform %s' % transform_name))\n if map_vocab:\n labels = train_config['vocab_stats'][name]['labels']\n table = tf.contrib.lookup.string_to_index_table_from_tensor(labels)\n features[name] = table.lookup(features[name])\n return (features, target)", "docstring": "Perform some transformations after reading in the input tensors.\n\nArgs:\nfeatures: dict of feature_name to tensor\ntarget: tensor\ntrain_config: our training config object\npreprocess_output_dir: folder should contain the vocab files.\nmodel_type: the tf model type.\n\nRaises:\nValueError: if wrong transforms are used\n\nReturns:\nNew features dict and new target tensor.", "source": "codesearchnet"} {"code": "def get_extension_by_name(cert_obj, extension_name):\n try:\n return cert_obj.extensions.get_extension_for_oid(getattr(cryptography.x509.oid.ExtensionOID, extension_name))\n except cryptography.x509.ExtensionNotFound:\n pass", "docstring": "Get a standard certificate extension by attribute name.\n\nArgs:\ncert_obj: cryptography.Certificate\nCertificate containing a standard 
extension.\n\nextension_name : str\nExtension name. E.g., 'SUBJECT_DIRECTORY_ATTRIBUTES'.\n\nReturns:\nCryptography.Extension", "source": "codesearchnet"} {"code": "def get_psd_product(self, vector, dtype=None):\n \n \n if dtype is None:\n dtype = self.nn_dtype\n vector = tf.cast(vector, self.nn_dtype)\n alpha = tf.reshape(vector[0], shape=[1, 1])\n beta = vector[1:]\n \n \n h_beta = self.get_h_product(beta)\n\n \n result = tf.concat(\n [\n alpha * self.nu + tf.reduce_sum(tf.multiply(beta, self.vector_g)),\n tf.multiply(alpha, self.vector_g) + h_beta\n ],\n axis=0)\n return tf.cast(result, dtype)", "docstring": "Function that provides matrix product interface with PSD matrix.\n\nArgs:\nvector: the vector to be multiplied with matrix M\n\nReturns:\nresult_product: Matrix product of M and vector", "source": "juraj-google-style"} {"code": "def create_from_settings(settings):\n return Connection(settings['url'], settings['base_url'], settings['user'], settings['password'], authorizations=settings['authorizations'], debug=settings['debug'])", "docstring": "Create a connection with given settings.\n\nArgs:\nsettings (dict): A dictionary of settings\n\nReturns:\n:class:`Connection`. The connection", "source": "codesearchnet"} {"code": "def _ReadSemanticDataTypeDefinition(self, definitions_registry, definition_values, data_type_definition_class, definition_name, supported_definition_values):\n return self._ReadDataTypeDefinition(definitions_registry, definition_values, data_type_definition_class, definition_name, supported_definition_values)", "docstring": "Reads a semantic data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\ndata_type_definition_class (str): data type definition class.\ndefinition_name (str): name of the definition.\nsupported_definition_values (set[str]): names of the supported definition\nvalues.\n\nReturns:\nSemanticDataTypeDefinition: semantic data type definition.\n\nRaises:\nDefinitionReaderError: if the definitions values are missing or if\nthe format is incorrect.", "source": "codesearchnet"} {"code": "def update_hparams_for_universal_transformer(hparams):\n \n hparams.daisy_chain_variables = False \n\n \n \n hparams.add_hparam(\"mix_with_transformer\", None)\n\n \n hparams.add_hparam(\"num_mixedin_layers\", 2)\n \n hparams.add_hparam(\"num_inrecurrence_layers\", 1)\n\n \n \n hparams.add_hparam(\"recurrence_type\", \"basic\")\n\n \n hparams.add_hparam(\"num_rec_steps\", hparams.num_hidden_layers)\n\n \n hparams.add_hparam(\"add_position_timing_signal\", True)\n if hparams.add_position_timing_signal:\n hparams.pos = None\n \n \n hparams.add_hparam(\"position_start_index\", None)\n\n \n hparams.add_hparam(\"add_step_timing_signal\", True)\n \n hparams.add_hparam(\"step_timing_signal_type\", \"learned\")\n\n \n \n hparams.add_hparam(\"add_or_concat_timing_signal\", \"add\")\n\n \n \n hparams.add_hparam(\"add_sru\", False)\n\n \n \n hparams.add_hparam(\"transformer_ffn_type\", \"fc\")\n\n \n hparams.add_hparam(\"transform_bias_init\", -1.0)\n hparams.add_hparam(\"couple_carry_transform_gates\", True)\n\n \n \n hparams.add_hparam(\"depth_embedding\", True)\n \n hparams.add_hparam(\"dwa_elements\", True)\n\n \n \n \n hparams.add_hparam(\"gate_ffn_layer\", \"dense\")\n\n \n hparams.add_hparam(\"lstm_forget_bias\", 1.0)\n \n hparams.add_hparam(\"use_memory_as_final_state\", False)\n \n hparams.add_hparam(\"add_ffn_unit_to_the_transition_function\", 
False)\n\n \n hparams.add_hparam(\"act_type\", \"basic\")\n \n hparams.add_hparam(\"act_max_steps\", 2 * hparams.num_hidden_layers)\n hparams.add_hparam(\"act_halting_bias_init\", 1.0)\n hparams.add_hparam(\"act_epsilon\", 0.01)\n hparams.add_hparam(\"act_loss_weight\", 0.01)\n\n return hparams", "docstring": "Adds default hparams for all of the variants of the Universal Transformer.\n\nArgs:\nhparams: default hparams (usually one of the standard hparams from\ntransformer model (like \"transformer_base\")\n\nReturns:\nhparams with default values for Universal Transformers hyper-parameters", "source": "juraj-google-style"} {"code": "def play_from_queue(self, index, start=True):\n if (not self.speaker_info):\n self.get_speaker_info()\n uri = 'x-rincon-queue:{0}\n self.avTransport.SetAVTransportURI([('InstanceID', 0), ('CurrentURI', uri), ('CurrentURIMetaData', '')])\n self.avTransport.Seek([('InstanceID', 0), ('Unit', 'TRACK_NR'), ('Target', (index + 1))])\n if start:\n self.play()", "docstring": "Play a track from the queue by index.\n\nThe index number is required as an argument, where the first index\nis 0.\n\nArgs:\nindex (int): 0-based index of the track to play\nstart (bool): If the item that has been set should start playing", "source": "codesearchnet"} {"code": "def figure_naming(pretitle='', posttitle='', prefile='', postfile=''):\n if pretitle:\n pretitle = ('%s -- ' % pretitle)\n if posttitle:\n posttitle = (' -- %s' % posttitle)\n if prefile:\n prefile = ('%s_' % prefile)\n if postfile:\n postfile = ('_%s' % postfile)\n return (pretitle, posttitle, prefile, postfile)", "docstring": "Helper function to define the strings that handle pre-post conventions\nfor viewing - plotting title and saving options.\n\nArgs:\npretitle(str): String to include before the general title of the figure.\nposttitle(str): String to include after the general title of the figure.\nprefile(str): String to include before the general filename of the figure.\npostfile(str): String to include after the general filename of the figure.\n\nReturns:\nstr: String to include in the figure name and title, in a suitable form.", "source": "codesearchnet"} {"code": "def initialize(\n self, config_file: str = \"bmi_config.txt\", initialize_indicators=True\n ):\n \n self.t = 0.0\n if not os.path.isfile(config_file):\n self.create_bmi_config_file(config_file)\n\n self.s0 = [\n pd.read_csv(\n config_file, index_col=0, header=None, error_bad_lines=False\n )[1]\n for _ in range(self.res)\n ]\n self.s0_original = self.s0[0].copy(deep=True)\n\n self.latent_state_vector = self.construct_default_initial_state()\n\n for n in self.nodes(data=True):\n rv = LatentVar(n[0])\n n[1][\"rv\"] = rv\n n[1][\"update_function\"] = self.default_update_function\n rv.dataset = [1.0 for _ in range(self.res)]\n rv.partial_t = self.s0[0][f\"∂({n[0]})/∂t\"]\n if initialize_indicators:\n for indicator in n[1][\"indicators\"].values():\n indicator.samples = np.random.normal(\n indicator.mean * np.array(n[1][\"rv\"].dataset),\n scale=0.01,\n )", "docstring": "Initialize the executable AnalysisGraph with a config file.\n\nArgs:\nconfig_file\n\nReturns:\nAnalysisGraph", "source": "juraj-google-style"} {"code": "def find_base_model_checkpoint(model_type: str, model_files: Optional[Dict[str, Union[Path, List[Path]]]]=None) -> str:\n if model_files is None:\n model_files = get_model_files(model_type)\n module_files = model_files['model_files']\n for fname in module_files:\n if 'modeling' not in str(fname):\n continue\n with open(fname, 'r', 
encoding='utf-8') as f:\n content = f.read()\n if _re_checkpoint_for_doc.search(content) is not None:\n checkpoint = _re_checkpoint_for_doc.search(content).groups()[0]\n checkpoint = checkpoint.replace('\"', '')\n checkpoint = checkpoint.replace(\"'\", '')\n return checkpoint\n return ''", "docstring": "Finds the model checkpoint used in the docstrings for a given model.\n\nArgs:\nmodel_type (`str`): A valid model type (like \"bert\" or \"gpt2\")\nmodel_files (`Dict[str, Union[Path, List[Path]]`, *optional*):\nThe files associated to `model_type`. Can be passed to speed up the function, otherwise will be computed.\n\nReturns:\n`str`: The checkpoint used.", "source": "github-repos"} {"code": "def _to_tensor_list_helper(encode_fn, element_spec, element):\n nest.assert_same_structure(element_spec, element)\n\n def reduce_fn(state, value):\n spec, component = value\n if isinstance(spec, internal.TensorSpec):\n try:\n component = ops.convert_to_tensor(component, spec.dtype)\n except (TypeError, ValueError):\n raise ValueError(f'Value {component} is not convertible to a tensor with dtype {spec.dtype} and shape {spec.shape}.')\n if not component.shape.is_compatible_with(spec.shape):\n raise ValueError(f'Value {component} is not convertible to a tensor with dtype {spec.dtype} and shape {spec.shape}.')\n return encode_fn(state, spec, component)\n return functools.reduce(reduce_fn, zip(nest.flatten(element_spec), nest.flatten(element)), [])", "docstring": "Returns a tensor list representation of the element.\n\nArgs:\nencode_fn: Method that constructs a tensor list representation from the\ngiven element spec and element.\nelement_spec: A nested structure of `tf.TypeSpec` objects representing to\nelement type specification.\nelement: The element to convert to tensor list representation.\n\nReturns:\nA tensor list representation of `element`.\n\nRaises:\nValueError: If `element_spec` and `element` do not have the same number of\nelements or if the two structures are not nested in the same way.\nTypeError: If `element_spec` and `element` differ in the type of sequence\nin any of their substructures.", "source": "github-repos"} {"code": "def to_frame(data_list, exc_cols=None, **kwargs):\n from collections import OrderedDict\n return pd.DataFrame(pd.Series(data_list).apply(OrderedDict).tolist(), **kwargs).drop(columns=([] if (exc_cols is None) else exc_cols))", "docstring": "Dict in Python 3.6 keeps insertion order, but cannot be relied upon\nThis method is to keep column names in order\nIn Python 3.7 this method is redundant\n\nArgs:\ndata_list: list of dict\nexc_cols: exclude columns\n\nReturns:\npd.DataFrame\n\nExample:\n>>> d_list = [\n... dict(sid=1, symbol='1 HK', price=89),\n... dict(sid=700, symbol='700 HK', price=350)\n... 
]\n>>> to_frame(d_list)\nsid symbol price\n0 1 1 HK 89\n1 700 700 HK 350\n>>> to_frame(d_list, exc_cols=['price'])\nsid symbol\n0 1 1 HK\n1 700 700 HK", "source": "codesearchnet"} {"code": "def emit_obj_snapshot(self, category: str, name: str, timestamp: int, pid: int, tid: int, object_id: int, snapshot: Dict[str, Any]) -> None:\n event = self._create_event('O', category, name, pid, tid, timestamp)\n event['id'] = object_id\n event['args'] = {'snapshot': snapshot}\n self._events.append(event)", "docstring": "Adds an object snapshot event to the trace.\n\nArgs:\ncategory: The event category as a string.\nname: The event name as a string.\ntimestamp: The timestamp of this event as a long integer.\npid: Identifier of the process generating this event as an integer.\ntid: Identifier of the thread generating this event as an integer.\nobject_id: Identifier of the object as an integer.\nsnapshot: A JSON-compatible representation of the object.", "source": "github-repos"} {"code": "def create_additional_charge(self, *, subscription_id, description, plan_value, plan_tax, plan_tax_return_base,\n currency):\n \n payload = {\n \"description\": description,\n \"additionalValues\": [\n {\n \"name\": \"ITEM_VALUE\",\n \"value\": plan_value,\n \"currency\": currency\n },\n {\n \"name\": \"ITEM_TAX\",\n \"value\": plan_tax,\n \"currency\": currency\n },\n {\n \"name\": \"ITEM_TAX_RETURN_BASE\",\n \"value\": plan_tax_return_base,\n \"currency\": currency\n }\n ]\n }\n fmt = 'subscriptions/{}/recurringBillItems'.format(subscription_id)\n return self.client._post(self.url + fmt, json=payload, headers=self.get_headers())", "docstring": "Adds extra charges to the respective invoice for the current period.\n\nArgs:\nsubscription_id: Identification of the subscription\ndescription:\nplan_value:\nplan_tax:\nplan_tax_return_base:\ncurrency:\n\nReturns:", "source": "juraj-google-style"} {"code": "def calc_clusters(returns, n=None, plot=False):\n \n \n corr = returns.corr()\n\n \n diss = 1 - corr\n\n \n \n \n mds = sklearn.manifold.MDS(dissimilarity='precomputed')\n xy = mds.fit_transform(diss)\n\n def routine(k):\n \n km = sklearn.cluster.KMeans(n_clusters=k)\n km_fit = km.fit(xy)\n labels = km_fit.labels_\n centers = km_fit.cluster_centers_\n\n \n mappings = dict(zip(returns.columns, labels))\n\n \n totss = 0\n withinss = 0\n \n avg = np.array([np.mean(xy[:, 0]), np.mean(xy[:, 1])])\n for idx, lbl in enumerate(labels):\n withinss += sum((xy[idx] - centers[lbl]) ** 2)\n totss += sum((xy[idx] - avg) ** 2)\n pvar_expl = 1.0 - withinss / totss\n\n return mappings, pvar_expl, labels\n\n if n:\n result = routine(n)\n else:\n n = len(returns.columns)\n n1 = int(np.ceil(n * 0.6666666666))\n for i in range(2, n1 + 1):\n result = routine(i)\n if result[1] > 0.9:\n break\n\n if plot:\n fig, ax = plt.subplots()\n ax.scatter(xy[:, 0], xy[:, 1], c=result[2], s=90)\n for i, txt in enumerate(returns.columns):\n ax.annotate(txt, (xy[i, 0], xy[i, 1]), size=14)\n\n \n tmp = result[0]\n \n inv_map = {}\n for k, v in iteritems(tmp):\n inv_map[v] = inv_map.get(v, [])\n inv_map[v].append(k)\n\n return inv_map", "docstring": "Calculates the clusters based on k-means\nclustering.\n\nArgs:\n* returns (pd.DataFrame): DataFrame of returns\n* n (int): Specify # of clusters. 
If None, this\nwill be automatically determined\n* plot (bool): Show plot?\n\nReturns:\n* dict with structure: {cluster# : [col names]}", "source": "juraj-google-style"} {"code": "def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):\n \n \n iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)\n if iso3 is not None:\n return cls.get_country_name_from_iso3(iso3, exception=exception)\n return None", "docstring": "Get country name from ISO2 code\n\nArgs:\niso2 (str): ISO2 code for which to get country name\nuse_live (bool): Try to use latest data from web rather than file in package. Defaults to True.\nexception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\nReturns:\nOptional[str]: Country name", "source": "juraj-google-style"} {"code": "def create_report(self, uri, timeout=(- 1)):\n logger.debug('Creating Report (uri = %s)'.format(uri))\n (task, _) = self._connection.post(uri, {})\n if (not task):\n raise exceptions.HPOneViewException(RESOURCE_CLIENT_TASK_EXPECTED)\n task = self._task_monitor.get_completed_task(task, timeout)\n return task['taskOutput']", "docstring": "Creates a report and returns the output.\n\nArgs:\nuri: URI\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\nlist:", "source": "codesearchnet"} {"code": "def go_in(self, vertex):\n \n if self.vertex_in:\n self.vertex_in.edges_in.remove(self)\n self.vertex_in = vertex\n vertex.edges_in.add(self)", "docstring": "Tell the edge to go into this vertex.\n\nArgs:\nvertex (Vertex): vertex to go into.", "source": "juraj-google-style"} {"code": "def recipe_anonymize_query(config, auth_read, from_project, from_dataset, from_query, to_project, to_dataset, to_table):\n anonymize(config, {'auth': auth_read, 'bigquery': {'from': {'project': from_project, 'dataset': from_dataset, 'query': from_query}, 'to': {'project': to_project, 'dataset': to_dataset, 'table': to_table}}})", "docstring": "Runs a query and anonymizes all rows. Used to create sample table for\ndashboards.\n\nArgs:\nauth_read (authentication) - Credentials used.\nfrom_project (string) - Original project to read from.\nfrom_dataset (string) - Original dataset to read from.\nfrom_query (string) - Query to read data.\nto_project (string) - Anonymous data will be written to.\nto_dataset (string) - Anonymous data will be written to.\nto_table (string) - Anonymous data will be written to.", "source": "github-repos"} {"code": "def get_all_keys(tweet, parent_key=''):\n items = []\n for (k, v) in tweet.items():\n new_key = ((parent_key + ' ') + k)\n if isinstance(v, dict):\n items.extend(get_all_keys(v, parent_key=new_key))\n else:\n items.append(new_key.strip(' '))\n return items", "docstring": "Takes a tweet object and recursively returns a list of all keys contained\nin this level and all nested levels of the tweet.\n\nArgs:\ntweet (Tweet): the tweet dict\nparent_key (str): key from which this process will start, e.g., you can\nget keys only under some key that is not the top-level key.\n\nReturns:\nlist of all keys in nested dicts.\n\nExample:\n>>> import tweet_parser.tweet_checking as tc\n>>> tweet = {\"created_at\": 124125125125, \"text\": \"just setting up my twttr\",\n... 
\"nested_field\": {\"nested_1\": \"field\", \"nested_2\": \"field2\"}}\n>>> tc.get_all_keys(tweet)\n['created_at', 'text', 'nested_field nested_1', 'nested_field nested_2']", "source": "codesearchnet"} {"code": "def _CreateAnalysisPlugins(self, options):\n \n if not self._analysis_plugins:\n return {}\n\n analysis_plugins = (\n analysis_manager.AnalysisPluginManager.GetPluginObjects(\n self._analysis_plugins))\n\n for analysis_plugin in analysis_plugins.values():\n helpers_manager.ArgumentHelperManager.ParseOptions(\n options, analysis_plugin)\n\n return analysis_plugins", "docstring": "Creates the analysis plugins.\n\nArgs:\noptions (argparse.Namespace): command line arguments.\n\nReturns:\ndict[str, AnalysisPlugin]: analysis plugins and their names.", "source": "juraj-google-style"} {"code": "def all(script, face=True, vert=True):\n \n filter_xml = ''.join([\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(script, filter_xml)\n return None", "docstring": "Select all the faces of the current mesh\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\nfaces (bool): If True the filter will select all the faces.\nverts (bool): If True the filter will select all the vertices.\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "juraj-google-style"} {"code": "def _ParseRecurseKeys(self, parser_mediator, root_key):\n \n for registry_key in root_key.RecurseKeys():\n if parser_mediator.abort:\n break\n\n self._ParseKey(parser_mediator, registry_key)", "docstring": "Parses the Registry keys recursively.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nroot_key (dfwinreg.WinRegistryKey): root Windows Registry key.", "source": "juraj-google-style"} {"code": "def vol_tetra(vt1, vt2, vt3, vt4):\n \n vol_tetra = np.abs(np.dot((vt1 - vt4),\n np.cross((vt2 - vt4), (vt3 - vt4)))) / 6\n return vol_tetra", "docstring": "Calculate the volume of a tetrahedron, given the four vertices of vt1,\nvt2, vt3 and vt4.\nArgs:\nvt1 (array-like): coordinates of vertex 1.\nvt2 (array-like): coordinates of vertex 2.\nvt3 (array-like): coordinates of vertex 3.\nvt4 (array-like): coordinates of vertex 4.\nReturns:\n(float): volume of the tetrahedron.", "source": "juraj-google-style"} {"code": "def _check(cls, name, val, can_be_zero=False, val_type=float):\n \n valid_types = [val_type]\n if val_type is float:\n valid_types.append(int)\n\n if type(val) not in valid_types:\n raise TypeError(\n 'Expect type %s for parameter %s' % (val_type.__name__, name))\n if val < 0:\n raise ValueError(\n 'Value for parameter %s has to be greater than 0' % name)\n if not can_be_zero and val == 0:\n raise ValueError(\n 'Value for parameter %s can not be 0' % name)\n return val", "docstring": "Check init arguments.\n\nArgs:\nname: name of the argument. For logging purpose.\nval: value. 
Value has to be non negative number.\ncan_be_zero: whether value can be zero.\nval_type: Python type of the value.\n\nReturns:\nThe value.\n\nRaises:\nValueError: when invalid value is passed in.\nTypeError: when invalid value type is passed in.", "source": "juraj-google-style"} {"code": "def while_stmt(test, body, get_state, set_state, symbol_names, opts):\n with func_graph.FuncGraph('tmp').as_default():\n init_test = test()\n if tensors.is_dense_tensor(init_test):\n _tf_while_stmt(test, body, get_state, set_state, symbol_names, opts)\n return\n if not init_test:\n return\n body()\n _py_while_stmt(test, body, get_state, set_state, opts)", "docstring": "Functional form of a while statement.\n\nThe loop operates on a so-called state, which includes all symbols that are\nvariant across loop iterations. In what follows we refer to state as either\na tuple of entities that represent an actual state, or a list of arguments\nof the corresponding types.\n\nThe inputs and outputs of the callables representing the loop blocks are not\nexplicit - instead, these functions must use nonlocal/global for side effects.\nThe inputs and outputs are instead controlled by the set_state/get_state\nfunctions.\n\nArgs:\ntest: Callable with boolean return type. The loop condition.\nbody: Callable representing the actual loop body.\nget_state: Additional callable which can capture additional state (such as\nthe values of composite symbols). This is only useful when staging the\nloop.\nset_state: Additional callable which save values captured by get_state back\ninto the Python environment. This is only useful when staging the loop.\nsymbol_names: Tuple containing the names of all loop variables.\nopts: Optional dict of extra loop parameters.\n\nReturns:\nTuple containing the final state.", "source": "github-repos"} {"code": "def thumbnail(self, image: 'torch.Tensor', size: SizeDict) -> 'torch.Tensor':\n input_height, input_width = image.shape[-2:]\n output_height, output_width = (size.height, size.width)\n height = min(input_height, output_height)\n width = min(input_width, output_width)\n if height == input_height and width == input_width:\n return image\n if input_height > input_width:\n width = int(input_width * height / input_height)\n elif input_width > input_height:\n height = int(input_height * width / input_width)\n return self.resize(image, size=SizeDict(width=width, height=height), interpolation=F.InterpolationMode.BICUBIC)", "docstring": "Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any\ncorresponding dimension of the specified size.\n\nArgs:\nimage (`torch.Tensor`):\nThe image to be resized.\nsize (`Dict[str, int]`):\nThe size `{\"height\": h, \"width\": w}` to resize the image to.\nresample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\nThe resampling filter to use.\ndata_format (`Optional[Union[str, ChannelDimension]]`, *optional*):\nThe data format of the output image. If unset, the same format as the input image is used.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. 
If not provided, it will be inferred.", "source": "github-repos"} {"code": "def __init__(self, save_steps=None, save_secs=None, output_dir='', show_dataflow=True, show_memory=False):\n self._output_file = os.path.join(output_dir, 'timeline-{}.json')\n self._file_writer = SummaryWriterCache.get(output_dir)\n self._show_dataflow = show_dataflow\n self._show_memory = show_memory\n self._timer = SecondOrStepTimer(every_secs=save_secs, every_steps=save_steps)", "docstring": "Initializes a hook that takes periodic profiling snapshots.\n\n`options.run_metadata` argument of `tf.Session.Run` is used to collect\nmetadata about execution. This hook sets the metadata and dumps it in Chrome\nTrace format.\n\n\nArgs:\nsave_steps: `int`, save profile traces every N steps. Exactly one of\n`save_secs` and `save_steps` should be set.\nsave_secs: `int` or `float`, save profile traces every N seconds.\noutput_dir: `string`, the directory to save the profile traces to.\nDefaults to the current directory.\nshow_dataflow: `bool`, if True, add flow events to the trace connecting\nproducers and consumers of tensors.\nshow_memory: `bool`, if True, add object snapshot events to the trace\nshowing the sizes and lifetimes of tensors.", "source": "github-repos"} {"code": "def acquire(self):\n \n if os.path.exists(self.path):\n try:\n pid = None\n\n with open(self.path, 'r') as f:\n line = f.readline().strip()\n pid = int(line)\n\n \n \n if not psutil.pid_exists(pid):\n os.remove(self.path)\n\n except ValueError as e:\n \n os.remove(self.path)\n\n except IOError as e:\n \n \n pass\n\n try:\n self.fd = os.open(self.path, os.O_CREAT | os.O_EXCL | os.O_RDWR)\n\n \n \n to_write = '%s%s' % (os.getpid(), os.linesep)\n os.write(self.fd, to_write.encode())\n\n except OSError as e:\n if not os.path.exists(self.path):\n raise\n return False\n\n self.acquired = True\n return True", "docstring": "Attempts to acquire a lock for the J-Link lockfile.\n\nIf the lockfile exists but does not correspond to an active process,\nthe lockfile is first removed, before an attempt is made to acquire it.\n\nArgs:\nself (Jlock): the ``JLock`` instance\n\nReturns:\n``True`` if the lock was acquired, otherwise ``False``.\n\nRaises:\nOSError: on file errors.", "source": "juraj-google-style"} {"code": "def get_service_health(service_id: str) -> str:\n \n \n if DC.get_replicas(service_id) != DC.get_actual_replica(service_id):\n health_status = \"Unhealthy\"\n else:\n health_status = \"Healthy\"\n\n return health_status", "docstring": "Get the health of a service using service_id.\n\nArgs:\nservice_id\n\nReturns:\nstr, health status", "source": "juraj-google-style"} {"code": "def bbox_clip(bboxes, img_shape):\n \n assert bboxes.shape[-1] % 4 == 0\n clipped_bboxes = np.empty_like(bboxes, dtype=bboxes.dtype)\n clipped_bboxes[..., 0::2] = np.maximum(\n np.minimum(bboxes[..., 0::2], img_shape[1] - 1), 0)\n clipped_bboxes[..., 1::2] = np.maximum(\n np.minimum(bboxes[..., 1::2], img_shape[0] - 1), 0)\n return clipped_bboxes", "docstring": "Clip bboxes to fit the image shape.\n\nArgs:\nbboxes (ndarray): Shape (..., 4*k)\nimg_shape (tuple): (height, width) of the image.\n\nReturns:\nndarray: Clipped bboxes.", "source": "juraj-google-style"} {"code": "def intersect(self, other):\n \n self.automaton = fst.intersect(self.automaton, other.automaton)\n return self", "docstring": "Constructs an unminimized DFA recognizing\nthe intersection of the languages of two given DFAs.\nArgs:\nother (DFA): The other DFA that will be used\nfor the intersect 
operation\nReturns:\nDFA: The resulting DFA", "source": "juraj-google-style"} {"code": "def make_innermost_getter(getter):\n\n @functools.wraps(getter)\n def _new_getter(kernel_results, *args, **kwargs):\n 'Wrapped getter.'\n results_stack = []\n while hasattr(kernel_results, 'inner_results'):\n results_stack.append(kernel_results)\n kernel_results = kernel_results.inner_results\n return getter(kernel_results, *args, **kwargs)\n return _new_getter", "docstring": "Wraps a getter so it applies to the inner-most results in `kernel_results`.\n\nThe wrapped getter unwraps `kernel_results` and returns the return value of\n`getter` called with the first results without an `inner_results` attribute.\n\nArgs:\ngetter: A callable that takes Kernel results and returns some value.\n\nReturns:\nnew_getter: A wrapped `getter`.", "source": "codesearchnet"} {"code": "def _op_in_graph_mode(tensor):\n if context.executing_eagerly():\n return tensor\n return tensor.op", "docstring": "Returns the tensor's op in graph mode, or the tensor in eager mode.\n\nThis is useful because sometimes an op is needed in graph mode instead of a\ntensor. In eager mode, there are no ops.\n\nArgs:\ntensor: A tensor.\n\nReturns:\nThe tensor's op in graph mode. The tensor in eager mode.", "source": "github-repos"} {"code": "def new_from_json(cls, json_data):\n json_data_as_unicode = _helpers._from_bytes(json_data)\n data = json.loads(json_data_as_unicode)\n module_name = data['_module']\n try:\n module_obj = __import__(module_name)\n except ImportError:\n module_name = module_name.replace('.googleapiclient', '')\n module_obj = __import__(module_name)\n module_obj = __import__(module_name, fromlist=module_name.split('.')[:(- 1)])\n kls = getattr(module_obj, data['_class'])\n return kls.from_json(json_data_as_unicode)", "docstring": "Utility class method to instantiate a Credentials subclass from JSON.\n\nExpects the JSON string to have been produced by to_json().\n\nArgs:\njson_data: string or bytes, JSON from to_json().\n\nReturns:\nAn instance of the subclass of Credentials that was serialized with\nto_json().", "source": "codesearchnet"} {"code": "def _find_channel_index(data_format):\n \n for i, c in enumerate(data_format):\n if c == \"C\":\n return i\n raise ValueError(\"data_format requires a channel dimension. 
Got: {}\"\n .format(data_format))", "docstring": "Returns the index of the channel dimension.\n\nArgs:\ndata_format: A string of characters corresponding to Tensor dimensionality.\n\nReturns:\nchannel_index: An integer indicating the channel dimension.\n\nRaises:\nValueError: If no channel dimension was found.", "source": "juraj-google-style"} {"code": "def is_distributed(partition_column, lower_bound, upper_bound):\n if ((partition_column is not None) and (lower_bound is not None) and (upper_bound is not None)):\n if (upper_bound > lower_bound):\n return True\n else:\n raise InvalidArguments('upper_bound must be greater than lower_bound.')\n elif ((partition_column is None) and (lower_bound is None) and (upper_bound is None)):\n return False\n else:\n raise InvalidArguments('Invalid combination of partition_column, lower_bound, upper_bound. All these arguments should be passed (distributed) or none of them (standard pandas).')", "docstring": "Check if it is possible to distribute a query given these args\n\nArgs:\npartition_column: column used to share the data between the workers\nlower_bound: the minimum value to be requested from the partition_column\nupper_bound: the maximum value to be requested from the partition_column\n\nReturns:\nTrue for distributed or False if not", "source": "codesearchnet"} {"code": "def convert_constant(params, w_name, scope_name, inputs, layers, weights, names):\n print('Converting constant ...')\n params_list = params['value'].numpy()\n\n def target_layer(x, value=params_list):\n return tf.constant(value.tolist(), shape=value.shape)\n lambda_layer = keras.layers.Lambda(target_layer)\n layers[(scope_name + '_np')] = params_list\n layers[scope_name] = lambda_layer(layers[list(layers.keys())[0]])", "docstring": "Convert constant layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"} {"code": "def file_move(filename, settings):\n \n if len(settings) != 1:\n raise ValueError(\"Settings must only contain one item with key \"\n \"'dest'.\")\n for k, v in settings.items():\n if k == \"dest\":\n shutil.move(filename, v)", "docstring": "Moves a file. 
{'_file_move': {'dest': 'new_file_name'}}\n\nArgs:\nfilename (str): Filename.\nsettings (dict): Must be {\"dest\": path of new file}", "source": "juraj-google-style"} {"code": "def metropolis_hastings_step(current_state: State, proposed_state: State, energy_change: FloatTensor, seed=None) -> Tuple[(State, tf.Tensor, tf.Tensor)]:\n flat_current = tf.nest.flatten(current_state)\n flat_proposed = nest.flatten_up_to(current_state, proposed_state)\n flat_current = [(p if (c is None) else c) for (p, c) in zip(flat_proposed, flat_current)]\n current_state = tf.nest.pack_sequence_as(current_state, flat_current)\n current_state = tf.nest.map_structure(tf.convert_to_tensor, current_state)\n proposed_state = tf.nest.map_structure(tf.convert_to_tensor, proposed_state)\n energy_change = tf.convert_to_tensor(value=energy_change)\n log_accept_ratio = (- energy_change)\n log_uniform = tf.math.log(tf.random.uniform(shape=tf.shape(input=log_accept_ratio), dtype=log_accept_ratio.dtype.base_dtype, seed=seed))\n is_accepted = (log_uniform < log_accept_ratio)\n next_state = mcmc_util.choose(is_accepted, proposed_state, current_state, name='choose_next_state')\n return (next_state, is_accepted, log_uniform)", "docstring": "Metropolis-Hastings step.\n\nThis probabilistically chooses between `current_state` and `proposed_state`\nbased on the `energy_change` so as to preserve detailed balance.\n\nEnergy change is the negative of `log_accept_ratio`.\n\nArgs:\ncurrent_state: Current state.\nproposed_state: Proposed state.\nenergy_change: E(proposed_state) - E(previous_state).\nseed: For reproducibility.\n\nReturns:\nnew_state: The chosen state.\nis_accepted: Whether the proposed state was accepted.\nlog_uniform: The random number that was used to select between the two\nstates.", "source": "codesearchnet"} {"code": "def expand_unique_results(y, idx):\n expanded = tf.gather(y, idx, axis=0)\n return expanded", "docstring": "Inverse of unique_bitstrings_with_counts.\n\nArgs:\ny: Values to pick according to `idx`.\nidx: The index at which to place each value of `y` in the output.\n\nReturns:\nexpanded: `tf.Tensor` such that `expanded[i] == y[idx[i]]`.", "source": "github-repos"} {"code": "def _weight_generator(self, reviewers):\n scores = [r.anomalous_score for r in reviewers]\n mu = np.average(scores)\n sigma = np.std(scores)\n if sigma:\n\n def w(v):\n 'Compute a weight for the given reviewer.\\n\\n Args:\\n v: anomalous score of a reviewer.\\n Returns:\\n weight of the given anomalous score.\\n '\n try:\n exp = math.exp(((self.alpha * (v - mu)) / sigma))\n return (1.0 / (1.0 + exp))\n except OverflowError:\n return 0.0\n return w\n else:\n return (lambda v: 1.0)", "docstring": "Compute a weight function for the given reviewers.\n\nArgs:\nreviewers: a set of reviewers to compute weight function.\n\nReturns:\na function computing a weight for a reviewer.", "source": "codesearchnet"} {"code": "def _run_graph_for_calibration_eager_mode(model_dir: str, tags: Collection[str], representative_dataset_map: rd.RepresentativeDatasetMapping) -> None:\n root: autotrackable.AutoTrackable = load.load(model_dir, tags)\n for signature_key, repr_ds in representative_dataset_map.items():\n try:\n _run_function_for_calibration_eager_mode(func=root.signatures[signature_key], representative_dataset=repr_ds)\n except Exception as ex:\n raise ValueError(f'Failed to run representative dataset through the function with the signature key: {signature_key}.') from ex", "docstring": "Runs the graph for calibration in eager mode.\n\nThis function 
assumes _eager mode_ (enabled in TF2 by default) when running\nthe graph. This step is used in order to collect the statistics in\nCustomAggregatorOp for quantization using the representative dataset for the\nactual data provided for inference.\n\nArgs:\nmodel_dir: Path to SavedModel directory.\ntags: Collection of tags identifying the MetaGraphDef within the SavedModel.\nrepresentative_dataset_map: A map where signature keys are mapped to\ncorresponding representative datasets.\n\nRaises:\nValueError: When running the function with the representative dataset fails.", "source": "github-repos"} {"code": "def delete(self, personId):\n check_type(personId, basestring, may_be_none=False)\n self._session.delete(((API_ENDPOINT + '/') + personId))", "docstring": "Remove a person from the system.\n\nOnly an admin can remove a person.\n\nArgs:\npersonId(basestring): The ID of the person to be deleted.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"} {"code": "def get_module_names(p):\n \n mods = list()\n mods = [f.split('.')[0] for f in listdir(p)\n if isfile(join(p, f)) and not f.endswith('.pyc') and not f.startswith('__')]\n print len(mods)\n return mods", "docstring": "Accepts a path to search for modules. The method will filter on files\nthat end in .pyc or files that start with __.\n\nArguments:\np (string): The path to search\nReturns:\nlist of file names", "source": "juraj-google-style"} {"code": "def build(bucket_name, version, force, verbose):\n if verbose:\n log.setLevel('DEBUG')\n if (not version):\n version = setuptools_scm.get_version()\n release = ('dev' if ('dev' in version) else 'release')\n tarball = TARBALL_FORMAT.format(version)\n tarball_path = os.path.join(tempfile.gettempdir(), tarball)\n s3_key = os.path.join(release, tarball)\n try:\n run('npm i')\n run('./node_modules/.bin/gulp build.prod')\n except ExecutionError:\n log.exception('Failed executing command')\n return\n log.debug('Creating archive')\n tar = tarfile.open(tarball_path, 'w:gz')\n for (root, dirnames, filenames) in os.walk('dist'):\n for f in filenames:\n tar.add(os.path.join(root, f), recursive=False, filter=strip_path)\n tar.close()\n log.debug('Uploading {} to s3:\n try:\n bucket = get_bucket_resource(bucket_name)\n if (s3_file_exists(bucket, s3_key) and (not force)):\n log.error('File already exists in S3, use --force to overwrite')\n return\n bucket.upload_file(tarball_path, os.path.join(release, tarball))\n except ClientError:\n log.exception('AWS API failure')", "docstring": "Build and upload a new tarball\n\nArgs:\nbucket_name (str): Name of the bucket to upload to\nversion (str): Override build version. 
Defaults to using SCM based versioning (git tags)\nforce (bool): Overwrite existing files in S3, if present\nverbose (bool): Verbose output", "source": "codesearchnet"} {"code": "def plot_conductivity_mu(self, temp=600, output='eig',\n relaxation_time=1e-14, xlim=None):\n \n import matplotlib.pyplot as plt\n cond = self._bz.get_conductivity(relaxation_time=relaxation_time,\n output=output, doping_levels=False)[\n temp]\n plt.figure(figsize=(9, 7))\n plt.semilogy(self._bz.mu_steps, cond, linewidth=3.0)\n self._plot_bg_limits()\n self._plot_doping(temp)\n if output == 'eig':\n plt.legend(['$\\\\Sigma_1$', '$\\\\Sigma_2$', '$\\\\Sigma_3$'])\n if xlim is None:\n plt.xlim(-0.5, self._bz.gap + 0.5)\n else:\n plt.xlim(xlim)\n plt.ylim([1e13 * relaxation_time, 1e20 * relaxation_time])\n plt.ylabel(\"conductivity,\\n $\\\\Sigma$ (1/($\\\\Omega$ m))\", fontsize=30.0)\n plt.xlabel(\"E-E$_f$ (eV)\", fontsize=30.0)\n plt.xticks(fontsize=25)\n plt.yticks(fontsize=25)\n plt.tight_layout()\n return plt", "docstring": "Plot the conductivity in function of Fermi level. Semi-log plot\n\nArgs:\ntemp: the temperature\nxlim: a list of min and max fermi energy by default (0, and band\ngap)\ntau: A relaxation time in s. By default none and the plot is by\nunits of relaxation time\n\nReturns:\na matplotlib object", "source": "juraj-google-style"} {"code": "def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):\n vision_data = {}\n if image_sizes is not None:\n num_image_tokens = [self.image_seq_length] * len(image_sizes)\n num_image_patches = [1] * len(image_sizes)\n vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})\n return MultiModalData(**vision_data)", "docstring": "Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.\n\nArgs:\nimage_sizes (List[List[str]], *optional*):\nThe input sizes formatted as (height, width) per each image.\nReturns:\nDict[str, List[int]]: A dictionary mapping each modality (\"image\", \"video\", \"audio\")\nto a list containing the number of placeholder tokens required. 
If the model doesn't accept\na certain modality or no input sizes are provided, the dict value is set to an empty list.", "source": "github-repos"} {"code": "def parse_kegg_gene_metadata(infile):\n metadata = defaultdict(str)\n with open(infile) as mf:\n kegg_parsed = bs_kegg.parse(mf.read())\n if ('DBLINKS' in kegg_parsed.keys()):\n if ('UniProt' in kegg_parsed['DBLINKS']):\n unis = str(kegg_parsed['DBLINKS']['UniProt']).split(' ')\n if isinstance(unis, list):\n metadata['uniprot'] = unis[0]\n else:\n metadata['uniprot'] = unis\n if ('NCBI-ProteinID' in kegg_parsed['DBLINKS']):\n metadata['refseq'] = str(kegg_parsed['DBLINKS']['NCBI-ProteinID'])\n if ('STRUCTURE' in kegg_parsed.keys()):\n metadata['pdbs'] = str(kegg_parsed['STRUCTURE']['PDB']).split(' ')\n else:\n metadata['pdbs'] = None\n if ('ORGANISM' in kegg_parsed.keys()):\n metadata['taxonomy'] = str(kegg_parsed['ORGANISM'])\n return metadata", "docstring": "Parse the KEGG flatfile and return a dictionary of metadata.\n\nDictionary keys are:\nrefseq\nuniprot\npdbs\ntaxonomy\n\nArgs:\ninfile: Path to KEGG flatfile\n\nReturns:\ndict: Dictionary of metadata", "source": "codesearchnet"} {"code": "def cubic_jacobian_polynomial(nodes):\n jac_parts = _helpers.matrix_product(nodes, _CUBIC_JACOBIAN_HELPER)\n jac_at_nodes = np.empty((1, 15), order='F')\n jac_at_nodes[(0, 0)] = two_by_two_det(jac_parts[(:, :2)])\n jac_at_nodes[(0, 1)] = two_by_two_det(jac_parts[(:, 2:4)])\n jac_at_nodes[(0, 2)] = two_by_two_det(jac_parts[(:, 4:6)])\n jac_at_nodes[(0, 3)] = two_by_two_det(jac_parts[(:, 6:8)])\n jac_at_nodes[(0, 4)] = two_by_two_det(jac_parts[(:, 8:10)])\n jac_at_nodes[(0, 5)] = two_by_two_det(jac_parts[(:, 10:12)])\n jac_at_nodes[(0, 6)] = two_by_two_det(jac_parts[(:, 12:14)])\n jac_at_nodes[(0, 7)] = two_by_two_det(jac_parts[(:, 14:16)])\n jac_at_nodes[(0, 8)] = two_by_two_det(jac_parts[(:, 16:18)])\n jac_at_nodes[(0, 9)] = two_by_two_det(jac_parts[(:, 18:20)])\n jac_at_nodes[(0, 10)] = two_by_two_det(jac_parts[(:, 20:22)])\n jac_at_nodes[(0, 11)] = two_by_two_det(jac_parts[(:, 22:24)])\n jac_at_nodes[(0, 12)] = two_by_two_det(jac_parts[(:, 24:26)])\n jac_at_nodes[(0, 13)] = two_by_two_det(jac_parts[(:, 26:28)])\n jac_at_nodes[(0, 14)] = two_by_two_det(jac_parts[(:, 28:)])\n bernstein = _helpers.matrix_product(jac_at_nodes, _QUARTIC_TO_BERNSTEIN)\n bernstein /= _QUARTIC_BERNSTEIN_FACTOR\n return bernstein", "docstring": "r\"\"\"Compute the Jacobian determinant of a cubic surface.\n\n.. note::\n\nThis is used **only** by :meth:`Surface._compute_valid` (which is\nin turn used to compute / cache the :attr:`Surface.is_valid`\nproperty).\n\nConverts :math:`\\det(J(s, t))` to a polynomial on the reference\ntriangle and represents it as a surface object.\n\n.. 
note::\n\nThis assumes that ``nodes`` is ``2 x 10`` but doesn't verify this.\n(However, the right multiplication by ``_CUBIC_JACOBIAN_HELPER``\nwould fail if ``nodes`` wasn't ``R x 10`` and then the ensuing\ndeterminants would fail if there weren't 2 rows.)\n\nArgs:\nnodes (numpy.ndarray): A 2 x 10 array of nodes in a surface.\n\nReturns:\nnumpy.ndarray: 1 x 15 array, coefficients in Bernstein basis.", "source": "codesearchnet"} {"code": "def iaf_flow(one_hot_assignments, scale_weights, scale_bias, num_codes, summary=True, name=None):\n with tf.name_scope(name, default_name='iaf'):\n padded_assignments = tf.pad(one_hot_assignments, [[0, 0], [0, 0], [1, 0], [0, 0]])[(:, :, :(- 1), :)]\n scale_bijector = tfp.distributions.bijectors.Affine(scale_tril=tfp.distributions.fill_triangular(scale_weights))\n scale = scale_bijector.forward(tf.transpose(padded_assignments, [0, 1, 3, 2]))\n scale = tf.transpose(scale, [0, 1, 3, 2])\n scale = tf.nn.softplus(scale)\n scale = (scale + tf.nn.softplus(scale_bias[(tf.newaxis, tf.newaxis, ...)]))\n scale = scale[(..., :(- 1))]\n z = one_hot_assignments[(..., :(- 1))]\n unnormalized_probs = tf.concat([(z * scale), one_hot_assignments[(..., (- 1), tf.newaxis)]], axis=(- 1))\n normalizer = tf.reduce_sum(unnormalized_probs, axis=(- 1))\n flow_output = (unnormalized_probs / normalizer[(..., tf.newaxis)])\n inverse_log_det_jacobian = ((- tf.reduce_sum(tf.log(scale), axis=(- 1))) + (num_codes * tf.log(normalizer)))\n if summary:\n tf.summary.histogram('iaf/scale', tf.reshape(scale, [(- 1)]))\n tf.summary.histogram('iaf/inverse_log_det_jacobian', tf.reshape(inverse_log_det_jacobian, [(- 1)]))\n return (flow_output, inverse_log_det_jacobian)", "docstring": "Performs a single IAF flow using scale and normalization transformations.\n\nArgs:\none_hot_assignments: Assignments Tensor with shape [num_samples, batch_size,\nlatent_size, num_codes].\nscale_weights: Tensor corresponding to lower triangular matrix used to\nautoregressively generate scale matrix from assignments. To ensure the\nlower-triangular matrix has length of latent_size, scale_weights should\nbe a rank-one tensor with size latent_size * (latent_size + 1) / 2.\nscale_bias: Bias tensor to be added to scale tensor, with shape\n[latent_size, num_codes]. If scale weights are zero, initialize scale_bias\nto be log(exp(1.) / 2. 
- 1) so initial transformation is identity.\nnum_codes: Number of codes in codebook.\nsummary: Whether to save summaries.\nname: String used for name scope.\n\nReturns:\nflow_output: Transformed one-hot assignments.\ninverse_log_det_jacobian: Inverse log deteriminant of Jacobian corresponding\nto transformation.", "source": "codesearchnet"} {"code": "def build_global(self, global_node):\n\n \n config_block_lines = self.__build_config_block(\n global_node.config_block)\n return config.Global(config_block=config_block_lines)", "docstring": "parse `global` section, and return the config.Global\n\nArgs:\nglobal_node (TreeNode): `global` section treenode\n\nReturns:\nconfig.Global: an object", "source": "juraj-google-style"} {"code": "def register_many(self, *args):\n \n params = []\n for name in args:\n params.append(self.register(name))\n\n return params", "docstring": "Register many configuration names.\n\nArguments:\n*args: Config names as strings.\n\nReturns:\nlist: List of registered configs.", "source": "juraj-google-style"} {"code": "def _send(self, email_message):\n \n pre_send.send(self.__class__, message=email_message)\n\n if not email_message.recipients():\n return False\n\n from_email = sanitize_address(email_message.from_email,\n email_message.encoding)\n recipients = [sanitize_address(addr, email_message.encoding)\n for addr in email_message.recipients()]\n message = email_message.message().as_bytes(linesep='\\r\\n')\n\n try:\n result = self.conn.send_raw_email(\n Source=from_email,\n Destinations=recipients,\n RawMessage={\n 'Data': message\n }\n )\n message_id = result['MessageId']\n post_send.send(\n self.__class__,\n message=email_message,\n message_id=message_id\n )\n except ClientError:\n if not self.fail_silently:\n raise\n return False\n return True", "docstring": "Sends an individual message via the Amazon SES HTTP API.\n\nArgs:\nemail_message: A single Django EmailMessage object.\nReturns:\nTrue if the EmailMessage was sent successfully, otherwise False.\nRaises:\nClientError: An interaction with the Amazon SES HTTP API\nfailed.", "source": "juraj-google-style"} {"code": "def map(self, key_pattern, func, all_args, timeout=None):\n results = []\n keys = [make_key(key_pattern, func, args, {}) for args in all_args]\n cached = dict(zip(keys, self.get_many(keys)))\n cache_to_add = {}\n for (key, args) in zip(keys, all_args):\n val = cached[key]\n if (val is None):\n val = func(*args)\n cache_to_add[key] = (val if (val is not None) else NONE_RESULT)\n if (val == NONE_RESULT):\n val = None\n results.append(val)\n if cache_to_add:\n self.set_many(cache_to_add, timeout)\n return results", "docstring": "Cache return value of multiple calls.\n\nArgs:\nkey_pattern (str): the key pattern to use for generating\nkeys for caches of the decorated function.\nfunc (function): the function to call.\nall_args (list): a list of args to be used to make calls to\nthe function.\ntimeout (int): the cache timeout\n\nReturns:\nA list of the return values of the calls.\n\nExample::\n\ndef add(a, b):\nreturn a + b\n\ncache.map(key_pat, add, [(1, 2), (3, 4)]) == [3, 7]", "source": "codesearchnet"} {"code": "def forward(self, inputs: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):\n mean = loc.transpose(-1, -2)\n mean = mean.unsqueeze(-2)\n mean = mean.repeat(1, 1, self.num_patches, 1)\n stdev = scale.transpose(-1, -2)\n stdev = stdev.unsqueeze(-2)\n stdev = stdev.repeat(1, 1, self.num_patches, 1)\n concat_stats = torch.cat([mean, stdev], dim=-1)\n concat_stats = 
self.map_scale_expansion(concat_stats)\n concat_stats = self.map_scale_compression(concat_stats)\n inputs = torch.cat([inputs, concat_stats], dim=-1)\n inputs = self.inverse_trans_expansion(inputs)\n inputs = self.inverse_trans_compression(inputs)\n return inputs", "docstring": "Args:\ninputs (`torch.Tensor` of shape `(batch_size, num_input_channels, num_patch, d_model)`)\nloc (`torch.Tensor` of shape `(batch_size, 1, num_input_channels)`)\nscale (`torch.Tensor` of shape `(batch_size, 1, num_input_channels)`)\nReturns:\n`torch.Tensor` of shape `(batch_size, num_input_channels, num_patch, d_model)`", "source": "github-repos"} {"code": "def _validate_ids(self, resource_ids):\n for resource_id in resource_ids:\n if (self._id_regex.fullmatch(resource_id) is None):\n LOGGER.debug('Invalid resource id requested: %s', resource_id)\n raise _ResponseFailed(self._status.INVALID_ID)", "docstring": "Validates a list of ids, raising a ResponseFailed error if invalid.\n\nArgs:\nresource_id (list of str): The ids to validate\n\nRaises:\nResponseFailed: The id was invalid, and a status of INVALID_ID\nwill be sent with the response.", "source": "codesearchnet"} {"code": "def _find_children_hints(call, graph_def):\n name_to_input_name, _, _ = _extract_graph_summary(graph_def)\n input_names, output_names = call.flattened_inputs_and_outputs()\n reachable_by_input = _bfs_for_reachable_nodes(input_names, name_to_input_name)\n reachable_by_output = _bfs_for_reachable_nodes(output_names, name_to_input_name)\n output_nodes_set = set(output_names)\n children_hints = []\n out = _graph_pb2.GraphDef()\n out.library.CopyFrom(graph_def.library)\n out.versions.CopyFrom(graph_def.versions)\n function_def_nodes = set()\n for node in graph_def.node:\n out.node.extend([_copy.deepcopy(node)])\n n = _tensor_name_base(node.name)\n if n in reachable_by_output:\n if n not in reachable_by_input and n not in output_nodes_set:\n if node.op == 'While' or node.op == 'StatelessWhile':\n body_name = node.attr['body'].func.name\n inputs_outside_loop = node.input\n for function_def in graph_def.library.function:\n if function_def.signature.name == body_name:\n function_inputs = function_def.signature.input_arg\n assert len(inputs_outside_loop) == len(function_inputs)\n nodes_mapping = {}\n for i, function_input in enumerate(function_inputs):\n nodes_mapping[function_input.name] = inputs_outside_loop[i]\n children_hints_in_loop, new_nodes = _find_children_hints_in_while_loop(function_def, nodes_mapping)\n function_def_nodes.update([x.name for x in new_nodes])\n children_hints.extend(children_hints_in_loop)\n out.node.extend(new_nodes)\n return (children_hints, out, function_def_nodes)", "docstring": "Find all children hints.\n\nFor a given OpHint, we find all children hints inside it, we also copy all the\nnodes inside function defs (if applicable) to the original graph_def, they are\nreturned in a list as well.\n\nArgs:\ncall: Parent OpHint that contains children ophints.\ngraph_def: Original graph def.\n\nReturns:\nOrdered children hints inside the parent ophint; new graph def that contains\nnodes inside function defs (if applicable); nodes inside function defs.", "source": "github-repos"} {"code": "def Lock(fd, path, blocking):\n \n operation = fcntl.LOCK_EX if blocking else fcntl.LOCK_EX | fcntl.LOCK_NB\n try:\n fcntl.flock(fd, operation)\n except IOError as e:\n if e.errno == errno.EWOULDBLOCK:\n raise IOError('Exception locking %s. File already locked.' % path)\n else:\n raise IOError('Exception locking %s. %s.' 
% (path, str(e)))", "docstring": "Lock the provided file descriptor.\n\nArgs:\nfd: int, the file descriptor of the file to lock.\npath: string, the name of the file to lock.\nblocking: bool, whether the function should return immediately.\n\nRaises:\nIOError, raised from flock while attempting to lock a file.", "source": "juraj-google-style"} {"code": "def cycle_iters(iters: t.List[t.Iterator], take: int=1) -> t.Iterator:\n while iters:\n for i, it in enumerate(iters):\n try:\n for j in range(take):\n logger.debug(f'yielding item {j!r} from iterable {i!r}.')\n yield next(it)\n except StopIteration:\n iters.remove(it)", "docstring": "Evenly cycle through a list of iterators.\n\nArgs:\niters: A list of iterators to evely cycle through.\ntake: Yield N items at a time. When not set to 1, this will yield\nmultiple items from the same collection.\n\nReturns:\nAn iteration across several iterators in a round-robin order.", "source": "github-repos"} {"code": "def _index(array, item, key=None):\n for (i, el) in enumerate(array):\n resolved_el = (key(el) if key else el)\n if (resolved_el == item):\n return i\n return (- 1)", "docstring": "Array search function.\n\nWritten, because ``.index()`` method for array doesn't have `key` parameter\nand raises `ValueError`, if the item is not found.\n\nArgs:\narray (list): List of items, which will be searched.\nitem (whatever): Item, which will be matched to elements in `array`.\nkey (function, default None): Function, which will be used for lookup\ninto each element in `array`.\n\nReturn:\nIndex of `item` in `array`, if the `item` is in `array`, else `-1`.", "source": "codesearchnet"} {"code": "def aggregate_and_return_name_for_output(self, fused_op_name, output_index, out_graphdef):\n del fused_op_name, output_index, out_graphdef\n raise RuntimeError('Unimplemented abstract method.')", "docstring": "Add node(s) to graph representing output operands and returns type.\n\nArgs:\nfused_op_name: name of the fused op stub name.\noutput_index: Output index that we are currently processing from stub.\nout_graphdef: The destination graphdef we are currently building up.\n\nReturns:\nThe datatype of this identity.\n\nRaises:\nRuntimeError: if the method is not implemented.", "source": "github-repos"} {"code": "def fixed_gaussian_prior_builder(getter, name, dtype=None, *args, **kwargs):\n del getter\n del args\n del kwargs\n loc = tf.constant(0.0, shape=(), dtype=dtype)\n scale = tf.constant(0.01, shape=(), dtype=dtype)\n return tfp.distributions.Normal(loc=loc, scale=scale, name='{}_prior_dist'.format(name))", "docstring": "A pre-canned builder for fixed gaussian prior distributions.\n\nGiven a true `getter` function and arguments forwarded from `tf.get_variable`,\nreturn a distribution object for a scalar-valued fixed gaussian prior which\nwill be broadcast over a variable of the requisite shape.\n\nArgs:\ngetter: The `getter` passed to a `custom_getter`. 
Please see the\ndocumentation for `tf.get_variable`.\nname: The `name` argument passed to `tf.get_variable`.\ndtype: The `dtype` argument passed to `tf.get_variable`.\n*args: See positional arguments passed to `tf.get_variable`.\n**kwargs: See keyword arguments passed to `tf.get_variable`.\n\nReturns:\nAn instance of `tfp.distributions.Normal` representing the prior\ndistribution over the variable in question.", "source": "codesearchnet"} {"code": "def fetch_raw(self, method, url, params=None, headers=None, data=None):\n if (not urllib.parse.urlparse(url).hostname.endswith('.google.com')):\n raise Exception('expected google.com domain')\n headers = (headers or {})\n headers.update(self._authorization_headers)\n return self._session.request(method, url, params=params, headers=headers, data=data, proxy=self._proxy)", "docstring": "Make an HTTP request using aiohttp directly.\n\nAutomatically uses configured HTTP proxy, and adds Google authorization\nheader and cookies.\n\nArgs:\nmethod (str): Request method.\nurl (str): Request URL.\nparams (dict): (optional) Request query string parameters.\nheaders (dict): (optional) Request headers.\ndata: (str): (optional) Request body data.\n\nReturns:\naiohttp._RequestContextManager: ContextManager for a HTTP response.\n\nRaises:\nSee ``aiohttp.ClientSession.request``.", "source": "codesearchnet"} {"code": "def set_charge_and_spin(self, charge, spin_multiplicity=None):\n self._charge = charge\n nelectrons = 0\n for site in self._sites:\n for (sp, amt) in site.species.items():\n if (not isinstance(sp, DummySpecie)):\n nelectrons += (sp.Z * amt)\n nelectrons -= charge\n self._nelectrons = nelectrons\n if spin_multiplicity:\n if (((nelectrons + spin_multiplicity) % 2) != 1):\n raise ValueError('Charge of {} and spin multiplicity of {} is not possible for this molecule'.format(self._charge, spin_multiplicity))\n self._spin_multiplicity = spin_multiplicity\n else:\n self._spin_multiplicity = (1 if ((nelectrons % 2) == 0) else 2)", "docstring": "Set the charge and spin multiplicity.\n\nArgs:\ncharge (int): Charge for the molecule. 
Defaults to 0.\nspin_multiplicity (int): Spin multiplicity for molecule.\nDefaults to None, which means that the spin multiplicity is\nset to 1 if the molecule has no unpaired electrons and to 2\nif there are unpaired electrons.", "source": "codesearchnet"} {"code": "def apply(self, s, active=None):\n \n if active is None:\n active = self.active\n return self.group.apply(s, active=active)", "docstring": "Apply the REPP's rewrite rules to the input string *s*.\n\nArgs:\ns (str): the input string to process\nactive (optional): a collection of external module names\nthat may be applied if called\nReturns:\na :class:`REPPResult` object containing the processed\nstring and characterization maps", "source": "juraj-google-style"} {"code": "def state_province_region(self, value=None):\n \n if value is not None:\n try:\n value = str(value)\n except ValueError:\n raise ValueError(\n 'value {} need to be of type str '\n 'for field `state_province_region`'.format(value))\n if ',' in value:\n raise ValueError('value should not contain a comma '\n 'for field `state_province_region`')\n\n self._state_province_region = value", "docstring": "Corresponds to IDD Field `state_province_region`\n\nArgs:\nvalue (str): value for IDD Field `state_province_region`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"} {"code": "def get_artifact_url(context, task_id, path):\n if path.startswith('public/'):\n url = context.queue.buildUrl('getLatestArtifact', task_id, path)\n else:\n url = context.queue.buildSignedUrl('getLatestArtifact', task_id, path)\n return url", "docstring": "Get a TaskCluster artifact url.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context\ntask_id (str): the task id of the task that published the artifact\npath (str): the relative path of the artifact\n\nReturns:\nstr: the artifact url\n\nRaises:\nTaskClusterFailure: on failure.", "source": "codesearchnet"} {"code": "def alignment(layer, decay_ratio=2):\n\n def inner(T):\n batch_n = T(layer).get_shape().as_list()[0]\n arr = T(layer)\n accum = 0\n for d in [1, 2, 3, 4]:\n for i in range((batch_n - d)):\n (a, b) = (i, (i + d))\n (arr1, arr2) = (arr[a], arr[b])\n accum += (tf.reduce_mean(((arr1 - arr2) ** 2)) / (decay_ratio ** float(d)))\n return (- accum)\n return inner", "docstring": "Encourage neighboring images to be similar.\n\nWhen visualizing the interpolation between two objectives, it's often\ndesirable to encourage analogous objects to be drawn in the same position,\nto make them more comparable.\n\nThis term penalizes L2 distance between neighboring images, as evaluated at\nlayer.\n\nIn general, we find this most effective if used with a parameterization that\nshares across the batch. 
(In fact, that works quite well by itself, so this\nfunction may just be obsolete.)\n\nArgs:\nlayer: layer to penalize at.\ndecay_ratio: how much to decay penalty as images move apart in batch.\n\nReturns:\nObjective.", "source": "codesearchnet"} {"code": "def as_text(bytes_or_text, encoding='utf-8'):\n \n if isinstance(bytes_or_text, _six.text_type):\n return bytes_or_text\n elif isinstance(bytes_or_text, bytes):\n return bytes_or_text.decode(encoding)\n else:\n raise TypeError('Expected binary or unicode string, got %r' % bytes_or_text)", "docstring": "Returns the given argument as a unicode string.\nArgs:\nbytes_or_text: A `bytes`, `str`, or `unicode` object.\nencoding: A string indicating the charset for decoding unicode.\nReturns:\nA `unicode` (Python 2) or `str` (Python 3) object.\nRaises:\nTypeError: If `bytes_or_text` is not a binary or unicode string.", "source": "juraj-google-style"} {"code": "def _read_range(self, start, end=0):\n \n \n response = self._client.request(\n 'GET', self.name, headers=dict(Range=self._http_range(start, end)),\n timeout=self._TIMEOUT)\n\n if response.status_code == 416:\n \n return b''\n\n \n return _handle_http_errors(response).content", "docstring": "Read a range of bytes in stream.\n\nArgs:\nstart (int): Start stream position.\nend (int): End stream position.\n0 To not specify end.\n\nReturns:\nbytes: number of bytes read", "source": "juraj-google-style"} {"code": "def _cast_to_type_if_compatible(name, param_type, value):\n fail_msg = (\"Could not cast hparam '%s' of type '%s' from value %r\" % (name, param_type, value))\n if issubclass(param_type, type(None)):\n return value\n if (issubclass(param_type, (six.string_types, six.binary_type)) and (not isinstance(value, (six.string_types, six.binary_type)))):\n raise ValueError(fail_msg)\n if (issubclass(param_type, bool) != isinstance(value, bool)):\n raise ValueError(fail_msg)\n if (issubclass(param_type, numbers.Integral) and (not isinstance(value, numbers.Integral))):\n raise ValueError(fail_msg)\n if (issubclass(param_type, numbers.Number) and (not isinstance(value, numbers.Number))):\n raise ValueError(fail_msg)\n return param_type(value)", "docstring": "Cast hparam to the provided type, if compatible.\n\nArgs:\nname: Name of the hparam to be cast.\nparam_type: The type of the hparam.\nvalue: The value to be cast, if compatible.\n\nReturns:\nThe result of casting `value` to `param_type`.\n\nRaises:\nValueError: If the type of `value` is not compatible with param_type.\n* If `param_type` is a string type, but `value` is not.\n* If `param_type` is a boolean, but `value` is not, or vice versa.\n* If `param_type` is an integer type, but `value` is not.\n* If `param_type` is a float type, but `value` is not a numeric type.", "source": "codesearchnet"} {"code": "def prepare(self, variables):\n initializedsteps = []\n if (variables is None):\n variables = dict()\n for (step, params, _resources, _files) in self.steps:\n new_params = _complete_parameters(params, variables)\n initializedsteps.append(step(new_params))\n return initializedsteps", "docstring": "Initialize all steps in this recipe using their parameters.\n\nArgs:\nvariables (dict): A dictionary of global variable definitions\nthat may be used to replace or augment the parameters given\nto each step.\n\nReturns:\nlist of RecipeActionObject like instances: The list of instantiated\nsteps that can be used to execute this recipe.", "source": "codesearchnet"} {"code": "def parse(raw_config):\n config_dict = yaml_to_ordered_dict(raw_config)\n if 
config_dict:\n for top_level_key in ['stacks', 'pre_build', 'post_build', 'pre_destroy', 'post_destroy']:\n top_level_value = config_dict.get(top_level_key)\n if isinstance(top_level_value, dict):\n tmp_list = []\n for (key, value) in top_level_value.items():\n tmp_dict = copy.deepcopy(value)\n if (top_level_key == 'stacks'):\n tmp_dict['name'] = key\n tmp_list.append(tmp_dict)\n config_dict[top_level_key] = tmp_list\n try:\n return Config(config_dict, strict=True)\n except SchematicsError as e:\n raise exceptions.InvalidConfig(e.errors)", "docstring": "Parse a raw yaml formatted stacker config.\n\nArgs:\nraw_config (str): the raw stacker configuration string in yaml format.\n\nReturns:\n:class:`Config`: the parsed stacker config.", "source": "codesearchnet"} {"code": "def iterable(obj, strok=False):\n try:\n iter(obj)\n except Exception:\n return False\n else:\n return (strok or (not isinstance(obj, six.string_types)))", "docstring": "Checks if the input implements the iterator interface. An exception is made\nfor strings, which return False unless `strok` is True\n\nArgs:\nobj (object): a scalar or iterable input\n\nstrok (bool): if True allow strings to be interpreted as iterable\n\nReturns:\nbool: True if the input is iterable\n\nExample:\n>>> obj_list = [3, [3], '3', (3,), [3, 4, 5], {}]\n>>> result = [iterable(obj) for obj in obj_list]\n>>> assert result == [False, True, False, True, True, True]\n>>> result = [iterable(obj, strok=True) for obj in obj_list]\n>>> assert result == [False, True, True, True, True, True]", "source": "codesearchnet"} {"code": "def __get_first_available_id(self):\n traps = self.get_all()\n if traps:\n used_ids = [0]\n for trap in traps:\n used_uris = trap.get('uri')\n used_ids.append(int(used_uris.split('/')[(- 1)]))\n used_ids.sort()\n return self.__findFirstMissing(used_ids, 0, (len(used_ids) - 1))\n else:\n return 1", "docstring": "Private method to get the first available id.\nThe id can only be an integer greater than 0.\n\nReturns:\nint: The first available id", "source": "codesearchnet"} {"code": "def get_thread(self, thread_id, update_if_cached=True, raise_404=False):\n \n \n cached_thread = self._thread_cache.get(thread_id)\n if cached_thread:\n if update_if_cached:\n cached_thread.update()\n return cached_thread\n\n res = self._requests_session.get(\n self._url.thread_api_url(\n thread_id = thread_id\n )\n )\n\n \n if raise_404:\n res.raise_for_status()\n elif not res.ok:\n return None\n\n thread = Thread._from_request(self, res, thread_id)\n self._thread_cache[thread_id] = thread\n\n return thread", "docstring": "Get a thread from 4chan via 4chan API.\n\nArgs:\nthread_id (int): Thread ID\nupdate_if_cached (bool): Whether the thread should be updated if it's already in our cache\nraise_404 (bool): Raise an Exception if thread has 404'd\n\nReturns:\n:class:`basc_py4chan.Thread`: Thread object", "source": "juraj-google-style"} {"code": "def configure_interface(self, name, commands):\n \n commands = make_iterable(commands)\n commands.insert(0, 'interface %s' % name)\n return self.configure(commands)", "docstring": "Configures the specified interface with the commands\n\nArgs:\nname (str): The interface name to configure\ncommands: The commands to configure in the interface\n\nReturns:\nTrue if the commands completed successfully", "source": "juraj-google-style"} {"code": "def convert_timedelta_type(obj):\n if isinstance(obj, dt.timedelta):\n return (obj.total_seconds() * 1000.0)\n elif isinstance(obj, np.timedelta64):\n return (obj / NP_MS_DELTA)", 
"docstring": "Convert any recognized timedelta value to floating point absolute\nmilliseconds.\n\nArg:\nobj (object) : the object to convert\n\nReturns:\nfloat : milliseconds", "source": "codesearchnet"} {"code": "def get_path_spec(self, path, action=None):\n path_spec = None\n path_name = None\n for base_path in self.paths.keys():\n if (path == base_path):\n path_spec = self.paths[base_path]\n path_name = base_path\n if (path_spec is None):\n for base_path in self.paths.keys():\n regex_from_path = re.compile((re.sub('{[^/]*}', '([^/]*)', base_path) + '$'))\n if re.match(regex_from_path, path):\n path_spec = self.paths[base_path]\n path_name = base_path\n if ((path_spec is not None) and (action is not None)):\n if (action not in path_spec.keys()):\n return (None, None)\n else:\n path_spec = path_spec[action]\n return (path_name, path_spec)", "docstring": "Get the specification matching with the given path.\n\nArgs:\npath: path we want the specification.\naction: get the specification for the given action.\n\nReturns:\nA tuple with the base name of the path and the specification.\nOr (None, None) if no specification is found.", "source": "codesearchnet"} {"code": "def ParseFileLNKFile(\n self, parser_mediator, file_object, display_name):\n \n lnk_file = pylnk.file()\n lnk_file.set_ascii_codepage(parser_mediator.codepage)\n\n try:\n lnk_file.open_file_object(file_object)\n except IOError as exception:\n parser_mediator.ProduceExtractionWarning(\n 'unable to open file with error: {0!s}'.format(exception))\n return\n\n link_target = None\n if lnk_file.link_target_identifier_data:\n \n \n display_name = parser_mediator.GetFilename()\n shell_items_parser = shell_items.ShellItemsParser(display_name)\n shell_items_parser.ParseByteStream(\n parser_mediator, lnk_file.link_target_identifier_data,\n codepage=parser_mediator.codepage)\n\n link_target = shell_items_parser.CopyToPath()\n\n event_data = WinLnkLinkEventData()\n event_data.birth_droid_file_identifier = (\n lnk_file.birth_droid_file_identifier)\n event_data.birth_droid_volume_identifier = (\n lnk_file.birth_droid_volume_identifier)\n event_data.command_line_arguments = lnk_file.command_line_arguments\n event_data.description = lnk_file.description\n event_data.drive_serial_number = lnk_file.drive_serial_number\n event_data.drive_type = lnk_file.drive_type\n event_data.droid_file_identifier = lnk_file.droid_file_identifier\n event_data.droid_volume_identifier = lnk_file.droid_volume_identifier\n event_data.env_var_location = lnk_file.environment_variables_location\n event_data.file_attribute_flags = lnk_file.file_attribute_flags\n event_data.file_size = lnk_file.file_size\n event_data.icon_location = lnk_file.icon_location\n event_data.link_target = link_target\n event_data.local_path = lnk_file.local_path\n event_data.network_path = lnk_file.network_path\n event_data.relative_path = lnk_file.relative_path\n event_data.volume_label = lnk_file.volume_label\n event_data.working_directory = lnk_file.working_directory\n\n access_time = lnk_file.get_file_access_time_as_integer()\n if access_time != 0:\n date_time = dfdatetime_filetime.Filetime(timestamp=access_time)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n creation_time = lnk_file.get_file_creation_time_as_integer()\n if creation_time != 0:\n date_time = dfdatetime_filetime.Filetime(timestamp=creation_time)\n event = time_events.DateTimeValuesEvent(\n date_time, 
definitions.TIME_DESCRIPTION_CREATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n modification_time = lnk_file.get_file_modification_time_as_integer()\n if modification_time != 0:\n date_time = dfdatetime_filetime.Filetime(timestamp=modification_time)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n if access_time == 0 and creation_time == 0 and modification_time == 0:\n date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n if lnk_file.droid_file_identifier:\n try:\n self._ParseDistributedTrackingIdentifier(\n parser_mediator, lnk_file.droid_file_identifier, display_name)\n except (TypeError, ValueError) as exception:\n parser_mediator.ProduceExtractionWarning(\n 'unable to read droid file identifier with error: {0!s}.'.format(\n exception))\n\n if lnk_file.birth_droid_file_identifier:\n try:\n self._ParseDistributedTrackingIdentifier(\n parser_mediator, lnk_file.birth_droid_file_identifier, display_name)\n except (TypeError, ValueError) as exception:\n parser_mediator.ProduceExtractionWarning((\n 'unable to read birth droid file identifier with error: '\n '{0!s}.').format(exception))\n\n lnk_file.close()", "docstring": "Parses a Windows Shortcut (LNK) file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.\ndisplay_name (str): display name.", "source": "juraj-google-style"} {"code": "def merge(prior, latest):\n if (not _buckets_nearly_equal(prior, latest)):\n _logger.error(u'Bucket options do not match. From %s To: %s', prior, latest)\n raise ValueError(u'Bucket options do not match')\n if (len(prior.bucketCounts) != len(latest.bucketCounts)):\n _logger.error(u'Bucket count sizes do not match. From %s To: %s', prior, latest)\n raise ValueError(u'Bucket count sizes do not match')\n if (prior.count <= 0):\n return\n old_count = latest.count\n old_mean = latest.mean\n old_summed_variance = latest.sumOfSquaredDeviation\n bucket_counts = latest.bucketCounts\n latest.count += prior.count\n latest.maximum = max(prior.maximum, latest.maximum)\n latest.minimum = min(prior.minimum, latest.minimum)\n latest.mean = (((old_count * old_mean) + (prior.count * prior.mean)) / latest.count)\n latest.sumOfSquaredDeviation = (((old_summed_variance + prior.sumOfSquaredDeviation) + (old_count * ((latest.mean - old_mean) ** 2))) + (prior.count * ((latest.mean - prior.mean) ** 2)))\n for (i, (x, y)) in enumerate(zip(prior.bucketCounts, bucket_counts)):\n bucket_counts[i] = (x + y)", "docstring": "Merge `prior` into `latest`.\n\nN.B, this mutates latest. 
It ensures that the statistics and histogram are\nupdated to correctly include the original values from both instances.\n\nArgs:\nprior (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):\nan instance\nlatest (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):\nan instance to be updated\n\nRaises:\nValueError: if the bucket options of `prior` and `latest` do not match\nValueError: if the bucket counts of `prior` and `latest` do not match", "source": "codesearchnet"} {"code": "def GrabFileSystem(self, path_spec):\n identifier = self._GetFileSystemCacheIdentifier(path_spec)\n self._file_system_cache.GrabObject(identifier)", "docstring": "Grabs a cached file system object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.", "source": "codesearchnet"} {"code": "def credits(self, **kwargs):\n \n path = self._get_id_path('credits')\n\n response = self._GET(path, kwargs)\n self._set_attrs_to_values(response)\n return response", "docstring": "Get the cast and crew information for a specific movie id.\n\nArgs:\nappend_to_response: (optional) Comma separated, any movie method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"} {"code": "def ias60(msg):\n \n d = hex2bin(data(msg))\n\n if d[12] == '0':\n return None\n\n ias = bin2int(d[13:23]) \n return ias", "docstring": "Indicated airspeed\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS60) string\n\nReturns:\nint: indicated airspeed in knots", "source": "juraj-google-style"} {"code": "def sparse_retain(sp_input, to_retain):\n sp_input = _convert_to_sparse_tensor(sp_input)\n to_retain = ops.convert_to_tensor(to_retain)\n retain_shape = to_retain.get_shape()\n retain_shape.assert_has_rank(1)\n if sp_input.values.get_shape().dims is not None:\n sp_input.values.get_shape().dims[0].assert_is_compatible_with(tensor_shape.dimension_at_index(retain_shape, 0))\n where_true = array_ops.reshape(array_ops.where_v2(to_retain), [-1])\n new_indices = array_ops.gather(sp_input.indices, where_true)\n new_values = array_ops.gather(sp_input.values, where_true)\n return sparse_tensor.SparseTensor(new_indices, new_values, array_ops.identity(sp_input.dense_shape))", "docstring": "Retains specified non-empty values within a `SparseTensor`.\n\nFor example, if `sp_input` has shape `[4, 5]` and 4 non-empty string values:\n\n[0, 1]: a\n[0, 3]: b\n[2, 0]: c\n[3, 1]: d\n\nand `to_retain = [True, False, False, True]`, then the output will\nbe a `SparseTensor` of shape `[4, 5]` with 2 non-empty values:\n\n[0, 1]: a\n[3, 1]: d\n\nArgs:\nsp_input: The input `SparseTensor` with `N` non-empty elements.\nto_retain: A bool vector of length `N` with `M` true values.\n\nReturns:\nA `SparseTensor` with the same shape as the input and `M` non-empty\nelements corresponding to the true positions in `to_retain`.\n\nRaises:\nTypeError: If `sp_input` is not a `SparseTensor`.", "source": "github-repos"} {"code": "def get_submission_ids(self, tournament=1):\n \n query = \n arguments = {'tournament': tournament}\n data = self.raw_query(query, arguments)['data']['rounds'][0]\n if data is None:\n return None\n mapping = {item['username']: item['submissionId']\n for item in data['leaderboard']}\n return mapping", "docstring": "Get dict with username->submission_id mapping.\n\nArgs:\ntournament (int): ID of the tournament (optional, defaults to 1)\n\nReturns:\ndict: username->submission_id mapping, string->string\n\nExample:\n>>> 
NumerAPI().get_submission_ids()\n{'1337ai': '93c46857-fed9-4594-981e-82db2b358daf',\n'1x0r': '108c7601-822c-4910-835d-241da93e2e24',\n...\n}", "source": "juraj-google-style"} {"code": "def _delete_example(self, request):\n \n index = int(request.args.get('index'))\n if index >= len(self.examples):\n return http_util.Respond(request, {'error': 'invalid index provided'},\n 'application/json', code=400)\n del self.examples[index]\n self.updated_example_indices = set([\n i if i < index else i - 1 for i in self.updated_example_indices])\n self.generate_sprite([ex.SerializeToString() for ex in self.examples])\n return http_util.Respond(request, {}, 'application/json')", "docstring": "Deletes the specified example.\n\nArgs:\nrequest: A request that should contain 'index'.\n\nReturns:\nAn empty response.", "source": "juraj-google-style"} {"code": "def restore_app_connection(self, port=None):", "docstring": "Reconnects to the app after device USB was disconnected.\n\nInstead of creating new instance of the client:\n- Uses the given port (or finds a new available host_port if none is\ngiven).\n- Tries to connect to remote server with selected port.\n\nMust be implemented by subclasses.\n\nArgs:\nport: If given, this is the host port from which to connect to remote\ndevice port. If not provided, find a new available port as host\nport.\n\nRaises:\nAppRestoreConnectionError: When the app was not able to be\nreconnected.", "source": "github-repos"} {"code": "def nac_p(msg):\n \n tc = typecode(msg)\n\n if tc not in [29, 31]:\n raise RuntimeError(\"%s: Not a target state and status message, \\\n or operation status message, expecting TC = 29 or 31\" % msg)\n\n msgbin = common.hex2bin(msg)\n\n if tc == 29:\n NACp = common.bin2int(msgbin[71:75])\n elif tc == 31:\n NACp = common.bin2int(msgbin[76:80])\n\n try:\n EPU = uncertainty.NACp[NACp]['EPU']\n VEPU = uncertainty.NACp[NACp]['VEPU']\n except KeyError:\n EPU, VEPU = uncertainty.NA, uncertainty.NA\n\n return EPU, VEPU", "docstring": "Calculate NACp, Navigation Accuracy Category - Position\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string, TC = 29 or 31\n\nReturns:\nint or string: 95% horizontal accuracy bounds, Estimated Position Uncertainty\nint or string: 95% vertical accuracy bounds, Vertical Estimated Position Uncertainty", "source": "juraj-google-style"} {"code": "def register_module_for_export(module, export_name):\n for (used_name, _) in tf_v1.get_collection(_EXPORT_MODULES_COLLECTION):\n if (used_name == export_name):\n raise ValueError(('There is already a module registered to be exported as %r' % export_name))\n tf_v1.add_to_collection(_EXPORT_MODULES_COLLECTION, (export_name, module))", "docstring": "Register a Module to be exported under `export_name`.\n\n\nThis function registers `module` to be exported by `LatestModuleExporter`\nunder a subdirectory named `export_name`.\n\nNote that `export_name` must be unique for each module exported from the\ncurrent graph. 
It only controls the export subdirectory name and it has\nno scope effects such as the `name` parameter during Module instantiation.\n\nArgs:\nmodule: Module instance to be exported.\nexport_name: subdirectory name to use when performing the export.\n\nRaises:\nValueError: if `export_name` is already taken in the current graph.", "source": "codesearchnet"} {"code": "def verify_signature(public_key, signature, hash, hash_algo):\n \n hash_algo = _hash_algorithms[hash_algo]\n try:\n return get_publickey(public_key).verify(\n signature,\n hash,\n padding.PKCS1v15(),\n utils.Prehashed(hash_algo),\n ) is None\n except InvalidSignature:\n return False", "docstring": "Verify the given signature is correct for the given hash and public key.\n\nArgs:\npublic_key (str): PEM encoded public key\nsignature (bytes): signature to verify\nhash (bytes): hash of data\nhash_algo (str): hash algorithm used\n\nReturns:\nTrue if the signature is valid, False otherwise", "source": "juraj-google-style"} {"code": "def get_video_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Union[int, List[int]], vision_feature_select_strategy: str):\n batch_size, frames, channels, height, width = pixel_values.shape\n pixel_values = pixel_values.view(batch_size * frames, channels, height, width)\n video_features = self.vision_tower(pixel_values, output_hidden_states=True)\n if isinstance(vision_feature_layer, int):\n selected_video_feature = video_features.hidden_states[vision_feature_layer]\n else:\n hs_pool = [video_features.hidden_states[layer_idx] for layer_idx in vision_feature_layer]\n selected_video_feature = torch.cat(hs_pool, dim=-1)\n if vision_feature_select_strategy == 'default':\n selected_video_feature = selected_video_feature[:, 1:]\n elif vision_feature_select_strategy == 'full':\n selected_video_feature = selected_video_feature\n video_features = self.multi_modal_projector(selected_video_feature)\n video_features = self.apply_pooling(video_features)\n video_features = video_features.reshape(batch_size, frames * video_features.shape[1], -1)\n return video_features", "docstring": "Obtains video last hidden states from the vision tower, apply multimodal projection and pooling.\n\nArgs:\npixel_values (`torch.FloatTensor]` of shape `(batch_size, num_frames, channels, height, width)`)\nThe tensors corresponding to the input video.\nvision_feature_layer (`Union[int, List[int]], *optional*, defaults to -2`):\nThe index of the layer to select the vision feature. 
If multiple indices are provided,\nthe vision feature of the corresponding indices will be concatenated to form the\nvision features.\nvision_feature_select_strategy (`str`):\nThe feature selection strategy used to select the vision feature from the vision backbone.\nCan be one of `\"default\"` or `\"full\"`\nReturns:\nvideo_features (List[`torch.Tensor`]): List of video feature tensor, each contains all the visual feature of all patches\nand are of shape `(num_videos, video_length, embed_dim)`).", "source": "github-repos"} {"code": "def extract_paths(disk_path, disk_root, paths, ignore_nopath):\n with guestfs_conn_mount_ro(disk_path, disk_root) as conn:\n for (guest_path, host_path) in paths:\n msg = 'Extracting guestfs:\n LOGGER.debug(msg)\n try:\n _copy_path(conn, guest_path, host_path)\n except ExtractPathNoPathError as err:\n if ignore_nopath:\n LOGGER.debug('%s - ignoring', err)\n else:\n raise", "docstring": "Extract paths from a disk using guestfs\n\nArgs:\ndisk_path(str): path to the disk\ndisk_root(str): root partition\npaths(list of tuples): files to extract in\n`[(src1, dst1), (src2, dst2)...]` format, if ``srcN`` is a\ndirectory in the guest, and ``dstN`` does not exist on the host,\nit will be created. If ``srcN`` is a file on the guest, it will be\ncopied exactly to ``dstN``\nignore_nopath(bool): If set to True, ignore paths in the guest that\ndo not exit\n\nReturns:\nNone\n\nRaises:\n:exc:`~lago.plugins.vm.ExtractPathNoPathError`: if a none existing\npath was found on the guest, and `ignore_nopath` is False.\n:exc:`~lago.plugins.vm.ExtractPathError`: on all other failures.", "source": "codesearchnet"} {"code": "def delete_item(self, key: InstanceKey) -> \"InstanceNode\":\n \n if not isinstance(self.value, StructuredValue):\n raise InstanceValueError(self.json_pointer(), \"scalar value\")\n newval = self.value.copy()\n try:\n del newval[key]\n except (KeyError, IndexError, TypeError):\n raise NonexistentInstance(self.json_pointer(),\n f\"item '{key}'\") from None\n return self._copy(newval)", "docstring": "Delete an item (member or entry) from receiver's value.\n\nArgs:\nkey: Key of the item (instance name or index).\n\nRaises:\nNonexistentInstance: If receiver's value doesn't contain the item.\nInstanceValueError: If the receiver's value is a scalar.", "source": "juraj-google-style"} {"code": "def broadcast_to(rt_input, shape, broadcast_inner_dimensions=True):\n if not isinstance(shape, RaggedTensorDynamicShape):\n raise TypeError('shape must be a RaggedTensorDynamicShape')\n rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)\n if shape.num_partitioned_dimensions == 0:\n return _broadcast_to_uniform_shape(rt_input, shape, broadcast_inner_dimensions)\n else:\n return _broadcast_to_ragged_shape(rt_input, shape, broadcast_inner_dimensions)", "docstring": "Broadcasts a potentially ragged tensor to a ragged shape.\n\nTiles `rt_input` as necessary to match the given shape.\n\nBehavior is undefined if `rt_input` is not broadcast-compatible with `shape`.\n\nArgs:\nrt_input: The potentially ragged tensor to broadcast.\nshape: A `RaggedTensorDynamicShape`\nbroadcast_inner_dimensions: If false, then inner dimensions will not be\ntiled.\n\nReturns:\nA potentially ragged tensor whose values are taken from\n`rt_input`, and whose shape matches `shape`.", "source": "github-repos"} {"code": "def _make_cloud_datastore_context(app_id, external_app_ids=()):\n from . 
import model\n if (not datastore_pbs._CLOUD_DATASTORE_ENABLED):\n raise datastore_errors.BadArgumentError(datastore_pbs.MISSING_CLOUD_DATASTORE_MESSAGE)\n import googledatastore\n try:\n from google.appengine.datastore import cloud_datastore_v1_remote_stub\n except ImportError:\n from google3.apphosting.datastore import cloud_datastore_v1_remote_stub\n current_app_id = os.environ.get('APPLICATION_ID', None)\n if (current_app_id and (current_app_id != app_id)):\n raise ValueError(('Cannot create a Cloud Datastore context that connects to an application (%s) that differs from the application already connected to (%s).' % (app_id, current_app_id)))\n os.environ['APPLICATION_ID'] = app_id\n id_resolver = datastore_pbs.IdResolver(((app_id,) + tuple(external_app_ids)))\n project_id = id_resolver.resolve_project_id(app_id)\n endpoint = googledatastore.helper.get_project_endpoint_from_env(project_id)\n datastore = googledatastore.Datastore(project_endpoint=endpoint, credentials=googledatastore.helper.get_credentials_from_env())\n conn = model.make_connection(_api_version=datastore_rpc._CLOUD_DATASTORE_V1, _id_resolver=id_resolver)\n try:\n stub = cloud_datastore_v1_remote_stub.CloudDatastoreV1RemoteStub(datastore)\n apiproxy_stub_map.apiproxy.RegisterStub(datastore_rpc._CLOUD_DATASTORE_V1, stub)\n except:\n pass\n try:\n apiproxy_stub_map.apiproxy.RegisterStub('memcache', _ThrowingStub())\n except:\n pass\n try:\n apiproxy_stub_map.apiproxy.RegisterStub('taskqueue', _ThrowingStub())\n except:\n pass\n return make_context(conn=conn)", "docstring": "Creates a new context to connect to a remote Cloud Datastore instance.\n\nThis should only be used outside of Google App Engine.\n\nArgs:\napp_id: The application id to connect to. This differs from the project\nid as it may have an additional prefix, e.g. \"s~\" or \"e~\".\nexternal_app_ids: A list of apps that may be referenced by data in your\napplication. For example, if you are connected to s~my-app and store keys\nfor s~my-other-app, you should include s~my-other-app in the external_apps\nlist.\nReturns:\nAn ndb.Context that can connect to a Remote Cloud Datastore. 
You can use\nthis context by passing it to ndb.set_context.", "source": "codesearchnet"} {"code": "def _get_mu_tensor(self):\n root = self._get_cubic_root()\n dr = (self._h_max / self._h_min)\n mu = tf.maximum((root ** 2), (((tf.sqrt(dr) - 1) / (tf.sqrt(dr) + 1)) ** 2))\n return mu", "docstring": "Get the min mu which minimize the surrogate.\n\nReturns:\nThe mu_t.", "source": "codesearchnet"} {"code": "def exec_inspect(self, exec_id):\n if isinstance(exec_id, dict):\n exec_id = exec_id.get('Id')\n res = self._get(self._url('/exec/{0}/json', exec_id))\n return self._result(res, True)", "docstring": "Return low-level information about an exec command.\n\nArgs:\nexec_id (str): ID of the exec instance\n\nReturns:\n(dict): Dictionary of values returned by the endpoint.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"} {"code": "def remove_binding(site, hostheader='', ipaddress='*', port=80):\n \n name = _get_binding_info(hostheader, ipaddress, port)\n current_bindings = list_bindings(site)\n\n if name not in current_bindings:\n log.debug('Binding already absent: %s', name)\n return True\n ps_cmd = ['Remove-WebBinding',\n '-HostHeader', \"'{0}'\".format(hostheader),\n '-IpAddress', \"'{0}'\".format(ipaddress),\n '-Port', \"'{0}'\".format(port)]\n\n cmd_ret = _srvmgr(ps_cmd)\n\n if cmd_ret['retcode'] != 0:\n msg = 'Unable to remove binding: {0}\\nError: {1}' \\\n ''.format(site, cmd_ret['stderr'])\n raise CommandExecutionError(msg)\n\n if name not in list_bindings(site):\n log.debug('Binding removed successfully: %s', site)\n return True\n\n log.error('Unable to remove binding: %s', site)\n return False", "docstring": "Remove an IIS binding.\n\nArgs:\nsite (str): The IIS site name.\nhostheader (str): The host header of the binding.\nipaddress (str): The IP address of the binding.\nport (int): The TCP port of the binding.\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. 
code-block:: bash\n\nsalt '*' win_iis.remove_binding site='site0' hostheader='example.com' ipaddress='*' port='80'", "source": "juraj-google-style"} {"code": "def get_config(self, name):\n if (name not in self.registry):\n msg = \"Given config name '{}' is not registered.\"\n raise NotRegisteredError(msg.format(name))\n return copy.deepcopy(self.registry[name])", "docstring": "Return a registred configuration for given config name.\n\nArguments:\nname (string): A registred config name.\n\nRaises:\nNotRegisteredError: If given config name does not exist in\nregistry.\n\nReturns:\ndict: Configuration.", "source": "codesearchnet"} {"code": "def read(self, key):\n \n key = quote(key, safe='~')\n url = '/internal/playbooks/keyValue/{}'.format(key)\n r = self.tcex.session.get(url)\n data = r.content\n if data is not None and not isinstance(data, str):\n data = str(r.content, 'utf-8')\n return data", "docstring": "Read data from remote KV store for the provided key.\n\nArgs:\nkey (string): The key to read in remote KV store.\n\nReturns:\n(any): The response data from the remote KV store.", "source": "juraj-google-style"} {"code": "def _handle_offset_response(self, future, response):\n timestamp_offset_map = {}\n for (topic, part_data) in response.topics:\n for partition_info in part_data:\n (partition, error_code) = partition_info[:2]\n partition = TopicPartition(topic, partition)\n error_type = Errors.for_code(error_code)\n if (error_type is Errors.NoError):\n if (response.API_VERSION == 0):\n offsets = partition_info[2]\n assert (len(offsets) <= 1), 'Expected OffsetResponse with one offset'\n if (not offsets):\n offset = UNKNOWN_OFFSET\n else:\n offset = offsets[0]\n log.debug('Handling v0 ListOffsetResponse response for %s. Fetched offset %s', partition, offset)\n if (offset != UNKNOWN_OFFSET):\n timestamp_offset_map[partition] = (offset, None)\n else:\n (timestamp, offset) = partition_info[2:]\n log.debug('Handling ListOffsetResponse response for %s. Fetched offset %s, timestamp %s', partition, offset, timestamp)\n if (offset != UNKNOWN_OFFSET):\n timestamp_offset_map[partition] = (offset, timestamp)\n elif (error_type is Errors.UnsupportedForMessageFormatError):\n log.debug('Cannot search by timestamp for partition %s because the message format version is before 0.10.0', partition)\n elif (error_type is Errors.NotLeaderForPartitionError):\n log.debug('Attempt to fetch offsets for partition %s failed due to obsolete leadership information, retrying.', partition)\n future.failure(error_type(partition))\n return\n elif (error_type is Errors.UnknownTopicOrPartitionError):\n log.warning(('Received unknown topic or partition error in ListOffset request for partition %s. 
The topic/partition ' + 'may not exist or the user may not have Describe access to it.'), partition)\n future.failure(error_type(partition))\n return\n else:\n log.warning('Attempt to fetch offsets for partition %s failed due to: %s', partition, error_type)\n future.failure(error_type(partition))\n return\n if (not future.is_done):\n future.success(timestamp_offset_map)", "docstring": "Callback for the response of the list offset call above.\n\nArguments:\nfuture (Future): the future to update based on response\nresponse (OffsetResponse): response from the server\n\nRaises:\nAssertionError: if response does not match partition", "source": "codesearchnet"} {"code": "def get_full_url(self, url):\n \n \n request = Request('GET', url)\n preparedrequest = self.session.prepare_request(request)\n return preparedrequest.url", "docstring": "Get full url including any additional parameters\n\nArgs:\nurl (str): URL for which to get full url\n\nReturns:\nstr: Full url including any additional parameters", "source": "juraj-google-style"} {"code": "def business_days_between(self, from_dates, to_dates):\n from_biz, from_is_bizday = self._to_biz_space(dt.convert_to_date_tensor(from_dates).ordinal())\n to_biz, to_is_bizday = self._to_biz_space(dt.convert_to_date_tensor(to_dates).ordinal())\n from_biz = tf.where(from_is_bizday, from_biz, from_biz + 1)\n to_biz = tf.where(to_is_bizday, to_biz, to_biz + 1)\n return tf.math.maximum(to_biz - from_biz, 0)", "docstring": "Calculates number of business between pairs of dates.\n\nFor each pair, the initial date is included in the difference, and the final\ndate is excluded. If the final date is the same or earlier than the initial\ndate, zero is returned.\n\nArgs:\nfrom_dates: `DateTensor` of initial dates.\nto_dates: `DateTensor` of final dates, should be broadcastable to\n`from_dates`.\n\nReturns:\nAn int32 Tensor with the number of business days between the\ncorresponding pairs of dates.", "source": "github-repos"} {"code": "def get_optional_artifacts_per_task_id(upstream_artifacts):\n \n \n \n optional_artifacts_per_task_id = {}\n\n for artifact_definition in upstream_artifacts:\n if artifact_definition.get('optional', False) is True:\n task_id = artifact_definition['taskId']\n artifacts_paths = artifact_definition['paths']\n\n add_enumerable_item_to_dict(\n dict_=optional_artifacts_per_task_id,\n key=task_id, item=artifacts_paths\n )\n\n return optional_artifacts_per_task_id", "docstring": "Return every optional artifact defined in ``upstream_artifacts``, ordered by taskId.\n\nArgs:\nupstream_artifacts: the list of upstream artifact definitions\n\nReturns:\ndict: list of paths to downloaded artifacts ordered by taskId", "source": "juraj-google-style"} {"code": "def find_executable(cls, name, check_syspaths=False):\n \n exe = which(name)\n\n if not exe and check_syspaths:\n paths = cls.get_syspaths()\n env = os.environ.copy()\n env[\"PATH\"] = os.pathsep.join(paths)\n exe = which(name, env=env)\n\n if not exe:\n raise RuntimeError(\"Couldn't find executable '%s'.\" % name)\n return exe", "docstring": "Find an executable.\n\nArgs:\nname (str): Program name.\ncheck_syspaths (bool): If True, check the standard system paths as\nwell, if program was not found on current $PATH.\n\nReturns:\nstr: Full filepath of executable.", "source": "juraj-google-style"} {"code": "def wait_for_other_workers(self):\n if not self._worker_barrier:\n return\n self._worker_barrier.wait()", "docstring": "Waits for other workers to reach the same call to this 
method.\n\nRaises:\nValueError: if `worker_barrier` is not passed to the __init__ method.", "source": "github-repos"} {"code": "def _validate_user_inputs(self, attributes=None, event_tags=None):\n if (attributes and (not validator.are_attributes_valid(attributes))):\n self.logger.error('Provided attributes are in an invalid format.')\n self.error_handler.handle_error(exceptions.InvalidAttributeException(enums.Errors.INVALID_ATTRIBUTE_FORMAT))\n return False\n if (event_tags and (not validator.are_event_tags_valid(event_tags))):\n self.logger.error('Provided event tags are in an invalid format.')\n self.error_handler.handle_error(exceptions.InvalidEventTagException(enums.Errors.INVALID_EVENT_TAG_FORMAT))\n return False\n return True", "docstring": "Helper method to validate user inputs.\n\nArgs:\nattributes: Dict representing user attributes.\nevent_tags: Dict representing metadata associated with an event.\n\nReturns:\nBoolean True if inputs are valid. False otherwise.", "source": "codesearchnet"} {"code": "def _process_params(self):\n self._sort_to_str()\n if ('rows' not in self._solr_params):\n self._solr_params['rows'] = self._cfg['row_size']\n for (key, val) in self._solr_params.items():\n if (isinstance(val, str) and six.PY2):\n self._solr_params[key] = val.encode(encoding='UTF-8')\n return self._solr_params", "docstring": "Adds default row size if it's not given in the query.\nConverts param values into unicode strings.\n\nReturns:\nProcessed self._solr_params dict.", "source": "codesearchnet"} {"code": "def jobs_get(self, job_id, project_id=None):\n \n if project_id is None:\n project_id = self._project_id\n url = Api._ENDPOINT + (Api._JOBS_PATH % (project_id, job_id))\n return datalab.utils.Http.request(url, credentials=self._credentials)", "docstring": "Issues a request to retrieve information about a job.\n\nArgs:\njob_id: the id of the job\nproject_id: the project id to use to fetch the results; use None for the default project.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "juraj-google-style"} {"code": "def _get_event_handlers():\n import os\n import importlib\n event_handlers = {'on_ready': [], 'on_resume': [], 'on_error': [], 'on_message': [], 'on_socket_raw_receive': [], 'on_socket_raw_send': [], 'on_message_delete': [], 'on_message_edit': [], 'on_reaction_add': [], 'on_reaction_remove': [], 'on_reaction_clear': [], 'on_channel_delete': [], 'on_channel_create': [], 'on_channel_update': [], 'on_member_join': [], 'on_member_remove': [], 'on_member_update': [], 'on_server_join': [], 'on_server_remove': [], 'on_server_update': [], 'on_server_role_create': [], 'on_server_role_delete': [], 'on_server_role_update': [], 'on_server_emojis_update': [], 'on_server_available': [], 'on_server_unavailable': [], 'on_voice_state_update': [], 'on_member_ban': [], 'on_member_unban': [], 'on_typing': [], 'on_group_join': [], 'on_group_remove': []}\n database_dir = '{}/modules'.format(os.path.dirname(os.path.realpath(__file__)))\n for module_name in os.listdir(database_dir):\n module_dir = '{}/{}'.format(database_dir, module_name)\n if (os.path.isdir(module_dir) and (not module_name.startswith('_'))):\n module_event_handlers = os.listdir(module_dir)\n for event_handler in event_handlers.keys():\n if ('{}.py'.format(event_handler) in module_event_handlers):\n import_name = '.discord_modis.modules.{}.{}'.format(module_name, event_handler)\n logger.debug('Found event handler {}'.format(import_name[23:]))\n try:\n 
event_handlers[event_handler].append(importlib.import_module(import_name, 'modis'))\n except Exception as e:\n logger.exception(e)\n return event_handlers", "docstring": "Gets dictionary of event handlers and the modules that define them\n\nReturns:\nevent_handlers (dict): Contains \"all\", \"on_ready\", \"on_message\", \"on_reaction_add\", \"on_error\"", "source": "codesearchnet"} {"code": "def _execute(self, connection, query, fetch=True):\n cursor = connection.cursor()\n try:\n cursor.execute(query)\n except Exception as e:\n from ambry.mprlib.exceptions import BadSQLError\n raise BadSQLError('Failed to execute query: {}; {}'.format(query, e))\n if fetch:\n return cursor.fetchall()\n else:\n return cursor", "docstring": "Executes given query using given connection.\n\nArgs:\nconnection (apsw.Connection): connection to the sqlite db who stores mpr data.\nquery (str): sql query\nfetch (boolean, optional): if True, fetch query result and return it. If False, do not fetch.\n\nReturns:\niterable with query result.", "source": "codesearchnet"} {"code": "def allan_variance(data, dt, tmax=10):\n \n allanvar = []\n nmax = len(data) if len(data) < tmax / dt else int(tmax / dt)\n for i in range(1, nmax+1):\n databis = data[len(data) % i:]\n y = databis.reshape(len(data)\n allanvar.append(((y[1:] - y[:-1])**2).mean() / 2)\n return dt * np.arange(1, nmax+1), np.array(allanvar)", "docstring": "Calculate Allan variance.\n\nArgs:\ndata (np.ndarray): Input data.\ndt (float): Time between each data.\ntmax (float): Maximum time.\n\nReturns:\nvk (np.ndarray): Frequency.\nallanvar (np.ndarray): Allan variance.", "source": "juraj-google-style"} {"code": "def bgr2gray(img, keepdim=False):\n \n out_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if keepdim:\n out_img = out_img[..., None]\n return out_img", "docstring": "Convert a BGR image to grayscale image.\n\nArgs:\nimg (ndarray): The input image.\nkeepdim (bool): If False (by default), then return the grayscale image\nwith 2 dims, otherwise 3 dims.\n\nReturns:\nndarray: The converted grayscale image.", "source": "juraj-google-style"} {"code": "def to_url(self, site='amazon', country='us'):\n try:\n try:\n (url, tlds) = URL_MAP[site]\n except ValueError:\n tlds = None\n url = URL_MAP[site]\n except KeyError:\n raise SiteError(site)\n inject = {'isbn': self._isbn}\n if tlds:\n if (country not in tlds):\n raise CountryError(country)\n tld = tlds[country]\n if (not tld):\n tld = country\n inject['tld'] = tld\n return (url % inject)", "docstring": "Generate a link to an online book site.\n\nArgs:\nsite (str): Site to create link to\ncountry (str): Country specific version of ``site``\n\nReturns:\n``str``: URL on ``site`` for book\n\nRaises:\nSiteError: Unknown site value\nCountryError: Unknown country value", "source": "codesearchnet"} {"code": "def deduplicate_readonly_buffers(tflite_model):\n model = flatbuffer_utils.convert_bytearray_to_object(tflite_model)\n read_only_buffer_indices = set()\n for subgraph in model.subgraphs:\n read_only_input_tensor_indices = set()\n for op in subgraph.operators:\n if op.inputs is None:\n continue\n for i, input_tensor_idx in enumerate(op.inputs):\n if op.mutatingVariableInputs is not None:\n if i < len(op.mutatingVariableInputs) and op.mutatingVariableInputs[i]:\n continue\n if subgraph.tensors[input_tensor_idx].isVariable:\n continue\n read_only_input_tensor_indices.add(input_tensor_idx)\n for op in subgraph.operators:\n if op.outputs is not None:\n for output_tensor_idx in op.outputs:\n 
read_only_input_tensor_indices.discard(output_tensor_idx)\n if op.intermediates is not None:\n for intermediate_tensor_idx in op.intermediates:\n read_only_input_tensor_indices.discard(intermediate_tensor_idx)\n if subgraph.inputs is not None:\n for input_tensor_idx in subgraph.inputs:\n read_only_input_tensor_indices.discard(input_tensor_idx)\n if subgraph.outputs is not None:\n for output_tensor_idx in subgraph.outputs:\n read_only_input_tensor_indices.discard(output_tensor_idx)\n for tensor_idx in read_only_input_tensor_indices:\n read_only_buffer_indices.add(subgraph.tensors[tensor_idx].buffer)\n for buffer_idx in read_only_buffer_indices.copy():\n if buffer_idx < 0 or (model.buffers[buffer_idx].data is None or isinstance(model.buffers[buffer_idx].data, list) or model.buffers[buffer_idx].data.size == 0):\n read_only_buffer_indices.discard(buffer_idx)\n\n class BufferIndex:\n \n\n def __init__(self, idx, size, hash_value):\n self.idx = idx\n self.size = size\n self.hash_value = hash_value\n read_only_buffers = list(map(lambda index: BufferIndex(index, model.buffers[index].data.size, hashlib.md5(model.buffers[index].data.data.tobytes()).hexdigest()), read_only_buffer_indices))\n read_only_buffers = sorted(read_only_buffers, key=lambda buffer: (buffer.size, buffer.hash_value), reverse=True)\n duplicate_buffer_map = {}\n for i, buffer_i in enumerate(read_only_buffers):\n if buffer_i.idx in duplicate_buffer_map:\n continue\n for buffer_j in read_only_buffers[i + 1:]:\n if buffer_j.idx in duplicate_buffer_map:\n continue\n if buffer_i.size != buffer_j.size:\n break\n if buffer_i.hash_value != buffer_j.hash_value:\n continue\n duplicate_buffer_map[buffer_j.idx] = buffer_i.idx\n for subgraph in model.subgraphs:\n for op in subgraph.operators:\n if op.inputs is None:\n continue\n for input_tensor in op.inputs:\n buffer_idx = subgraph.tensors[input_tensor].buffer\n if buffer_idx in duplicate_buffer_map:\n subgraph.tensors[input_tensor].buffer = duplicate_buffer_map[buffer_idx]\n for idx in duplicate_buffer_map:\n model.buffers[idx].data = None\n return flatbuffer_utils.convert_object_to_bytearray(model)", "docstring": "Generates a new model byte array after deduplicating readonly buffers.\n\nThis function should be invoked after the model optimization toolkit. The\nmodel optimization toolkit assumes that each tensor object owns its each\nbuffer separately.\n\nArgs:\ntflite_model: TFLite flatbuffer in a byte array to be deduplicated.\n\nReturns:\nTFLite flatbuffer in a bytes array, processed with the deduplication method.", "source": "github-repos"} {"code": "def on_train_batch_begin(self, batch, logs=None):\n if self._should_call_train_batch_hooks:\n self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)", "docstring": "Calls the `on_train_batch_begin` methods of its callbacks.\n\nArgs:\nbatch: Integer, index of batch within the current epoch.\nlogs: Dict, contains the return value of `model.train_step`. Typically,\nthe values of the `Model`'s metrics are returned. 
Example:\n`{'loss': 0.2, 'accuracy': 0.7}`.", "source": "github-repos"} {"code": "def _get_update_method(self):\n if getattr(self, '_update_uses_post', False):\n http_method = self.gitlab.http_post\n else:\n http_method = self.gitlab.http_put\n return http_method", "docstring": "Return the HTTP method to use.\n\nReturns:\nobject: http_put (default) or http_post", "source": "codesearchnet"} {"code": "def _ScanEncryptedVolume(self, scan_context, scan_node):\n \n if not scan_node or not scan_node.path_spec:\n raise errors.ScannerError('Invalid or missing scan node.')\n\n credentials = credentials_manager.CredentialsManager.GetCredentials(\n scan_node.path_spec)\n if not credentials:\n raise errors.ScannerError('Missing credentials for scan node.')\n\n if not self._mediator:\n raise errors.ScannerError(\n 'Unable to proceed. Encrypted volume found but no mediator to '\n 'determine how it should be unlocked.')\n\n if self._mediator.UnlockEncryptedVolume(\n self._source_scanner, scan_context, scan_node, credentials):\n self._source_scanner.Scan(\n scan_context, scan_path_spec=scan_node.path_spec)", "docstring": "Scans an encrypted volume scan node for volume and file systems.\n\nArgs:\nscan_context (SourceScannerContext): source scanner context.\nscan_node (SourceScanNode): volume scan node.\n\nRaises:\nScannerError: if the format of or within the source is not supported,\nthe scan node is invalid, there are no credentials defined for\nthe format or no mediator is provided and a locked scan node was\nfound, e.g. an encrypted volume,", "source": "juraj-google-style"} {"code": "def check_output_variable(self, variable):\n \n match = False\n if variable in self.out_variables:\n match = True\n return match", "docstring": "Check to see if output variable was requested by downstream app.\n\nUsing the auto generated dictionary of output variables check to see if provided\nvariable was requested by downstream app.\n\nArgs:\nvariable (string): The variable name, not the full variable.\n\nReturns:\n(boolean): Boolean value indicator whether a match was found.", "source": "juraj-google-style"} {"code": "def _PairwiseCheck(self, pair_comparator, strict=False):\n i = iter(self._actual)\n try:\n prev = next(i)\n while True:\n current = next(i)\n if not pair_comparator(prev, current):\n strictly = 'strictly ' if strict else ''\n self._FailComparingValues('is {0}ordered'.format(strictly), (prev, current))\n prev = current\n except StopIteration:\n pass", "docstring": "Iterates over this subject and compares adjacent elements.\n\nFor example, compares element 0 with element 1, 1 with 2, ... n-1 with n.\n\nArgs:\npair_comparator: A function accepting two arguments. 
If the arguments are\nordered as expected, the function should return True, otherwise False.\nstrict: whether the pair comparator function is strict.", "source": "github-repos"} {"code": "def generate_sjson_from_srt(srt_subs):\n \n sub_starts = []\n sub_ends = []\n sub_texts = []\n for sub in srt_subs:\n sub_starts.append(sub.start.ordinal)\n sub_ends.append(sub.end.ordinal)\n sub_texts.append(sub.text.replace('\\n', ' '))\n\n sjson_subs = {\n 'start': sub_starts,\n 'end': sub_ends,\n 'text': sub_texts\n }\n return sjson_subs", "docstring": "Generate transcripts from sjson to SubRip (*.srt).\n\nArguments:\nsrt_subs(SubRip): \"SRT\" subs object\n\nReturns:\nSubs converted to \"SJSON\" format.", "source": "juraj-google-style"} {"code": "def add_callback(self, name, func):\n if (name == 'on_scan'):\n events = ['device_seen']\n\n def callback(_conn_string, _conn_id, _name, event):\n func(self.id, event, event.get('validity_period', 60))\n elif (name == 'on_report'):\n events = ['report', 'broadcast']\n\n def callback(_conn_string, conn_id, _name, event):\n func(conn_id, event)\n elif (name == 'on_trace'):\n events = ['trace']\n\n def callback(_conn_string, conn_id, _name, event):\n func(conn_id, event)\n elif (name == 'on_disconnect'):\n events = ['disconnection']\n\n def callback(_conn_string, conn_id, _name, _event):\n func(self.id, conn_id)\n else:\n raise ArgumentError('Unknown callback type {}'.format(name))\n self._adapter.register_monitor([None], events, callback)", "docstring": "Add a callback when device events happen.\n\nArgs:\nname (str): currently support 'on_scan' and 'on_disconnect'\nfunc (callable): the function that should be called", "source": "codesearchnet"} {"code": "def write_auth(msg_type, profile_name, auth, cfg):\n \n key_fmt = profile_name + \"_\" + msg_type\n pwd = []\n for k, v in CONFIG[msg_type][\"auth\"].items():\n pwd.append(auth[k])\n\n if len(pwd) > 1:\n cfg.pwd[key_fmt] = \" :: \".join(pwd)\n else:\n cfg.pwd[key_fmt] = pwd[0]", "docstring": "Write the settings into the auth portion of the cfg.\n\nArgs:\n:msg_type: (str) message type to create config entry.\n:profile_name: (str) name of the profile entry\n:auth: (dict) auth parameters\n:cfg: (jsonconfig.Config) config instance.", "source": "juraj-google-style"} {"code": "def _indexed_slices_to_tensor(value, dtype=None, name=None, as_ref=False):\n _ = as_ref\n if dtype and (not dtype.is_compatible_with(value.dtype)):\n raise ValueError(f'Incompatible tensor conversion requested to `dtype` {dtype.name} for IndexedSlices ({value}) with dtype {value.dtype.name}')\n if value.dense_shape is None:\n raise ValueError(f'Tensor conversion requested for IndexedSlices for argument `value` without dense_shape: {value!s}')\n if not context.executing_eagerly():\n dense_shape_value = tensor_util.constant_value(value.dense_shape)\n if dense_shape_value is not None:\n num_elements = np.prod(dense_shape_value)\n if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:\n warnings.warn('Converting sparse IndexedSlices to a dense Tensor with %d elements. This may consume a large amount of memory.' 
% num_elements)\n return gen_math_ops.unsorted_segment_sum(value.values, value.indices, value.dense_shape[0], name=name)", "docstring": "Converts an IndexedSlices object `value` to a Tensor.\n\nNOTE(mrry): This function is potentially expensive.\n\nArgs:\nvalue: An ops.IndexedSlices object.\ndtype: The dtype of the Tensor to be returned.\nname: Optional name to use for the returned Tensor.\nas_ref: True if a ref is requested.\n\nReturns:\nA dense Tensor representing the values in the given IndexedSlices.\n\nRaises:\nValueError: If the IndexedSlices does not have the same dtype.", "source": "github-repos"} {"code": "def __init__(self, time, status, latitude, longitude, speed, track, date,\n variation, mode=None):\n \n super(Position, self).__init__(latitude, longitude)\n self.time = time\n self.status = status\n self.speed = speed\n self.track = track\n self.date = date\n self.variation = variation\n self.mode = mode", "docstring": "Initialise a new ``Position`` object.\n\nArgs:\ntime (datetime.time): Time the fix was taken\nstatus (bool): Whether the data is active\nlatitude (float): Fix's latitude\nlongitude (float): Fix's longitude\nspeed (float): Ground speed\ntrack (float): Track angle\ndate (datetime.date): Date when position was taken\nvariation (float): Magnetic variation\nmode (str): Type of reading", "source": "juraj-google-style"} {"code": "def writeOutput(self, session, directory, name):\n self.project_directory = directory\n with tmp_chdir(directory):\n batchDirectory = self._getBatchDirectory(directory)\n self._writeReplacementFiles(session=session, directory=directory, name=name)\n self.write(session=session, directory=directory, name=name)\n self._writeXput(session=session, directory=batchDirectory, fileCards=self.OUTPUT_FILES, name=name)\n self._writeWMSDatasets(session=session, directory=batchDirectory, wmsDatasetCards=self.WMS_DATASETS, name=name)", "docstring": "Write only output files for a GSSHA project from the database to file.\n\nArgs:\nsession (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database\ndirectory (str): Directory where the files will be written.\nname (str): Name that will be given to project when written (e.g.: 'example'). Files that follow the project\nnaming convention will be given this name with the appropriate extension (e.g.: 'example.prj',\n'example.cmt', and 'example.gag'). 
Files that do not follow this convention will retain their original\nfile names.", "source": "codesearchnet"} {"code": "def WriteManyToPath(objs, filepath):\n with io.open(filepath, mode='w', encoding='utf-8') as filedesc:\n WriteManyToFile(objs, filedesc)", "docstring": "Serializes and writes given Python objects to a multi-document YAML file.\n\nArgs:\nobjs: An iterable of Python objects to serialize.\nfilepath: A path to the file into which the object is to be written.", "source": "codesearchnet"} {"code": "def build_model(hparams_set, model_name, data_dir, problem_name, beam_size=1):\n hparams = trainer_lib.create_hparams(hparams_set, data_dir=data_dir, problem_name=problem_name)\n translate_model = registry.model(model_name)(hparams, tf.estimator.ModeKeys.EVAL)\n inputs = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name='inputs')\n targets = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name='targets')\n translate_model({'inputs': inputs, 'targets': targets})\n att_mats = get_att_mats(translate_model)\n with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n samples = translate_model.infer({'inputs': inputs}, beam_size=beam_size)['outputs']\n return (inputs, targets, samples, att_mats)", "docstring": "Build the graph required to fetch the attention weights.\n\nArgs:\nhparams_set: HParams set to build the model with.\nmodel_name: Name of model.\ndata_dir: Path to directory containing training data.\nproblem_name: Name of problem.\nbeam_size: (Optional) Number of beams to use when decoding a translation.\nIf set to 1 (default) then greedy decoding is used.\n\nReturns:\nTuple of (\ninputs: Input placeholder to feed in ids to be translated.\ntargets: Targets placeholder to feed to translation when fetching\nattention weights.\nsamples: Tensor representing the ids of the translation.\natt_mats: Tensors representing the attention weights.\n)", "source": "codesearchnet"} {"code": "def setup_colorbars(self, plot_call_sign):\n \n self.fig.colorbar(plot_call_sign, cax=self.cbar_ax,\n ticks=self.cbar_ticks, orientation=self.cbar_orientation)\n \n (getattr(self.cbar_ax, 'set_' + self.cbar_var + 'ticklabels')\n (self.cbar_tick_labels, fontsize=self.cbar_ticks_fontsize))\n (getattr(self.cbar_ax, 'set_' + self.cbar_var + 'label')\n (self.cbar_label, fontsize=self.cbar_label_fontsize, labelpad=self.cbar_label_pad))\n\n return", "docstring": "Setup colorbars for each type of plot.\n\nTake all of the optional performed during ``__init__`` method and makes the colorbar.\n\nArgs:\nplot_call_sign (obj): Plot instance of ax.contourf with colormapping to\nadd as a colorbar.", "source": "juraj-google-style"} {"code": "def add_oxidation_state_by_site_fraction(structure, oxidation_states):\n \n try:\n for i, site in enumerate(structure):\n new_sp = collections.defaultdict(float)\n for j, (el, occu) in enumerate(get_z_ordered_elmap(site\n .species)):\n specie = Specie(el.symbol, oxidation_states[i][j])\n new_sp[specie] += occu\n structure[i] = new_sp\n return structure\n except IndexError:\n raise ValueError(\"Oxidation state of all sites must be \"\n \"specified in the list.\")", "docstring": "Add oxidation states to a structure by fractional site.\n\nArgs:\noxidation_states (list): List of list of oxidation states for each\nsite fraction for each site.\nE.g., [[2, 4], [3], [-2], [-2], [-2]]", "source": "juraj-google-style"} {"code": "def collate(self, merge_type=None, drop=[], drop_constant=False):\n from .element import Collator\n merge_type = (merge_type if merge_type else self.__class__)\n 
return Collator(self, merge_type=merge_type, drop=drop, drop_constant=drop_constant)()", "docstring": "Collate allows reordering nested containers\n\nCollation allows collapsing nested mapping types by merging\ntheir dimensions. In simple terms in merges nested containers\ninto a single merged type.\n\nIn the simple case a HoloMap containing other HoloMaps can\neasily be joined in this way. However collation is\nparticularly useful when the objects being joined are deeply\nnested, e.g. you want to join multiple Layouts recorded at\ndifferent times, collation will return one Layout containing\nHoloMaps indexed by Time. Changing the merge_type will allow\nmerging the outer Dimension into any other UniformNdMapping\ntype.\n\nArgs:\nmerge_type: Type of the object to merge with\ndrop: List of dimensions to drop\ndrop_constant: Drop constant dimensions automatically\n\nReturns:\nCollated Layout or HoloMap", "source": "codesearchnet"} {"code": "def get_thumbnail(self, mxcurl, width, height, method='scale', allow_remote=True):\n if (method not in ['scale', 'crop']):\n raise ValueError((\"Unsupported thumb method '%s'\" % method))\n query_params = {'width': width, 'height': height, 'method': method}\n if (not allow_remote):\n query_params['allow_remote'] = False\n if mxcurl.startswith('mxc:\n return self._send('GET', mxcurl[6:], query_params=query_params, api_path='/_matrix/media/r0/thumbnail/', return_json=False)\n else:\n raise ValueError((\"MXC URL '%s' did not begin with 'mxc:", "docstring": "Download raw media thumbnail from provided mxc URL.\n\nArgs:\nmxcurl (str): mxc media URL\nwidth (int): desired thumbnail width\nheight (int): desired thumbnail height\nmethod (str): thumb creation method. Must be\nin ['scale', 'crop']. Default 'scale'.\nallow_remote (bool): indicates to the server that it should not\nattempt to fetch the media if it is deemed remote. 
Defaults\nto true if not provided.", "source": "codesearchnet"} {"code": "def _handle_per_output_metrics(self, metrics_dict, y_true, y_pred, mask, weights=None):\n metric_results = []\n for metric_name, metric_fn in metrics_dict.items():\n with backend.name_scope(metric_name):\n metric_result = training_utils_v1.call_metric_function(metric_fn, y_true, y_pred, weights=weights, mask=mask)\n metric_results.append(metric_result)\n return metric_results", "docstring": "Calls metric functions for a single output.\n\nArgs:\nmetrics_dict: A dict with metric names as keys and metric fns as values.\ny_true: Target output.\ny_pred: Predicted output.\nmask: Computed mask value for the current output.\nweights: Weights to be applied on the current output.\n\nReturns:\nA list of metric result tensors.", "source": "github-repos"} {"code": "def seek(self, n):\n \n if self._mode != \"r\":\n raise UnsupportedOperation(\"not available in 'w' mode\")\n\n if 0 <= n < self._nb_markers:\n self._n = n\n self._bed.seek(self._get_seek_position(n))\n\n else:\n \n raise ValueError(\"invalid position in BED: {}\".format(n))", "docstring": "Gets to a certain marker position in the BED file.\n\nArgs:\nn (int): The index of the marker to seek to.", "source": "juraj-google-style"} {"code": "def _verify_barycentric(lambda1, lambda2, lambda3):\n weights_total = ((lambda1 + lambda2) + lambda3)\n if (not np.allclose(weights_total, 1.0, atol=0.0)):\n raise ValueError('Weights do not sum to 1', lambda1, lambda2, lambda3)\n if ((lambda1 < 0.0) or (lambda2 < 0.0) or (lambda3 < 0.0)):\n raise ValueError('Weights must be positive', lambda1, lambda2, lambda3)", "docstring": "Verifies that weights are barycentric and on the reference triangle.\n\nI.e., checks that they sum to one and are all non-negative.\n\nArgs:\nlambda1 (float): Parameter along the reference triangle.\nlambda2 (float): Parameter along the reference triangle.\nlambda3 (float): Parameter along the reference triangle.\n\nRaises:\nValueError: If the weights are not valid barycentric\ncoordinates, i.e. 
they don't sum to ``1``.\nValueError: If some weights are negative.", "source": "codesearchnet"} {"code": "def _insource_jedi_vim_test(data, ibs):\n \n \n data\n ibs\n import utool as ut\n xdata = ut.ColumnLists()\n xdata\n import ibeis\n xibs = ibeis.IBEISController()\n xibs", "docstring": "If jedi-vim supports google style docstrings you should be able to\nautocomplete ColumnLists methods for `data`\n\nArgs:\ndata (utool.ColumnLists): a column list objct\nibs (ibeis.IBEISController): an object", "source": "juraj-google-style"} {"code": "def filter_benchmarks(benchmarks, bench_funcs, base_ver):\n for bm in list(benchmarks):\n func = bench_funcs[bm]\n if (getattr(func, '_python2_only', False) and ((3, 0) <= base_ver)):\n benchmarks.discard(bm)\n logging.info(('Skipping Python2-only benchmark %s; not compatible with Python %s' % (bm, base_ver)))\n continue\n return benchmarks", "docstring": "Filters out benchmarks not supported by both Pythons.\n\nArgs:\nbenchmarks: a set() of benchmark names\nbench_funcs: dict mapping benchmark names to functions\npython: the interpereter commands (as lists)\n\nReturns:\nThe filtered set of benchmark names", "source": "codesearchnet"} {"code": "async def retry_request(*args, retry_exceptions=(asyncio.TimeoutError, ScriptWorkerRetryException), retry_async_kwargs=None, **kwargs):\n retry_async_kwargs = (retry_async_kwargs or {})\n return (await retry_async(request, retry_exceptions=retry_exceptions, args=args, kwargs=kwargs, **retry_async_kwargs))", "docstring": "Retry the ``request`` function.\n\nArgs:\n*args: the args to send to request() through retry_async().\nretry_exceptions (list, optional): the exceptions to retry on.\nDefaults to (ScriptWorkerRetryException, ).\nretry_async_kwargs (dict, optional): the kwargs for retry_async.\nIf None, use {}. 
Defaults to None.\n**kwargs: the kwargs to send to request() through retry_async().\n\nReturns:\nobject: the value from request().", "source": "codesearchnet"} {"code": "def terminate_and_create_image(name):\n \n node = _host_node()\n operation = _gcp().instances().delete(project=DEFAULT_PROJECT, zone=DEFAULT_ZONE,\n instance=node['real_name']).execute()\n while True:\n status = get_zone_operation_status(operation=operation)\n if status == 'DONE':\n break\n\n print 'Terminating instance [OPERATION %s]' % status\n time.sleep(5)\n\n body = {\n 'name': name,\n 'sourceDisk': node['source_disk'],\n }\n\n operation = _gcp().images().insert(project=DEFAULT_PROJECT, body=body).execute()\n while True:\n status = get_global_operation_status(operation=operation)\n if status == 'DONE':\n break\n\n print 'Creating image [OPERATION %s]' % status\n time.sleep(5)\n\n print 'Created image: %s' % operation['targetLink']", "docstring": "Create an image from a terminated host (with auto_delete_boot_disk=False)\n\nArgs:\nname: The name of the image", "source": "juraj-google-style"} {"code": "def count_ops(self):\n count_ops = {}\n for (instr, _, _) in self.data:\n if (instr.name in count_ops.keys()):\n count_ops[instr.name] += 1\n else:\n count_ops[instr.name] = 1\n return count_ops", "docstring": "Count each operation kind in the circuit.\n\nReturns:\ndict: a breakdown of how many operations of each kind.", "source": "codesearchnet"} {"code": "def rmod(self, other, axis=\"columns\", level=None, fill_value=None):\n \n return self._binary_op(\n \"rmod\", other, axis=axis, level=level, fill_value=fill_value\n )", "docstring": "Mod this DataFrame against another DataFrame/Series/scalar.\n\nArgs:\nother: The object to use to apply the div against this.\naxis: The axis to div over.\nlevel: The Multilevel index level to apply div over.\nfill_value: The value to fill NaNs with.\n\nReturns:\nA new DataFrame with the rdiv applied.", "source": "juraj-google-style"} {"code": "def save_as(self, filename=None):\n \n if filename is None:\n filename = self.filename\n if filename is None:\n filename = self.default_filename\n if filename is None:\n raise RuntimeError(\"Class '{}' has no default filename\".format(self.__class__.__name__))\n self._do_save_as(filename)\n self.filename = filename", "docstring": "Dumps object contents into file on disk.\n\nArgs:\nfilename (optional): defaults to self.filename. If passed, self.filename\nwill be updated to filename.", "source": "juraj-google-style"} {"code": "def _KeyToFilePath(key, api_version):\n\n def _ReplaceCapsWithDash(matchobj):\n match = matchobj.group(0)\n return '-%s' % match.lower()\n case_insensitive_key = re.sub('([A-Z]{1})', _ReplaceCapsWithDash, key)\n api_folder = _API_GOLDEN_FOLDER_V2 if api_version == 2 else _API_GOLDEN_FOLDER_V1\n if key.startswith('tensorflow.experimental.numpy'):\n api_folder = os.path.join(api_folder, '..', '..', '..', '..', '../third_party', 'py', 'numpy', 'tf_numpy_api')\n api_folder = os.path.normpath(api_folder)\n return os.path.join(api_folder, '%s.pbtxt' % case_insensitive_key)", "docstring": "From a given key, construct a filepath.\n\nFilepath will be inside golden folder for api_version.\n\nArgs:\nkey: a string used to determine the file path\napi_version: a number indicating the tensorflow API version, e.g. 
1 or 2.\n\nReturns:\nA string of file path to the pbtxt file which describes the public API", "source": "github-repos"} {"code": "def set_contents(self, contents, encoding=None):\n self.encoding = encoding\n changed = self._set_initial_contents(contents)\n if (self._side_effect is not None):\n self._side_effect(self)\n return changed", "docstring": "Sets the file contents and size and increases the modification time.\nAlso executes the side_effects if available.\n\nArgs:\ncontents: (str, bytes, unicode) new content of file.\nencoding: (str) the encoding to be used for writing the contents\nif they are a unicode string.\nIf not given, the locale preferred encoding is used.\n\nRaises:\nIOError: if `st_size` is not a non-negative integer,\nor if it exceeds the available file system space.", "source": "codesearchnet"} {"code": "def saveAsTFRecords(df, output_dir):\n tf_rdd = df.rdd.mapPartitions(toTFExample(df.dtypes))\n tf_rdd.saveAsNewAPIHadoopFile(output_dir, 'org.tensorflow.hadoop.io.TFRecordFileOutputFormat', keyClass='org.apache.hadoop.io.BytesWritable', valueClass='org.apache.hadoop.io.NullWritable')", "docstring": "Save a Spark DataFrame as TFRecords.\n\nThis will convert the DataFrame rows to TFRecords prior to saving.\n\nArgs:\n:df: Spark DataFrame\n:output_dir: Path to save TFRecords", "source": "codesearchnet"} {"code": "def most_by_uncertain(self, y):\n \n return self.most_uncertain_by_mask((self.ds.y == y), y)", "docstring": "Extracts the predicted classes which correspond to the selected class (y) and have probabilities nearest to 1/number_of_classes (eg. 0.5 for 2 classes, 0.33 for 3 classes) for the selected class.\n\nArguments:\ny (int): the selected class\n\nReturns:\nidxs (numpy.ndarray): An array of indexes (numpy.ndarray)", "source": "juraj-google-style"} {"code": "def delete_knowledge_base(project_id, knowledge_base_id):\n import dialogflow_v2beta1 as dialogflow\n client = dialogflow.KnowledgeBasesClient()\n knowledge_base_path = client.knowledge_base_path(project_id, knowledge_base_id)\n response = client.delete_knowledge_base(knowledge_base_path)\n print('Knowledge Base deleted.'.format(response))", "docstring": "Deletes a specific Knowledge base.\n\nArgs:\nproject_id: The GCP project linked with the agent.\nknowledge_base_id: Id of the Knowledge base.", "source": "codesearchnet"} {"code": "def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):\n tensors_to_transpose = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')\n var_map = (('layer.', 'layer_'), ('word_embeddings.weight', 'word_embeddings'), ('position_embeddings.weight', 'position_embeddings'), ('token_type_embeddings.weight', 'token_type_embeddings'), ('.', '/'), ('LayerNorm/weight', 'LayerNorm/gamma'), ('LayerNorm/bias', 'LayerNorm/beta'), ('weight', 'kernel'))\n if not os.path.isdir(ckpt_dir):\n os.makedirs(ckpt_dir)\n state_dict = model.state_dict()\n\n def to_tf_var_name(name: str):\n for patt, repl in iter(var_map):\n name = name.replace(patt, repl)\n return f'bert/{name}'\n\n def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):\n tf_dtype = tf.dtypes.as_dtype(tensor.dtype)\n tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())\n session.run(tf.variables_initializer([tf_var]))\n session.run(tf_var)\n return tf_var\n tf.reset_default_graph()\n with tf.Session() as session:\n for var_name in state_dict:\n tf_name = to_tf_var_name(var_name)\n torch_tensor = 
state_dict[var_name].numpy()\n if any((x in var_name for x in tensors_to_transpose)):\n torch_tensor = torch_tensor.T\n tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)\n tf_var.assign(tf.cast(torch_tensor, tf_var.dtype))\n tf_weight = session.run(tf_var)\n print(f'Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}')\n saver = tf.train.Saver(tf.trainable_variables())\n saver.save(session, os.path.join(ckpt_dir, model_name.replace('-', '_') + '.ckpt'))", "docstring": "Args:\nmodel: BertModel Pytorch model instance to be converted\nckpt_dir: Tensorflow model directory\nmodel_name: model name\n\nCurrently supported HF models:\n\n- Y BertModel\n- N BertForMaskedLM\n- N BertForPreTraining\n- N BertForMultipleChoice\n- N BertForNextSentencePrediction\n- N BertForSequenceClassification\n- N BertForQuestionAnswering", "source": "github-repos"} {"code": "def Dump(obj, sort_keys=False, encoder=None):\n text = json.dumps(obj, indent=2, sort_keys=sort_keys, ensure_ascii=False, cls=encoder, separators=_SEPARATORS)\n if (compatibility.PY2 and isinstance(text, bytes)):\n text = text.decode('utf-8')\n return text", "docstring": "Stringifies a Python object into its JSON representation.\n\nArgs:\nobj: A Python object to convert to JSON.\nsort_keys: If True, output dictionaries keys in sorted (ascending) order.\nencoder: An (optional) encoder class to use.\n\nReturns:\nA JSON representation of the given object.", "source": "codesearchnet"} {"code": "def clear_list(self, **kwargs):\n \n path = self._get_id_path('clear')\n kwargs.update({'session_id': self.session_id})\n\n payload = {}\n\n response = self._POST(path, kwargs, payload)\n self._set_attrs_to_values(response)\n return response", "docstring": "Clears all of the items within a list. This is an irreversible action\nand should be treated with caution.\n\nA valid session id is required.\n\nArgs:\nconfirm: True (do it) | False (don't do it)\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"} {"code": "def CreateShowcaseAd(client, adgroup, expanded_image_filepath,\n collapsed_image_filepath):\n \n ad_group_ad_service = client.GetService('AdGroupAdService', 'v201809')\n\n showcase_ad = {\n 'adGroupId': adgroup['id'],\n 'ad': {\n 'xsi_type': 'ShowcaseAd',\n 'Ad.Type': 'ShowcaseAd',\n \n 'name': 'Showcase ad \n 'finalUrls': 'http:\n 'displayUrl': 'example.com',\n \n 'expandedImage': {\n 'mediaId': UploadImage(client, expanded_image_filepath)['mediaId']\n },\n \n 'collapsedImage': {\n 'mediaId':\n UploadImage(client, collapsed_image_filepath)['mediaId']\n }\n }\n }\n\n ad_operation = {\n 'operator': 'ADD',\n 'operand': showcase_ad\n }\n\n \n showcase_ad = ad_group_ad_service.mutate([ad_operation])['value'][0]\n\n print 'ShowcaseAd with ID \"%s\" was added.' 
% showcase_ad['ad']['id']\n\n return showcase_ad", "docstring": "Creates a showcase add for the given AdGroup with the given images.\n\nArgs:\nclient: an AdWordsClient instance.\nadgroup: a dict or suds object defining an AdGroup for a Shopping Campaign.\nexpanded_image_filepath: a str filepath to a .jpg file that will be used as\nthe Showcase Ad's expandedImage.\ncollapsed_image_filepath: a str filepath to a .jpg file that will be used as\nthe Showcase Ad's collapsedImage.\n\nReturns:\nThe created Showcase Ad as a sudsobject.", "source": "juraj-google-style"} {"code": "def _ReadLabels(self, artifact_definition_values, artifact_definition, name):\n labels = artifact_definition_values.get('labels', [])\n undefined_labels = set(labels).difference(self.labels)\n if undefined_labels:\n raise errors.FormatError('Artifact definition: {0:s} found undefined labels: {1:s}.'.format(name, ', '.join(undefined_labels)))\n artifact_definition.labels = labels", "docstring": "Reads the optional artifact definition labels.\n\nArgs:\nartifact_definition_values (dict[str, object]): artifact definition\nvalues.\nartifact_definition (ArtifactDefinition): an artifact definition.\nname (str): name of the artifact definition.\n\nRaises:\nFormatError: if there are undefined labels.", "source": "codesearchnet"} {"code": "def vals2bins(vals,res=100):\n \n \n if any(isinstance(el, list) for el in vals):\n vals = list(itertools.chain(*vals))\n return list(np.digitize(vals, np.linspace(np.min(vals), np.max(vals)+1, res+1)) - 1)", "docstring": "Maps values to bins\nArgs:\nvalues (list or list of lists) - list of values to map to colors\nres (int) - resolution of the color map (default: 100)\nReturns:\nlist of numbers representing bins", "source": "juraj-google-style"} {"code": "def If(cond, inputs, then_branch, else_branch, name=None):\n if isinstance(then_branch, function._DefinedFunction):\n tlist = [_.type for _ in then_branch.definition.signature.output_arg]\n return gen_functional_ops._if(cond, inputs, tlist, then_branch, else_branch, name=name)\n then_out = then_branch.structured_outputs\n else_out = else_branch.structured_outputs\n nest.assert_same_structure(then_out, else_out, expand_composites=True)\n tlist = nest.flatten(then_branch.output_dtypes)\n ret = gen_functional_ops._if(cond, inputs, tlist, then_branch, else_branch, name=name)\n return nest.pack_sequence_as(then_out, ret, expand_composites=True)", "docstring": "output = Cond(inputs) ?\n\nthen_branch(inputs) : else_branch(inputs).\n\nArgs:\ncond: A `Tensor`. A scalar. If the scalar is not a boolean, the scalar is\nconverted to a boolean according to the following rule: if the scalar is a\nnumerical value, non-zero means True and zero means False; if the scalar\nis a string, non-empty means True and empty means False.\ninputs: A list of input tensors.\nthen_branch: A function takes 'inputs' and returns a list of tensors, whose\ntypes are the same as what else_branch returns.\nelse_branch: A function takes 'inputs' and returns a list of tensors. 
whose\ntypes are the same as what then_branch returns.\nname: A name for the operation (optional).\n\nReturns:\nA list of tensors returned by either then_branch(inputs)\nor else_branch(inputs).", "source": "github-repos"} {"code": "def __str__(self):\n \n manu = self.manufacturer\n return '%s ' % (self.name, self.Core, manu)", "docstring": "Returns a string representation of this instance.\n\nArgs:\nself (JLinkDeviceInfo): the ``JLinkDeviceInfo`` instance\n\nReturns:\nReturns a string specifying the device name, core, and manufacturer.", "source": "juraj-google-style"} {"code": "def _detect_gce_environment():\n http = transport.get_http_object(timeout=GCE_METADATA_TIMEOUT)\n try:\n (response, _) = transport.request(http, _GCE_METADATA_URI, headers=_GCE_HEADERS)\n return ((response.status == http_client.OK) and (response.get(_METADATA_FLAVOR_HEADER) == _DESIRED_METADATA_FLAVOR))\n except socket.error:\n logger.info('Timeout attempting to reach GCE metadata service.')\n return False", "docstring": "Determine if the current environment is Compute Engine.\n\nReturns:\nBoolean indicating whether or not the current environment is Google\nCompute Engine.", "source": "codesearchnet"} {"code": "def require_attribute(self, attribute: str, typ: Type = _Any) -> None:\n \n attr_nodes = [\n value_node for key_node, value_node in self.yaml_node.value\n if key_node.value == attribute\n ]\n if len(attr_nodes) == 0:\n raise RecognitionError(\n ('{}{}Missing required attribute {}').format(\n self.yaml_node.start_mark, os.linesep, attribute))\n attr_node = attr_nodes[0]\n\n if typ != _Any:\n recognized_types, message = self.__recognizer.recognize(\n attr_node, cast(Type, typ))\n if len(recognized_types) == 0:\n raise RecognitionError(message)", "docstring": "Require an attribute on the node to exist.\n\nIf `typ` is given, the attribute must have this type.\n\nArgs:\nattribute: The name of the attribute / mapping key.\ntyp: The type the attribute must have.", "source": "juraj-google-style"} {"code": "def list_adb_devices():\n out = adb.AdbProxy().devices()\n return parse_device_list(out, 'device')", "docstring": "List all android devices connected to the computer that are detected by\nadb.\n\nReturns:\nA list of android device serials. 
Empty if there's none.", "source": "github-repos"} {"code": "def _prepare_lambada_data(tmp_dir, data_dir, vocab_size, vocab_filename):\n if (not tf.gfile.Exists(data_dir)):\n tf.gfile.MakeDirs(data_dir)\n file_path = generator_utils.maybe_download(tmp_dir, _TAR, _URL)\n tar_all = tarfile.open(file_path)\n tar_all.extractall(tmp_dir)\n tar_all.close()\n tar_train = tarfile.open(os.path.join(tmp_dir, 'train-novels.tar'))\n tar_train.extractall(tmp_dir)\n tar_train.close()\n vocab_path = os.path.join(data_dir, vocab_filename)\n if (not tf.gfile.Exists(vocab_path)):\n with tf.gfile.GFile(os.path.join(tmp_dir, _VOCAB), 'r') as infile:\n reader = csv.reader(infile, delimiter='\\t')\n words = [row[0] for row in reader]\n words = ([_UNK] + words[:vocab_size])\n with tf.gfile.GFile(vocab_path, 'w') as outfile:\n outfile.write('\\n'.join(words))", "docstring": "Downloading and preparing the dataset.\n\nArgs:\ntmp_dir: tem directory\ndata_dir: data directory\nvocab_size: size of vocabulary\nvocab_filename: name of vocab file", "source": "codesearchnet"} {"code": "def Bernoulli(cls, mean: 'TensorFluent', batch_size: Optional[int]=None) -> Tuple[(Distribution, 'TensorFluent')]:\n probs = mean.tensor\n dist = tf.distributions.Bernoulli(probs=probs, dtype=tf.bool)\n batch = mean.batch\n if ((not batch) and (batch_size is not None)):\n t = dist.sample(batch_size)\n batch = True\n else:\n t = dist.sample()\n scope = mean.scope.as_list()\n return (dist, TensorFluent(t, scope, batch=batch))", "docstring": "Returns a TensorFluent for the Bernoulli sampling op with given mean parameter.\n\nArgs:\nmean: The mean parameter of the Bernoulli distribution.\nbatch_size: The size of the batch (optional).\n\nReturns:\nThe Bernoulli distribution and a TensorFluent sample drawn from the distribution.", "source": "codesearchnet"} {"code": "def _map_across_full_axis_select_indices(self, axis, func, indices, keep_remaining=False):\n return self.data.apply_func_to_select_indices_along_full_axis(axis, func, indices, keep_remaining)", "docstring": "Maps function to select indices along full axis.\n\nArgs:\naxis: 0 for columns and 1 for rows.\nfunc: Callable mapping function over the BlockParitions.\nindices: indices along axis to map over.\nkeep_remaining: True if keep indices where function was not applied.\n\nReturns:\nBaseFrameManager containing the result of mapping func over axis on indices.", "source": "codesearchnet"} {"code": "def get_dataset(self, dataset_ref, retry=DEFAULT_RETRY):\n if isinstance(dataset_ref, str):\n dataset_ref = DatasetReference.from_string(dataset_ref, default_project=self.project)\n api_response = self._call_api(retry, method='GET', path=dataset_ref.path)\n return Dataset.from_api_repr(api_response)", "docstring": "Fetch the dataset referenced by ``dataset_ref``\n\nArgs:\ndataset_ref (Union[ \\\n:class:`~google.cloud.bigquery.dataset.DatasetReference`, \\\nstr, \\\n]):\nA reference to the dataset to fetch from the BigQuery API.\nIf a string is passed in, this method attempts to create a\ndataset reference from a string using\n:func:`~google.cloud.bigquery.dataset.DatasetReference.from_string`.\nretry (:class:`google.api_core.retry.Retry`):\n(Optional) How to retry the RPC.\n\nReturns:\ngoogle.cloud.bigquery.dataset.Dataset:\nA ``Dataset`` instance.", "source": "codesearchnet"} {"code": "def download(self, task, default_ext, timeout=5, max_retry=3, overwrite=False, **kwargs):\n file_url = task['file_url']\n task['success'] = False\n task['filename'] = None\n retry = max_retry\n if (not 
overwrite):\n with self.lock:\n self.fetched_num += 1\n filename = self.get_filename(task, default_ext)\n if self.storage.exists(filename):\n self.logger.info('skip downloading file %s', filename)\n return\n self.fetched_num -= 1\n while ((retry > 0) and (not self.signal.get('reach_max_num'))):\n try:\n response = self.session.get(file_url, timeout=timeout)\n except Exception as e:\n self.logger.error('Exception caught when downloading file %s, error: %s, remaining retry times: %d', file_url, e, (retry - 1))\n else:\n if self.reach_max_num():\n self.signal.set(reach_max_num=True)\n break\n elif (response.status_code != 200):\n self.logger.error('Response status code %d, file %s', response.status_code, file_url)\n break\n elif (not self.keep_file(task, response, **kwargs)):\n break\n with self.lock:\n self.fetched_num += 1\n filename = self.get_filename(task, default_ext)\n self.logger.info('image \n self.storage.write(filename, response.content)\n task['success'] = True\n task['filename'] = filename\n break\n finally:\n retry -= 1", "docstring": "Download the image and save it to the corresponding path.\n\nArgs:\ntask (dict): The task dict got from ``task_queue``.\ntimeout (int): Timeout of making requests for downloading images.\nmax_retry (int): the max retry times if the request fails.\n**kwargs: reserved arguments for overriding.", "source": "codesearchnet"} {"code": "def openResultsInBrowser(res):\n \n print(emphasis(\"\\n\\tOpening URIs in the default web browser...\"))\n\n urisToBrowser([\"https:\n \n time.sleep(2)\n\n uris = []\n for r in res:\n for att in r[\"attributes\"]:\n if att[\"type\"] == \"i3visio.uri\":\n uris.append(att[\"value\"])\n\n urisToBrowser(uris)", "docstring": "Method that collects the URI from a list of entities and opens them\n\nArgs:\n-----\nres: A list containing several i3visio entities.", "source": "juraj-google-style"} {"code": "def list_summaries(logdir):\n result = _SummaryFile()\n for dirpath, _, filenames in os.walk(logdir):\n for filename in filenames:\n if not filename.startswith('events.out.'):\n continue\n path = os.path.join(dirpath, filename)\n for event in _SummaryIterator(path):\n if event.graph_def:\n result.graph_defs.append(event.graph_def)\n if not event.summary:\n continue\n for value in event.summary.value:\n tag = value.tag\n kind = value.WhichOneof('value')\n container = {'simple_value': result.scalars, 'image': result.images, 'histo': result.histograms, 'tensor': result.tensors}.get(kind)\n if container is None:\n raise ValueError('Unexpected summary kind %r in event file %s:\\n%r' % (kind, path, event))\n elif kind == 'tensor' and tag != 'keras':\n plugin_name = value.metadata.plugin_data.plugin_name\n container = {'images': result.images, 'histograms': result.histograms, 'scalars': result.scalars}.get(plugin_name)\n if container is not None:\n result.convert_from_v2_summary_proto = True\n else:\n container = result.tensors\n container.add(_ObservedSummary(logdir=dirpath, tag=tag))\n return result", "docstring": "Read all summaries under the logdir into a `_SummaryFile`.\n\nArgs:\nlogdir: A path to a directory that contains zero or more event\nfiles, either as direct children or in transitive subdirectories.\nSummaries in these events must only contain old-style scalars,\nimages, and histograms. 
Non-summary events, like `graph_def`s, are\nignored.\n\nReturns:\nA `_SummaryFile` object reflecting all summaries written to any\nevent files in the logdir or any of its descendant directories.\n\nRaises:\nValueError: If an event file contains an summary of unexpected kind.", "source": "github-repos"} {"code": "def _clone_functional_model(model, clone_function, input_tensors=None, call_function=None):\n if not callable(clone_function):\n raise ValueError(f'Expected `clone_function` argument to be a callable. Received: clone_function={clone_function}')\n if not isinstance(model, Functional):\n raise ValueError(f'Expected `model` argument to be a Functional Model instance. Received: model={model}')\n if input_tensors is not None:\n if not all((isinstance(x, backend.KerasTensor) for x in tree.flatten(input_tensors))):\n raise ValueError(f'All entries in `input_tensors` must be KerasTensors. Received invalid values: inputs_tensors={input_tensors}')\n try:\n tree.assert_same_structure(input_tensors, model.input)\n except ValueError as e:\n raise ValueError(f'`input_tensors` must have the same structure as model.input\\nReference structure: {model.input}\\nReceived structure: {input_tensors}') from e\n else:\n input_tensors = tree.map_structure(lambda x: Input(batch_shape=x.shape, dtype=x.dtype, name=x.name), model.input)\n\n def operation_fn(layer):\n new_layer = clone_function(layer)\n return new_layer\n output_tensors = model._run_through_graph(input_tensors, operation_fn=operation_fn, call_fn=call_function)\n if functional_like_constructor(model.__class__):\n new_model = model.__class__(input_tensors, output_tensors, name=model.name)\n else:\n new_model = Functional(input_tensors, output_tensors, name=model.name)\n if model.compiled:\n compiled_config = model.get_compile_config()\n new_model.compile_from_config(compiled_config)\n return new_model", "docstring": "Clone a `Functional` model instance.\n\nModel cloning is similar to calling a model on new inputs,\nexcept that it creates new layers (and thus new weights) instead\nof sharing the weights of the existing layers.\n\nInput layers are always cloned.\n\nArgs:\nmodel: Instance of `Functional`.\ninput_tensors: optional list of input tensors\nto build the model upon. 
If not provided,\nplaceholders will be created.\nclone_function: callable to be applied on non-input layers in the model.\nBy default, it clones the layer (without copying the weights).\n\nReturns:\nAn instance of `Functional` reproducing the behavior\nof the original model, on top of new inputs tensors,\nusing newly instantiated weights.", "source": "github-repos"} {"code": "def _make_token_async(scopes, service_account_id):\n \n rpc = app_identity.create_rpc()\n app_identity.make_get_access_token_call(rpc, scopes, service_account_id)\n token, expires_at = yield rpc\n raise ndb.Return((token, expires_at))", "docstring": "Get a fresh authentication token.\n\nArgs:\nscopes: A list of scopes.\nservice_account_id: Internal-use only.\n\nRaises:\nAn ndb.Return with a tuple (token, expiration_time) where expiration_time is\nseconds since the epoch.", "source": "juraj-google-style"} {"code": "def listdir(path='.'):\n \n return [name.rstrip('/') for name, _ in\n get_instance(path).list_objects(path, first_level=True)]", "docstring": "Return a list containing the names of the entries in the directory given by\npath.\n\nEquivalent to \"os.listdir\".\n\nArgs:\npath (path-like object): Path or URL.\n\nReturns:\nlist of str: Entries names.", "source": "juraj-google-style"} {"code": "def render_latex(latex: str) -> PIL.Image:\n tmpfilename = 'circ'\n with tempfile.TemporaryDirectory() as tmpdirname:\n tmppath = os.path.join(tmpdirname, tmpfilename)\n with open((tmppath + '.tex'), 'w') as latex_file:\n latex_file.write(latex)\n subprocess.run(['pdflatex', '-halt-on-error', '-output-directory={}'.format(tmpdirname), '{}'.format((tmpfilename + '.tex'))], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, check=True)\n subprocess.run(['pdftocairo', '-singlefile', '-png', '-q', (tmppath + '.pdf'), tmppath])\n img = PIL.Image.open((tmppath + '.png'))\n return img", "docstring": "Convert a single page LaTeX document into an image.\n\nTo display the returned image, `img.show()`\n\n\nRequired external dependencies: `pdflatex` (with `qcircuit` package),\nand `poppler` (for `pdftocairo`).\n\nArgs:\nA LaTeX document as a string.\n\nReturns:\nA PIL Image\n\nRaises:\nOSError: If an external dependency is not installed.", "source": "codesearchnet"} {"code": "def get_propagate_status(self, token, channel):\n \n url = self.url('sd/{}/{}/getPropagate/'.format(token, channel))\n req = self.remote_utils.get_url(url)\n if req.status_code is not 200:\n raise ValueError('Bad pair: {}/{}'.format(token, channel))\n return req.text", "docstring": "Get the propagate status for a token/channel pair.\n\nArguments:\ntoken (str): The token to check\nchannel (str): The channel to check\n\nReturns:\nstr: The status code", "source": "juraj-google-style"} {"code": "def get_dataset(self):\n raise NotImplementedError", "docstring": "Get a dataset instance for the current DataAdapter.\n\nNote that the dataset returned does not repeat for epoch, so caller might\nneed to create new iterator for the same dataset at the beginning of the\nepoch. This behavior might change in future.\n\nReturns:\nAn tf.dataset.Dataset. 
Caller might use the dataset in different\ncontext, eg iter(dataset) in eager to get the value directly, or in graph\nmode, provide the iterator tensor to Keras model function.", "source": "github-repos"} {"code": "def infer_transportation_mode(self, clf, min_time):\n \n self.transportation_modes = speed_clustering(clf, self.points, min_time)\n return self", "docstring": "In-place transportation mode inferring\n\nSee infer_transportation_mode function\n\nArgs:\nReturns:\n:obj:`Segment`: self", "source": "juraj-google-style"} {"code": "def parse_radl(data):\n \n\n if data is None:\n return None\n elif os.path.isfile(data):\n f = open(data)\n data = \"\".join(f.readlines())\n f.close()\n elif data.strip() == \"\":\n return RADL()\n data = data + \"\\n\"\n\n parser = RADLParser(lextab='radl')\n return parser.parse(data)", "docstring": "Parse a RADL document.\n\nArgs:\n- data(str): filepath to a RADL content or a string with content.\n\nReturn: RADL object.", "source": "juraj-google-style"} {"code": "def nx_gen_edge_values(G, key, edges=None, default=util_const.NoParam, on_missing='error', on_keyerr='default'):\n if (edges is None):\n edges = G.edges()\n if (on_missing is None):\n on_missing = 'error'\n if (on_keyerr is None):\n on_keyerr = 'default'\n if ((default is util_const.NoParam) and (on_keyerr == 'default')):\n on_keyerr = 'error'\n if (on_missing == 'error'):\n data_iter = (G.adj[u][v] for (u, v) in edges)\n elif (on_missing == 'default'):\n data_iter = ((G.adj[u][v] if G.has_edge(u, v) else {}) for (u, v) in edges)\n else:\n raise KeyError('on_missing={} must be error, filter or default'.format(on_missing))\n if (on_keyerr == 'error'):\n value_iter = (d[key] for d in data_iter)\n elif (on_keyerr == 'default'):\n value_iter = (d.get(key, default) for d in data_iter)\n else:\n raise KeyError('on_keyerr={} must be error or default'.format(on_keyerr))\n return value_iter", "docstring": "Generates attributes values of specific edges\n\nArgs:\non_missing (str): Strategy for handling nodes missing from G.\nCan be {'error', 'default'}. defaults to 'error'.\non_keyerr (str): Strategy for handling keys missing from node dicts.\nCan be {'error', 'default'}. 
defaults to 'default'\nif default is specified, otherwise defaults to 'error'.", "source": "codesearchnet"} {"code": "def _flush(self, buffer):\n \n with _handle_oss_error():\n self._bucket.put_object(key=self._key, data=buffer.tobytes())", "docstring": "Flush the write buffers of the stream if applicable.\n\nArgs:\nbuffer (memoryview): Buffer content.", "source": "juraj-google-style"} {"code": "def get_gates(self, x):\n \n\n \n x = tf.stop_gradient(x)\n \n x = tf.matmul(x, self.t_vectors)\n \n x = tf.sign(x) \n\n \n \n\n x = tf.matmul(x, self.t_group, transpose_b=True) / self.nb_hyperplanes\n \n \n \n x = tf.argmax(x, axis=-1)\n \n \n x = tf.one_hot(x, self.nb_buckets)\n \n return x", "docstring": "Return the bucket id of the given tensor.\n\nArgs:\nx (tf.Tensor): float32 of shape [length, depth]\n\nReturns:\ntf.Tensor: One-hot vector int64 of shape [heads, length, nb_buckets]\ncontaining the id of the bucket", "source": "juraj-google-style"} {"code": "def currentSelected(self):\n if self.commaRadioButton.isChecked():\n return ','\n elif self.semicolonRadioButton.isChecked():\n return ';'\n elif self.tabRadioButton.isChecked():\n return '\\t'\n elif self.otherRadioButton.isChecked():\n return self.otherSeparatorLineEdit.text()\n return", "docstring": "Returns the currently selected delimiter character.\n\nReturns:\nstr: One of `,`, `;`, `\\t`, `*other*`.", "source": "codesearchnet"} {"code": "def _player_step_tuple(self, envs_step_tuples):\n \n ob_real, reward_real, _, _ = envs_step_tuples[\"real_env\"]\n ob_sim, reward_sim, _, _ = envs_step_tuples[\"sim_env\"]\n ob_err = absolute_hinge_difference(ob_sim, ob_real)\n\n ob_real_aug = self._augment_observation(ob_real, reward_real,\n self.cumulative_real_reward)\n ob_sim_aug = self._augment_observation(ob_sim, reward_sim,\n self.cumulative_sim_reward)\n ob_err_aug = self._augment_observation(\n ob_err, reward_sim - reward_real,\n self.cumulative_sim_reward - self.cumulative_real_reward\n )\n ob = np.concatenate([ob_sim_aug, ob_real_aug, ob_err_aug], axis=1)\n _, reward, done, info = envs_step_tuples[\"real_env\"]\n return ob, reward, done, info", "docstring": "Construct observation, return usual step tuple.\n\nArgs:\nenvs_step_tuples: tuples.\n\nReturns:\nStep tuple: ob, reward, done, info\nob: concatenated images [simulated observation, real observation,\ndifference], with additional informations in header.\nreward: real environment reward\ndone: True iff. 
envs_step_tuples['real_env'][2] is True\ninfo: real environment info", "source": "juraj-google-style"} {"code": "def _remove_trailing_new_line(l):\n for n in sorted(new_lines_bytes, key=(lambda x: len(x)), reverse=True):\n if l.endswith(n):\n remove_new_line = slice(None, (- len(n)))\n return l[remove_new_line]\n return l", "docstring": "Remove a single instance of new line at the end of l if it exists.\n\nReturns:\nbytestring", "source": "codesearchnet"} {"code": "def _check_tf1_flags(flags, unparsed):\n\n def _get_message_unparsed(flag, orig_flag, new_flag):\n if flag.startswith(orig_flag):\n return '\\n Use {0} instead of {1}'.format(new_flag, orig_flag)\n return ''\n if unparsed:\n output = ''\n for flag in unparsed:\n output += _get_message_unparsed(flag, '--input_file', '--graph_def_file')\n output += _get_message_unparsed(flag, '--savedmodel_directory', '--saved_model_dir')\n output += _get_message_unparsed(flag, '--std_value', '--std_dev_values')\n output += _get_message_unparsed(flag, '--batch_size', '--input_shapes')\n output += _get_message_unparsed(flag, '--dump_graphviz', '--dump_graphviz_dir')\n if output:\n raise ValueError(output)\n if flags.graph_def_file and (not flags.input_arrays or not flags.output_arrays):\n raise ValueError('--input_arrays and --output_arrays are required with --graph_def_file')\n if flags.input_shapes:\n if not flags.input_arrays:\n raise ValueError('--input_shapes must be used with --input_arrays')\n if flags.input_shapes.count(':') != flags.input_arrays.count(','):\n raise ValueError('--input_shapes and --input_arrays must have the same number of items')\n if flags.std_dev_values or flags.mean_values:\n if bool(flags.std_dev_values) != bool(flags.mean_values):\n raise ValueError('--std_dev_values and --mean_values must be used together')\n if flags.std_dev_values.count(',') != flags.mean_values.count(','):\n raise ValueError('--std_dev_values, --mean_values must have the same number of items')\n if (flags.default_ranges_min is None) != (flags.default_ranges_max is None):\n raise ValueError('--default_ranges_min and --default_ranges_max must be used together')\n if flags.dump_graphviz_video and (not flags.dump_graphviz_dir):\n raise ValueError('--dump_graphviz_video must be used with --dump_graphviz_dir')\n if flags.custom_opdefs and (not flags.experimental_new_converter):\n raise ValueError('--custom_opdefs must be used with --experimental_new_converter')\n if flags.custom_opdefs and (not flags.allow_custom_ops):\n raise ValueError('--custom_opdefs must be used with --allow_custom_ops')\n if flags.experimental_select_user_tf_ops and (not flags.experimental_new_converter):\n raise ValueError('--experimental_select_user_tf_ops must be used with --experimental_new_converter')", "docstring": "Checks the parsed and unparsed flags to ensure they are valid in 1.X.\n\nRaises an error if previously support unparsed flags are found. 
Raises an\nerror for parsed flags that don't meet the required conditions.\n\nArgs:\nflags: argparse.Namespace object containing TFLite flags.\nunparsed: List of unparsed flags.\n\nRaises:\nValueError: Invalid flags.", "source": "github-repos"} {"code": "def reserve(self, *args, **kwargs):\n \n data = self.get_data('floating_ips/',\n type=POST,\n params={'region': self.region_slug})\n\n if data:\n self.ip = data['floating_ip']['ip']\n self.region = data['floating_ip']['region']\n\n return self", "docstring": "Creates a FloatingIP in a region without assigning\nit to a specific Droplet.\n\nNote: Every argument and parameter given to this method will be\nassigned to the object.\n\nArgs:\nregion_slug: str - region's slug (e.g. 'nyc3')", "source": "juraj-google-style"} {"code": "def __init__(self, filename, f_start=None, f_stop=None, t_start=None, t_stop=None, load_data=True, max_load=1.):\n \n super(H5Reader, self).__init__()\n\n if filename and os.path.isfile(filename) and h5py.is_hdf5(filename):\n\n \n self.freq_axis = 2\n self.time_axis = 0\n self.beam_axis = 1 \n self.stokes_axis = 4 \n\n self.filename = filename\n self.filestat = os.stat(filename)\n self.filesize = self.filestat.st_size/(1024.0**2)\n self.load_data = load_data\n self.h5 = h5py.File(self.filename)\n self.read_header()\n self.file_size_bytes = os.path.getsize(self.filename) \n self.n_ints_in_file = self.h5[\"data\"].shape[self.time_axis] \n self.n_channels_in_file = self.h5[\"data\"].shape[self.freq_axis] \n self.n_beams_in_file = self.header[b'nifs'] \n self.n_pols_in_file = 1 \n self._n_bytes = int(self.header[b'nbits'] / 8) \n self._d_type = self._setup_dtype()\n self.file_shape = (self.n_ints_in_file,self.n_beams_in_file,self.n_channels_in_file)\n\n if self.header[b'foff'] < 0:\n self.f_end = self.header[b'fch1']\n self.f_begin = self.f_end + self.n_channels_in_file*self.header[b'foff']\n else:\n self.f_begin = self.header[b'fch1']\n self.f_end = self.f_begin + self.n_channels_in_file*self.header[b'foff']\n\n self.t_begin = 0\n self.t_end = self.n_ints_in_file\n\n \n self._setup_selection_range(f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop, init=True)\n \n self._setup_chans()\n \n self._setup_freqs()\n\n \n if max_load is not None:\n if max_load > 1.0:\n logger.warning('Setting data limit > 1GB, please handle with care!')\n self.MAX_DATA_ARRAY_SIZE = max_load * MAX_DATA_ARRAY_SIZE_UNIT\n else:\n self.MAX_DATA_ARRAY_SIZE = MAX_DATA_ARRAY_SIZE_UNIT\n\n if self.file_size_bytes > self.MAX_DATA_ARRAY_SIZE:\n self.large_file = True\n else:\n self.large_file = False\n\n if self.load_data:\n if self.large_file:\n \n if self.f_start or self.f_stop or self.t_start or self.t_stop:\n if self.isheavy():\n logger.warning(\"Selection size of %.2f GB, exceeding our size limit %.2f GB. Instance created, header loaded, but data not loaded, please try another (t,v) selection.\" % (self._calc_selection_size() / (1024. ** 3), self.MAX_DATA_ARRAY_SIZE / (1024. ** 3)))\n self._init_empty_selection()\n else:\n self.read_data()\n else:\n logger.warning(\"The file is of size %.2f GB, exceeding our size limit %.2f GB. Instance created, header loaded, but data not loaded. 
You could try another (t,v) selection.\"%(self.file_size_bytes/(1024.**3), self.MAX_DATA_ARRAY_SIZE/(1024.**3)))\n self._init_empty_selection()\n else:\n self.read_data()\n else:\n logger.info(\"Skipping loading data ...\")\n self._init_empty_selection()\n else:\n raise IOError(\"Need a file to open, please give me one!\")", "docstring": "Constructor.\n\nArgs:\nfilename (str): filename of blimpy file.\nf_start (float): start frequency, in MHz\nf_stop (float): stop frequency, in MHz\nt_start (int): start time bin\nt_stop (int): stop time bin", "source": "juraj-google-style"} {"code": "def git_branch_rename(new_name):\n curr_name = git.current_branch(refresh=True).name\n if (curr_name not in git.protected_branches()):\n log.info('Renaming branch from <33>{}<32> to <33>{}'.format(curr_name, new_name))\n shell.run('git branch -m {}'.format(new_name))", "docstring": "Rename the current branch\n\nArgs:\nnew_name (str):\nNew name for the current branch.", "source": "codesearchnet"} {"code": "def _format_field_value(self, field_name) -> str:\n field_name = self._normalize_field_name(field_name)\n field = self._get_model_field(field_name)\n return SQLInsertCompiler.prepare_value(self, field, getattr(self.query.objs[0], field.attname))", "docstring": "Formats a field's value for usage in SQL.\n\nArguments:\nfield_name:\nThe name of the field to format\nthe value of.\n\nReturns:\nThe field's value formatted for usage\nin SQL.", "source": "codesearchnet"} {"code": "def _merge_hdx_update(self, object_type, id_field_name, file_to_upload=None, **kwargs):\n \n \n merge_two_dictionaries(self.data, self.old_data)\n if 'batch_mode' in kwargs: \n self.data['batch_mode'] = kwargs['batch_mode']\n if 'skip_validation' in kwargs: \n self.data['skip_validation'] = kwargs['skip_validation']\n ignore_field = self.configuration['%s' % object_type].get('ignore_on_update')\n self.check_required_fields(ignore_fields=[ignore_field])\n operation = kwargs.get('operation', 'update')\n self._save_to_hdx(operation, id_field_name, file_to_upload)", "docstring": "Helper method to check if HDX object exists and update it\n\nArgs:\nobject_type (str): Description of HDX object type (for messages)\nid_field_name (str): Name of field containing HDX object identifier\nfile_to_upload (Optional[str]): File to upload to HDX\n**kwargs: See below\noperation (string): Operation to perform eg. patch. Defaults to update.\n\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def html_serialize(self, attributes, max_length=None):\n \n doc = ET.Element('span')\n for chunk in self:\n if (chunk.has_cjk() and\n not (max_length and len(chunk.word) > max_length)):\n ele = ET.Element('span')\n ele.text = chunk.word\n for key, val in attributes.items():\n ele.attrib[key] = val\n doc.append(ele)\n else:\n \n \n if doc.getchildren():\n if doc.getchildren()[-1].tail is None:\n doc.getchildren()[-1].tail = chunk.word\n else:\n doc.getchildren()[-1].tail += chunk.word\n else:\n if doc.text is None:\n doc.text = chunk.word\n else:\n doc.text += chunk.word\n result = ET.tostring(doc, encoding='utf-8').decode('utf-8')\n result = html5lib.serialize(\n html5lib.parseFragment(result), sanitize=True,\n quote_attr_values='always')\n return result", "docstring": "Returns concatenated HTML code with SPAN tag.\n\nArgs:\nattributes (dict): A map of name-value pairs for attributes of output\nSPAN tags.\nmax_length (:obj:`int`, optional): Maximum length of span enclosed chunk.\n\nReturns:\nThe organized HTML code. 
(str)", "source": "juraj-google-style"} {"code": "async def send_files_preconf(filepaths, config_path=CONFIG_PATH):\n config = read_config(config_path)\n subject = 'PDF files from pdfebc'\n message = ''\n (await send_with_attachments(subject, message, filepaths, config))", "docstring": "Send files using the config.ini settings.\n\nArgs:\nfilepaths (list(str)): A list of filepaths.", "source": "codesearchnet"} {"code": "def extend_args(function_signature, args, kwargs):\n arg_names = function_signature.arg_names\n arg_defaults = function_signature.arg_defaults\n arg_is_positionals = function_signature.arg_is_positionals\n keyword_names = function_signature.keyword_names\n function_name = function_signature.function_name\n args = list(args)\n for keyword_name in kwargs:\n if (keyword_name not in keyword_names):\n raise Exception(\"The name '{}' is not a valid keyword argument for the function '{}'.\".format(keyword_name, function_name))\n for skipped_name in arg_names[0:len(args)]:\n if (skipped_name in kwargs):\n raise Exception(\"Positional and keyword value provided for the argument '{}' for the function '{}'\".format(keyword_name, function_name))\n zipped_info = zip(arg_names, arg_defaults, arg_is_positionals)\n zipped_info = list(zipped_info)[len(args):]\n for (keyword_name, default_value, is_positional) in zipped_info:\n if (keyword_name in kwargs):\n args.append(kwargs[keyword_name])\n elif (default_value != funcsigs._empty):\n args.append(default_value)\n elif (not is_positional):\n raise Exception(\"No value was provided for the argument '{}' for the function '{}'.\".format(keyword_name, function_name))\n no_positionals = ((len(arg_is_positionals) == 0) or (not arg_is_positionals[(- 1)]))\n too_many_arguments = ((len(args) > len(arg_names)) and no_positionals)\n if too_many_arguments:\n raise Exception(\"Too many arguments were passed to the function '{}'\".format(function_name))\n return args", "docstring": "Extend the arguments that were passed into a function.\n\nThis extends the arguments that were passed into a function with the\ndefault arguments provided in the function definition.\n\nArgs:\nfunction_signature: The function signature of the function being\ncalled.\nargs: The non-keyword arguments passed into the function.\nkwargs: The keyword arguments passed into the function.\n\nReturns:\nAn extended list of arguments to pass into the function.\n\nRaises:\nException: An exception may be raised if the function cannot be called\nwith these arguments.", "source": "codesearchnet"} {"code": "def decode(self, token_ids: Union[int, List[int], 'np.ndarray', 'torch.Tensor', 'tf.Tensor'], skip_special_tokens: bool=False, clean_up_tokenization_spaces: Optional[bool]=None, **kwargs) -> str:\n token_ids = to_py_obj(token_ids)\n return self._decode(token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)", "docstring": "Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special\ntokens and clean up tokenization spaces.\n\nSimilar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.\n\nArgs:\ntoken_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):\nList of tokenized input ids. 
Can be obtained using the `__call__` method.\nskip_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not to remove special tokens in the decoding.\nclean_up_tokenization_spaces (`bool`, *optional*):\nWhether or not to clean up the tokenization spaces. If `None`, will default to\n`self.clean_up_tokenization_spaces`.\nkwargs (additional keyword arguments, *optional*):\nWill be passed to the underlying model specific decode method.\n\nReturns:\n`str`: The decoded sentence.", "source": "github-repos"} {"code": "def create(self, *args, **kwargs):\n data = self.get_data('floating_ips/', type=POST, params={'droplet_id': self.droplet_id})\n if data:\n self.ip = data['floating_ip']['ip']\n self.region = data['floating_ip']['region']\n return self", "docstring": "Creates a FloatingIP and assigns it to a Droplet.\n\nNote: Every argument and parameter given to this method will be\nassigned to the object.\n\nArgs:\ndroplet_id: int - droplet id", "source": "codesearchnet"} {"code": "def fit(self, sents, **kwargs):\n \n tokens = list(itertools.chain.from_iterable(sents))\n counter = Counter(tokens)\n self.vocab = self.build_vocab(counter, **kwargs)", "docstring": "Builds a vocabulary object based on the tokens in the input.\n\nArgs:\nsents: A list of lists of tokens (representing sentences)\n\nVocab kwargs include:\nmax_size\nmin_freq\nspecials\nunk_init", "source": "juraj-google-style"} {"code": "def _MergeScalarField(self, tokenizer, message, field):\n \n _ = self.allow_unknown_extension\n value = None\n\n if field.type in (descriptor.FieldDescriptor.TYPE_INT32,\n descriptor.FieldDescriptor.TYPE_SINT32,\n descriptor.FieldDescriptor.TYPE_SFIXED32):\n value = _ConsumeInt32(tokenizer)\n elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,\n descriptor.FieldDescriptor.TYPE_SINT64,\n descriptor.FieldDescriptor.TYPE_SFIXED64):\n value = _ConsumeInt64(tokenizer)\n elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,\n descriptor.FieldDescriptor.TYPE_FIXED32):\n value = _ConsumeUint32(tokenizer)\n elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,\n descriptor.FieldDescriptor.TYPE_FIXED64):\n value = _ConsumeUint64(tokenizer)\n elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,\n descriptor.FieldDescriptor.TYPE_DOUBLE):\n value = tokenizer.ConsumeFloat()\n elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:\n value = tokenizer.ConsumeBool()\n elif field.type == descriptor.FieldDescriptor.TYPE_STRING:\n value = tokenizer.ConsumeString()\n elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:\n value = tokenizer.ConsumeByteString()\n elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:\n value = tokenizer.ConsumeEnum(field)\n else:\n raise RuntimeError('Unknown field type %d' % field.type)\n\n if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n if field.is_extension:\n message.Extensions[field].append(value)\n else:\n getattr(message, field.name).append(value)\n else:\n if field.is_extension:\n if not self._allow_multiple_scalars and message.HasExtension(field):\n raise tokenizer.ParseErrorPreviousToken(\n 'Message type \"%s\" should not have multiple \"%s\" extensions.' %\n (message.DESCRIPTOR.full_name, field.full_name))\n else:\n message.Extensions[field] = value\n else:\n if not self._allow_multiple_scalars and message.HasField(field.name):\n raise tokenizer.ParseErrorPreviousToken(\n 'Message type \"%s\" should not have multiple \"%s\" fields.' 
%\n (message.DESCRIPTOR.full_name, field.name))\n else:\n setattr(message, field.name, value)", "docstring": "Merges a single scalar field into a message.\n\nArgs:\ntokenizer: A tokenizer to parse the field value.\nmessage: A protocol message to record the data.\nfield: The descriptor of the field to be merged.\n\nRaises:\nParseError: In case of text parsing problems.\nRuntimeError: On runtime errors.", "source": "juraj-google-style"} {"code": "def GetPresetsForOperatingSystem(cls, operating_system, operating_system_product, operating_system_version):\n operating_system = artifacts.OperatingSystemArtifact(family=operating_system, product=operating_system_product, version=operating_system_version)\n return cls._presets.GetPresetsByOperatingSystem(operating_system)", "docstring": "Determines the presets for a specific operating system.\n\nArgs:\noperating_system (str): operating system for example \"Windows\". This\nshould be one of the values in definitions.OPERATING_SYSTEM_FAMILIES.\noperating_system_product (str): operating system product for\nexample \"Windows XP\" as determined by preprocessing.\noperating_system_version (str): operating system version for\nexample \"5.1\" as determined by preprocessing.\n\nReturns:\nlist[PresetDefinition]: preset definitions, where an empty list\nrepresents all parsers and parser plugins (no preset).", "source": "codesearchnet"} {"code": "def predict(self, X_feat, X_seq):\n \n\n \n X_seq = np.expand_dims(X_seq, axis=1)\n\n return self._get_other_var(X_feat, X_seq, variable=\"y_pred\")", "docstring": "Predict the response variable :py:attr:`y` for new input data (:py:attr:`X_feat`, :py:attr:`X_seq`).\n\nArgs:\nX_feat: Feature design matrix. Same format as :py:attr:`X_feat` in :py:meth:`train`\nX_seq: Sequence design matrix. 
Same format as :py:attr:`X_seq` in :py:meth:`train`", "source": "juraj-google-style"} {"code": "def select_if(df, fun):\n \n\n def _filter_f(col):\n try:\n return fun(df[col])\n except:\n return False\n\n cols = list(filter(_filter_f, df.columns))\n return df[cols]", "docstring": "Selects columns where fun(ction) is true\nArgs:\nfun: a function that will be applied to columns", "source": "juraj-google-style"} {"code": "def validate(self, handler):\n \n\n \n test_method = self.plugin_test_validation(handler)\n if not test_method:\n return None\n\n \n \n for name, plugin_class in inspect.getmembers(handler, inspect.isclass):\n if self.plugin_class_validation(plugin_class):\n return {'class':plugin_class, 'test':test_method}\n\n \n print 'Failure for plugin: %s' % (handler.__name__)\n print 'Validation Error: Worker class is required to have a dependencies list and an execute method'\n return None", "docstring": "Validate the plugin, each plugin must have the following:\n1) The worker class must have an execute method: execute(self, input_data).\n2) The worker class must have a dependencies list (even if it's empty).\n3) The file must have a top level test() method.\n\nArgs:\nhandler: the loaded plugin.", "source": "juraj-google-style"} {"code": "def FlagCxx14Features(filename, clean_lines, linenum, error):\n \n line = clean_lines.elided[linenum]\n\n include = Match(r'\\s*\n\n \n if include and include.group(1) in ('scoped_allocator', 'shared_mutex'):\n error(filename, linenum, 'build/c++14', 5,\n ('<%s> is an unapproved C++14 header.') % include.group(1))", "docstring": "Flag those C++14 features that we restrict.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "juraj-google-style"} {"code": "def apply(self, inputs, *args, **kwargs):\n warnings.warn('`layer.apply` is deprecated and will be removed in a future version. 
Please use `layer.__call__` method instead.')\n return self.__call__(inputs, *args, **kwargs)", "docstring": "Deprecated, do NOT use!\n\nThis is an alias of `self.__call__`.\n\nArgs:\ninputs: Input tensor(s).\n*args: additional positional arguments to be passed to `self.call`.\n**kwargs: additional keyword arguments to be passed to `self.call`.\n\nReturns:\nOutput tensor(s).", "source": "github-repos"} {"code": "def register_filters(self, filters, force=False):\n \n for filter_name, filter_ref in filters.items():\n self.register_filter(filter_name, filter_ref, force)", "docstring": "Add/register filters.\n\nArgs:\nfilters (dict): Dictionary of Python functions to use as :program:`Jinja2` filters.\nforce (bool): If set to ``True``, forces the registration of a filter no matter if it already exists or not.", "source": "juraj-google-style"} {"code": "def from_object(cls, obj):\n return cls(obj.get('sessionId', None), obj.get('status', 0), obj.get('value', None))", "docstring": "The factory method to create WebDriverResult from JSON Object.\n\nArgs:\nobj(dict): The JSON Object returned by server.", "source": "codesearchnet"} {"code": "def create_server(self, server_name, *args, **kwargs):\n \n server = ServerConnection(name=server_name, reactor=self)\n\n if args or kwargs:\n server.set_connect_info(*args, **kwargs)\n\n \n for verb, infos in self._event_handlers.items():\n for info in infos:\n server.register_event(info['direction'], verb, info['handler'],\n priority=info['priority'])\n\n self.servers[server_name] = server\n\n return server", "docstring": "Create an IRC server connection slot.\n\nThe server will actually be connected to when\n:meth:`girc.client.ServerConnection.connect` is called later.\n\nArgs:\nserver_name (str): Name of the server, to be used for functions and accessing the\nserver later through the reactor.\n\nReturns:\nserver (girc.client.ServerConnection): A not-yet-connected server.", "source": "juraj-google-style"} {"code": "def list_marts(self):\n\n def _row_gen(attributes):\n for attr in attributes.values():\n (yield (attr.name, attr.display_name))\n return pd.DataFrame.from_records(_row_gen(self.marts), columns=['name', 'display_name'])", "docstring": "Lists available marts in a readable DataFrame format.\n\nReturns:\npd.DataFrame: Frame listing available marts.", "source": "codesearchnet"} {"code": "def print_table(col_tuple, row_tuples):\n \n col_widths = [max(len(str(row[col])) for row in [col_tuple] + row_tuples)\n for col in range(len(col_tuple))]\n format_str = ' '.join('{{:<{}}}'.format(col_width)\n for col_width in col_widths)\n header_border = ' '.join('=' * col_width for col_width in col_widths)\n print(header_border)\n print(format_str.format(*col_tuple))\n print(header_border)\n for row_tuple in row_tuples:\n print(format_str.format(*row_tuple))\n print(header_border)\n print()", "docstring": "Print column headers and rows as a reStructuredText table.\n\nArgs:\ncol_tuple: Tuple of column name strings.\nrow_tuples: List of tuples containing row data.", "source": "juraj-google-style"} {"code": "def _log_every_n_to_logger(n, logger, level, message, *args): \n \n logger = logger or logging.getLogger()\n def _gen(): \n while True:\n for _ in range(n):\n yield False\n logger.log(level, message, *args)\n yield True\n gen = _gen()\n return lambda: six.next(gen)", "docstring": "Logs the given message every n calls to a logger.\n\nArgs:\nn: Number of calls before logging.\nlogger: The logger to which to log.\nlevel: The logging level (e.g. 
logging.INFO).\nmessage: A message to log\n*args: Any format args for the message.\nReturns:\nA method that logs and returns True every n calls.", "source": "juraj-google-style"} {"code": "def remove_regex(urls, regex):\n if (not regex):\n return urls\n if (not isinstance(urls, (list, set, tuple))):\n urls = [urls]\n try:\n non_matching_urls = [url for url in urls if (not re.search(regex, url))]\n except TypeError:\n return []\n return non_matching_urls", "docstring": "Parse a list for non-matches to a regex.\n\nArgs:\nurls: iterable of urls\nregex: string regex to be parsed for\n\nReturns:\nlist of strings not matching regex", "source": "codesearchnet"} {"code": "def is_leap_year(years):\n years = tf.convert_to_tensor(years, tf.int32)\n\n def divides_by(n):\n return tf.math.equal(years % n, 0)\n return tf.math.logical_and(divides_by(4), tf.math.logical_or(~divides_by(100), divides_by(400)))", "docstring": "Calculates whether years are leap years.\n\nArgs:\nyears: Tensor of int32 type. Elements should be positive.\n\nReturns:\nTensor of bool type.", "source": "github-repos"} {"code": "def _finish_operation_action(self, action):\n success = action.data['success']\n conn_key = action.data['id']\n if (self._get_connection_state(conn_key) != self.InProgress):\n self._logger.error('Invalid finish_operation action on a connection whose state is not InProgress, conn_key=%s', str(conn_key))\n return\n data = self._get_connection(conn_key)\n callback = data['callback']\n conn_id = data['conn_id']\n args = action.data['callback_args']\n data['state'] = self.Idle\n data['microstate'] = None\n callback(conn_id, self.id, success, *args)", "docstring": "Finish an attempted operation.\n\nArgs:\naction (ConnectionAction): the action object describing the result\nof the operation that we are finishing", "source": "codesearchnet"} {"code": "def markers(self, values):\n if (not isinstance(values, list)):\n raise TypeError('Markers must be a list of objects')\n self.options['markers'] = values", "docstring": "Set the markers.\n\nArgs:\nvalues (list): list of marker objects.\n\nRaises:\nValueError: Markers must be a list of objects.", "source": "codesearchnet"} {"code": "def GetSubkeyByIndex(self, index):\n \n subkeys = list(self._subkeys.values())\n\n if index < 0 or index >= len(subkeys):\n raise IndexError('Index out of bounds.')\n\n return subkeys[index]", "docstring": "Retrieves a subkey by index.\n\nArgs:\nindex (int): index of the subkey.\n\nReturns:\nWinRegistryKey: Windows Registry subkey or None if not found.\n\nRaises:\nIndexError: if the index is out of bounds.", "source": "juraj-google-style"} {"code": "def CreateSubdivision(self, parent=None, value=None):\n \n division = {\n 'xsi_type': 'ProductPartition',\n 'partitionType': 'SUBDIVISION',\n 'id': str(self.next_id)\n }\n\n \n if parent is not None:\n division['parentCriterionId'] = parent['id']\n division['caseValue'] = value\n\n adgroup_criterion = {\n 'xsi_type': 'BiddableAdGroupCriterion',\n 'adGroupId': self.adgroup_id,\n 'criterion': division\n }\n\n self.CreateAddOperation(adgroup_criterion)\n self.next_id -= 1\n\n return division", "docstring": "Creates a subdivision node.\n\nArgs:\nparent: The node that should be this node's parent.\nvalue: The value being partitioned on.\nReturns:\nA new subdivision node.", "source": "juraj-google-style"} {"code": "def _ValidateFractionalMaxPoolResult(self, input_tensor, pooling_ratio, pseudo_random, overlapping):\n with self.cached_session():\n p, r, c = nn_ops.fractional_max_pool_v2(input_tensor, 
pooling_ratio, pseudo_random, overlapping, seed=self._SEED)\n actual, row_seq, col_seq = self.evaluate([p, r, c])\n expected = self._GetExpectedFractionalMaxPoolResult(input_tensor, row_seq, col_seq, overlapping)\n self.assertShapeEqual(expected, p)\n self.assertAllClose(expected, actual)", "docstring": "Validate FractionalMaxPool's result against expected.\n\nExpected result is computed given input_tensor, and pooling region defined\nby row_seq and col_seq.\n\nArgs:\ninput_tensor: A tensor or numpy ndarray.\npooling_ratio: A list or tuple of length 4, first and last element be 1.\npseudo_random: Use pseudo random method to generate pooling sequence.\noverlapping: Use overlapping when pooling.\n\nReturns:\nNone", "source": "github-repos"} {"code": "def dnd_setSnooze(self, *, num_minutes: int, **kwargs) -> SlackResponse:\n self._validate_xoxp_token()\n kwargs.update({'num_minutes': num_minutes})\n return self.api_call('dnd.setSnooze', http_verb='GET', params=kwargs)", "docstring": "Turns on Do Not Disturb mode for the current user, or changes its duration.\n\nArgs:\nnum_minutes (int): The snooze duration. e.g. 60", "source": "codesearchnet"} {"code": "def MakeSuiteFromCdf(cdf, name=None):\n if (name is None):\n name = cdf.name\n suite = Suite(name=name)\n prev = 0.0\n for (val, prob) in cdf.Items():\n suite.Incr(val, (prob - prev))\n prev = prob\n return suite", "docstring": "Makes a normalized Suite from a Cdf object.\n\nArgs:\ncdf: Cdf object\nname: string name for the new Suite\n\nReturns:\nSuite object", "source": "codesearchnet"} {"code": "def get_max_capvol(self, remove=True, insert=True, volume=None):\n \n\n vol = volume if volume else self.struc_oxid.volume\n return self._get_max_cap_ah(remove, insert) * 1000 * 1E24 / (vol * const.N_A)", "docstring": "Give max capacity in mAh/cc for inserting and removing a charged cation into base structure.\n\nArgs:\nremove: (bool) whether to allow cation removal\ninsert: (bool) whether to allow cation insertion\nvolume: (float) volume to use for normalization (default=volume of initial structure)\n\nReturns:\nmax vol capacity in mAh/cc", "source": "juraj-google-style"} {"code": "def _ParseJournalEntry(self, file_object, file_offset):\n entry_object = self._ParseEntryObject(file_object, file_offset)\n entry_item_map = self._GetDataTypeMap('systemd_journal_entry_item')\n file_offset += 64\n data_end_offset = ((file_offset + entry_object.data_size) - 64)\n fields = {'real_time': entry_object.real_time}\n while (file_offset < data_end_offset):\n try:\n (entry_item, entry_item_data_size) = self._ReadStructureFromFileObject(file_object, file_offset, entry_item_map)\n except (ValueError, errors.ParseError) as exception:\n raise errors.ParseError('Unable to parse entry item at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))\n file_offset += entry_item_data_size\n if (entry_item.object_offset < self._maximum_journal_file_offset):\n raise errors.ParseError('object offset should be after hash tables ({0:d} < {1:d})'.format(entry_item.object_offset, self._maximum_journal_file_offset))\n event_data = self._ParseDataObject(file_object, entry_item.object_offset)\n event_string = event_data.decode('utf-8')\n (key, value) = event_string.split('=', 1)\n fields[key] = value\n return fields", "docstring": "Parses a journal entry.\n\nThis method will generate an event per ENTRY object.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object.\nfile_offset (int): offset of the entry object relative to the start\nof the file-like 
object.\n\nReturns:\ndict[str, objects]: entry items per key.\n\nRaises:\nParseError: when an object offset is out of bounds.", "source": "codesearchnet"} {"code": "def create_snapshot(self, volume_id_or_uri, snapshot, timeout=(- 1)):\n uri = self.__build_volume_snapshot_uri(volume_id_or_uri)\n return self._client.create(snapshot, uri=uri, timeout=timeout, default_values=self.DEFAULT_VALUES_SNAPSHOT)", "docstring": "Creates a snapshot for the specified volume.\n\nArgs:\nvolume_id_or_uri:\nCan be either the volume ID or the volume URI.\nsnapshot (dict):\nObject to create.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns:\ndict: Storage volume.", "source": "codesearchnet"} {"code": "def movie_credits(self, **kwargs):\n \n path = self._get_id_path('movie_credits')\n\n response = self._GET(path, kwargs)\n self._set_attrs_to_values(response)\n return response", "docstring": "Get the movie credits for a specific person id.\n\nArgs:\nlanguage: (optional) ISO 639-1 code.\nappend_to_response: (optional) Comma separated, any person method.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "juraj-google-style"} {"code": "def delete_knowledge_base(project_id, knowledge_base_id):\n \n import dialogflow_v2beta1 as dialogflow\n client = dialogflow.KnowledgeBasesClient()\n knowledge_base_path = client.knowledge_base_path(\n project_id, knowledge_base_id)\n\n response = client.delete_knowledge_base(knowledge_base_path)\n\n print('Knowledge Base deleted.'.format(response))", "docstring": "Deletes a specific Knowledge base.\n\nArgs:\nproject_id: The GCP project linked with the agent.\nknowledge_base_id: Id of the Knowledge base.", "source": "juraj-google-style"} {"code": "def relative_tokens_ids_to_notes(self, tokens: np.ndarray, start_idx: float, cutoff_time_idx: Optional[float]=None):\n words = [self._convert_id_to_token(token) for token in tokens]\n current_idx = start_idx\n current_velocity = 0\n note_onsets_ready = [None for i in range(sum([k.endswith('NOTE') for k in self.encoder.keys()]) + 1)]\n notes = []\n for token_type, number in words:\n if token_type == 'TOKEN_SPECIAL':\n if number == 1:\n break\n elif token_type == 'TOKEN_TIME':\n current_idx = token_time_to_note(number=number, cutoff_time_idx=cutoff_time_idx, current_idx=current_idx)\n elif token_type == 'TOKEN_VELOCITY':\n current_velocity = number\n elif token_type == 'TOKEN_NOTE':\n notes = token_note_to_note(number=number, current_velocity=current_velocity, default_velocity=self.default_velocity, note_onsets_ready=note_onsets_ready, current_idx=current_idx, notes=notes)\n else:\n raise ValueError('Token type not understood!')\n for pitch, note_onset in enumerate(note_onsets_ready):\n if note_onset is not None:\n if cutoff_time_idx is None:\n cutoff = note_onset + 1\n else:\n cutoff = max(cutoff_time_idx, note_onset + 1)\n offset_idx = max(current_idx, cutoff)\n notes.append([note_onset, offset_idx, pitch, self.default_velocity])\n if len(notes) == 0:\n return []\n else:\n notes = np.array(notes)\n note_order = notes[:, 0] * 128 + notes[:, 1]\n notes = notes[note_order.argsort()]\n return notes", "docstring": "Converts relative tokens to notes which will then be used to create Pretty Midi objects.\n\nArgs:\ntokens (`numpy.ndarray`):\nRelative Tokens which will be converted to notes.\nstart_idx (`float`):\nA parameter which denotes the starting index.\ncutoff_time_idx (`float`,
*optional*):\nA parameter used while converting tokens to notes.", "source": "github-repos"} {"code": "def __init__(self, parent=None, **kwargs):\n \n if not parent:\n raise ValueError('Missing parent value.')\n\n super(VMDKPathSpec, self).__init__(parent=parent, **kwargs)", "docstring": "Initializes a path specification.\n\nNote that the VMDK file path specification must have a parent.\n\nArgs:\nparent (Optional[PathSpec]): parent path specification.\n\nRaises:\nValueError: when parent is not set.", "source": "juraj-google-style"} {"code": "def unpack(packet):\n validate_packet(packet)\n version = packet[0]\n try:\n pyof_lib = PYOF_VERSION_LIBS[version]\n except KeyError:\n raise UnpackException('Version not supported')\n try:\n message = pyof_lib.common.utils.unpack_message(packet)\n return message\n except (UnpackException, ValueError) as exception:\n raise UnpackException(exception)", "docstring": "Unpack the OpenFlow Packet and returns a message.\n\nArgs:\npacket: buffer with the openflow packet.\n\nReturns:\nGenericMessage: Message unpacked based on openflow packet.\n\nRaises:\nUnpackException: if the packet can't be unpacked.", "source": "codesearchnet"} {"code": "def _parse_pem_data(pem_data):\n \n sep = '-----BEGIN CERTIFICATE-----'\n cert_chain = [six.b(sep + s) for s in pem_data.split(sep)[1:]]\n certs = []\n load_cert = x509.load_pem_x509_certificate\n for cert in cert_chain:\n try:\n certs.append(load_cert(cert, default_backend()))\n except ValueError:\n warnings.warn('Certificate is invalid.')\n return False\n\n return certs", "docstring": "Parse PEM-encoded X.509 certificate chain.\n\nArgs:\npem_data: str. PEM file retrieved from SignatureCertChainUrl.\n\nReturns:\nlist or bool: If url is valid, returns the certificate chain as a list\nof cryptography.hazmat.backends.openssl.x509._Certificate\ncertificates where certs[0] is the first certificate in the file; if\nurl is invalid, returns False.", "source": "juraj-google-style"} {"code": "def LogUpdate(self, data):\n \n for hypo in self.Values():\n like = self.LogLikelihood(data, hypo)\n self.Incr(hypo, like)", "docstring": "Updates a suite of hypotheses based on new data.\n\nModifies the suite directly; if you want to keep the original, make\na copy.\n\nNote: unlike Update, LogUpdate does not normalize.\n\nArgs:\ndata: any representation of the data", "source": "juraj-google-style"} {"code": "def NHWCToNCHW(input_tensor: Union[tensor_lib.Tensor, list[int]]) -> Union[tensor_lib.Tensor, list[int]]:\n new_axes = {3: [0, 2, 1], 4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}\n if isinstance(input_tensor, tensor_lib.Tensor):\n ndims = input_tensor.shape.ndims\n return array_ops.transpose(input_tensor, new_axes[ndims])\n else:\n ndims = len(input_tensor)\n return [input_tensor[a] for a in new_axes[ndims]]", "docstring": "Converts the input from the NHWC format to NCHW.\n\nArgs:\ninput_tensor: a 3-, 4-, or 5-D tensor, or an array representing shape\n\nReturns:\nconverted tensor or shape array", "source": "github-repos"} {"code": "def add_children(self, children):\n self._children += [c for c in children if (c not in self._children)]", "docstring": "Adds new children nodes after filtering for duplicates\n\nArgs:\nchildren (list): list of OmniTree nodes to add as children", "source": "codesearchnet"} {"code": "def unauthorized(cls, errors=None):\n \n if cls.expose_status: \n cls.response.content_type = 'application/json'\n cls.response._status_line = '401 Unauthorized'\n\n return cls(401, errors=errors).to_json", "docstring": "Shortcut API for 
HTTP 401 `Unauthorized` response.\n\nArgs:\nerrors (list): Response key/value data.\n\nReturns:\nWSResponse Instance.", "source": "juraj-google-style"} {"code": "def cgmlst_subspecies_call(df_relatives):\n closest_distance = df_relatives['distance'].min()\n if (closest_distance > CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD):\n logging.warning('Min cgMLST distance (%s) above subspeciation distance threshold (%s)', closest_distance, CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD)\n return None\n else:\n df_relatives = df_relatives.loc[((df_relatives.distance <= CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD), :)]\n df_relatives = df_relatives.sort_values('distance', ascending=True)\n logging.debug('df_relatives by cgmlst %s', df_relatives.head())\n genome_spp = genomes_to_subspecies()\n subspecies_below_threshold = [(genome_spp[member_genome] if (member_genome in genome_spp) else None) for member_genome in df_relatives.index]\n subspecies_below_threshold = filter(None, subspecies_below_threshold)\n subspecies_counter = Counter(subspecies_below_threshold)\n logging.debug('Subspecies counter: %s', subspecies_counter)\n return (subspecies_counter.most_common(1)[0][0], closest_distance, dict(subspecies_counter))", "docstring": "Call Salmonella subspecies based on cgMLST results\n\nThis method attempts to find the majority subspecies type within curated\npublic genomes above a cgMLST allelic profile distance threshold.\n\nNote:\n``CGMLST_SUBSPECIATION_DISTANCE_THRESHOLD`` is the cgMLST distance\nthreshold used to determine the subspecies by cgMLST. It is set at a\ndistance of 0.9 which translates to a cgMLST allelic similarity of 10%.\nA threshold of 0.9 is generous and reasonable given the congruence\nbetween subspecies designations and 10% cgMLST clusters by Adjusted\nRand (~0.850) and Adjusted Wallace metrics (~0.850 both ways).\n\nArgs:\ndf_relatives (pandas.DataFrame): Table of genomes related by cgMLST to input genome\n\nReturns:\nNone: if no curated public genomes found to have a cgMLST profile similarity of 10% or greater\n(string, float, dict): most common subspecies, closest related public genome distance, subspecies frequencies", "source": "codesearchnet"} {"code": "def cwise(tf_fn, xs, output_dtype=None, grad_function=None, name=None):\n return slicewise(tf_fn, xs, output_dtype=output_dtype, splittable_dims=xs[0].shape.dims, grad_function=grad_function, name=(name or 'cwise'))", "docstring": "Component-wise operation with no broadcasting.\n\nArgs:\ntf_fn: a component-wise function taking n tf.Tensor inputs and producing\na tf.Tensor output\nxs: n Tensors\noutput_dtype: an optional dtype\ngrad_function: an optional python function\nname: an optional string\n\nReturns:\na Tensor", "source": "codesearchnet"} {"code": "def dependencies(self, user=None, napp=None):\n napps = self._get_napp_key('napp_dependencies', user, napp)\n return [tuple(napp.split('/')) for napp in napps]", "docstring": "Get napp_dependencies from install NApp.\n\nArgs:\nuser(string) A Username.\nnapp(string): A NApp name.\nReturns:\nnapps(list): List with tuples with Username and NApp name.\ne.g. 
[('kytos'/'of_core'), ('kytos/of_l2ls')]", "source": "codesearchnet"} {"code": "def remove_all_lambda_permissions(app_name='', env='', region='us-east-1'):\n \n session = boto3.Session(profile_name=env, region_name=region)\n lambda_client = session.client('lambda')\n legacy_prefix = app_name + \"_\"\n\n lambda_arn = get_lambda_arn(app_name, env, region)\n lambda_alias_arn = get_lambda_alias_arn(app_name, env, region)\n arns = (lambda_arn, lambda_alias_arn)\n\n for arn in arns:\n try:\n response = lambda_client.get_policy(FunctionName=arn)\n except boto3.exceptions.botocore.exceptions.ClientError as error:\n LOG.info(\"No policy exists for function %s, skipping deletion\", arn)\n LOG.debug(error)\n continue\n\n policy_json = json.loads(response['Policy'])\n LOG.debug(\"Found Policy: %s\", response)\n for perm in policy_json['Statement']:\n if perm['Sid'].startswith(FOREMAST_PREFIX) or perm['Sid'].startswith(legacy_prefix):\n lambda_client.remove_permission(FunctionName=arn, StatementId=perm['Sid'])\n LOG.info('removed permission: %s', perm['Sid'])\n else:\n LOG.info('Skipping deleting permission %s - Not managed by Foremast', perm['Sid'])", "docstring": "Remove all foremast-* permissions from lambda.\n\nArgs:\napp_name (str): Application name\nenv (str): AWS environment\nregion (str): AWS region", "source": "juraj-google-style"} {"code": "def _encode_fhir_path_builder_constraint(self, builder: expressions.Builder, top_level_constraint: Optional[expressions.Builder]) -> Optional[_BuilderSql]:\n if not top_level_constraint or isinstance(top_level_constraint.node, _evaluation.RootMessageNode):\n fhir_path_expression_sql, sql_expression = self._translate_fhir_path_expression(builder)\n if sql_expression and fhir_path_expression_sql:\n return _BuilderSql(f'(SELECT IFNULL(LOGICAL_AND(result_), TRUE)\\nFROM UNNEST({sql_expression}) AS result_)', fhir_path_expression_sql, builder)\n return None\n root_sql_expression = self._encode_fhir_path_builder(top_level_constraint)\n relative_builder = expressions.Builder.replace_with_operand(builder, old_path=top_level_constraint.fhir_path, replacement_node=_evaluation.StructureBaseNode(self._context, top_level_constraint.return_type))\n fhir_path_expression_sql, sql_expression = self._translate_fhir_path_expression(relative_builder)\n if not sql_expression or not root_sql_expression or (not fhir_path_expression_sql):\n return None\n return _BuilderSql(f'(SELECT IFNULL(LOGICAL_AND(result_), TRUE)\\nFROM (SELECT {sql_expression} AS subquery_\\nFROM (SELECT AS VALUE ctx_element_\\nFROM UNNEST({root_sql_expression}) AS ctx_element_)),\\nUNNEST(subquery_) AS result_)', fhir_path_expression_sql, relative_builder)", "docstring": "Returns a Standard SQL translation of the constraint `fhir_path_expression` relative to its top-level constraint.\n\nArgs:\nbuilder: Builder containing the information to be encoded to Standard SQL.\ntop_level_constraint: Builder containing the constraint that the input\nbuilder is tied to.\n\nReturns:\nA Standard SQL encoding of the constraint `fhir_path_expression` upon\nsuccessful completion. The SQL will evaluate to a single boolean\nindicating whether the constraint is satisfied and the builder that\ncreated it. 
May be different from the input builder(s).", "source": "github-repos"} {"code": "def prepend(self, node):\n \n if not isinstance(node, grammar.STATEMENTS):\n raise ValueError\n self.to_prepend[-1].appendleft(node)", "docstring": "Prepend a statement to the current statement.\n\nNote that multiple calls to prepend will result in the last statement to be\nprepended to end up at the top.\n\nArgs:\nnode: The statement to prepend.\n\nRaises:\nValueError: If the given node is not a statement.", "source": "juraj-google-style"} {"code": "def attach(self, engine, log_handler, event_name):\n if (event_name not in State.event_to_attr):\n raise RuntimeError(\"Unknown event name '{}'\".format(event_name))\n engine.add_event_handler(event_name, log_handler, self, event_name)", "docstring": "Attach the logger to the engine and execute `log_handler` function at `event_name` events.\n\nArgs:\nengine (Engine): engine object.\nlog_handler (callable): a logging handler to execute\nevent_name: event to attach the logging handler to. Valid events are from :class:`~ignite.engine.Events`\nor any `event_name` added by :meth:`~ignite.engine.Engine.register_events`.", "source": "codesearchnet"} {"code": "def connect(self, slot):\n \n self._ensure_slot_args(slot)\n if not self.is_connected(slot):\n self.slots.append(slot)", "docstring": "Connect ``slot`` to this singal.\n\nArgs:\nslot (callable): Callable object wich accepts keyword arguments.\n\nRaises:\nInvalidSlot: If ``slot`` doesn't accept keyword arguments.", "source": "juraj-google-style"} {"code": "def InspectZipFile(self, parser_mediator, zip_file):\n try:\n xml_data = zip_file.read('_rels/.rels')\n property_files = self._ParseRelationshipsXMLFile(xml_data)\n except (IndexError, IOError, KeyError, OverflowError, ValueError, zipfile.BadZipfile) as exception:\n parser_mediator.ProduceExtractionWarning('Unable to parse relationships XML file: _rels/.rels with error: {0!s}'.format(exception))\n return\n metadata = {}\n for path in property_files:\n try:\n xml_data = zip_file.read(path)\n properties = self._ParsePropertiesXMLFile(xml_data)\n except (IndexError, IOError, KeyError, OverflowError, ValueError, zipfile.BadZipfile) as exception:\n parser_mediator.ProduceExtractionWarning('Unable to parse properties XML file: {0:s} with error: {1!s}'.format(path, exception))\n continue\n metadata.update(properties)\n event_data = OpenXMLEventData()\n event_data.app_version = self._GetPropertyValue(parser_mediator, metadata, 'app_version')\n event_data.app_version = self._GetPropertyValue(parser_mediator, metadata, 'app_version')\n event_data.author = self._GetPropertyValue(parser_mediator, metadata, 'author')\n event_data.creating_app = self._GetPropertyValue(parser_mediator, metadata, 'creating_app')\n event_data.doc_security = self._GetPropertyValue(parser_mediator, metadata, 'doc_security')\n event_data.hyperlinks_changed = self._GetPropertyValue(parser_mediator, metadata, 'hyperlinks_changed')\n event_data.i4 = self._GetPropertyValue(parser_mediator, metadata, 'i4')\n event_data.last_saved_by = self._GetPropertyValue(parser_mediator, metadata, 'last_saved_by')\n event_data.links_up_to_date = self._GetPropertyValue(parser_mediator, metadata, 'links_up_to_date')\n event_data.number_of_characters = self._GetPropertyValue(parser_mediator, metadata, 'number_of_characters')\n event_data.number_of_characters_with_spaces = self._GetPropertyValue(parser_mediator, metadata, 'number_of_characters_with_spaces')\n event_data.number_of_lines = 
self._GetPropertyValue(parser_mediator, metadata, 'number_of_lines')\n event_data.number_of_pages = self._GetPropertyValue(parser_mediator, metadata, 'number_of_pages')\n event_data.number_of_paragraphs = self._GetPropertyValue(parser_mediator, metadata, 'number_of_paragraphs')\n event_data.number_of_words = self._GetPropertyValue(parser_mediator, metadata, 'number_of_words')\n event_data.revision_number = self._GetPropertyValue(parser_mediator, metadata, 'revision_number')\n event_data.scale_crop = self._GetPropertyValue(parser_mediator, metadata, 'scale_crop')\n event_data.shared_doc = self._GetPropertyValue(parser_mediator, metadata, 'shared_doc')\n event_data.template = self._GetPropertyValue(parser_mediator, metadata, 'template')\n event_data.total_time = self._GetPropertyValue(parser_mediator, metadata, 'total_time')\n self._ProduceEvent(parser_mediator, event_data, metadata, 'created', definitions.TIME_DESCRIPTION_CREATION, 'creation time')\n self._ProduceEvent(parser_mediator, event_data, metadata, 'modified', definitions.TIME_DESCRIPTION_MODIFICATION, 'modification time')\n self._ProduceEvent(parser_mediator, event_data, metadata, 'last_printed', definitions.TIME_DESCRIPTION_LAST_PRINTED, 'last printed time')", "docstring": "Parses an OXML file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nzip_file (zipfile.ZipFile): the zip file containing OXML content. It is\nnot be closed in this method, but will be closed by the parser logic\nin czip.py.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "codesearchnet"} {"code": "def _fit(self, dataset):\n \n sc = SparkContext.getOrCreate()\n\n logging.info(\"===== 1. train args: {0}\".format(self.args))\n logging.info(\"===== 2. train params: {0}\".format(self._paramMap))\n local_args = self.merge_args_params()\n logging.info(\"===== 3. 
train args + params: {0}\".format(local_args))\n\n if local_args.input_mode == TFCluster.InputMode.TENSORFLOW:\n if dfutil.isLoadedDF(dataset):\n \n logging.info(\"Loaded DataFrame of TFRecord.\")\n local_args.tfrecord_dir = dfutil.loadedDF[dataset]\n else:\n \n assert local_args.tfrecord_dir, \"Please specify --tfrecord_dir to export DataFrame to TFRecord.\"\n if self.getInputMapping():\n \n dataset = dataset.select(list(self.getInputMapping()))\n logging.info(\"Exporting DataFrame {} as TFRecord to: {}\".format(dataset.dtypes, local_args.tfrecord_dir))\n dfutil.saveAsTFRecords(dataset, local_args.tfrecord_dir)\n logging.info(\"Done saving\")\n\n tf_args = self.args.argv if self.args.argv else local_args\n cluster = TFCluster.run(sc, self.train_fn, tf_args, local_args.cluster_size, local_args.num_ps,\n local_args.tensorboard, local_args.input_mode, driver_ps_nodes=local_args.driver_ps_nodes)\n if local_args.input_mode == TFCluster.InputMode.SPARK:\n \n input_cols = sorted(self.getInputMapping())\n cluster.train(dataset.select(input_cols).rdd, local_args.epochs)\n cluster.shutdown(grace_secs=30)\n\n \n if self.export_fn:\n assert local_args.export_dir, \"Export function requires --export_dir to be set\"\n logging.info(\"Exporting saved_model (via export_fn) to: {}\".format(local_args.export_dir))\n\n def _export(iterator, fn, args):\n single_node_env(args)\n fn(args)\n\n \n sc.parallelize([1], 1).foreachPartition(lambda it: _export(it, self.export_fn, tf_args))\n\n return self._copyValues(TFModel(self.args))", "docstring": "Trains a TensorFlow model and returns a TFModel instance with the same args/params pointing to a checkpoint or saved_model on disk.\n\nArgs:\n:dataset: A Spark DataFrame with columns that will be mapped to TensorFlow tensors.\n\nReturns:\nA TFModel representing the trained model, backed on disk by a TensorFlow checkpoint or saved_model.", "source": "juraj-google-style"} {"code": "def VerifyStructure(self, parser_mediator, line):\n \n try:\n structure = self._DPKG_LOG_LINE.parseString(line)\n except pyparsing.ParseException as exception:\n logger.debug(\n 'Unable to parse Debian dpkg.log file with error: {0!s}'.format(\n exception))\n return False\n\n return 'date_time' in structure and 'body' in structure", "docstring": "Verifies if a line from a text file is in the expected format.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nline (str): line from a text file.\n\nReturns:\nbool: True if the line is in the expected format, False if not.", "source": "juraj-google-style"} {"code": "def set_hostname(hostname=None, deploy=False):\n if (not hostname):\n raise CommandExecutionError('Hostname option must not be none.')\n ret = {}\n query = {'type': 'config', 'action': 'set', 'xpath': \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system\", 'element': '{0}'.format(hostname)}\n ret.update(__proxy__['panos.call'](query))\n if (deploy is True):\n ret.update(commit())\n return ret", "docstring": "Set the hostname of the Palo Alto proxy minion. A commit will be required before this is processed.\n\nCLI Example:\n\nArgs:\nhostname (str): The hostname to set\n\ndeploy (bool): If true then commit the full candidate configuration, if false only set pending change.\n\n.. 
code-block:: bash\n\nsalt '*' panos.set_hostname newhostname\nsalt '*' panos.set_hostname newhostname deploy=True", "source": "codesearchnet"} {"code": "def clear(self, keep_attrs=False):\n \n if not keep_attrs:\n for a in (self.graph_attr, self.node_attr, self.edge_attr):\n a.clear()\n del self.body[:]", "docstring": "Reset content to an empty body, clear graph/node/egde_attr mappings.\n\nArgs:\nkeep_attrs (bool): preserve graph/node/egde_attr mappings", "source": "juraj-google-style"} {"code": "def get_server_group(self):\n api_url = '{0}/applications/{1}'.format(API_URL, self.app)\n response = requests.get(api_url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)\n for server_group in response.json()['clusters'][self.env]:\n return server_group['serverGroups'][(- 1)]", "docstring": "Finds the most recently deployed server group for the application.\nThis is the server group that the scaling policy will be applied to.\n\nReturns:\nserver_group (str): Name of the newest server group", "source": "codesearchnet"} {"code": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return self.prefix_tokens + token_ids_0 + self.suffix_tokens\n return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. An NLLB sequence has the following format, where `X` represents the sequence:\n\n- `input_ids` (for encoder) `X [eos, src_lang_code]`\n- `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`\n\nBOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a\nseparator.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"} {"code": "def run_step(context):\n logger.debug('started')\n assert context, f'context must have value for {__name__}'\n deprecated(context)\n context.assert_key_has_value('assert', __name__)\n assert_this = context['assert']['this']\n is_equals_there = ('equals' in context['assert'])\n if is_equals_there:\n assert_equals = context['assert']['equals']\n logger.debug(\"comparing assert['this'] to assert['equals'].\")\n assert_result = (context.get_formatted_iterable(assert_this) == context.get_formatted_iterable(assert_equals))\n else:\n logger.debug(\"evaluating assert['this'] as a boolean.\")\n assert_result = context.get_formatted_as_type(assert_this, out_type=bool)\n logger.info(f'assert evaluated to {assert_result}')\n if (not assert_result):\n if is_equals_there:\n type_this = type(context.get_formatted_iterable(assert_this)).__name__\n type_equals = type(context.get_formatted_iterable(assert_equals)).__name__\n error_text = f\"assert assert['this'] is of type {type_this} and does not equal assert['equals'] of type {type_equals}.\"\n else:\n error_text = f'assert {assert_this} evaluated to False.'\n raise ContextError(error_text)\n logger.debug('done')", "docstring": "Assert that something is True or equal to something else.\n\nArgs:\ncontext: dictionary-like pypyr.context.Context. context is mandatory.\nUses the following context keys in context:\n- assert\n- this. mandatory. Any type. 
If assert['equals'] not specified,\nevals as boolean.\n- equals. optional. Any type.\n\nIf assert['this'] evaluates to False raises error.\nIf assert['equals'] is specified, raises error if\nassert.this != assert.equals.\n\nassert['this'] & assert['equals'] both support string substitutions.\n\nReturns:\nNone\n\nRaises:\nContextError: if assert evaluates to False.", "source": "codesearchnet"} {"code": "def __setitem__(self, parameter, instr_params):\n \n\n for instruction, param_index in instr_params:\n assert isinstance(instruction, Instruction)\n assert isinstance(param_index, int)\n self._table[parameter] = instr_params", "docstring": "Sets list of Instructions that depend on Parameter.\n\nArgs:\nparameter (Parameter): the parameter to set\ninstr_params (list): List of (Instruction, int) tuples. Int is the\nparameter index at which the parameter appears in the instruction.", "source": "juraj-google-style"} {"code": "def __init__(self, dataset):\n self.dataset = dataset\n elem_spec = self.dataset.element_spec\n _check_table_initializer_element_spec(elem_spec)\n key_type = elem_spec[0].dtype\n value_type = elem_spec[1].dtype\n super(DatasetInitializer, self).__init__(key_type, value_type)", "docstring": "Creates a table initializer from a `tf.data.Dataset`.\n\nArgs:\ndataset: A `tf.data.Dataset` object that produces tuples of scalars. The\nfirst scalar is treated as a key and the second as value.\nRaises: ValueError if `dataset` doesn't conform to specifications.\nReturns: A `DatasetInitializer` object", "source": "github-repos"} {"code": "def load_ems(self, modules_paths: List[str]):\n \n all_em_lst = []\n if modules_paths:\n for modules_path in modules_paths:\n em_lst = []\n try:\n for file_name in os.listdir(modules_path):\n if file_name.startswith(\"em_\") and file_name.endswith(\".py\"):\n sys.path.append(modules_path) \n this_module = importlib.import_module(file_name[:-3])\n for em in self.classes_in_module(this_module):\n em_lst.append(em(self))\n except:\n self.log(\"Error when loading etk modules from \" + modules_path, \"error\")\n raise NotGetETKModuleError(\"Wrong file path for ETK modules\")\n all_em_lst += em_lst\n\n try:\n all_em_lst = self.topological_sort(all_em_lst)\n except Exception:\n self.log(\"Topological sort for ETK modules fails\", \"error\")\n raise NotGetETKModuleError(\"Topological sort for ETK modules fails\")\n\n \n \n \n return all_em_lst", "docstring": "Load all extraction modules from the path\n\nArgs:\nmodules_path: str\n\nReturns:", "source": "juraj-google-style"} {"code": "def install(path, restart=False):\n cmd = ['wusa.exe', path, '/quiet']\n if restart:\n cmd.append('/forcerestart')\n else:\n cmd.append('/norestart')\n ret_code = __salt__['cmd.retcode'](cmd, ignore_retcode=True)\n file_name = os.path.basename(path)\n errors = {2359302: '{0} is already installed'.format(file_name), 87: 'Unknown error'}\n if (ret_code in errors):\n raise CommandExecutionError(errors[ret_code])\n elif ret_code:\n raise CommandExecutionError('Unknown error: {0}'.format(ret_code))\n return True", "docstring": "Install a KB from a .msu file.\n\nArgs:\n\npath (str):\nThe full path to the msu file to install\n\nrestart (bool):\n``True`` to force a restart if required by the installation. Adds\nthe ``/forcerestart`` switch to the ``wusa.exe`` command. ``False``\nwill add the ``/norestart`` switch instead. 
Default is ``False``\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nRaise:\nCommandExecutionError: If the package is already installed or an error\nis encountered\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' wusa.install C:/temp/KB123456.msu", "source": "codesearchnet"} {"code": "def random_mixed_density_matrix(num_qubits, num_mixtures=5):\n pre_probs = tf.random.uniform([num_mixtures], 1e-09)\n mixture_probabilities = pre_probs / tf.reduce_sum(pre_probs)\n random_unitary = random_unitary_matrix(num_qubits)\n dim = 2 ** num_qubits\n final_state = tf.zeros([dim, dim], tf.complex128)\n for i in range(num_mixtures):\n pure_state = tf.one_hot(i, dim, 1.0, 0.0, 0, tf.complex128)\n evolved_pure_state = tf.linalg.matvec(random_unitary, pure_state)\n adjoint_evolved_pure_state = tf.squeeze(tf.linalg.adjoint(tf.expand_dims(evolved_pure_state, 0)))\n final_state = final_state + tf.cast(mixture_probabilities[i], tf.complex128) * tf.einsum('i,j->ij', evolved_pure_state, adjoint_evolved_pure_state)\n return (final_state, mixture_probabilities)", "docstring": "Returns a random mixed density matrix.\n\nApplies a common random unitary to `num_mixtures` orthogonal states, then\nmixes them with random weights.\n\nArgs:\nnum_qubits: Number of qubits on which the matrix acts.\nnum_mixtures: The number of orthogonal pure states to mix.\n\nReturns:\nfinal_state: The mixed density matrix.\nmixture_probabilities: The probability of each state in the mixture.", "source": "github-repos"} {"code": "def run(coro, loop=None):\n loop = (loop or asyncio.get_event_loop())\n return loop.run_until_complete(coro)", "docstring": "Convenient shortcut alias to ``loop.run_until_complete``.\n\nArguments:\ncoro (coroutine): coroutine object to schedule.\nloop (asyncio.BaseEventLoop): optional event loop to use.\nDefaults to: ``asyncio.get_event_loop()``.\n\nReturns:\nmixed: returned value by coroutine.\n\nUsage::\n\nasync def mul_2(num):\nreturn num * 2\n\npaco.run(mul_2(4))\n# => 8", "source": "codesearchnet"} {"code": "def user_agent_detail(self, **kwargs):\n \n path = '%s/%s/user_agent_detail' % (self.manager.path, self.get_id())\n return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "Get the user agent detail.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. 
sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the server cannot perform the request", "source": "juraj-google-style"} {"code": "def get_ip_address_info(ip_address, cache=None, nameservers=None, timeout=2.0, parallel=False):\n ip_address = ip_address.lower()\n if cache:\n info = cache.get(ip_address, None)\n if info:\n return info\n info = OrderedDict()\n info['ip_address'] = ip_address\n reverse_dns = get_reverse_dns(ip_address, nameservers=nameservers, timeout=timeout)\n country = get_ip_address_country(ip_address, parallel=parallel)\n info['country'] = country\n info['reverse_dns'] = reverse_dns\n info['base_domain'] = None\n if (reverse_dns is not None):\n base_domain = get_base_domain(reverse_dns)\n info['base_domain'] = base_domain\n return info", "docstring": "Returns reverse DNS and country information for the given IP address\n\nArgs:\nip_address (str): The IP address to check\ncache (ExpiringDict): Cache storage\nnameservers (list): A list of one or more nameservers to use\n(Cloudflare's public DNS resolvers by default)\ntimeout (float): Sets the DNS timeout in seconds\nparallel (bool): parallel processing\n\nReturns:\nOrderedDict: ``ip_address``, ``reverse_dns``", "source": "codesearchnet"} {"code": "def attention_image_summary(attn, image_shapes=None):\n attn = tf.cast(attn, tf.float32)\n num_heads = common_layers.shape_list(attn)[1]\n image = tf.transpose(attn, [0, 2, 3, 1])\n image = tf.pow(image, 0.2)\n image = tf.pad(image, [[0, 0], [0, 0], [0, 0], [0, tf.mod((- num_heads), 3)]])\n image = split_last_dimension(image, 3)\n image = tf.reduce_max(image, 4)\n if (image_shapes is not None):\n if (len(image_shapes) == 4):\n (q_rows, q_cols, m_rows, m_cols) = list(image_shapes)\n image = tf.reshape(image, [(- 1), q_rows, q_cols, m_rows, m_cols, 3])\n image = tf.transpose(image, [0, 1, 3, 2, 4, 5])\n image = tf.reshape(image, [(- 1), (q_rows * m_rows), (q_cols * m_cols), 3])\n else:\n assert (len(image_shapes) == 6)\n (q_rows, q_cols, q_channnels, m_rows, m_cols, m_channels) = list(image_shapes)\n image = tf.reshape(image, [(- 1), q_rows, q_cols, q_channnels, m_rows, m_cols, m_channels, 3])\n image = tf.transpose(image, [0, 1, 4, 3, 2, 5, 6, 7])\n image = tf.reshape(image, [(- 1), ((q_rows * m_rows) * q_channnels), ((q_cols * m_cols) * m_channels), 3])\n tf.summary.image('attention', image, max_outputs=1)", "docstring": "Compute color image summary.\n\nArgs:\nattn: a Tensor with shape [batch, num_heads, query_length, memory_length]\nimage_shapes: optional tuple of integer scalars.\nIf the query positions and memory positions represent the\npixels of flattened images, then pass in their dimensions:\n(query_rows, query_cols, memory_rows, memory_cols).\nIf the query positions and memory positions represent the\npixels x channels of flattened images, then pass in their dimensions:\n(query_rows, query_cols, query_channels,\nmemory_rows, memory_cols, memory_channels).", "source": "codesearchnet"} {"code": "def task(*args, **kwargs):\n func = None\n if ((len(args) == 1) and callable(args[0])):\n func = args[0]\n if (not kwargs):\n service = 'lambda'\n lambda_function_name_arg = None\n aws_region_arg = None\n else:\n service = kwargs.get('service', 'lambda')\n lambda_function_name_arg = kwargs.get('remote_aws_lambda_function_name')\n aws_region_arg = kwargs.get('remote_aws_region')\n capture_response = kwargs.get('capture_response', False)\n\n def func_wrapper(func):\n task_path = get_func_task_path(func)\n\n @wraps(func)\n def 
_run_async(*args, **kwargs):\n \"\\n This is the wrapping async function that replaces the function\\n that is decorated with @task.\\n Args:\\n These are just passed through to @task's func\\n\\n Assuming a valid service is passed to task() and it is run\\n inside a Lambda process (i.e. AWS_LAMBDA_FUNCTION_NAME exists),\\n it dispatches the function to be run through the service variable.\\n Otherwise, it runs the task synchronously.\\n\\n Returns:\\n In async mode, the object returned includes state of the dispatch.\\n For instance\\n\\n When outside of Lambda, the func passed to @task is run and we\\n return the actual value.\\n \"\n lambda_function_name = (lambda_function_name_arg or os.environ.get('AWS_LAMBDA_FUNCTION_NAME'))\n aws_region = (aws_region_arg or os.environ.get('AWS_REGION'))\n if ((service in ASYNC_CLASSES) and lambda_function_name):\n send_result = ASYNC_CLASSES[service](lambda_function_name=lambda_function_name, aws_region=aws_region, capture_response=capture_response).send(task_path, args, kwargs)\n return send_result\n else:\n return func(*args, **kwargs)\n update_wrapper(_run_async, func)\n _run_async.service = service\n _run_async.sync = func\n return _run_async\n return (func_wrapper(func) if func else func_wrapper)", "docstring": "Async task decorator so that running\n\nArgs:\nfunc (function): the function to be wrapped\nFurther requirements:\nfunc must be an independent top-level function.\ni.e. not a class method or an anonymous function\nservice (str): either 'lambda' or 'sns'\nremote_aws_lambda_function_name (str): the name of a remote lambda function to call with this task\nremote_aws_region (str): the name of a remote region to make lambda/sns calls against\n\nReturns:\nA replacement function that dispatches func() to\nrun asynchronously through the service in question", "source": "codesearchnet"} {"code": "def __init__(self, variant_tensor):\n self._variant_tensor_attr = variant_tensor\n self._graph_attr = ops.get_default_graph()\n self._options_attr = options_lib.Options()\n for input_dataset in self._inputs():\n input_options = None\n if isinstance(input_dataset, data_types.DatasetV1):\n if hasattr(input_dataset, '_dataset'):\n if not isinstance(input_dataset._dataset, data_types.DatasetV2):\n raise TypeError(f'Each input of dataset {type(self)} should be a subclass of `tf.data.Dataset` but encountered {type(input_dataset._dataset)}.')\n input_options = input_dataset._dataset._options_attr\n elif isinstance(input_dataset, data_types.DatasetV2):\n input_options = input_dataset._options_attr\n else:\n raise TypeError(f'Each input of dataset {type(self)} should be a subclass of `tf.data.Dataset` but encountered {type(input_dataset)}.')\n if input_options is not None:\n self._options_attr = self._options_attr.merge(input_options)\n self._options_attr._set_mutable(False)", "docstring": "Creates a DatasetV2 object.\n\nThis is a difference between DatasetV1 and DatasetV2. 
DatasetV1 does not\ntake anything in its constructor whereas in the DatasetV2, we expect\nsubclasses to create a variant_tensor and pass it in to the super() call.\n\nArgs:\nvariant_tensor: A DT_VARIANT tensor that represents the dataset.", "source": "github-repos"} {"code": "def chomp(text, max_len=280, split=None):\n split = (split or '—;,.')\n while (length(text) > max_len):\n try:\n text = re.split((('[' + split) + ']'), text[::(- 1)], 1)[1][::(- 1)]\n except IndexError:\n return text\n return text", "docstring": "Shorten a string so that it fits under max_len, splitting it at 'split'.\nNot guaranteed to return a string under max_len, as it may not be possible\n\nArgs:\ntext (str): String to shorten\nmax_len (int): maximum length. default 280\nsplit (str): strings to split on (default is common punctuation: \"-;,.\")", "source": "codesearchnet"} {"code": "def _player_step_tuple(self, envs_step_tuples):\n (ob_real, reward_real, _, _) = envs_step_tuples['real_env']\n (ob_sim, reward_sim, _, _) = envs_step_tuples['sim_env']\n ob_err = absolute_hinge_difference(ob_sim, ob_real)\n ob_real_aug = self._augment_observation(ob_real, reward_real, self.cumulative_real_reward)\n ob_sim_aug = self._augment_observation(ob_sim, reward_sim, self.cumulative_sim_reward)\n ob_err_aug = self._augment_observation(ob_err, (reward_sim - reward_real), (self.cumulative_sim_reward - self.cumulative_real_reward))\n ob = np.concatenate([ob_sim_aug, ob_real_aug, ob_err_aug], axis=1)\n (_, reward, done, info) = envs_step_tuples['real_env']\n return (ob, reward, done, info)", "docstring": "Construct observation, return usual step tuple.\n\nArgs:\nenvs_step_tuples: tuples.\n\nReturns:\nStep tuple: ob, reward, done, info\nob: concatenated images [simulated observation, real observation,\ndifference], with additional information in header.\nreward: real environment reward\ndone: True iff. 
envs_step_tuples['real_env'][2] is True\ninfo: real environment info", "source": "codesearchnet"} {"code": "def GetLocalPath(self, inode, cache, database):\n \n local_path = cache.GetResults('local_path')\n if not local_path:\n results = database.Query(self.LOCAL_PATH_CACHE_QUERY)\n\n cache.CacheQueryResults(\n results, 'local_path', 'child_inode_number',\n ('parent_inode_number', 'filename'))\n local_path = cache.GetResults('local_path')\n\n parent, path = local_path.get(inode, [None, None])\n\n \n \n root_value = '%local_sync_root%/'\n\n if not path:\n return root_value\n\n paths = []\n while path:\n paths.append(path)\n parent, path = local_path.get(parent, [None, None])\n\n if not paths:\n return root_value\n\n \n \n paths.reverse()\n return root_value + '/'.join(paths)", "docstring": "Return local path for a given inode.\n\nArgs:\ninode (int): inode number for the file.\ncache (SQLiteCache): cache.\ndatabase (SQLiteDatabase): database.\n\nReturns:\nstr: full path, including the filename of the given inode value.", "source": "juraj-google-style"} {"code": "def default(self, o):\n \n if isinstance(o, datetime.datetime):\n return {\"@module\": \"datetime\", \"@class\": \"datetime\",\n \"string\": o.__str__()}\n if np is not None:\n if isinstance(o, np.ndarray):\n return {\"@module\": \"numpy\",\n \"@class\": \"array\",\n \"dtype\": o.dtype.__str__(),\n \"data\": o.tolist()}\n elif isinstance(o, np.generic):\n return o.item()\n if bson is not None:\n if isinstance(o, bson.objectid.ObjectId):\n return {\"@module\": \"bson.objectid\",\n \"@class\": \"ObjectId\",\n \"oid\": str(o)}\n\n try:\n d = o.as_dict()\n if \"@module\" not in d:\n d[\"@module\"] = u\"{}\".format(o.__class__.__module__)\n if \"@class\" not in d:\n d[\"@class\"] = u\"{}\".format(o.__class__.__name__)\n if \"@version\" not in d:\n try:\n parent_module = o.__class__.__module__.split('.')[0]\n module_version = import_module(parent_module).__version__\n d[\"@version\"] = u\"{}\".format(module_version)\n except AttributeError:\n d[\"@version\"] = None\n return d\n except AttributeError:\n return json.JSONEncoder.default(self, o)", "docstring": "Overriding default method for JSON encoding. This method does two\nthings: (a) If an object has a to_dict property, return the to_dict\noutput. (b) If the @module and @class keys are not in the to_dict,\nadd them to the output automatically. If the object has no to_dict\nproperty, the default Python json encoder default method is called.\n\nArgs:\no: Python object.\n\nReturn:\nPython dict representation.", "source": "juraj-google-style"} {"code": "def cleanup(self):\n current = self.join('current')\n if (not os.path.exists(current)):\n LOGGER.debug('found broken current symlink, removing: %s', current)\n os.unlink(self.join('current'))\n self.current = None\n try:\n self._update_current()\n except PrefixNotFound:\n if (not os.listdir(self.path)):\n LOGGER.debug('workdir is empty, removing %s', self.path)\n os.rmdir(self.path)\n else:\n raise MalformedWorkdir('Unable to find any prefixes in {0}, but the directory looks malformed. Try deleting it manually.'.format(self.path))", "docstring": "Attempt to set a new current symlink if it is broken. 
If no other\nprefixes exist and the workdir is empty, try to delete the entire\nworkdir.\n\nRaises:\n:exc:`~MalformedWorkdir`: if no prefixes were found, but the\nworkdir is not empty.", "source": "codesearchnet"} {"code": "def do_check_pep8(files, status):\n for file_name in files:\n args = ['flake8', '--max-line-length=120', '{0}'.format(file_name)]\n output = run(*args)\n if output:\n status.append('Python PEP8/Flake8: {0}: {1}'.format(file_name, output))\n return status", "docstring": "Run the python pep8 tool against the filst of supplied files.\nAppend any linting errors to the returned status list\n\nArgs:\nfiles (str): list of files to run pep8 against\nstatus (list): list of pre-receive check failures to eventually print\nto the user\n\nReturns:\nstatus list of current pre-redeive check failures. Might be an empty\nlist.", "source": "codesearchnet"} {"code": "def update_renames_v2(output_file_path):\n function_renames = collect_function_renames()\n constant_renames = collect_constant_renames()\n all_renames = function_renames.union(constant_renames)\n manual_renames = all_renames_v2.manual_symbol_renames\n rename_lines = [get_rename_line(name, canonical_name) for name, canonical_name in all_renames if 'tf.' + name not in manual_renames]\n renames_file_text = '%srenames = {\\n%s\\n}\\n' % (_FILE_HEADER, ',\\n'.join(sorted(rename_lines)))\n file_io.write_string_to_file(output_file_path, renames_file_text)", "docstring": "Writes a Python dictionary mapping deprecated to canonical API names.\n\nArgs:\noutput_file_path: File path to write output to. Any existing contents\nwould be replaced.", "source": "github-repos"} {"code": "def wait_for_postgres(database, host, port, username, password):\n connecting_string = 'Checking for PostgreSQL...'\n if (port is not None):\n port = int(port)\n while True:\n try:\n logger.info(connecting_string)\n connection = psycopg2.connect(database=database, host=host, port=port, user=username, password=password, connect_timeout=3)\n connection.close()\n logger.info('PostgreSQL is running!')\n break\n except psycopg2.OperationalError:\n time.sleep(1)", "docstring": "Waits for PostgreSQL database to be up\n\nArgs:\ndatabase (Optional[str]): Database name\nhost (Optional[str]): Host where database is located\nport (Union[int, str, None]): Database port\nusername (Optional[str]): Username to log into database\npassword (Optional[str]): Password to log into database\n\nReturns:\nNone", "source": "codesearchnet"} {"code": "def power(self, n):\n \n if n > 0:\n return super().power(n)\n return Stinespring(SuperOp(self).power(n))", "docstring": "The matrix power of the channel.\n\nArgs:\nn (int): compute the matrix power of the superoperator matrix.\n\nReturns:\nStinespring: the matrix power of the SuperOp converted to a\nStinespring channel.\n\nRaises:\nQiskitError: if the input and output dimensions of the\nQuantumChannel are not equal, or the power is not an integer.", "source": "juraj-google-style"} {"code": "def cubic_lattice( a, b, c, spacing ):\n \n grid = np.array( list( range( 1, a * b * c + 1 ) ) ).reshape( a, b, c, order='F' )\n it = np.nditer( grid, flags=[ 'multi_index' ] )\n sites = []\n while not it.finished:\n x, y, z = it.multi_index\n r = np.array( [ x, y, z ] ) * spacing\n neighbours = [ np.roll( grid, +1, axis=0 )[x,y,z],\n np.roll( grid, -1, axis=0 )[x,y,z],\n np.roll( grid, +1, axis=1 )[x,y,z],\n np.roll( grid, -1, axis=1 )[x,y,z],\n np.roll( grid, +1, axis=2 )[x,y,z],\n np.roll( grid, -1, axis=2 )[x,y,z] ]\n sites.append( lattice_site.Site( 
int( it[0] ), r, neighbours, 0.0, 'L' ) )\n it.iternext()\n return lattice.Lattice( sites, cell_lengths = np.array( [ a, b, c ] ) * spacing )", "docstring": "Generate a cubic lattice.\n\nArgs:\na (Int): Number of lattice repeat units along x.\nb (Int): Number of lattice repeat units along y.\nc (Int): Number of lattice repeat units along z.\nspacing (Float): Distance between lattice sites.\n\nReturns:\n(Lattice): The new lattice", "source": "juraj-google-style"} {"code": "def get_ituz(self, callsign, timestamp=timestamp_now):\n return self.get_all(callsign, timestamp)[const.ITUZ]", "docstring": "Returns ITU Zone of a callsign\n\nArgs:\ncallsign (str): Amateur Radio callsign\ntimestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)\n\nReturns:\nint: containing the callsign's CQ Zone\n\nRaises:\nKeyError: No ITU Zone found for callsign\n\nNote:\nCurrently, only Country-files.com lookup database contains ITU Zones", "source": "codesearchnet"} {"code": "def stop_gradient(variable):\n if any_symbolic_tensors((variable,)):\n return StopGradient().symbolic_call(variable)\n return backend.core.stop_gradient(variable)", "docstring": "Stops gradient computation.\n\nArgs:\nvariable: A tensor variable for which the gradient\ncomputation is to be disabled.\n\nReturns:\nThe variable with gradient computation disabled.\n\nExamples:\n\n>>> var = keras.backend.convert_to_tensor(\n... [1., 2., 3.],\n... dtype=\"float32\"\n... )\n>>> var = keras.ops.stop_gradient(var)", "source": "github-repos"} {"code": "def _update_from_body(self, destination, source):\n for (key, value) in source.iteritems():\n destination_value = destination.get(key)\n if (isinstance(value, dict) and isinstance(destination_value, dict)):\n self._update_from_body(destination_value, value)\n else:\n destination[key] = value", "docstring": "Updates the dictionary for an API payload with the request body.\n\nThe values from the body should override those already in the payload, but\nfor nested fields (message objects) the values can be combined\nrecursively.\n\nArgs:\ndestination: A dictionary containing an API payload parsed from the\npath and query parameters in a request.\nsource: A dictionary parsed from the body of the request.", "source": "codesearchnet"} {"code": "def parseTree(self, root, state: ParseState) -> List[Dict]:\n if (root.tag in self.AST_TAG_HANDLERS):\n return self.AST_TAG_HANDLERS[root.tag](root, state)\n elif (root.tag in self.libRtns):\n return self.process_libRtn(root, state)\n else:\n prog = []\n for node in root:\n prog += self.parseTree(node, state)\n return prog", "docstring": "Parses the XML ast tree recursively to generate a JSON AST\nwhich can be ingested by other scripts to generate Python\nscripts.\n\nArgs:\nroot: The current root of the tree.\nstate: The current state of the tree defined by an object of the\nParseState class.\n\nReturns:\nast: A JSON ast that defines the structure of the Fortran file.", "source": "codesearchnet"} {"code": "def register_symbolic_tensor_type(cls):\n global _user_convertible_tensor_types\n if cls not in _user_convertible_tensor_types:\n keras_tensor.register_keras_tensor_specialization(cls, keras_tensor.UserRegisteredTypeKerasTensor)\n _user_convertible_tensor_types.add(cls)", "docstring": "Allows users to specify types regarded as symbolic `Tensor`s.\n\nUsed in conjunction with `tf.register_tensor_conversion_function`, calling\n`tf.keras.__internal__.utils.register_symbolic_tensor_type(cls)`\nallows non-`Tensor` objects to be plumbed through Keras 
layers.\n\nExample:\n\n```python\n# One-time setup.\nclass Foo(object):\ndef __init__(self, input_):\nself._input = input_\ndef value(self):\nreturn tf.constant(42.)\n\ntf.register_tensor_conversion_function(\nFoo, lambda x, *args, **kwargs: x.value())\n\ntf.keras.__internal__.utils.register_symbolic_tensor_type(Foo)\n\n# User-land.\nlayer = tf.keras.layers.Lambda(lambda input_: Foo(input_))\n```\n\nArgs:\ncls: A `class` type which shall be regarded as a symbolic `Tensor`.", "source": "github-repos"} {"code": "def run_ops(state, serial=False, no_wait=False):\n state.deploying = True\n if serial:\n _run_serial_ops(state)\n elif no_wait:\n _run_no_wait_ops(state)\n for op_hash in state.get_op_order():\n _run_single_op(state, op_hash)", "docstring": "Runs all operations across all servers in a configurable manner.\n\nArgs:\nstate (``pyinfra.api.State`` obj): the deploy state to execute\nserial (boolean): whether to run operations host by host\nno_wait (boolean): whether to wait for all hosts between operations", "source": "codesearchnet"} {"code": "def _stop_trial(self, trial, error=False, error_msg=None, stop_logger=True):\n if stop_logger:\n trial.close_logger()\n if error:\n self.set_status(trial, Trial.ERROR)\n else:\n self.set_status(trial, Trial.TERMINATED)\n try:\n trial.write_error_log(error_msg)\n if (hasattr(trial, 'runner') and trial.runner):\n if ((not error) and self._reuse_actors and (self._cached_actor is None)):\n logger.debug('Reusing actor for {}'.format(trial.runner))\n self._cached_actor = trial.runner\n else:\n logger.info('Destroying actor for trial {}. If your trainable is slow to initialize, consider setting reuse_actors=True to reduce actor creation overheads.'.format(trial))\n trial.runner.stop.remote()\n trial.runner.__ray_terminate__.remote()\n except Exception:\n logger.exception('Error stopping runner for Trial %s', str(trial))\n self.set_status(trial, Trial.ERROR)\n finally:\n trial.runner = None", "docstring": "Stops this trial.\n\nStops this trial, releasing all allocating resources. If stopping the\ntrial fails, the run will be marked as terminated in error, but no\nexception will be thrown.\n\nArgs:\nerror (bool): Whether to mark this trial as terminated in error.\nerror_msg (str): Optional error message.\nstop_logger (bool): Whether to shut down the trial logger.", "source": "codesearchnet"} {"code": "def count_up_to(self, limit):\n return state_ops.count_up_to(self._variable, limit=limit)", "docstring": "Increments this variable until it reaches `limit`.\n\nWhen that Op is run it tries to increment the variable by `1`. If\nincrementing the variable would bring it above `limit` then the Op raises\nthe exception `OutOfRangeError`.\n\nIf no error is raised, the Op outputs the value of the variable before\nthe increment.\n\nThis is essentially a shortcut for `count_up_to(self, limit)`.\n\nArgs:\nlimit: value at which incrementing the variable raises an error.\n\nReturns:\nA `Tensor` that will hold the variable value before the increment. 
If no\nother Op modifies this variable, the values produced will all be\ndistinct.", "source": "github-repos"} {"code": "def _ParseShellItem(self, parser_mediator, shell_item):\n \n path_segment = self._ParseShellItemPathSegment(shell_item)\n self._path_segments.append(path_segment)\n\n event_data = shell_item_events.ShellItemFileEntryEventData()\n event_data.origin = self._origin\n event_data.shell_item_path = self.CopyToPath()\n\n if isinstance(shell_item, pyfwsi.file_entry):\n event_data.name = shell_item.name\n\n for extension_block in shell_item.extension_blocks:\n if isinstance(extension_block, pyfwsi.file_entry_extension):\n long_name = extension_block.long_name\n localized_name = extension_block.localized_name\n file_reference = extension_block.file_reference\n if file_reference:\n file_reference = '{0:d}-{1:d}'.format(\n file_reference & 0xffffffffffff, file_reference >> 48)\n\n event_data.file_reference = file_reference\n event_data.localized_name = localized_name\n event_data.long_name = long_name\n\n fat_date_time = extension_block.get_creation_time_as_integer()\n if fat_date_time != 0:\n date_time = dfdatetime_fat_date_time.FATDateTime(\n fat_date_time=fat_date_time)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_CREATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n fat_date_time = extension_block.get_access_time_as_integer()\n if fat_date_time != 0:\n date_time = dfdatetime_fat_date_time.FATDateTime(\n fat_date_time=fat_date_time)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n fat_date_time = shell_item.get_modification_time_as_integer()\n if fat_date_time != 0:\n date_time = dfdatetime_fat_date_time.FATDateTime(\n fat_date_time=fat_date_time)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a shell item.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nshell_item (pyfwsi.item): shell item.", "source": "juraj-google-style"} {"code": "def get_max_bond_lengths(structure, el_radius_updates=None):\n \n \n jmnn = JmolNN(el_radius_updates=el_radius_updates)\n\n bonds_lens = {}\n els = sorted(structure.composition.elements, key=lambda x: x.Z)\n\n for i1 in range(len(els)):\n for i2 in range(len(els) - i1):\n bonds_lens[els[i1], els[i1 + i2]] = jmnn.get_max_bond_distance(\n els[i1].symbol, els[i1 + i2].symbol)\n\n return bonds_lens", "docstring": "Provides max bond length estimates for a structure based on the JMol\ntable and algorithms.\n\nArgs:\nstructure: (structure)\nel_radius_updates: (dict) symbol->float to update atomic radii\n\nReturns: (dict) - (Element1, Element2) -> float. 
The two elements are\nordered by Z.", "source": "juraj-google-style"} {"code": "def get_dataset_end_date_as_datetime(self):\n dataset_date = self.data.get('dataset_date', None)\n if dataset_date:\n if ('-' in dataset_date):\n dataset_date = dataset_date.split('-')[1]\n return datetime.strptime(dataset_date, '%m/%d/%Y')\n return None", "docstring": "Get dataset end date as datetime.datetime object.\n\nReturns:\nOptional[datetime.datetime]: Dataset date in datetime object or None if no date is set", "source": "codesearchnet"} {"code": "def __init__(self, comma_compat=False):\n \n self._comma_compat = comma_compat\n name = 'whitespace or comma' if self._comma_compat else 'whitespace'\n super(WhitespaceSeparatedListParser, self).__init__(None, name)", "docstring": "Initializer.\n\nArgs:\ncomma_compat: bool, whether to support comma as an additional separator.\nIf False then only whitespace is supported. This is intended only for\nbackwards compatibility with flags that used to be comma-separated.", "source": "juraj-google-style"} {"code": "def remove_user(self, group, username):\n \n try:\n self.lookup_id(group)\n except ldap_tools.exceptions.InvalidResult as err: \n raise err from None\n\n operation = {'memberUid': [(ldap3.MODIFY_DELETE, [username])]}\n self.client.modify(self.__distinguished_name(group), operation)", "docstring": "Remove a user from the specified LDAP group.\n\nArgs:\ngroup: Name of group to update\nusername: Username of user to remove\n\nRaises:\nldap_tools.exceptions.InvalidResult:\nResults of the query were invalid. The actual exception raised\ninherits from InvalidResult. See #lookup_id for more info.", "source": "juraj-google-style"} {"code": "def get_status_tree(root_pipeline_id):\n \n root_pipeline_key = db.Key.from_path(_PipelineRecord.kind(), root_pipeline_id)\n root_pipeline_record = db.get(root_pipeline_key)\n if root_pipeline_record is None:\n raise PipelineStatusError(\n 'Could not find pipeline ID \"%s\"' % root_pipeline_id)\n\n \n \n actual_root_key = _PipelineRecord.root_pipeline.get_value_for_datastore(\n root_pipeline_record)\n if actual_root_key != root_pipeline_key:\n root_pipeline_key = actual_root_key\n root_pipeline_id = root_pipeline_key.id_or_name()\n root_pipeline_record = db.get(root_pipeline_key)\n if not root_pipeline_record:\n raise PipelineStatusError(\n 'Could not find pipeline ID \"%s\"' % root_pipeline_id)\n\n \n queries = {}\n for model in (_PipelineRecord, _SlotRecord, _BarrierRecord, _StatusRecord):\n queries[model] = model.all().filter(\n 'root_pipeline =', root_pipeline_key).run(batch_size=1000)\n\n found_pipeline_dict = dict(\n (stage.key(), stage) for stage in queries[_PipelineRecord])\n found_slot_dict = dict(\n (slot.key(), slot) for slot in queries[_SlotRecord])\n found_barrier_dict = dict(\n (barrier.key(), barrier) for barrier in queries[_BarrierRecord])\n found_status_dict = dict(\n (status.key(), status) for status in queries[_StatusRecord])\n\n \n \n valid_pipeline_keys = set([root_pipeline_key])\n slot_filler_dict = {} \n expand_stack = [root_pipeline_record]\n while expand_stack:\n old_stack = expand_stack\n expand_stack = []\n for pipeline_record in old_stack:\n for child_pipeline_key in pipeline_record.fanned_out:\n \n \n child_pipeline_record = found_pipeline_dict.get(child_pipeline_key)\n if child_pipeline_record is None:\n raise PipelineStatusError(\n 'Pipeline ID \"%s\" points to child ID \"%s\" which does not exist.'\n % (pipeline_record.key().name(), child_pipeline_key.name()))\n 
expand_stack.append(child_pipeline_record)\n valid_pipeline_keys.add(child_pipeline_key)\n\n \n \n \n child_outputs = child_pipeline_record.params['output_slots']\n for output_slot_key in child_outputs.itervalues():\n slot_filler_dict[db.Key(output_slot_key)] = child_pipeline_key\n\n output = {\n 'rootPipelineId': root_pipeline_id,\n 'slots': {},\n 'pipelines': {},\n }\n\n for pipeline_key in found_pipeline_dict.keys():\n if pipeline_key not in valid_pipeline_keys:\n continue\n output['pipelines'][pipeline_key.name()] = _get_internal_status(\n pipeline_key=pipeline_key,\n pipeline_dict=found_pipeline_dict,\n slot_dict=found_slot_dict,\n barrier_dict=found_barrier_dict,\n status_dict=found_status_dict)\n\n for slot_key, filler_pipeline_key in slot_filler_dict.iteritems():\n output['slots'][str(slot_key)] = _get_internal_slot(\n slot_key=slot_key,\n filler_pipeline_key=filler_pipeline_key,\n slot_dict=found_slot_dict)\n\n return output", "docstring": "Gets the full status tree of a pipeline.\n\nArgs:\nroot_pipeline_id: The pipeline ID to get status for.\n\nReturns:\nDictionary with the keys:\nrootPipelineId: The ID of the root pipeline.\nslots: Mapping of slot IDs to result of from _get_internal_slot.\npipelines: Mapping of pipeline IDs to result of _get_internal_status.\n\nRaises:\nPipelineStatusError if any input is bad.", "source": "juraj-google-style"} {"code": "def _prefix_from_ip_string(self, ip_str):\n \n \n try:\n ip_int = self._ip_int_from_string(ip_str)\n except AddressValueError:\n self._report_invalid_netmask(ip_str)\n\n \n \n \n try:\n return self._prefix_from_ip_int(ip_int)\n except ValueError:\n pass\n\n \n ip_int ^= self._ALL_ONES\n try:\n return self._prefix_from_ip_int(ip_int)\n except ValueError:\n self._report_invalid_netmask(ip_str)", "docstring": "Turn a netmask/hostmask string into a prefix length\n\nArgs:\nip_str: The netmask/hostmask to be converted\n\nReturns:\nAn integer, the prefix length.\n\nRaises:\nNetmaskValueError: If the input is not a valid netmask/hostmask", "source": "juraj-google-style"} {"code": "def set_servo_position(self, goalposition, goaltime, led):\n goalposition_msb = (int(goalposition) >> 8)\n goalposition_lsb = (int(goalposition) & 255)\n data = []\n data.append(12)\n data.append(self.servoid)\n data.append(I_JOG_REQ)\n data.append(goalposition_lsb)\n data.append(goalposition_msb)\n data.append(led)\n data.append(self.servoid)\n data.append(goaltime)\n send_data(data)", "docstring": "Set the position of Herkulex\n\nEnable torque using torque_on function before calling this\n\nArgs:\n\ngoalposition (int): The desired position, min-0 & max-1023\ngoaltime (int): the time taken to move from present\nposition to goalposition\nled (int): the LED color\n0x00 LED off\n0x04 GREEN\n0x08 BLUE\n0x10 RED", "source": "codesearchnet"} {"code": "def aggregate_repo(repo, args, sem, err_queue):\n try:\n logger.debug(('%s' % repo))\n dirmatch = args.dirmatch\n if (not match_dir(repo.cwd, dirmatch)):\n logger.info('Skip %s', repo.cwd)\n return\n if (args.command == 'aggregate'):\n repo.aggregate()\n if args.do_push:\n repo.push()\n elif (args.command == 'show-closed-prs'):\n repo.show_closed_prs()\n elif (args.command == 'show-all-prs'):\n repo.show_all_prs()\n except Exception:\n err_queue.put_nowait(sys.exc_info())\n finally:\n sem.release()", "docstring": "Aggregate one repo according to the args.\n\nArgs:\nrepo (Repo): The repository to aggregate.\nargs (argparse.Namespace): CLI arguments.", "source": "codesearchnet"} {"code": "def send(self, value):\n if 
((not self.block) and (self._stdin is not None)):\n self.writer.write('{}\\n'.format(value))\n return self\n else:\n raise TypeError(NON_BLOCKING_ERROR_MESSAGE)", "docstring": "Send text to stdin. Can only be used on non blocking commands\n\nArgs:\nvalue (str): the text to write on stdin\nRaises:\nTypeError: If command is blocking\nReturns:\nShellCommand: return this ShellCommand instance for chaining", "source": "codesearchnet"} {"code": "def add_argument_to(self, parser):\n from devassistant.cli.devassistant_argparse import DefaultIffUsedActionFactory\n if isinstance(self.kwargs.get('action', ''), list):\n if (self.kwargs['action'][0] == 'default_iff_used'):\n self.kwargs['action'] = DefaultIffUsedActionFactory.generate_action(self.kwargs['action'][1])\n self.kwargs.pop('preserved', None)\n try:\n parser.add_argument(*self.flags, **self.kwargs)\n except Exception as ex:\n problem = \"Error while adding argument '{name}': {error}\".format(name=self.name, error=repr(ex))\n raise exceptions.ExecutionException(problem)", "docstring": "Used by cli to add this as an argument to argparse parser.\n\nArgs:\nparser: parser to add this argument to", "source": "codesearchnet"} {"code": "def convert_sum(\n params, w_name, scope_name, inputs, layers, weights, names\n):\n \n print('Converting Sum ...')\n\n def target_layer(x):\n import keras.backend as K\n return K.sum(x)\n\n lambda_layer = keras.layers.Lambda(target_layer)\n layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert sum.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"} {"code": "def __init__(self, transitions):\n \n self._transitions = {}\n self._order = []\n for trdef in transitions:\n self._transitions[trdef.name] = trdef\n self._order.append(trdef.name)", "docstring": "Create a TransitionList.\n\nArgs:\ntransitions (list of (name, source, target) tuple): the transitions\nto include.", "source": "juraj-google-style"} {"code": "def tan(cls, x: 'TensorFluent') -> 'TensorFluent':\n \n return cls._unary_op(x, tf.tan, tf.float32)", "docstring": "Returns a TensorFluent for the tan function.\n\nArgs:\nx: The input fluent.\n\nReturns:\nA TensorFluent wrapping the tan function.", "source": "juraj-google-style"} {"code": "def WatchMetadata(self, handler, metadata_key='', recursive=True, timeout=None):\n while True:\n response = self._HandleMetadataUpdate(metadata_key=metadata_key, recursive=recursive, wait=True, timeout=timeout)\n try:\n handler(response)\n except Exception as e:\n self.logger.exception('Exception calling the response handler. 
%s.', e)", "docstring": "Watch for changes to the contents of the metadata server.\n\nArgs:\nhandler: callable, a function to call with the updated metadata contents.\nmetadata_key: string, the metadata key to watch for changes.\nrecursive: bool, True if we should recursively watch for metadata changes.\ntimeout: int, timeout in seconds for returning metadata output.", "source": "codesearchnet"} {"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n if token_ids_1 is None:\n return [1] + [0] * len(token_ids_0) + [1]\n return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"} {"code": "def _upload_code(s3_conn, bucket, prefix, name, contents, content_hash, payload_acl):\n logger.debug('lambda: ZIP hash: %s', content_hash)\n key = '{}lambda-{}-{}.zip'.format(prefix, name, content_hash)\n if _head_object(s3_conn, bucket, key):\n logger.info('lambda: object %s already exists, not uploading', key)\n else:\n logger.info('lambda: uploading object %s', key)\n s3_conn.put_object(Bucket=bucket, Key=key, Body=contents, ContentType='application/zip', ACL=payload_acl)\n return Code(S3Bucket=bucket, S3Key=key)", "docstring": "Upload a ZIP file to S3 for use by Lambda.\n\nThe key used for the upload will be unique based on the checksum of the\ncontents. No changes will be made if the contents in S3 already match the\nexpected contents.\n\nArgs:\ns3_conn (botocore.client.S3): S3 connection to use for operations.\nbucket (str): name of the bucket to create.\nprefix (str): S3 prefix to prepend to the constructed key name for\nthe uploaded file\nname (str): desired name of the Lambda function. 
Will be used to\nconstruct a key name for the uploaded file.\ncontents (str): byte string with the content of the file upload.\ncontent_hash (str): md5 hash of the contents to be uploaded.\npayload_acl (str): The canned S3 object ACL to be applied to the\nuploaded payload\n\nReturns:\ntroposphere.awslambda.Code: CloudFormation Lambda Code object,\npointing to the uploaded payload in S3.\n\nRaises:\nbotocore.exceptions.ClientError: any error from boto3 is passed\nthrough.", "source": "codesearchnet"} {"code": "def GreaterThanOrEqualTo(self, value):\n \n self._awql = self._CreateSingleValueCondition(value, '>=')\n return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"greater than or equal to\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"} {"code": "def assert_no_legacy_layers(layers):\n legacy_layers = [l for l in layers if getattr(l, '_is_legacy_layer', None)]\n if legacy_layers:\n layer_str = '\\n'.join((' ' + str(l) for l in legacy_layers))\n raise TypeError('The following are legacy tf.layers.Layers:\\n{}\\nTo use keras as a framework (for instance using the Network, Model, or Sequential classes), please use the tf.keras.layers implementation instead. (Or, if writing custom layers, subclass from tf.keras.layers rather than tf.layers)'.format(layer_str))", "docstring": "Prevent tf.layers.Layers from being used with Keras.\n\nCertain legacy layers inherit from their keras analogs; however they are\nnot supported with keras and can lead to subtle and hard to diagnose bugs.\n\nArgs:\nlayers: A list of layers to check\n\nRaises:\nTypeError: If any elements of layers are tf.layers.Layers", "source": "github-repos"} {"code": "def get_path(self, temp_ver):\n \n if temp_ver not in self:\n raise RuntimeError(\n 'Template: {} not present'.format(temp_ver.name)\n )\n return self._prefixed(temp_ver.name)", "docstring": "Get the path of the given version in this store\n\nArgs:\ntemp_ver TemplateVersion: version to look for\n\nReturns:\nstr: The path to the template version inside the store\n\nRaises:\nRuntimeError: if the template is not in the store", "source": "juraj-google-style"} {"code": "def _check_full_tensor_value(self, tensor_value, wall_time, op_type, output_slot, execution_index=None, graph_execution_trace_index=None):\n size = np.size(tensor_value)\n if not size or not np.issubdtype(tensor_value.dtype, np.floating):\n return\n is_inf = np.isinf(tensor_value)\n num_neg_inf = np.count_nonzero(np.logical_and(is_inf, np.less(tensor_value, 0.0)))\n num_pos_inf = np.count_nonzero(np.logical_and(is_inf, np.greater(tensor_value, 0.0)))\n num_nan = np.count_nonzero(np.isnan(tensor_value))\n if num_neg_inf or num_pos_inf or num_nan:\n self._alerts.append(InfNanAlert(wall_time, op_type, output_slot, size=size, num_neg_inf=num_neg_inf, num_pos_inf=num_pos_inf, num_nan=num_nan, execution_index=execution_index, graph_execution_trace_index=graph_execution_trace_index))", "docstring": "Check a full tensor value.\n\nAppends to the list of alerts if any inf or nan is found in the full tensor\nvalue.\n\nArgs:\ntensor_value: The full tensor value as a `np.ndarray`.\nwall_time: Wall timestamp for the execution event that generated the\ntensor value.\nop_type: Op type executed.\noutput_slot: The output slot of the op.\nexecution_index: Index to the top-level execution event.\ngraph_execution_trace_index: Index to the intra-graph execution trace\n(if applicable.)", 
"source": "github-repos"} {"code": "def power(self, n):\n if (not isinstance(n, (int, np.integer))):\n raise QiskitError('Can only power with integer powers.')\n if (self._input_dim != self._output_dim):\n raise QiskitError('Can only power with input_dim = output_dim.')\n return SuperOp(np.linalg.matrix_power(self._data, n), self.input_dims(), self.output_dims())", "docstring": "Return the compose of a QuantumChannel with itself n times.\n\nArgs:\nn (int): compute the matrix power of the superoperator matrix.\n\nReturns:\nSuperOp: the n-times composition channel as a SuperOp object.\n\nRaises:\nQiskitError: if the input and output dimensions of the\nQuantumChannel are not equal, or the power is not an integer.", "source": "codesearchnet"} {"code": "def _check_consistent_returns(self, node):\n \n \n explicit_returns = [\n _node for _node in self._return_nodes[node.name] if _node.value is not None\n ]\n if not explicit_returns:\n return\n if len(explicit_returns) == len(\n self._return_nodes[node.name]\n ) and self._is_node_return_ended(node):\n return\n self.add_message(\"inconsistent-return-statements\", node=node)", "docstring": "Check that all return statements inside a function are consistent.\n\nReturn statements are consistent if:\n- all returns are explicit and if there is no implicit return;\n- all returns are empty and if there is, possibly, an implicit return.\n\nArgs:\nnode (astroid.FunctionDef): the function holding the return statements.", "source": "juraj-google-style"} {"code": "def skip_if(condition: Union[Callable[[], bool], bool]) -> Callable[[_F], _F]:\n\n def real_skip_if(fn: _F) -> _F:\n\n def wrapper(*args, **kwargs):\n if callable(condition):\n skip = condition()\n else:\n skip = condition\n if not skip:\n return fn(*args, **kwargs)\n return wrapper\n return real_skip_if", "docstring": "Skips the decorated function if condition is or evaluates to True.\n\nArgs:\ncondition: Either an expression that can be used in \"if not condition\"\nstatement, or a callable whose result should be a boolean.\n\nReturns:\nThe wrapped function", "source": "github-repos"} {"code": "def mesh_axis_to_tensor_axis(self, mesh_ndims):\n ta2ma = self._tensor_axis_to_mesh_axis\n return tuple([(ta2ma.index(mesh_axis) if (mesh_axis in ta2ma) else None) for mesh_axis in xrange(mesh_ndims)])", "docstring": "For each mesh axis, which Tensor axis maps to it.\n\nArgs:\nmesh_ndims: int.\n\nReturns:\nTuple of optional integers, with length mesh_ndims.", "source": "codesearchnet"} {"code": "def add_maps(self, parent, root_path=\"\"):\n \n for mapsource in self.map_folders[root_path]['maps']:\n parent.append(self.get_network_link(mapsource))\n for folder in self.map_folders[root_path]['folders']:\n kml_folder_obj = kml_folder(folder)\n parent.append(kml_folder_obj)\n self.add_maps(parent=kml_folder_obj, root_path=F_SEP.join((root_path, folder)))", "docstring": "Recursively add maps in a folder hierarchy.\n\nArgs:\nparent (KMLElement): KMLElement to which we want to append child folders or maps respectively\nroot_path (str): path of 'parent'", "source": "juraj-google-style"} {"code": "def get_soap_structure(obj, alp, bet, rCut=5.0, nMax=5, Lmax=5, crossOver=True, all_atomtypes=None, eta=1.0):\n Hpos = obj.get_positions()\n arrsoap = get_soap_locals(obj, Hpos, alp, bet, rCut, nMax, Lmax, crossOver, all_atomtypes=all_atomtypes, eta=eta)\n return arrsoap", "docstring": "Get the RBF basis SOAP output for atoms in a finite structure.\n\nArgs:\nobj(ase.Atoms): Atomic structure for which the SOAP output 
is\ncalculated.\nalp: Alphas\nbet: Betas\nrCut: Radial cutoff.\nnMax: Maximum nmber of radial basis functions\nLmax: Maximum spherical harmonics degree\ncrossOver:\nall_atomtypes: Can be used to specify the atomic elements for which to\ncalculate the output. If given the output is calculated only for the\ngiven species.\neta: The gaussian smearing width.\n\nReturns:\nnp.ndarray: SOAP output for the given structure.", "source": "codesearchnet"} {"code": "def match_main(self, text, pattern, loc):\n if ((text == None) or (pattern == None)):\n raise ValueError('Null inputs. (match_main)')\n loc = max(0, min(loc, len(text)))\n if (text == pattern):\n return 0\n elif (not text):\n return (- 1)\n elif (text[loc:(loc + len(pattern))] == pattern):\n return loc\n else:\n match = self.match_bitap(text, pattern, loc)\n return match", "docstring": "Locate the best instance of 'pattern' in 'text' near 'loc'.\n\nArgs:\ntext: The text to search.\npattern: The pattern to search for.\nloc: The location to search around.\n\nReturns:\nBest match index or -1.", "source": "codesearchnet"} {"code": "def get_num_bytes(self, batch: Sequence[numpy.ndarray]) -> int:\n return sum((sys.getsizeof(element) for element in batch))", "docstring": "Returns:\nThe number of bytes of data for a batch.", "source": "github-repos"} {"code": "def __init__(self, config_files, mask_surface=True, mask_quality=True, **kwargs):\n \n self.pressure_dataset_names = defaultdict(list)\n super(NUCAPSReader, self).__init__(config_files,\n **kwargs)\n self.mask_surface = self.info.get('mask_surface', mask_surface)\n self.mask_quality = self.info.get('mask_quality', mask_quality)", "docstring": "Configure reader behavior.\n\nArgs:\nmask_surface (boolean): mask anything below the surface pressure\nmask_quality (boolean): mask anything where the `Quality_Flag` metadata is ``!= 1``.", "source": "juraj-google-style"} {"code": "def List(self, request, global_params=None):\n config = self.GetMethodConfig('List')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Lists existing `BuildTrigger`s. 
This API is experimental.\n\nArgs:\nrequest: (CloudbuildProjectsTriggersListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListBuildTriggersResponse) The response message.", "source": "github-repos"} {"code": "def in_cross_replica_context():\n return _get_per_thread_mode().cross_replica_context is not None", "docstring": "Returns `True` if in a cross-replica context.\n\nSee `tf.distribute.get_replica_context` for details.\n\n```\nassert not tf.distribute.in_cross_replica_context()\nwith strategy.scope():\nassert tf.distribute.in_cross_replica_context()\n\ndef f():\nassert not tf.distribute.in_cross_replica_context()\n\nstrategy.run(f)\n```\n\nReturns:\n`True` if in a cross-replica context (`get_replica_context()` returns\n`None`), or `False` if in a replica context (`get_replica_context()` returns\nnon-`None`).", "source": "github-repos"} {"code": "def categorical_case(pmf, fns, rand=None):\n \n rand = tf.random_uniform([]) if rand is None else rand\n cmf = tf.pad(tf.cumsum(pmf), [(1, 0)])\n cmf = [cmf[i] for i in range(len(fns) + 1)]\n preds = [(rand >= a) & (rand < b) for a, b in zip(cmf[:-1], cmf[1:])]\n return tf.case(list(zip(preds, fns)), exclusive=True)", "docstring": "Returns the outputs of fns[i] with probability pmf[i].\n\nArgs:\npmf: A 1-D tensor of probabilities, the probability mass function.\nfns: A list of callables that return tensors, same length as pmf.\nrand: An optional scalar between 0.0 and 1.0, the output of an RNG.\n\nReturns:\nA tensor, the output of fns[i] with probability pmf[i].", "source": "juraj-google-style"} {"code": "def label_total_duration(self, label_list_ids=None):\n duration = collections.defaultdict(float)\n for label_list in self.label_lists.values():\n if ((label_list_ids is None) or (label_list.idx in label_list_ids)):\n for (label_value, label_duration) in label_list.label_total_duration().items():\n duration[label_value] += label_duration\n return duration", "docstring": "Return a dictionary containing the number of seconds,\nevery label-value is occurring in this utterance.\n\nArgs:\nlabel_list_ids (list): If not None, only labels from label-lists\nwith an id contained in this\nlist are considered.\n\nReturns:\ndict: A dictionary containing the number of seconds\nwith the label-value as key.", "source": "codesearchnet"} {"code": "def Normal(cls,\n mean: 'TensorFluent', variance: 'TensorFluent',\n batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:\n \n if mean.scope != variance.scope:\n raise ValueError('Normal distribution: parameters must have same scope!')\n loc = mean.tensor\n scale = tf.sqrt(variance.tensor)\n dist = tf.distributions.Normal(loc, scale)\n batch = mean.batch or variance.batch\n if not batch and batch_size is not None:\n t = dist.sample(batch_size)\n batch = True\n else:\n t = dist.sample()\n scope = mean.scope.as_list()\n return (dist, TensorFluent(t, scope, batch=batch))", "docstring": "Returns a TensorFluent for the Normal sampling op with given mean and variance.\n\nArgs:\nmean: The mean parameter of the Normal distribution.\nvariance: The variance parameter of the Normal distribution.\nbatch_size: The size of the batch (optional).\n\nReturns:\nThe Normal distribution and a TensorFluent sample drawn from the distribution.\n\nRaises:\nValueError: If parameters do not have the same scope.", "source": "juraj-google-style"} {"code": "def get_policy(self, name):\n \n\n address = _create_policy_address(name)\n policy_list_bytes = None\n\n try:\n 
policy_list_bytes = self._state_view.get(address=address)\n except KeyError:\n return None\n\n if policy_list_bytes is not None:\n policy_list = _create_from_bytes(policy_list_bytes,\n identity_pb2.PolicyList)\n for policy in policy_list.policies:\n if policy.name == name:\n return policy\n return None", "docstring": "Get a single Policy by name.\n\nArgs:\nname (str): The name of the Policy.\n\nReturns:\n(:obj:`Policy`) The Policy that matches the name.", "source": "juraj-google-style"} {"code": "def _maybe_set_tnp_casting(xnp: numpy_utils.NpModule) -> None:\n if not numpy_utils.lazy.has_tf or xnp is not numpy_utils.lazy.tnp:\n return\n if not numpy_utils.lazy.is_tnp_enabled:\n from tensorflow.python.ops.numpy_ops import np_dtypes\n if not np_dtypes.is_prefer_float32():\n np_dtypes.set_prefer_float32(True)\n msg = epy.dedent('\\n WARNING: Using array types for TF but without numpy mode enabled. It\\n is recommended to activate numpy mode as:\\n\\n import tensorflow.experimental.numpy as tnp\\n tnp.experimental_enable_numpy_behavior(prefer_float32=True)\\n ')\n print(msg)", "docstring": "If TF numpy mode is not set, make sure `tnp.asarray(1.)` is `tf.float32`.\n\nIf user uses TF without numpy mode, it will create casting issues (for\nexample: `tf.float64 + tf.float32` will raise an error).\nTo limit the errors encountered, we set `tnp.asarray(1.)` to `tf.float32`\ninstead of `tf.float64`.\n\nIf numpy mode is already activated, then no need to do anything, as\n`tf.float64 + tf.float32` will support auto-casting, like Jax and Numpy.\n\nArgs:\nxnp: numpy module.", "source": "github-repos"} {"code": "def GetNetworkAddressWithTime(self):\n if ((self.port is not None) and (self.host is not None) and (self.Version is not None)):\n return NetworkAddressWithTime(self.host, self.port, self.Version.Services)\n return None", "docstring": "Get a network address object.\n\nReturns:\nNetworkAddressWithTime: if we have a connection to a node.\nNone: otherwise.", "source": "codesearchnet"} {"code": "def write(self, obj: (BioCDocument or BioCPassage or BioCSentence)):\n if ((self.level == DOCUMENT) and (not isinstance(obj, BioCDocument))):\n raise ValueError\n if ((self.level == PASSAGE) and (not isinstance(obj, BioCPassage))):\n raise ValueError\n if ((self.level == SENTENCE) and (not isinstance(obj, BioCSentence))):\n raise ValueError\n self.writer.write(BioCJSONEncoder().default(obj))", "docstring": "Encode and write a single object.\n\nArgs:\nobj: an instance of BioCDocument, BioCPassage, or BioCSentence\n\nReturns:", "source": "codesearchnet"} {"code": "def get_metrics(weights: Array, dataset: Dataset) -> Metrics:\n pred = dataset.X.dot(weights) > 0\n actual = dataset.Y\n tp: int = jnp.sum(jnp.logical_and(pred == 1, actual == 1))\n tn: int = jnp.sum(jnp.logical_and(pred == 0, actual == 0))\n fp: int = jnp.sum(jnp.logical_and(pred == 1, actual == 0))\n fn: int = jnp.sum(jnp.logical_and(pred == 0, actual == 1))\n loss: float = cross_entropy_loss(weights, dataset.X, dataset.Y)\n accuracy = (tp + tn) / (tp + tn + fp + fn)\n precision = tp / (tp + fp + EPSILON)\n recall = tp / (tp + fn + EPSILON)\n fscore = 2 * precision * recall / (precision + recall + EPSILON)\n return Metrics(tp=tp, tn=tn, fp=fp, fn=fn, accuracy=accuracy, precision=precision, recall=recall, fscore=fscore, loss=loss)", "docstring": "Gets evaluation metrics from the learned weight vector and the dataset.\n\nArgs:\nweights: A weight vector.\ndataset: A dataset.\n\nReturns:\nresult (Metrics): The metrics over the given weights and the 
dataset.", "source": "github-repos"} {"code": "def __call__(self, argv, known_only=False):\n \n if not argv:\n \n \n self.MarkAsParsed()\n self._AssertAllValidators()\n return []\n\n \n program_name = argv[0]\n args = self.ReadFlagsFromFiles(argv[1:], force_gnu=False)\n\n \n unknown_flags, unparsed_args, undefok = self._ParseArgs(args, known_only)\n\n \n \n for name, value in unknown_flags:\n if name in undefok:\n continue\n\n suggestions = _helpers.GetFlagSuggestions(\n name, self.RegisteredFlags())\n raise exceptions.UnrecognizedFlagError(\n name, value, suggestions=suggestions)\n\n self.MarkAsParsed()\n self._AssertAllValidators()\n return [program_name] + unparsed_args", "docstring": "Parses flags from argv; stores parsed flags into this FlagValues object.\n\nAll unparsed arguments are returned.\n\nArgs:\nargv: argument list. Can be of any type that may be converted to a list.\nknown_only: parse and remove known flags, return rest untouched.\n\nReturns:\nThe list of arguments not parsed as options, including argv[0].\n\nRaises:\nError: on any parsing error.\nValueError: on flag value parsing error.", "source": "juraj-google-style"} {"code": "def get_utt_regions(self):\n regions = []\n current_offset = 0\n for utt_idx in sorted(self.utt_ids):\n offset = current_offset\n num_frames = []\n refs = []\n for cnt in self.containers:\n num_frames.append(cnt.get(utt_idx).shape[0])\n refs.append(cnt.get(utt_idx, mem_map=True))\n if (len(set(num_frames)) != 1):\n raise ValueError('Utterance {} has not the same number of frames in all containers!'.format(utt_idx))\n num_chunks = math.ceil((num_frames[0] / float(self.frames_per_chunk)))\n region = (offset, num_chunks, refs)\n regions.append(region)\n current_offset += num_chunks\n return regions", "docstring": "Return the regions of all utterances, assuming all utterances are concatenated.\nIt is assumed that the utterances are sorted in ascending order for concatenation.\n\nA region is defined by offset (in chunks), length (num-chunks) and\na list of references to the utterance datasets in the containers.\n\nReturns:\nlist: List of with a tuple for every utterances containing the region info.", "source": "codesearchnet"} {"code": "def _desired_sdk_filename_in_staging_location(sdk_location) -> str:\n if sdk_location.endswith('.whl'):\n _, wheel_filename = FileSystems.split(sdk_location)\n if wheel_filename.startswith('apache_beam'):\n return wheel_filename\n else:\n raise RuntimeError('Unrecognized SDK wheel file: %s' % sdk_location)\n else:\n return names.STAGED_SDK_SOURCES_FILENAME", "docstring": "Returns the name that SDK file should have in the staging location.\nArgs:\nsdk_location: Full path to SDK file.", "source": "github-repos"} {"code": "def factor_cmap(field_name, palette, factors, start=0, end=None, nan_color='gray'):\n return field(field_name, CategoricalColorMapper(palette=palette, factors=factors, start=start, end=end, nan_color=nan_color))", "docstring": "Create a ``DataSpec`` dict that applies a client-side\n``CategoricalColorMapper`` transformation to a ``ColumnDataSource``\ncolumn.\n\nArgs:\nfield_name (str) : a field name to configure ``DataSpec`` with\n\npalette (seq[color]) : a list of colors to use for colormapping\n\nfactors (seq) : a sequences of categorical factors corresponding to\nthe palette\n\nstart (int, optional) : a start slice index to apply when the column\ndata has factors with multiple levels. 
(default: 0)\n\nend (int, optional) : an end slice index to apply when the column\ndata has factors with multiple levels. (default: None)\n\nnan_color (color, optional) : a default color to use when mapping data\nfrom a column does not succeed (default: \"gray\")\n\nReturns:\ndict", "source": "codesearchnet"} {"code": "def simple_generate(cls, create, **kwargs):\n \n strategy = enums.CREATE_STRATEGY if create else enums.BUILD_STRATEGY\n return cls.generate(strategy, **kwargs)", "docstring": "Generate a new instance.\n\nThe instance will be either 'built' or 'created'.\n\nArgs:\ncreate (bool): whether to 'build' or 'create' the instance.\n\nReturns:\nobject: the generated instance", "source": "juraj-google-style"} {"code": "def setup(self, universe):\n \n \n \n try:\n prices = universe[self.name]\n except KeyError:\n prices = None\n\n \n if prices is not None:\n self._prices = prices\n self.data = pd.DataFrame(index=universe.index,\n columns=['value', 'position'],\n data=0.0)\n self._prices_set = True\n else:\n self.data = pd.DataFrame(index=universe.index,\n columns=['price', 'value', 'position'])\n self._prices = self.data['price']\n self._prices_set = False\n\n self._values = self.data['value']\n self._positions = self.data['position']\n\n \n self.data['outlay'] = 0.\n self._outlays = self.data['outlay']", "docstring": "Setup Security with universe. Speeds up future runs.\n\nArgs:\n* universe (DataFrame): DataFrame of prices with security's name as\none of the columns.", "source": "juraj-google-style"} {"code": "def match_tracks(self, model_tracks, obs_tracks, unique_matches=True, closest_matches=False):\n \n if unique_matches:\n pairings = self.track_matcher.match_tracks(model_tracks, obs_tracks, closest_matches=closest_matches)\n else:\n pairings = self.track_matcher.neighbor_matches(model_tracks, obs_tracks)\n return pairings", "docstring": "Match forecast and observed tracks.\n\nArgs:\nmodel_tracks:\nobs_tracks:\nunique_matches:\nclosest_matches:\n\nReturns:", "source": "juraj-google-style"} {"code": "def load_model(model_cls_path, model_cls_name, model_load_args):\n \n spec = importlib.util.spec_from_file_location('active_model',\n model_cls_path)\n model_module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(model_module)\n model_cls = getattr(model_module, model_cls_name)\n model = model_cls()\n if not isinstance(model, BaseModel):\n warnings.warn(\"Loaded model '%s' at '%s' is not an instance of %r\"\n % (model_cls_name, model_cls_path, BaseModel))\n model.load(**model_load_args)\n return model", "docstring": "Get an instance of the described model.\n\nArgs:\nmodel_cls_path: Path to the module in which the model class\nis defined.\nmodel_cls_name: Name of the model class.\nmodel_load_args: Dictionary of args to pass to the `load` method\nof the model instance.\n\nReturns:\nAn instance of :class:`.models.model.BaseModel` or subclass", "source": "juraj-google-style"} {"code": "def merge(self, other_roc):\n \n if other_roc.thresholds.size == self.thresholds.size and np.all(other_roc.thresholds == self.thresholds):\n self.contingency_tables += other_roc.contingency_tables\n else:\n print(\"Input table thresholds do not match.\")", "docstring": "Ingest the values of another DistributedROC object into this one and update the statistics inplace.\n\nArgs:\nother_roc: another DistributedROC object.", "source": "juraj-google-style"} {"code": "def _scope_vals(self, vals):\n if isinstance(vals, (list, tuple)):\n return vals\n elif isinstance(vals, dict):\n return 
vals.values()\n else:\n return [vals]", "docstring": "Return a list of values to pass to `name_scope()`.\n\nArgs:\nvals: A tensor, a list or tuple of tensors, or a dictionary.\n\nReturns:\nThe values in vals as a list.", "source": "github-repos"} {"code": "def __init__(self, instance_id: str = None):\n \n self.instance_id = instance_id\n if instance_id:\n self.channel_id += \"", "docstring": "Initialize the channel.\nInherited initializer must call the \"super init\" method\nat the beginning.\n\nArgs:\ninstance_id: Instance ID of the channel.", "source": "juraj-google-style"} {"code": "def is_same_vectors(self, vec_set1, vec_set2):\n \n if (np.absolute(rel_strain(vec_set1[0], vec_set2[0])) >\n self.max_length_tol):\n return False\n elif (np.absolute(rel_strain(vec_set1[1], vec_set2[1])) >\n self.max_length_tol):\n return False\n elif (np.absolute(rel_angle(vec_set1, vec_set2)) >\n self.max_angle_tol):\n return False\n else:\n return True", "docstring": "Determine if two sets of vectors are the same within length and angle\ntolerances\n\nArgs:\nvec_set1(array[array]): an array of two vectors\nvec_set2(array[array]): second array of two vectors", "source": "juraj-google-style"} {"code": "def watermark_text(image, text, corner=2):\n FONT_PATH = ''\n if resource_exists(__name__, 'resources/fonts/SourceSansPro-Regular.ttf'):\n FONT_PATH = resource_filename(__name__, 'resources/fonts/SourceSansPro-Regular.ttf')\n padding = 5\n was_P = (image.mode == 'P')\n was_L = (image.mode == 'L')\n if (image.mode not in ['RGB', 'RGBA']):\n if (image.format in ['JPG', 'JPEG']):\n image = image.convert('RGB')\n else:\n image = image.convert('RGBA')\n img_draw = ImageDraw.Draw(image)\n fontsize = 1\n img_fraction = 0.05\n try:\n font = ImageFont.truetype(font=FONT_PATH, size=fontsize)\n was_over = False\n inc = 2\n while True:\n if (font.getsize(text)[1] > (img_fraction * image.height)):\n if (not was_over):\n was_over = True\n inc = (- 1)\n elif was_over:\n break\n fontsize += inc\n font = ImageFont.truetype(font=FONT_PATH, size=fontsize)\n fontsize -= 1\n font = ImageFont.truetype(font=FONT_PATH, size=fontsize)\n except:\n print('Failed to load Aperture font. Using default font instead.')\n font = ImageFont.load_default()\n pos = get_pos(corner, image.size, font.getsize(text), padding)\n img_draw.text(((pos[0] - 1), pos[1]), text, font=font, fill='black')\n img_draw.text(((pos[0] + 1), pos[1]), text, font=font, fill='black')\n img_draw.text((pos[0], (pos[1] - 1)), text, font=font, fill='black')\n img_draw.text((pos[0], (pos[1] + 1)), text, font=font, fill='black')\n img_draw.text(pos, text, font=font, fill='white')\n cleanup_resources()\n del img_draw\n if was_P:\n image = image.convert('P', palette=Image.ADAPTIVE, colors=256)\n elif was_L:\n image = image.convert('L')\n return image", "docstring": "Adds a text watermark to an instance of a PIL Image.\n\nThe text will be sized so that the height of the text is\nroughly 1/20th the height of the base image. The text will\nbe white with a thin black outline.\n\nArgs:\nimage: An instance of a PIL Image. This is the base image.\ntext: Text to use as a watermark.\ncorner: An integer between 0 and 3 representing the corner\nwhere the watermark image should be placed on top of the\nbase image. 0 is top left, 1 is top right, 2 is bottom\nright and 3 is bottom left. 
NOTE: Right now, this is\npermanently set to 2 (bottom right) but this can be\nchanged in the future by either creating a new cmd-line\nflag or putting this in the config file.\n\nReturns: The watermarked image", "source": "codesearchnet"} {"code": "def orbit2frame(name, ref_orbit, orientation=None, center=None, bypass=False):\n if (orientation is None):\n orientation = ref_orbit.frame.orientation\n elif (orientation.upper() in ('RSW', 'LVLH')):\n orientation = 'QSW'\n elif (orientation.upper() not in ('QSW', 'TNW')):\n raise ValueError((\"Unknown orientation '%s'\" % orientation))\n if (center is None):\n center = Earth\n\n def _to_parent_frame(self):\n 'Conversion from orbit frame to parent frame\\n '\n offset = ref_orbit.propagate(self.date).base.copy()\n if (orientation.upper() in ('QSW', 'TNW')):\n orb = ref_orbit.propagate(self.date)\n m = (to_qsw(orb) if (orientation.upper() == 'QSW') else to_tnw(orb))\n rotation = Frame._convert(m, m).T\n else:\n rotation = np.identity(6)\n return (rotation, offset)\n mtd = ('_to_%s' % ref_orbit.frame.__name__)\n dct = {mtd: _to_parent_frame, 'orientation': orientation, 'center': center, 'bypass': bypass}\n cls = _MetaFrame(name, (Frame,), dct)\n (cls + ref_orbit.frame)\n return cls", "docstring": "Create a frame based on a Orbit or Ephem object.\n\nArgs:\nname (str): Name to give the created frame\nref_orbit (Orbit or Ephem):\norientation (str): Orientation of the created frame\nbypass (bool): By-pass the warning when creating a frame with an already\ntaken name\nReturn:\nFrame:\n\nIf orientation is ``None``, the new frame will keep the orientation of the\nreference frame of the Orbit and move along with the orbit.\nOther acceptable values are ``\"QSW\"`` (and its aliases \"LVLH\" and \"RSW\") or ``\"TNW\"``.\n\nSee :py:func:`~beyond.frames.local.to_qsw` and :py:func:`~beyond.frames.local.to_tnw`\nfor informations regarding these orientations.", "source": "codesearchnet"} {"code": "def _use_tables(objs):\n \n from ..models.widgets import TableWidget\n return _any(objs, lambda obj: isinstance(obj, TableWidget))", "docstring": "Whether a collection of Bokeh objects contains a TableWidget\n\nArgs:\nobjs (seq[Model or Document]) :\n\nReturns:\nbool", "source": "juraj-google-style"} {"code": "def assert_sequential_execution(order, operations):\n operations = sorted(operations, key=lambda op: order[op])\n for i in range(len(operations) - 1):\n if not _exists_dependency(operations[i], operations[i + 1]):\n print(operations[i].graph.as_graph_def())\n raise AssertionError('No dependency between {} and {}. Graph is dumped to stdout.'.format(operations[i].name, operations[i + 1].name))", "docstring": "Asserts there's a deterministic execution order between the operations.\n\nArgs:\norder: a map from a tf.Operation to its topological order.\noperations: a list of operations that should be executed sequentially. 
It\ncan be given in any order.", "source": "github-repos"} {"code": "def distribution(namespace: Union[Type, str], name: str) -> 'Metrics.DelegatingDistribution':\n namespace = Metrics.get_namespace(namespace)\n return Metrics.DelegatingDistribution(MetricName(namespace, name))", "docstring": "Obtains or creates a Distribution metric.\n\nDistribution metrics are restricted to integer-only distributions.\n\nArgs:\nnamespace: A class or string that gives the namespace to a metric\nname: A string that gives a unique name to a metric\n\nReturns:\nA Distribution object.", "source": "github-repos"} {"code": "def _sync_to_uri(self, uri):\n cmd_cp = 'aws s3 cp {} {} --recursive --profile {}'.format(self.s3_version_uri, uri, self.env)\n cmd_sync = 'aws s3 sync {} {} --delete --exact-timestamps --profile {}'.format(self.s3_version_uri, uri, self.env)\n cp_result = subprocess.run(cmd_cp, check=True, shell=True, stdout=subprocess.PIPE)\n LOG.debug('Copy to %s before sync output: %s', uri, cp_result.stdout)\n LOG.info('Copied version %s to %s', self.version, uri)\n sync_result = subprocess.run(cmd_sync, check=True, shell=True, stdout=subprocess.PIPE)\n LOG.debug('Sync to %s command output: %s', uri, sync_result.stdout)\n LOG.info('Synced version %s to %s', self.version, uri)", "docstring": "Copy and sync versioned directory to uri in S3.\n\nArgs:\nuri (str): S3 URI to sync version to.", "source": "codesearchnet"} {"code": "def _send_offset_requests(self, timestamps):\n \n timestamps_by_node = collections.defaultdict(dict)\n for partition, timestamp in six.iteritems(timestamps):\n node_id = self._client.cluster.leader_for_partition(partition)\n if node_id is None:\n self._client.add_topic(partition.topic)\n log.debug(\"Partition %s is unknown for fetching offset,\"\n \" wait for metadata refresh\", partition)\n return Future().failure(Errors.StaleMetadata(partition))\n elif node_id == -1:\n log.debug(\"Leader for partition %s unavailable for fetching \"\n \"offset, wait for metadata refresh\", partition)\n return Future().failure(\n Errors.LeaderNotAvailableError(partition))\n else:\n timestamps_by_node[node_id][partition] = timestamp\n\n \n list_offsets_future = Future()\n responses = []\n node_count = len(timestamps_by_node)\n\n def on_success(value):\n responses.append(value)\n if len(responses) == node_count:\n offsets = {}\n for r in responses:\n offsets.update(r)\n list_offsets_future.success(offsets)\n\n def on_fail(err):\n if not list_offsets_future.is_done:\n list_offsets_future.failure(err)\n\n for node_id, timestamps in six.iteritems(timestamps_by_node):\n _f = self._send_offset_request(node_id, timestamps)\n _f.add_callback(on_success)\n _f.add_errback(on_fail)\n return list_offsets_future", "docstring": "Fetch offsets for each partition in timestamps dict. 
This may send\nrequest to multiple nodes, based on who is Leader for partition.\n\nArguments:\ntimestamps (dict): {TopicPartition: int} mapping of fetching\ntimestamps.\n\nReturns:\nFuture: resolves to a mapping of retrieved offsets", "source": "juraj-google-style"} {"code": "def set_attribute(self, node: cfg.CFGNode, obj: abstract.BaseValue, name: str, value: cfg.Variable) -> cfg.CFGNode:\n if not self._check_writable(obj, name):\n return node\n if self.ctx.vm.frame is not None and obj is self.ctx.vm.frame.f_globals:\n for v in value.data:\n v.update_official_name(name)\n if isinstance(obj, abstract.Empty):\n return node\n elif isinstance(obj, abstract.Module):\n log.warning('Ignoring overwrite of %s.%s', obj.name, name)\n return node\n elif isinstance(obj, (abstract.StaticMethod, abstract.ClassMethod)):\n return self.set_attribute(node, obj.method, name, value)\n elif isinstance(obj, abstract.SimpleValue):\n return self._set_member(node, obj, name, value)\n elif isinstance(obj, abstract.BoundFunction):\n return self.set_attribute(node, obj.underlying, name, value)\n elif isinstance(obj, abstract.Unsolvable):\n return node\n elif isinstance(obj, abstract.Unknown):\n if name in obj.members:\n obj.members[name].PasteVariable(value, node)\n else:\n obj.members[name] = value.AssignToNewVariable(node)\n return node\n elif isinstance(obj, abstract.TypeParameterInstance):\n nodes = []\n for v in obj.instance.get_instance_type_parameter(obj.name).data:\n nodes.append(self.set_attribute(node, v, name, value))\n return self.ctx.join_cfg_nodes(nodes) if nodes else node\n elif isinstance(obj, abstract.Union):\n for option in obj.options:\n node = self.set_attribute(node, option, name, value)\n return node\n else:\n raise NotImplementedError(obj.__class__.__name__)", "docstring": "Set an attribute on an object.\n\nThe attribute might already have a Variable in it and in that case we cannot\noverwrite it and instead need to add the elements of the new variable to the\nold variable.\n\nArgs:\nnode: The current CFG node.\nobj: The object.\nname: The name of the attribute to set.\nvalue: The Variable to store in it.\n\nReturns:\nA (possibly changed) CFG node.\nRaises:\nAttributeError: If the attribute cannot be set.\nNotImplementedError: If attribute setting is not implemented for obj.", "source": "github-repos"} {"code": "def svd(x, full_matrices=True, compute_uv=True):\n if any_symbolic_tensors((x,)):\n return SVD(full_matrices, compute_uv).symbolic_call(x)\n return _svd(x, full_matrices, compute_uv)", "docstring": "Computes the singular value decomposition of a matrix.\n\nArgs:\nx: Input tensor of shape `(..., M, N)`.\n\nReturns:\nA tuple of three tensors: a tensor of shape `(..., M, M)` containing the\nleft singular vectors, a tensor of shape `(..., M, N)` containing the\nsingular values and a tensor of shape `(..., N, N)` containing the\nright singular vectors.", "source": "github-repos"} {"code": "async def is_change_done(self, zone, change_id):\n zone_id = self.get_managed_zone(zone)\n url = f'{self._base_url}/managedZones/{zone_id}/changes/{change_id}'\n resp = (await self.get_json(url))\n return (resp['status'] == self.DNS_CHANGES_DONE)", "docstring": "Check if a DNS change has completed.\n\nArgs:\nzone (str): DNS zone of the change.\nchange_id (str): Identifier of the change.\nReturns:\nBoolean", "source": "codesearchnet"} {"code": "def __lt__(self, other):\n \n if not isinstance(other, interface.DateTimeValues):\n raise ValueError('Other not an instance of DateTimeValues')\n\n if not 
isinstance(other, SemanticTime):\n return True\n\n return self._SORT_ORDER < other._SORT_ORDER", "docstring": "Determines if the date time values are less than other.\n\nArgs:\nother (DateTimeValues): date time values to compare against.\n\nReturns:\nbool: True if the date time values are less than other.\n\nRaises:\nValueError: if other is not an instance of DateTimeValues.", "source": "juraj-google-style"} {"code": "def get_nets_lacnic(self, response):\n nets = []\n for match in re.finditer('^(inetnum|inet6num|route):[^\\\\S\\\\n]+(.+?,[^\\\\S\\\\n].+|.+)$', response, re.MULTILINE):\n try:\n net = copy.deepcopy(BASE_NET)\n net_range = match.group(2).strip()\n try:\n net['range'] = net['range'] = ('{0} - {1}'.format(ip_network(net_range)[0].__str__(), ip_network(net_range)[(- 1)].__str__()) if ('/' in net_range) else net_range)\n except ValueError:\n net['range'] = net_range\n temp = []\n for addr in net_range.split(', '):\n count = addr.count('.')\n if ((count is not 0) and (count < 4)):\n addr_split = addr.strip().split('/')\n for i in range((count + 1), 4):\n addr_split[0] += '.0'\n addr = '/'.join(addr_split)\n temp.append(ip_network(addr.strip()).__str__())\n net['cidr'] = ', '.join(temp)\n net['start'] = match.start()\n net['end'] = match.end()\n nets.append(net)\n except ValueError:\n pass\n return nets", "docstring": "The function for parsing network blocks from LACNIC whois data.\n\nArgs:\nresponse (:obj:`str`): The response from the LACNIC whois server.\n\nReturns:\nlist of dict: Mapping of networks with start and end positions.\n\n::\n\n[{\n'cidr' (str) - The network routing block\n'start' (int) - The starting point of the network\n'end' (int) - The endpoint point of the network\n}]", "source": "codesearchnet"} {"code": "def add_update(self, updates, inputs=None):\n if inputs is not None:\n tf_logging.warning('`add_update` `inputs` kwarg has been deprecated. You no longer need to pass a value to `inputs` as it is being automatically inferred.')\n call_context = base_layer_utils.call_context()\n if call_context.in_keras_graph:\n return\n if not call_context.frozen:\n for update in nest.flatten(updates):\n if callable(update):\n update()", "docstring": "Add update op(s), potentially dependent on layer inputs.\n\nWeight updates (for instance, the updates of the moving mean and variance\nin a BatchNormalization layer) may be dependent on the inputs passed\nwhen calling a layer. Hence, when reusing the same layer on\ndifferent inputs `a` and `b`, some entries in `layer.updates` may be\ndependent on `a` and some on `b`. This method automatically keeps track\nof dependencies.\n\nThis call is ignored when eager execution is enabled (in that case, variable\nupdates are run on the fly and thus do not need to be tracked for later\nexecution).\n\nArgs:\nupdates: Update op, or list/tuple of update ops, or zero-arg callable\nthat returns an update op. 
A zero-arg callable should be passed in\norder to disable running the updates by setting `trainable=False`\non this Layer, when executing in Eager mode.\ninputs: Deprecated, will be automatically inferred.", "source": "github-repos"} {"code": "def _GetArgsAndFlagsString(spec, metadata):\n args_with_no_defaults = spec.args[:len(spec.args) - len(spec.defaults)]\n args_with_defaults = spec.args[len(spec.args) - len(spec.defaults):]\n accepts_positional_args = metadata.get(decorators.ACCEPTS_POSITIONAL_ARGS)\n arg_and_flag_strings = []\n if args_with_no_defaults:\n if accepts_positional_args:\n arg_strings = [formatting.Underline(arg.upper()) for arg in args_with_no_defaults]\n else:\n arg_strings = [f'--{arg}={formatting.Underline(arg.upper())}' for arg in args_with_no_defaults]\n arg_and_flag_strings.extend(arg_strings)\n if args_with_defaults or spec.kwonlyargs or spec.varkw:\n arg_and_flag_strings.append('')\n if spec.varargs:\n varargs_underlined = formatting.Underline(spec.varargs.upper())\n varargs_string = f'[{varargs_underlined}]...'\n arg_and_flag_strings.append(varargs_string)\n return ' '.join(arg_and_flag_strings)", "docstring": "The args and flags string for showing how to call a function.\n\nIf positional arguments are accepted, the args will be shown as positional.\nE.g. \"ARG1 ARG2 [--flag=FLAG]\"\n\nIf positional arguments are disallowed, the args will be shown with flags\nsyntax.\nE.g. \"--arg1=ARG1 [--flag=FLAG]\"\n\nArgs:\nspec: The full arg spec for the component to construct the args and flags\nstring for.\nmetadata: Metadata for the component, including whether it accepts\npositional arguments.\n\nReturns:\nThe constructed args and flags string.", "source": "github-repos"} {"code": "def parse_document_id(chrom, pos, ref, alt, variant_type, case_id):\n \n return generate_md5_key([chrom, pos, ref, alt, variant_type, case_id])", "docstring": "Parse the unique document id for a variant.\n\nThis will always be unique in the database.\n\nArgs:\nchrom(str)\npos(str)\nref(str)\nalt(str)\nvariant_type(str): 'clinical' or 'research'\ncase_id(str): unqiue family id\n\nReturns:\ndocument_id(str): The unique document id in an md5 string", "source": "juraj-google-style"} {"code": "def classes_file(flag_leaf=False):\n \n if __flag_first:\n __setup()\n\n if not flag_leaf:\n return _classes_file\n\n return [cls for cls in _classes_file if cls not in _classes_file_superclass]", "docstring": "All known File* classes\n\nArgs:\nflag_leaf: returns only classes that do not have subclasses\n(\"leaf\" nodes as in a class tree graph)", "source": "juraj-google-style"} {"code": "def _alt_inner_shape(self, new_inner_rank):\n if new_inner_rank == 0:\n raise ValueError('new_inner_rank cannot be zero')\n elif self.inner_rank == 0:\n raise ValueError('old inner_rank cannot be zero')\n elif new_inner_rank == self.inner_rank:\n return self.inner_shape\n elif new_inner_rank < self.inner_rank:\n if self._static_inner_shape.is_fully_defined():\n return _alt_inner_shape_from_tensor_shape(self._static_inner_shape, self.dtype, new_inner_rank)\n first_dimension = self._num_slices_in_dimension(-new_inner_rank)\n if new_inner_rank == 1:\n return array_ops.expand_dims(first_dimension, 0)\n remaining_dimensions = self.inner_shape[1 - new_inner_rank:]\n return array_ops.concat([array_ops.expand_dims(first_dimension, 0), remaining_dimensions], axis=0)\n else:\n assert new_inner_rank > self.inner_rank\n new_dimensions = new_inner_rank - self.inner_rank\n if any([not x.is_uniform() for x in 
self.row_partitions[-new_dimensions:]]):\n raise ValueError('Cannot get an inner shape over a ragged dimension')\n first_dimension = self._num_slices_in_dimension(-new_inner_rank)\n new_dimensions = new_inner_rank - self.inner_rank\n new_dims = [first_dimension] + [x.uniform_row_length() for x in self.row_partitions[-new_dimensions:]]\n return array_ops.concat([array_ops_stack.stack(new_dims), self.inner_shape[1:]], axis=0)", "docstring": "Get an alternative inner shape with higher or lower rank.\n\nFor the rank of the inner shape to be be higher, the last few ragged\ndimensions must have uniform_row_length.\n\nArgs:\nnew_inner_rank: the new rank of the inner_shape\n\nReturns:\nA new inner_shape of rank new_inner_rank.", "source": "github-repos"} {"code": "def QueueQueryTasks(self, queue, limit=1):\n \n prefix = DataStore.QUEUE_TASK_PREDICATE_PREFIX\n all_tasks = []\n\n for _, serialized, ts in self.ResolvePrefix(\n queue, prefix, timestamp=DataStore.ALL_TIMESTAMPS):\n task = rdf_flows.GrrMessage.FromSerializedString(serialized)\n task.leased_until = ts\n all_tasks.append(task)\n\n return all_tasks[:limit]", "docstring": "Retrieves tasks from a queue without leasing them.\n\nThis is good for a read only snapshot of the tasks.\n\nArgs:\nqueue: The task queue that this task belongs to, usually client.Queue()\nwhere client is the ClientURN object you want to schedule msgs on.\nlimit: Number of values to fetch.\n\nReturns:\nA list of Task() objects.", "source": "juraj-google-style"} {"code": "def _GetAction(self, action, text):\n if ('airportdProcessDLILEvent' in action):\n interface = text.split()[0]\n return 'Interface {0:s} turn up.'.format(interface)\n if ('doAutoJoin' in action):\n match = self._CONNECTED_RE.match(text)\n if match:\n ssid = match.group(1)[1:(- 1)]\n else:\n ssid = 'Unknown'\n return 'Wifi connected to SSID {0:s}'.format(ssid)\n if ('processSystemPSKAssoc' in action):\n wifi_parameters = self._WIFI_PARAMETERS_RE.search(text)\n if wifi_parameters:\n ssid = wifi_parameters.group(1)\n bssid = wifi_parameters.group(2)\n security = wifi_parameters.group(3)\n if (not ssid):\n ssid = 'Unknown'\n if (not bssid):\n bssid = 'Unknown'\n if (not security):\n security = 'Unknown'\n return 'New wifi configured. 
BSSID: {0:s}, SSID: {1:s}, Security: {2:s}.'.format(bssid, ssid, security)\n return text", "docstring": "Parse the well known actions for easy reading.\n\nArgs:\naction (str): the function or action called by the agent.\ntext (str): mac Wifi log text.\n\nReturns:\nstr: a formatted string representing the known (or common) action.\nIf the action is not known the original log text is returned.", "source": "codesearchnet"} {"code": "def _sign_threshold_signature_fulfillment(cls, input_, message, key_pairs):\n \n input_ = deepcopy(input_)\n message = sha3_256(message.encode())\n if input_.fulfills:\n message.update('{}{}'.format(\n input_.fulfills.txid, input_.fulfills.output).encode())\n\n for owner_before in set(input_.owners_before):\n \n \n\n \n \n \n\n \n \n ccffill = input_.fulfillment\n subffills = ccffill.get_subcondition_from_vk(\n base58.b58decode(owner_before))\n if not subffills:\n raise KeypairMismatchException('Public key {} cannot be found '\n 'in the fulfillment'\n .format(owner_before))\n try:\n private_key = key_pairs[owner_before]\n except KeyError:\n raise KeypairMismatchException('Public key {} is not a pair '\n 'to any of the private keys'\n .format(owner_before))\n\n \n \n for subffill in subffills:\n subffill.sign(\n message.digest(), base58.b58decode(private_key.encode()))\n return input_", "docstring": "Signs a ThresholdSha256.\n\nArgs:\ninput_ (:class:`~bigchaindb.common.transaction.\nInput`) The Input to be signed.\nmessage (str): The message to be signed\nkey_pairs (dict): The keys to sign the Transaction with.", "source": "juraj-google-style"} {"code": "def find_common_root(elements):\n \n if not elements:\n raise UserWarning(\"Can't find common root - no elements suplied.\")\n\n root_path = el_to_path_vector(elements.pop())\n\n for el in elements:\n el_path = el_to_path_vector(el)\n\n root_path = common_vector_root(root_path, el_path)\n\n if not root_path:\n raise UserWarning(\n \"Vectors without common root:\\n%s\" % str(el_path)\n )\n\n return root_path", "docstring": "Find root which is common for all `elements`.\n\nArgs:\nelements (list): List of double-linked HTMLElement objects.\n\nReturns:\nlist: Vector of HTMLElement containing path to common root.", "source": "juraj-google-style"} {"code": "def assert_equal_flattened(self, expected_results, actual_results):\n self.assertEqual(len(expected_results), len(actual_results))\n for i, expected_result in enumerate(expected_results):\n final_result = []\n actual_result = actual_results[i]\n for val in actual_result:\n final_result.extend(val.numpy())\n self.assertAllEqual(expected_result, final_result)", "docstring": "Asserts that flattened results are equal.\n\nDue to the number of replicas in the strategy, the output may have a\ndifferent structure and needs to be flattened for comparison.\n\nArgs:\nexpected_results: The results expected as a result of a computation.\nactual_results: The actual results of a computation.", "source": "github-repos"} {"code": "def set_file_to_upload(self, file_to_upload):\n \n \n if 'url' in self.data:\n del self.data['url']\n self.file_to_upload = file_to_upload", "docstring": "Delete any existing url and set the file uploaded to the local path provided\n\nArgs:\nfile_to_upload (str): Local path to file to upload\n\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def single_gate_matrix(gate, params=None):\n (theta, phi, lam) = map(float, single_gate_params(gate, params))\n return np.array([[np.cos((theta / 2)), ((- np.exp((1j * lam))) * np.sin((theta / 2)))], 
[(np.exp((1j * phi)) * np.sin((theta / 2))), (np.exp(((1j * phi) + (1j * lam))) * np.cos((theta / 2)))]])", "docstring": "Get the matrix for a single qubit.\n\nArgs:\ngate(str): the single qubit gate name\nparams(list): the operation parameters op['params']\nReturns:\narray: A numpy array representing the matrix", "source": "codesearchnet"} {"code": "def _prepare_host_call_fn(self, processed_t_fetches, op_fetches, graph, graph_summary_tag):\n if self._parameters.trace_dir is None:\n raise ValueError('Provide a trace_dir for tensor tracer in summary mode. --trace_dir=/model/dir')\n\n def _write_cache(step, event_file_suffix=None, **kwargs):\n \n file_suffix = _TT_EVENT_FILE_SUFFIX\n if event_file_suffix is not None:\n file_suffix = string_ops.string_join([file_suffix, event_file_suffix], separator='.')\n summary_write_ops = []\n summary_writer = summary.create_file_writer_v2(self._parameters.trace_dir, filename_suffix=file_suffix, max_queue=_TT_SUMMARY_MAX_QUEUE)\n graph.add_to_collection(TENSOR_TRACER_SUMMARY_COLLECTION, summary_writer)\n step_value = step[0]\n dt = step_value.dtype\n if dt.__ne__(dtypes.int64) and dt.__ne__(dtypes.uint64) and dt.__ne__(dtypes.float64):\n step_value = math_ops.cast(step_value, dtypes.int64)\n with summary_writer.as_default():\n summary_metadata = summary_pb2.SummaryMetadata(plugin_data=summary_pb2.SummaryMetadata.PluginData(plugin_name=_TT_TENSORBOARD_PLUGIN_NAME))\n for key, value in kwargs.items():\n if not self._parameters.collect_summary_per_core:\n if key == _TT_SUMMARY_TAG and value.shape.as_list()[0] != 1:\n value = self.aggregate_global_cache(value)\n with ops.control_dependencies([summary_writer.init()]):\n summary_write_ops.append(summary.write(_TT_SUMMARY_TAG + '/' + key + '.' + graph_summary_tag, value, metadata=summary_metadata, step=step_value))\n return control_flow_ops.group(summary_write_ops)\n global_step = training_util.get_or_create_global_step()\n step = array_ops.reshape(global_step, [1])\n self._host_call_fn = {}\n host_call_deps = op_fetches + [tensor.op for tensor in processed_t_fetches]\n caches_to_write = {}\n with ops.control_dependencies(host_call_deps):\n all_caches = self._cache_variable_for_graph(graph)\n for cache_name, cache_variable in all_caches.items():\n new_cache_shape = [1]\n new_cache_shape.extend(cache_variable.shape.as_list())\n cache = array_ops.reshape(cache_variable, new_cache_shape)\n caches_to_write[cache_name] = cache\n caches_to_write['step'] = step\n self._host_call_fn[_TT_HOSTCALL_KEY] = (_write_cache, caches_to_write)", "docstring": "Creates a host call function that will write the cache as tb summary.\n\nArgs:\nprocessed_t_fetches: List of tensor provided to session.run.\nop_fetches: List of operations provided to session.run.\ngraph: TensorFlow graph.\ngraph_summary_tag: the summary_tag name for the given graph.\nRaises:\nValueError if trace_dir is not set.", "source": "github-repos"} {"code": "def warn_once(self, msg, msg_name=None):\n assert isinstance(msg, str)\n msg_name = (msg_name if msg_name else msg)\n if (msg_name not in warnings_given):\n warnings.warn(msg)\n warnings_given.add(msg_name)", "docstring": "Prints a warning statement just once\n\nArgs:\nmsg: The warning message\nmsg_name: [optional] The name of the warning. 
If None, the msg_name\nwill be the msg itself.", "source": "codesearchnet"} {"code": "def from_lasio(cls, l, remap=None, funcs=None):\n params = {}\n funcs = (funcs or {})\n funcs['location'] = str\n for (field, (sect, code)) in las_fields['location'].items():\n params[field] = utils.lasio_get(l, sect, code, remap=remap, funcs=funcs)\n return cls(params)", "docstring": "Make a Location object from a lasio object. Assumes we're starting\nwith a lasio object, l.\n\nArgs:\nl (lasio).\nremap (dict): Optional. A dict of 'old': 'new' LAS field names.\nfuncs (dict): Optional. A dict of 'las field': function() for\nimplementing a transform before loading. Can be a lambda.\n\nReturns:\nLocation. An instance of this class.", "source": "codesearchnet"} {"code": "def get_port(self, id_or_uri, port_id_or_uri):\n uri = self._client.build_subresource_uri(id_or_uri, port_id_or_uri, 'ports')\n return self._client.get(uri)", "docstring": "Gets an interconnect port.\n\nArgs:\nid_or_uri: Can be either the interconnect id or uri.\nport_id_or_uri: The interconnect port id or uri.\n\nReturns:\ndict: The interconnect port.", "source": "codesearchnet"} {"code": "def _offset(value):\n \n o = int(value)\n if o == 0:\n return 0\n a = abs(o)\n s = a*36+(a%100)*24\n return (o", "docstring": "Parse timezone to offset in seconds.\n\nArgs:\nvalue: A timezone in the '+0000' format. An integer would also work.\n\nReturns:\nThe timezone offset from GMT in seconds as an integer.", "source": "juraj-google-style"} {"code": "def _is_trivial(node):\n trivial_node_types = (gast.Name, bool, str, gast.Add, gast.Sub, gast.Mult, gast.Div, gast.Mod, gast.Pow, gast.LShift, gast.RShift, gast.BitOr, gast.BitXor, gast.BitAnd, gast.FloorDiv, gast.Invert, gast.Not, gast.UAdd, gast.USub, gast.Eq, gast.NotEq, gast.Lt, gast.LtE, gast.Gt, gast.GtE, gast.Is, gast.IsNot, gast.In, gast.NotIn, gast.expr_context)\n if isinstance(node, trivial_node_types) and (not _is_py2_name_constant(node)):\n return True\n if gast_util.is_ellipsis(node):\n return True\n return False", "docstring": "Returns whether to consider the given node 'trivial'.\n\nThe definition of 'trivial' is a node that can't meaningfully be pulled out\ninto its own assignment statement.\n\nThis is surprisingly difficult to do robustly across versions of Python and\ngast, as the parsing of constants has changed, if I may, constantly.\n\nArgs:\nnode: An AST node to check for triviality\n\nReturns:\ntrivial: A Python `bool` indicating whether the node is trivial.", "source": "github-repos"} {"code": "def _process_image(filename, coder):\n with tf.gfile.FastGFile(filename, 'r') as f:\n image_data = f.read()\n if _is_png(filename):\n print(('Converting PNG to JPEG for %s' % filename))\n image_data = coder.png_to_jpeg(image_data)\n elif _is_cmyk(filename):\n print(('Converting CMYK to RGB for %s' % filename))\n image_data = coder.cmyk_to_rgb(image_data)\n image = coder.decode_jpeg(image_data)\n assert (len(image.shape) == 3)\n height = image.shape[0]\n width = image.shape[1]\n assert (image.shape[2] == 3)\n return (image_data, height, width)", "docstring": "Process a single image file.\n\nArgs:\nfilename: string, path to an image file e.g., '/path/to/example.JPG'.\ncoder: instance of ImageCoder to provide TensorFlow image coding utils.\nReturns:\nimage_buffer: string, JPEG encoding of RGB image.\nheight: integer, image height in pixels.\nwidth: integer, image width in pixels.", "source": "codesearchnet"} {"code": "def with_dependencies(self, checks):\n pass", "docstring": "Add dependencies to 
a _LayerBroadcaster.\n\nArgs:\nchecks: a list of ops that need to be run before any tensors from the\nBroadcaster are used.\n\nReturns:\na copy of this _LayerBroadcaster with dependencies added.", "source": "github-repos"} {"code": "def catch(func, *args, **kwargs):\n \n try:\n func(*args, **kwargs)\n except Exception as e:\n return e", "docstring": "Call the supplied function with the supplied arguments,\ncatching and returning any exception that it throws.\n\nArguments:\nfunc: the function to run.\n*args: positional arguments to pass into the function.\n**kwargs: keyword arguments to pass into the function.\nReturns:\nIf the function throws an exception, return the exception.\nIf the function does not throw an exception, return None.", "source": "juraj-google-style"} {"code": "def uses_star_args_or_kwargs_in_call(node):\n return uses_star_args_in_call(node) or uses_star_kwargs_in_call(node)", "docstring": "Check if an ast.Call node uses arbitrary-length *args or **kwargs.\n\nThis function works with the AST call node format of Python3.5+\nas well as the different AST format of earlier versions of Python.\n\nArgs:\nnode: The ast.Call node to check arg values for.\n\nReturns:\nTrue if the node uses starred variadic positional args or keyword args.\nFalse if it does not.", "source": "github-repos"} {"code": "def _get_userprofile_from_registry(user, sid):\n \n profile_dir = __utils__['reg.read_value'](\n 'HKEY_LOCAL_MACHINE',\n 'SOFTWARE\\\\Microsoft\\\\Windows NT\\\\CurrentVersion\\\\ProfileList\\\\{0}'.format(sid),\n 'ProfileImagePath'\n )['vdata']\n log.debug(\n 'user %s with sid=%s profile is located at \"%s\"',\n user, sid, profile_dir\n )\n return profile_dir", "docstring": "In case net user doesn't return the userprofile we can get it from the\nregistry\n\nArgs:\nuser (str): The user name, used in debug message\n\nsid (str): The sid to lookup in the registry\n\nReturns:\nstr: Profile directory", "source": "juraj-google-style"} {"code": "def _ReadRecordSchemaInformation(self, tables, file_object, record_offset):\n \n _ = self._ReadRecordHeader(file_object, record_offset)\n\n attribute_value_offsets = self._ReadRecordAttributeValueOffset(\n file_object, record_offset + 24, 2)\n\n if attribute_value_offsets != (0x21, 0x25):\n raise errors.ParseError('Unsupported record attribute value offsets')\n\n file_offset = file_object.tell()\n data_type_map = self._GetDataTypeMap('keychain_record_schema_information')\n\n record_values, _ = self._ReadStructureFromFileObject(\n file_object, file_offset, data_type_map)\n\n relation_name = record_values.relation_name.decode('ascii')\n\n table = KeychainDatabaseTable()\n table.relation_identifier = record_values.relation_identifier\n table.relation_name = relation_name\n\n tables[table.relation_identifier] = table\n\n table = tables.get(self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INFO, None)\n if not table:\n raise errors.ParseError('Missing CSSM_DL_DB_SCHEMA_INFO table.')\n\n record = collections.OrderedDict({\n 'RelationID': record_values.relation_identifier,\n 'RelationName': relation_name})\n\n table.records.append(record)", "docstring": "Reads a schema information (CSSM_DL_DB_SCHEMA_INFO) record.\n\nArgs:\ntables (dict[int, KeychainDatabaseTable]): tables per identifier.\nfile_object (file): file-like object.\nrecord_offset (int): offset of the record relative to the start of\nthe file.\n\nRaises:\nParseError: if the record cannot be read.", "source": "juraj-google-style"} {"code": "def submit_data(self, batch_id, halt_on_error=True):\n \n \n if 
self.halt_on_batch_error is not None:\n halt_on_error = self.halt_on_batch_error\n\n content = self.data\n \n self._batch_data_count = len(content.get('group')) + len(content.get('indicator'))\n self.tcex.log.info('Batch Size: {:,}'.format(self._batch_data_count))\n if content.get('group') or content.get('indicator'):\n headers = {'Content-Type': 'application/octet-stream'}\n try:\n r = self.tcex.session.post(\n '/v2/batch/{}'.format(batch_id), headers=headers, json=content\n )\n except Exception as e:\n self.tcex.handle_error(10520, [e], halt_on_error)\n if not r.ok or 'application/json' not in r.headers.get('content-type', ''):\n self.tcex.handle_error(10525, [r.status_code, r.text], halt_on_error)\n return r.json()\n return {}", "docstring": "Submit Batch request to ThreatConnect API.\nArgs:\nbatch_id (string): The batch id of the current job.", "source": "juraj-google-style"} {"code": "def get_snippet_client(self, name):\n if name in self._snippet_clients:\n return self._snippet_clients[name]", "docstring": "Gets the snippet client managed under a given name.\n\nArgs:\nname: string, the name of the snippet client under management.\n\nReturns:\nSnippetClient.", "source": "github-repos"} {"code": "def replace_method_name(self, signature_key, method_name, tags=None):\n if not signature_key:\n raise ValueError('`signature_key` must be defined.')\n if not method_name:\n raise ValueError('`method_name` must be defined.')\n if tags is not None and (not isinstance(tags, list)):\n tags = [tags]\n found_match = False\n for meta_graph_def in self._saved_model.meta_graphs:\n if tags is None or set(tags) == set(meta_graph_def.meta_info_def.tags):\n if signature_key not in meta_graph_def.signature_def:\n raise ValueError(f\"MetaGraphDef associated with tags {tags} does not have a signature_def with key: '{signature_key}'. This means either you specified the wrong signature key or forgot to put the signature_def with the corresponding key in your SavedModel.\")\n meta_graph_def.signature_def[signature_key].method_name = method_name\n found_match = True\n if not found_match:\n raise ValueError(f'MetaGraphDef associated with tags {tags} could not be found in SavedModel. This means either you specified invalid tags or your SavedModel does not have a MetaGraphDef with the specified tags.')", "docstring": "Replaces the method_name in the specified signature_def.\n\nThis will match and replace multiple sig defs iff tags is None (i.e when\nmultiple `MetaGraph`s have a signature_def with the same key).\nIf tags is not None, this will only replace a single signature_def in the\n`MetaGraph` with matching tags.\n\nArgs:\nsignature_key: Key of the signature_def to be updated.\nmethod_name: new method_name to replace the existing one.\ntags: A tag or sequence of tags identifying the `MetaGraph` to update. 
If\nNone, all meta graphs will be updated.\nRaises:\nValueError: if signature_key or method_name are not defined or\nif no metagraphs were found with the associated tags or\nif no meta graph has a signature_def that matches signature_key.", "source": "github-repos"} {"code": "def retry_handler(retries=0, delay=timedelta(), conditions=[]):\n \n delay_in_seconds = delay.total_seconds()\n return partial(retry_loop, retries, delay_in_seconds, conditions)", "docstring": "A simple wrapper function that creates a handler function by using\non the retry_loop function.\n\nArgs:\nretries (Integral): The number of times to retry if a failure occurs.\ndelay (timedelta, optional, 0 seconds): A timedelta representing\nthe amount time to delay between retries.\nconditions (list): A list of retry conditions.\nReturns:\nfunction: The retry_loop function partialed.", "source": "juraj-google-style"} {"code": "def _batch_rp_spec(rp_spec: RowPartitionSpec, batch_size: Optional[int]) -> RowPartitionSpec:\n if batch_size is None:\n return RowPartitionSpec(uniform_row_length=rp_spec.uniform_row_length, dtype=rp_spec.dtype)\n nrows = None if rp_spec.nrows is None else rp_spec.nrows * batch_size\n nvals = None if rp_spec.nvals is None else rp_spec.nvals * batch_size\n return RowPartitionSpec(nrows=nrows, nvals=nvals, uniform_row_length=rp_spec.uniform_row_length, dtype=rp_spec.dtype)", "docstring": "Batches a RowPartitionSpec.\n\nGiven a RowPartitionSpec and a batch_size, create a RowPartitionSpec that\nwill be the spec for the concatenation of batch_size RowPartitions.\n\nA RowPartition can be considered a transformation from a list of a given\nlength to a list of lists. Assume rp_a is a map from list_a to nlist_a,\nAnd rp_b is a map from list_b to nlist_b. concat(rp_a, rp_b) is a\ntransform of concat(list_a, list_b) to concat(nlist_a, nlist_b).\n\nIf batch_size is None, then have the spec be able to handle an arbitrary\nnumber of RowPartitions.\n\nArgs:\nrp_spec: a RowPartitionSpec for all the RowPartitions to be concatenated.\nbatch_size: the number of rp_specs to be concatenated.\n\nReturns:\na batched RowPartitionSpec.", "source": "github-repos"} {"code": "def exhaustive_iri_check(self, ontology: pd.DataFrame, iri_predicate: str, diff: bool=True) -> Tuple[list]:\n (inside, outside) = ([], [])\n header = (['Index'] + list(ontology.columns))\n for row in ontology.itertuples():\n row = {header[i]: val for (i, val) in enumerate(row)}\n entity_iri = row[iri_predicate]\n if isinstance(entity_iri, list):\n if (len(entity_iri) != 0):\n exit('Need to have only 1 iri in the cell from the onotology.')\n else:\n entity_iri = entity_iri[0]\n ilx_row = self.iri2row.get(entity_iri)\n if ilx_row:\n inside.append({'external_ontology_row': row, 'ilx_rows': [ilx_row]})\n else:\n outside.append(row)\n if diff:\n diff = self.__exhaustive_diff(inside)\n return (inside, outside, diff)\n return (inside, outside)", "docstring": "All entities with conflicting iris gets a full diff to see if they belong\n\nArgs:\nontology: pandas DataFrame created from an ontology where the colnames are predicates\nand if classes exist it is also thrown into a the colnames.\niri_predicate: usually in qname form and is the colname of the DataFrame for iri\nDefault is \"iri\" for graph2pandas module\ndiff: complete exhaustive diff if between curie matches... will take FOREVER if there are a lot -> n^2\nReturns:\ninside: entities that are inside of InterLex\noutside: entities NOT in InterLex\ndiff (optional): List[List[dict]]... 
so complicated but usefull diff between matches only", "source": "codesearchnet"} {"code": "def detect_language(index_page):\n dom = dhtmlparser.parseString(index_page)\n clean_content = dhtmlparser.removeTags(dom)\n lang = None\n try:\n lang = langdetect.detect(clean_content)\n except UnicodeDecodeError:\n lang = langdetect.detect(clean_content.decode('utf-8'))\n return SourceString(lang, source='langdetect')", "docstring": "Detect `languages` using `langdetect` library.\n\nArgs:\nindex_page (str): HTML content of the page you wish to analyze.\n\nReturns:\nobj: One :class:`.SourceString` object.", "source": "codesearchnet"} {"code": "def check_integrity(sakefile, settings):\n \n sprint = settings[\"sprint\"]\n error = settings[\"error\"]\n sprint(\"Call to check_integrity issued\", level=\"verbose\")\n if not sakefile:\n error(\"Sakefile is empty\")\n return False\n \n if len(sakefile.keys()) != len(set(sakefile.keys())):\n error(\"Sakefile contains duplicate targets\")\n return False\n for target in sakefile:\n if target == \"all\":\n if not check_target_integrity(target, sakefile[\"all\"], all=True):\n error(\"Failed to accept target 'all'\")\n return False\n continue\n if \"formula\" not in sakefile[target]:\n if not check_target_integrity(target, sakefile[target],\n meta=True):\n errmes = \"Failed to accept meta-target '{}'\".format(target)\n error(errmes)\n return False\n for atom_target in sakefile[target]:\n if atom_target == \"help\":\n continue\n if not check_target_integrity(atom_target,\n sakefile[target][atom_target],\n parent=target):\n errmes = \"Failed to accept target '{}'\\n\".format(\n atom_target)\n error(errmes)\n return False\n continue\n if not check_target_integrity(target, sakefile[target]):\n errmes = \"Failed to accept target '{}'\\n\".format(target)\n error(errmes)\n return False\n return True", "docstring": "Checks the format of the sakefile dictionary\nto ensure it conforms to specification\n\nArgs:\nA dictionary that is the parsed Sakefile (from sake.py)\nThe setting dictionary (for print functions)\nReturns:\nTrue if the Sakefile is conformant\nFalse if not", "source": "juraj-google-style"} {"code": "class AriaSharedExpertsMLP(nn.Module):\n\n def __init__(self, config: AriaTextConfig):\n super().__init__()\n self.config = config\n self.hidden_size = config.hidden_size\n self.intermediate_size = config.intermediate_size * config.moe_num_shared_experts\n self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)\n self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)\n self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)\n self.act_fn = ACT2FN[config.hidden_act]\n\n def forward(self, x):\n down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))\n return down_proj", "docstring": "Shared Expert MLP for shared experts.\n\nUnlike routed experts, shared experts process all tokens without routing.\nThis class reconfigures the intermediate size in comparison to the LlamaMLP.\n\nArgs:\nconfig (`AriaTextConfig`): Configuration object for the Aria language model.", "source": "github-repos"} {"code": "def notify_rollover(self, stream):\n \n\n self.offset -= 1\n\n if not self.matches(stream):\n return\n\n if self._count == 0:\n raise InternalError(\"BufferedStreamWalker out of sync with storage engine, count was wrong.\")\n\n self._count -= 1", "docstring": "Notify that a reading in the given stream was overwritten.\n\nArgs:\nstream (DataStream): 
The stream that had overwritten data.", "source": "juraj-google-style"} {"code": "def get_dihedral(self, i: int, j: int, k: int, l: int) -> float:\n \n v1 = self[k].coords - self[l].coords\n v2 = self[j].coords - self[k].coords\n v3 = self[i].coords - self[j].coords\n v23 = np.cross(v2, v3)\n v12 = np.cross(v1, v2)\n return math.degrees(math.atan2(np.linalg.norm(v2) * np.dot(v1, v23),\n np.dot(v12, v23)))", "docstring": "Returns dihedral angle specified by four sites.\n\nArgs:\ni: Index of first site\nj: Index of second site\nk: Index of third site\nl: Index of fourth site\n\nReturns:\nDihedral angle in degrees.", "source": "juraj-google-style"} {"code": "def _extend_object(parent, n, o, otype, fqdn):\n from inspect import ismodule, isclass\n pmodule = (parent if (ismodule(parent) or isclass(parent)) else None)\n try:\n if (otype == 'methods'):\n setattr(o.__func__, '__acornext__', None)\n else:\n setattr(o, '__acornext__', None)\n fqdn = _fqdn(o, recheck=True, pmodule=pmodule)\n return o\n except (TypeError, AttributeError):\n okey = id(o)\n if (okey not in _extended_objs):\n xobj = _create_extension(o, otype, fqdn, pmodule)\n fqdn = _fqdn(xobj, recheck=True, pmodule=pmodule)\n if (xobj is not None):\n _extended_objs[okey] = xobj\n try:\n setattr(parent, n, _extended_objs[okey])\n return _extended_objs[okey]\n except KeyError:\n msg.warn('Object extension failed: {} ({}).'.format(o, otype))", "docstring": "Extends the specified object if it needs to be extended. The method\nattempts to add an attribute to the object; if it fails, a new object is\ncreated that inherits all of `o` attributes, but is now a regular object\nthat can have attributes set.\n\nArgs:\nparent: has `n` in its `__dict__` attribute.\nn (str): object name attribute.\no (list): object instances to be extended.\notype (str): object types; one of [\"classes\", \"functions\", \"methods\",\n\"modules\"].\nfqdn (str): fully qualified name of the package that the object belongs\nto.", "source": "codesearchnet"} {"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n super(Nonce, self).read(input_stream, kmip_version=kmip_version)\n local_stream = BytearrayStream(input_stream.read(self.length))\n if self.is_tag_next(enums.Tags.NONCE_ID, local_stream):\n self._nonce_id = primitives.ByteString(tag=enums.Tags.NONCE_ID)\n self._nonce_id.read(local_stream, kmip_version=kmip_version)\n else:\n raise ValueError('Nonce encoding missing the nonce ID.')\n if self.is_tag_next(enums.Tags.NONCE_VALUE, local_stream):\n self._nonce_value = primitives.ByteString(tag=enums.Tags.NONCE_VALUE)\n self._nonce_value.read(local_stream, kmip_version=kmip_version)\n else:\n raise ValueError('Nonce encoding missing the nonce value.')\n self.is_oversized(local_stream)", "docstring": "Read the data encoding the Nonce struct and decode it into its\nconstituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. 
Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the nonce ID or nonce value is missing from\nthe encoding.", "source": "codesearchnet"} {"code": "def layer_statistics_dump(self, file: IO[str]) -> None:\n fields = ['op_name', 'tensor_idx'] + list(self._layer_debug_metrics.keys())\n if self._debug_options.layer_direct_compare_metrics is not None:\n fields += list(self._debug_options.layer_direct_compare_metrics.keys())\n fields += ['scale', 'zero_point', 'tensor_name']\n writer = csv.DictWriter(file, fields)\n writer.writeheader()\n if self.layer_statistics:\n for name, metrics in self.layer_statistics.items():\n data = metrics.copy()\n data['tensor_name'], _ = self._get_operand_name_and_index(name)\n data['tensor_idx'] = self._numeric_verify_op_details[name]['inputs'][0]\n data['op_name'] = self._quant_interpreter._get_op_details(self._defining_op[data['tensor_idx']])['op_name']\n details = self._quant_interpreter._get_tensor_details(data['tensor_idx'], subgraph_index=0)\n data['scale'], data['zero_point'] = (details['quantization_parameters']['scales'][0], details['quantization_parameters']['zero_points'][0])\n writer.writerow(data)", "docstring": "Dumps layer statistics into file, in csv format.\n\nArgs:\nfile: file, or file-like object to write.", "source": "github-repos"} {"code": "def parse_procheck(quality_directory):\n procheck_summaries = glob.glob(os.path.join(quality_directory, '*.sum'))\n if (len(procheck_summaries) == 0):\n return pd.DataFrame()\n all_procheck = {}\n for summ in procheck_summaries:\n structure_id = os.path.basename(summ).split('.sum')[0]\n procheck_dict = {}\n with open(summ) as f_in:\n lines = (line.rstrip() for line in f_in)\n lines = (line for line in lines if line)\n for line in lines:\n if (len(line.split()) > 1):\n if (line.split()[1] == 'Ramachandran'):\n procheck_dict['procheck_rama_favored'] = percentage_to_float(line.split()[3])\n procheck_dict['procheck_rama_allowed'] = percentage_to_float(line.split()[5])\n procheck_dict['procheck_rama_allowed_plus'] = percentage_to_float(line.split()[7])\n procheck_dict['procheck_rama_disallowed'] = percentage_to_float(line.split()[9])\n if (line.split()[1] == 'G-factors'):\n procheck_dict['procheck_gfac_dihedrals'] = line.split()[3]\n procheck_dict['procheck_gfac_covalent'] = line.split()[5]\n procheck_dict['procheck_gfac_overall'] = line.split()[7]\n all_procheck[structure_id] = procheck_dict\n DF_PROCHECK = pd.DataFrame.from_dict(all_procheck, orient='index')\n return DF_PROCHECK", "docstring": "Parses all PROCHECK files in a directory and returns a Pandas DataFrame of the results\n\nArgs:\nquality_directory: path to directory with PROCHECK output (.sum files)\n\nReturns:\nPandas DataFrame: Summary of PROCHECK results", "source": "codesearchnet"} {"code": "def parse_config_file(config_file, skip_unknown=False):\n for (reader, existence_check) in _FILE_READERS:\n if existence_check(config_file):\n with reader(config_file) as f:\n parse_config(f, skip_unknown=skip_unknown)\n return\n raise IOError('Unable to open file: {}'.format(config_file))", "docstring": "Parse a Gin config file.\n\nArgs:\nconfig_file: The path to a Gin config file.\nskip_unknown: A boolean indicating whether unknown configurables and imports\nshould be skipped instead of causing errors (alternatively a list of\nconfigurable names to skip if unknown). 
See `parse_config` for additional\ndetails.\n\nRaises:\nIOError: If `config_file` cannot be read using any register file reader.", "source": "codesearchnet"} {"code": "def user_activity_stats(self, username, format=None):\n request_url = '{}/api/0/user/{}/activity/stats'.format(self.instance, username)\n payload = {}\n if (username is not None):\n payload['username'] = username\n if (format is not None):\n payload['format'] = format\n return_value = self._call_api(request_url, params=payload)\n return return_value", "docstring": "Retrieve the activity stats about a specific user over the last year.\n\nParams:\nusername (string): filters the username of the user whose activity you are interested in.\nformat (string): Allows changing the of the date/time returned from iso format\nto unix timestamp Can be: timestamp or isoformat\nReturns:\ndict: A dictionary of activities done by a given user for all the projects\nfor a given Pagure instance.", "source": "codesearchnet"} {"code": "def transform(self, column):\n self.check_data_type()\n return pd.DataFrame({self.col_name: np.exp(column[self.col_name])})", "docstring": "Applies an exponential to values to turn them positive numbers.\n\nArgs:\ncolumn (pandas.DataFrame): Data to transform.\n\nReturns:\npd.DataFrame", "source": "codesearchnet"} {"code": "def create(self, master_course_id, coach_email, max_students_allowed, title, modules=None):\n \n payload = {\n 'master_course_id': master_course_id,\n 'coach_email': coach_email,\n 'max_students_allowed': max_students_allowed,\n 'display_name': title,\n }\n\n if modules is not None:\n payload['course_modules'] = modules\n\n resp = self.requester.post(\n parse.urljoin(self.base_url, '/api/ccx/v0/ccx/'),\n json=payload\n )\n\n try:\n resp.raise_for_status()\n except:\n log.error(resp.json())\n raise\n\n return resp.json()['ccx_course_id']", "docstring": "Creates a CCX\n\nArgs:\nmaster_course_id (str): edx course id of the master course\ncoach_email (str): email of the user to make a coach. This user must exist on edx.\nmax_students_allowed (int): Maximum number of students to allow in this ccx.\ntitle (str): Title of the CCX to be created\nmodules (optional list): A list of locator_ids (str) for the modules to enable.\n\nReturns:\nccx_id (str): The ID of the ccx.", "source": "juraj-google-style"} {"code": "def pylint_check(files):\n files = fs.wrap_paths(files)\n cfg_path = conf.get_path('lint.pylint_cfg', 'ops/tools/pylint.ini')\n pylint_cmd = 'pylint --rcfile {} {}'.format(cfg_path, files)\n return shell.run(pylint_cmd, exit_on_error=False).return_code", "docstring": "Run code checks using pylint.\n\nArgs:\nfiles (list[str]):\nA list of files to check\n\nReturns:\nbool: **True** if all files passed the checks, **False** otherwise.", "source": "codesearchnet"} {"code": "def simplify(self, eps, max_dist_error, max_speed_error, topology_only=False):\n if topology_only:\n self.points = drp(self.points, eps)\n else:\n self.points = spt(self.points, max_dist_error, max_speed_error)\n return self", "docstring": "In-place segment simplification\n\nSee `drp` and `compression` modules\n\nArgs:\neps (float): Distance threshold for the `drp` function\nmax_dist_error (float): Max distance error, in meters\nmax_speed_error (float): Max speed error, in km/h\ntopology_only (bool, optional): True to only keep topology, not considering\ntimes when simplifying. 
Defaults to False.\nReturns:\n:obj:`Segment`", "source": "codesearchnet"} {"code": "def _ReadString(\n self, file_object, file_offset, data_type_map, description):\n \n \n element_data_size = (\n data_type_map._element_data_type_definition.GetByteSize())\n elements_terminator = (\n data_type_map._data_type_definition.elements_terminator)\n\n byte_stream = []\n\n element_data = file_object.read(element_data_size)\n byte_stream.append(element_data)\n while element_data and element_data != elements_terminator:\n element_data = file_object.read(element_data_size)\n byte_stream.append(element_data)\n\n byte_stream = b''.join(byte_stream)\n\n return self._ReadStructureFromByteStream(\n byte_stream, file_offset, data_type_map, description)", "docstring": "Reads a string.\n\nArgs:\nfile_object (FileIO): file-like object.\nfile_offset (int): offset of the data relative from the start of\nthe file-like object.\ndata_type_map (dtfabric.DataTypeMap): data type map of the string.\ndescription (str): description of the string.\n\nReturns:\nobject: structure values object.\n\nRaises:\nFileFormatError: if the string cannot be read.\nValueError: if file-like object or date type map are invalid.", "source": "juraj-google-style"} {"code": "def check_causatives(self, case_obj=None, institute_obj=None):\n \n institute_id = case_obj['owner'] if case_obj else institute_obj['_id']\n institute_causative_variant_ids = self.get_causatives(institute_id)\n if len(institute_causative_variant_ids) == 0:\n return []\n\n if case_obj:\n \n case_causative_ids = set(case_obj.get('causatives', []))\n institute_causative_variant_ids = list(\n set(institute_causative_variant_ids).difference(case_causative_ids)\n )\n\n \n query = self.variant_collection.find(\n {'_id': {'$in': institute_causative_variant_ids}},\n {'variant_id': 1}\n )\n positional_variant_ids = [item['variant_id'] for item in query]\n\n filters = {'variant_id': {'$in': positional_variant_ids}}\n if case_obj:\n filters['case_id'] = case_obj['_id']\n else:\n filters['institute'] = institute_obj['_id']\n return self.variant_collection.find(filters)", "docstring": "Check if there are any variants that are previously marked causative\n\nLoop through all variants that are marked 'causative' for an\ninstitute and check if any of the variants are present in the\ncurrent case.\n\nArgs:\ncase_obj (dict): A Case object\ninstitute_obj (dict): check across the whole institute\n\nReturns:\ncausatives(iterable(Variant))", "source": "juraj-google-style"} {"code": "def move_bulk(self, from_statuses, to_status):\n \n\n for status in from_statuses:\n from_status_items = self.__get_var(\"items_\" + status)\n self.__set_var(\"items_\" + status, OrderedDict())\n\n to_status_items = self.__get_var(\"items_\" + to_status)\n to_status_items.update(from_status_items)", "docstring": "Move a bulk of request/response pairs to another status\n\nArgs:\nfrom_statuses list(str): The statuses to move from\nto_status (str): The status to move to", "source": "juraj-google-style"} {"code": "def get_by(self, field, value):\n firmwares = self.get_all()\n matches = []\n for item in firmwares:\n if (item.get(field) == value):\n matches.append(item)\n return matches", "docstring": "Gets the list of firmware baseline resources managed by the appliance. 
Optional parameters can be used to\nfilter the list of resources returned.\n\nThe search is case-insensitive.\n\nArgs:\nfield: Field name to filter.\nvalue: Value to filter.\n\nReturns:\nlist: List of firmware baseline resources.", "source": "codesearchnet"} {"code": "def cuda(self) -> Rotation:\n if self._rot_mats is not None:\n return Rotation(rot_mats=self._rot_mats.cuda(), quats=None)\n elif self._quats is not None:\n return Rotation(rot_mats=None, quats=self._quats.cuda(), normalize_quats=False)\n else:\n raise ValueError('Both rotations are None')", "docstring": "Analogous to the cuda() method of torch Tensors\n\nReturns:\nA copy of the Rotation in CUDA memory", "source": "github-repos"} {"code": "def entry_verifier(entries, regex, delimiter):\n cregex = re.compile(regex)\n python_version = int(sys.version.split('.')[0])\n decoder = ('unicode-escape' if (python_version == 3) else 'string-escape')\n dedelimiter = codecs.decode(delimiter, decoder)\n for entry in entries:\n match = re.match(cregex, entry)\n if (not match):\n split_regex = regex.split(delimiter)\n split_entry = entry.split(dedelimiter)\n part = 0\n for (regex_segment, entry_segment) in zip(split_regex, split_entry):\n if (not (regex_segment[0] == '^')):\n regex_segment = ('^' + regex_segment)\n if (not (regex_segment[(- 1)] == '$')):\n regex_segment += '$'\n if (not re.match(regex_segment, entry_segment)):\n raise FormatError(template=regex_segment, subject=entry_segment, part=part)\n part += 1", "docstring": "Checks each entry against regex for validity,\n\nIf an entry does not match the regex, the entry and regex\nare broken down by the delimiter and each segment is analyzed\nto produce an accurate error message.\n\nArgs:\nentries (list): List of entries to check with regex\n\nregex (str): Regular expression to compare entries with\n\ndelimiter (str): Character to split entry and regex by, used to check\nparts of entry and regex to narrow in on the error\n\nRaises:\nFormatError: Class containing regex match error data\n\nExample:\n>>> regex = r'^>.+\\\\n[ACGTU]+\\\\n$'\n>>> entry = [r'>entry1\\\\nAGGGACTA\\\\n']\n>>> entry_verifier(entry, regex, '\\\\n')", "source": "codesearchnet"} {"code": "def rebuild_ragged_tensor_with_value_rowids(rt, feed_dict=None, sess=None):\n if isinstance(rt, ragged_tensor.RaggedTensor):\n values = rebuild_ragged_tensor_with_value_rowids(rt.values, feed_dict, sess)\n rowids = rt.value_rowids()\n nrows = rt.nrows()\n if feed_dict is not None:\n rowids_ph = make_placeholder(rowids)\n nrows_ph = make_placeholder(nrows)\n feed_dict[rowids_ph] = sess.run(rowids)\n feed_dict[nrows_ph] = sess.run(nrows)\n rowids, nrows = (rowids_ph, nrows_ph)\n return ragged_tensor.RaggedTensor.from_value_rowids(values, rowids, nrows)\n else:\n if feed_dict is not None:\n rt_ph = make_placeholder(rt)\n feed_dict[rt_ph] = sess.run(rt)\n rt = rt_ph\n return rt", "docstring": "Returns a copy of `rt`, built using `from_value_rowids`.\n\nThis ensures that RaggedTensor._cached_value_rowids is populated, which\ntriggers a different code-path for converting ragged tensors to tensors.\n\nIf `feed_dict` and `sess` are specified, then build the new `RaggedTensor`\nusing placeholder tensors, and populate a feed dictionary that can be used\nto feed the placeholders.\n\nArgs:\nrt: The RaggedTensor to copy.\nfeed_dict: If specified, then build the new `RaggedTensor` using\nplaceholders, and populate this dict with entries to feed those\nplaceholders.\nsess: A session used to evaluate tensors; required if feed_dict 
is\nspecified.\n\nReturns:\nA copy of `rt`, built using `from_value_rowids`.", "source": "github-repos"} {"code": "def find_and_replace_userids(self, text):\n match = True\n pattern = re.compile('<@([A-Z0-9]{9})>')\n while match:\n match = pattern.search(text)\n if match:\n name = self.get_user_display_name(match.group(1))\n text = re.sub(re.compile(match.group(0)), ('@' + name), text)\n return text", "docstring": "Finds occurrences of Slack userids and attempts to replace them with\ndisplay names.\n\nArgs:\ntext (string): The message text\nReturns:\nstring: The message text with userids replaced.", "source": "codesearchnet"} {"code": "def _ws_on_close(self, ws: websocket.WebSocketApp):\n \n self.connected = False\n self.logger.error('Websocket closed')\n self._reconnect_websocket()", "docstring": "Callback for closing the websocket connection\n\nArgs:\nws: websocket connection (now closed)", "source": "juraj-google-style"} {"code": "def usergroups_users_list(self, *, usergroup: str, **kwargs) -> SlackResponse:\n self._validate_xoxp_token()\n kwargs.update({'usergroup': usergroup})\n return self.api_call('usergroups.users.list', http_verb='GET', params=kwargs)", "docstring": "List all users in a User Group\n\nArgs:\nusergroup (str): The encoded ID of the User Group to update.\ne.g. 'S0604QSJC'", "source": "codesearchnet"} {"code": "def object_graph_key_mapping(checkpoint_path):\n reader = py_checkpoint_reader.NewCheckpointReader(checkpoint_path)\n object_graph_string = reader.get_tensor(trackable.OBJECT_GRAPH_PROTO_KEY)\n object_graph_proto = trackable_object_graph_pb2.TrackableObjectGraph()\n object_graph_proto.ParseFromString(object_graph_string)\n names_to_keys = {}\n for node in object_graph_proto.nodes:\n for attribute in node.attributes:\n names_to_keys[attribute.full_name] = attribute.checkpoint_key\n return names_to_keys", "docstring": "Return name to key mappings from the checkpoint.\n\nArgs:\ncheckpoint_path: string, path to object-based checkpoint\n\nReturns:\nDictionary mapping tensor names to checkpoint keys.", "source": "github-repos"} {"code": "def _VerifyValues(self, tensor_in_sizes, depthwise_filter_in_sizes, pointwise_filter_in_sizes, stride, padding, expected, data_format='NHWC'):\n with self.cached_session():\n t1 = self._InitValues(tensor_in_sizes)\n f1 = self._InitValues(depthwise_filter_in_sizes)\n f1.set_shape(depthwise_filter_in_sizes)\n f2 = self._InitValues(pointwise_filter_in_sizes)\n real_t1 = t1\n strides = [1, stride, stride, 1]\n if data_format == 'NCHW':\n real_t1 = array_ops.transpose(t1, [0, 3, 1, 2])\n strides = [1, 1, stride, stride]\n if isinstance(padding, list):\n padding = [padding[0], padding[3], padding[1], padding[2]]\n conv = nn_impl.separable_conv2d(real_t1, f1, f2, strides=strides, padding=padding, data_format=data_format)\n if data_format == 'NCHW':\n conv = array_ops.transpose(conv, [0, 2, 3, 1])\n value = self.evaluate(conv)\n tf_logging.debug('value = %s', value)\n self.assertArrayNear(expected, np.ravel(value), 0.002)\n self.assertShapeEqual(value, conv)", "docstring": "Verifies the output values of the separable convolution function.\n\nArgs:\ntensor_in_sizes: Input tensor dimensions.\ndepthwise_filter_in_sizes: Depthwise filter tensor dimensions.\npointwise_filter_in_sizes: Pointwise filter tensor dimensions.\nstride: Stride.\npadding: Padding type.\nexpected: An array containing the expected operation outputs.\ndata_format: string data format for input tensor.", "source": "github-repos"} {"code": "def _letter_map(word):\n \n\n lmap = 
{}\n for letter in word:\n try:\n lmap[letter] += 1\n except KeyError:\n lmap[letter] = 1\n return lmap", "docstring": "Creates a map of letter use in a word.\n\nArgs:\nword: a string to create a letter map from\n\nReturns:\na dictionary of {letter: integer count of letter in word}", "source": "juraj-google-style"} {"code": "def __findFirstMissing(self, array, start, end):\n if (start > end):\n return (end + 1)\n if (start != array[start]):\n return start\n mid = int(((start + end) / 2))\n if (array[mid] == mid):\n return self.__findFirstMissing(array, (mid + 1), end)\n return self.__findFirstMissing(array, start, mid)", "docstring": "Find the smallest elements missing in a sorted array.\n\nReturns:\nint: The smallest element missing.", "source": "codesearchnet"} {"code": "def write_file(\n task: Task,\n filename: str,\n content: str,\n append: bool = False,\n dry_run: Optional[bool] = None,\n) -> Result:\n \n diff = _generate_diff(filename, content, append)\n\n if not task.is_dry_run(dry_run):\n mode = \"a+\" if append else \"w+\"\n with open(filename, mode=mode) as f:\n f.write(content)\n\n return Result(host=task.host, diff=diff, changed=bool(diff))", "docstring": "Write contents to a file (locally)\n\nArguments:\ndry_run: Whether to apply changes or not\nfilename: file you want to write into\ncontent: content you want to write\nappend: whether you want to replace the contents or append to it\n\nReturns:\nResult object with the following attributes set:\n* changed (``bool``):\n* diff (``str``): unified diff", "source": "juraj-google-style"} {"code": "def get_instances(serials):\n objs = []\n for s in serials:\n objs.append(Monsoon(serial=s))\n return objs", "docstring": "Create Monsoon instances from a list of serials.\n\nArgs:\nserials: A list of Monsoon (integer) serials.\n\nReturns:\nA list of Monsoon objects.", "source": "codesearchnet"} {"code": "def with_eager_op_as_function(cls: Optional[type[_T]]=None, only_as_function: bool=False) -> Union[Callable[[type[_T]], type[_T]], type[_T]]:\n\n def decorator(cls: type[_T]) -> type[_T]:\n return cls\n if cls is not None:\n return decorator(cls)\n return decorator", "docstring": "Returns the same class. 
This will be removed once all usages are removed.\n\nArgs:\ncls: class to decorate.\nonly_as_function: unused argument.\n\nReturns:\ncls", "source": "github-repos"} {"code": "def send_messages(cls, http_request, message_requests):\n deduplicated_messages = set(message_requests)\n for (msg_type, text) in deduplicated_messages:\n message_function = getattr(messages, msg_type)\n message_function(http_request, text)", "docstring": "Deduplicate any outgoing message requests, and send the remainder.\n\nArgs:\nhttp_request: The HTTP request in whose response we want to embed the messages\nmessage_requests: A list of undeduplicated messages in the form of tuples of message type\nand text- for example, ('error', 'Something went wrong')", "source": "codesearchnet"} {"code": "def __setitem__(self, keyword, clean_name=None):\n \n status = False\n if not clean_name and keyword:\n clean_name = keyword\n\n if keyword and clean_name:\n if not self.case_sensitive:\n keyword = keyword.lower()\n current_dict = self.keyword_trie_dict\n for letter in keyword:\n current_dict = current_dict.setdefault(letter, {})\n if self._keyword not in current_dict:\n status = True\n self._terms_in_trie += 1\n current_dict[self._keyword] = clean_name\n return status", "docstring": "To add keyword to the dictionary\npass the keyword and the clean name it maps to.\n\nArgs:\nkeyword : string\nkeyword that you want to identify\n\nclean_name : string\nclean term for that keyword that you would want to get back in return or replace\nif not provided, keyword will be used as the clean name also.\n\nExamples:\n>>> keyword_processor['Big Apple'] = 'New York'", "source": "juraj-google-style"} {"code": "def _ParseItem(self, parser_mediator, olecf_item):\n result = False\n event_data = OLECFItemEventData()\n event_data.name = olecf_item.name\n event_data.offset = 0\n event_data.size = olecf_item.size\n (creation_time, modification_time) = self._GetTimestamps(olecf_item)\n if creation_time:\n date_time = dfdatetime_filetime.Filetime(timestamp=creation_time)\n event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n result = True\n if modification_time:\n date_time = dfdatetime_filetime.Filetime(timestamp=modification_time)\n event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n result = True\n for sub_item in olecf_item.sub_items:\n if self._ParseItem(parser_mediator, sub_item):\n result = True\n return result", "docstring": "Parses an OLECF item.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nolecf_item (pyolecf.item): OLECF item.\n\nReturns:\nbool: True if an event was produced.", "source": "codesearchnet"} {"code": "def calculate_columns(sequence):\n columns = {}\n for row in sequence:\n for key in row.keys():\n if (key not in columns):\n columns[key] = len(key)\n value_length = len(str(row[key]))\n if (value_length > columns[key]):\n columns[key] = value_length\n return columns", "docstring": "Find all row names and the maximum column widths.\n\nArgs:\ncolumns (dict): the keys are the column name and the value the max length.\n\nReturns:\ndict: column names (key) and widths (value).", "source": "codesearchnet"} {"code": "def path_get_destination(p: tcod.path.AStar) -> Tuple[int, int]:\n \n x = ffi.new(\"int *\")\n y = ffi.new(\"int *\")\n 
lib.TCOD_path_get_destination(p._path_c, x, y)\n return x[0], y[0]", "docstring": "Get the current destination position.\n\nArgs:\np (AStar): An AStar instance.\nReturns:\nTuple[int, int]: An (x, y) point.", "source": "juraj-google-style"} {"code": "def IsTensorFlowEventsFile(path):\n if (not path):\n raise ValueError('Path must be a nonempty string')\n return ('tfevents' in tf.compat.as_str_any(os.path.basename(path)))", "docstring": "Check the path name to see if it is probably a TF Events file.\n\nArgs:\npath: A file path to check if it is an event file.\n\nRaises:\nValueError: If the path is an empty string.\n\nReturns:\nIf path is formatted like a TensorFlowEventsFile.", "source": "codesearchnet"} {"code": "def calculate_positions(self, first_bee_val, second_bee_val, value_range):\n \n\n value = first_bee_val + np.random.uniform(-1, 1) \\\n * (first_bee_val - second_bee_val)\n if value_range[0] == 'int':\n value = int(value)\n if value > value_range[1][1]:\n value = value_range[1][1]\n if value < value_range[1][0]:\n value = value_range[1][0]\n\n return value", "docstring": "Calculate the new value/position for two given bee values\n\nArgs:\nfirst_bee_val (int or float): value from the first bee\nsecond_bee_val (int or float): value from the second bee\nvalue_ranges (tuple): \"(value type, (min_val, max_val))\" for the\ngiven value\n\nReturns:\nint or float: new value", "source": "juraj-google-style"} {"code": "def has_no_fat_ends(neuron, multiple_of_mean=2.0, final_point_count=5):\n bad_ids = []\n for leaf in _nf.iter_sections(neuron.neurites, iterator_type=Tree.ileaf):\n mean_radius = np.mean(leaf.points[1:][((- final_point_count):, COLS.R)])\n if ((mean_radius * multiple_of_mean) <= leaf.points[((- 1), COLS.R)]):\n bad_ids.append((leaf.id, leaf.points[(- 1):]))\n return CheckResult((len(bad_ids) == 0), bad_ids)", "docstring": "Check if leaf points are too large\n\nArguments:\nneuron(Neuron): The neuron object to test\nmultiple_of_mean(float): how many times larger the final radius\nhas to be compared to the mean of the final points\nfinal_point_count(int): how many points to include in the mean\n\nReturns:\nCheckResult with result list of ids of bad sections\n\nNote:\nA fat end is defined as a leaf segment whose last point is larger\nby a factor of `multiple_of_mean` than the mean of the points in\n`final_point_count`", "source": "codesearchnet"} {"code": "def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:\n \n return cast(T, self._val) if self._is_ok else op(cast(E, self._val))", "docstring": "Returns the sucess value in the :class:`Result` or computes a default\nfrom the error value.\n\nArgs:\nop: The function to computes default with.\n\nReturns:\nThe success value in the :class:`Result` if it is\na :meth:`Result.Ok` value, otherwise ``op(E)``.\n\nExamples:\n>>> Ok(1).unwrap_or_else(lambda e: e * 10)\n1\n>>> Err(1).unwrap_or_else(lambda e: e * 10)\n10", "source": "juraj-google-style"} {"code": "def get_record(self, name, record_id):\n \n if name in self._cache:\n if record_id in self._cache[name]:\n return self._cache[name][record_id]", "docstring": "Retrieve a record with a given type name and record id.\n\nArgs:\nname (string): The name which the record is stored under.\nrecord_id (int): The id of the record requested.\n\nReturns:\n:class:`cinder_data.model.CinderModel`: The cached model.", "source": "juraj-google-style"} {"code": "def _make_tensor_slice_spec(slice_spec, use_constant=True):\n\n def make_piece_scalar(piece):\n if isinstance(piece, int):\n scalar = 
constant_op.constant(piece)\n if use_constant:\n return scalar\n else:\n return array_ops.placeholder_with_default(scalar, [])\n elif isinstance(piece, slice):\n return slice(make_piece_scalar(piece.start), make_piece_scalar(piece.stop), make_piece_scalar(piece.step))\n else:\n return piece\n if isinstance(slice_spec, tuple):\n return tuple((make_piece_scalar(piece) for piece in slice_spec))\n else:\n return make_piece_scalar(slice_spec)", "docstring": "Wraps all integers in an extended slice spec w/ a tensor.\n\nThis function is used to help test slicing when the slice spec contains\ntensors, rather than integers.\n\nArgs:\nslice_spec: The extended slice spec.\nuse_constant: If true, then wrap each integer with a tf.constant. If false,\nthen wrap each integer with a tf.placeholder.\n\nReturns:\nA copy of slice_spec, but with each integer i replaced with tf.constant(i).", "source": "github-repos"} {"code": "def list_folder(cls, session, mailbox, folder):\n \n return cls(\n '/mailboxes/%d/folders/%s/conversations.json' % (\n mailbox.id, folder.id,\n ),\n session=session,\n )", "docstring": "Return conversations in a specific folder of a mailbox.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nmailbox (helpscout.models.Mailbox): Mailbox that folder is in.\nfolder (helpscout.models.Folder): Folder to list.\n\nReturns:\nRequestPaginator(output_type=helpscout.models.Conversation):\nConversations iterator.", "source": "juraj-google-style"} {"code": "def leapfrog_step(leapfrog_step_state: LeapFrogStepState, step_size: FloatTensor, target_log_prob_fn: PotentialFn, kinetic_energy_fn: PotentialFn) -> Tuple[(LeapFrogStepState, LeapFrogStepExtras)]:\n state = leapfrog_step_state.state\n state_grads = leapfrog_step_state.state_grads\n momentum = leapfrog_step_state.momentum\n step_size = maybe_broadcast_structure(step_size, state)\n state = tf.nest.map_structure(tf.convert_to_tensor, state)\n momentum = tf.nest.map_structure(tf.convert_to_tensor, momentum)\n state = tf.nest.map_structure(tf.convert_to_tensor, state)\n if (state_grads is None):\n (_, _, state_grads) = call_and_grads(target_log_prob_fn, state)\n else:\n state_grads = tf.nest.map_structure(tf.convert_to_tensor, state_grads)\n momentum = tf.nest.map_structure((lambda m, sg, s: (m + ((0.5 * sg) * s))), momentum, state_grads, step_size)\n (kinetic_energy, kinetic_energy_extra, momentum_grads) = call_and_grads(kinetic_energy_fn, momentum)\n state = tf.nest.map_structure((lambda x, mg, s: (x + (mg * s))), state, momentum_grads, step_size)\n (target_log_prob, state_extra, state_grads) = call_and_grads(target_log_prob_fn, state)\n momentum = tf.nest.map_structure((lambda m, sg, s: (m + ((0.5 * sg) * s))), momentum, state_grads, step_size)\n return (LeapFrogStepState(state, state_grads, momentum), LeapFrogStepExtras(target_log_prob, state_extra, kinetic_energy, kinetic_energy_extra))", "docstring": "Leapfrog `TransitionOperator`.\n\nArgs:\nleapfrog_step_state: LeapFrogStepState.\nstep_size: Step size, structure broadcastable to the `target_log_prob_fn`\nstate.\ntarget_log_prob_fn: Target log prob fn.\nkinetic_energy_fn: Kinetic energy fn.\n\nReturns:\nleapfrog_step_state: LeapFrogStepState.\nleapfrog_step_extras: LeapFrogStepExtras.", "source": "codesearchnet"} {"code": "def remove_team_member(self, account_id=None, email_address=None):\n return self._add_remove_team_member(self.TEAM_REMOVE_MEMBER_URL, email_address, account_id)", "docstring": "Remove a user from your Team\n\nArgs:\n\naccount_id (str): The id of the account 
of the user to remove from your team.\n\nemail_address (str): The email address of the account to remove from your team. The account id prevails if both account_id and email_address are provided.\n\nReturns:\nA Team object", "source": "codesearchnet"} {"code": "def find_function(self, context, funname):\n \n\n if funname in self.builtins:\n return self.builtins[funname]\n\n func = None\n if isinstance(context, dict):\n if funname in context:\n func = context[funname]\n\n \n if isinstance(func, str):\n func = self._deferred_add(func)\n context[funname] = func\n elif hasattr(context, funname):\n func = getattr(context, funname)\n\n if func is None:\n raise NotFoundError(\"Function not found\", function=funname)\n\n return func", "docstring": "Find a function in the given context by name.\n\nThis function will first search the list of builtins and if the\ndesired function is not a builtin, it will continue to search\nthe given context.\n\nArgs:\ncontext (object): A dict or class that is a typedargs context\nfunname (str): The name of the function to find\n\nReturns:\ncallable: The found function.", "source": "juraj-google-style"} {"code": "def __init__(self, source_shape, target_shape, layer_broadcasters, dtype=None):\n if not isinstance(source_shape, DynamicRaggedShape):\n raise TypeError('source_shape is not a DynamicRaggedShape')\n if not isinstance(target_shape, DynamicRaggedShape):\n raise TypeError('target_shape is not a DynamicRaggedShape')\n if not isinstance(layer_broadcasters, list):\n raise TypeError('layer_broadcasters not a list: ' + str(layer_broadcasters))\n for bc in layer_broadcasters:\n if not isinstance(bc, _LayerBroadcaster):\n raise TypeError('Not a LayerBroadcaster: ' + str(bc))\n dtype = _find_dtype(source_shape, dtype)\n dtype = _find_dtype(target_shape, dtype)\n dtype = _find_dtype_iterable(layer_broadcasters, dtype)\n dtype = _find_dtype(dtypes.int64, dtype)\n self._source_shape = source_shape.with_dtype(dtype)\n self._target_shape = target_shape.with_dtype(dtype)\n self._layer_broadcasters = [x.with_dtype(dtype) for x in layer_broadcasters]", "docstring": "Create a broadcaster.\n\nDo not call directly.\nThe source_shape, target_shape, and layer_broadcasters are converted\nto have the same dtype.\n\nNote: source_shape.rank and target_shape.rank must be known.\nArgs:\nsource_shape: the source DynamicRaggedShape\ntarget_shape: the target DynamicRaggedShape\nlayer_broadcasters: List[_LayerBroadcaster] of length source_shape.rank.\ndtype: the preferred dtype of the broadcaster.\n\nRaises:\nTypeError: if the input types don't match.", "source": "github-repos"} {"code": "def input_list_parser(infile_list):\n \n\n final_list_of_files = []\n\n for x in infile_list:\n\n \n if op.isdir(x):\n os.chdir(x)\n final_list_of_files.extend(glob.glob('*'))\n\n \n if op.isfile(x):\n final_list_of_files.append(x)\n\n return final_list_of_files", "docstring": "Always return a list of files with varying input.\n\n>>> input_list_parser(['/path/to/folder/'])\n['/path/to/folder/file1.txt', '/path/to/folder/file2.txt', '/path/to/folder/file3.txt']\n\n>>> input_list_parser(['/path/to/file.txt'])\n['/path/to/file.txt']\n\n>>> input_list_parser(['file1.txt'])\n['file1.txt']\n\nArgs:\ninfile_list: List of arguments\n\nReturns:\nlist: Standardized list of files", "source": "juraj-google-style"} {"code": "def add_listener_policy(self, json_data):\n env = boto3.session.Session(profile_name=self.env, region_name=self.region)\n elbclient = env.client('elb')\n stickiness = {}\n elb_settings = 
self.properties['elb']\n if elb_settings.get('ports'):\n ports = elb_settings['ports']\n for listener in ports:\n if listener.get('stickiness'):\n stickiness = self.add_stickiness()\n LOG.info('Stickiness Found: %s', stickiness)\n break\n for job in json.loads(json_data)['job']:\n for listener in job['listeners']:\n policies = []\n ext_port = listener['externalPort']\n if listener['listenerPolicies']:\n policies.extend(listener['listenerPolicies'])\n if stickiness.get(ext_port):\n policies.append(stickiness.get(ext_port))\n if policies:\n LOG.info('Adding listener policies: %s', policies)\n elbclient.set_load_balancer_policies_of_listener(LoadBalancerName=self.app, LoadBalancerPort=ext_port, PolicyNames=policies)", "docstring": "Attaches listerner policies to an ELB\n\nArgs:\njson_data (json): return data from ELB upsert", "source": "codesearchnet"} {"code": "def make_body(self, resp, params, meta, content):\n response = {'meta': meta, 'content': content}\n resp.content_type = 'application/json'\n resp.body = json.dumps(response, indent=((params['indent'] or None) if ('indent' in params) else None))", "docstring": "Construct response body in ``resp`` object using JSON serialization.\n\nArgs:\nresp (falcon.Response): response object where to include\nserialized body\nparams (dict): dictionary of parsed parameters\nmeta (dict): dictionary of metadata to be included in 'meta'\nsection of response\ncontent (dict): dictionary of response content (resource\nrepresentation) to be included in 'content' section of response\n\nReturns:\nNone", "source": "codesearchnet"} {"code": "def get(self, key, **ctx_options):\n options = _make_ctx_options(ctx_options)\n use_cache = self._use_cache(key, options)\n if use_cache:\n self._load_from_cache_if_available(key)\n use_datastore = self._use_datastore(key, options)\n if (use_datastore and isinstance(self._conn, datastore_rpc.TransactionalConnection)):\n use_memcache = False\n else:\n use_memcache = self._use_memcache(key, options)\n ns = key.namespace()\n memcache_deadline = None\n if use_memcache:\n mkey = (self._memcache_prefix + key.urlsafe())\n memcache_deadline = self._get_memcache_deadline(options)\n mvalue = (yield self.memcache_get(mkey, for_cas=use_datastore, namespace=ns, use_cache=True, deadline=memcache_deadline))\n if use_cache:\n self._load_from_cache_if_available(key)\n if (mvalue not in (_LOCKED, None)):\n cls = model.Model._lookup_model(key.kind(), self._conn.adapter.default_model)\n pb = entity_pb.EntityProto()\n try:\n pb.MergePartialFromString(mvalue)\n except ProtocolBuffer.ProtocolBufferDecodeError:\n logging.warning(('Corrupt memcache entry found with key %s and namespace %s' % (mkey, ns)))\n mvalue = None\n else:\n entity = cls._from_pb(pb)\n entity._key = key\n if use_cache:\n self._cache[key] = entity\n raise tasklets.Return(entity)\n if ((mvalue is None) and use_datastore):\n (yield self.memcache_set(mkey, _LOCKED, time=_LOCK_TIME, namespace=ns, use_cache=True, deadline=memcache_deadline))\n (yield self.memcache_gets(mkey, namespace=ns, use_cache=True, deadline=memcache_deadline))\n if (not use_datastore):\n raise tasklets.Return(None)\n if use_cache:\n entity = (yield self._get_batcher.add_once(key, options))\n else:\n entity = (yield self._get_batcher.add(key, options))\n if (entity is not None):\n if (use_memcache and (mvalue != _LOCKED)):\n pbs = entity._to_pb(set_key=False).SerializePartialToString()\n if (len(pbs) <= memcache.MAX_VALUE_SIZE):\n timeout = self._get_memcache_timeout(key, options)\n (yield 
self.memcache_cas(mkey, pbs, time=timeout, namespace=ns, deadline=memcache_deadline))\n if use_cache:\n self._cache[key] = entity\n raise tasklets.Return(entity)", "docstring": "Return a Model instance given the entity key.\n\nIt will use the context cache if the cache policy for the given\nkey is enabled.\n\nArgs:\nkey: Key instance.\n**ctx_options: Context options.\n\nReturns:\nA Model instance if the key exists in the datastore; None otherwise.", "source": "codesearchnet"} {"code": "def from_epsg_code(code):\n \n \n code = str(code)\n proj4 = utils.crscode_to_string(\"epsg\", code, \"proj4\")\n crs = from_proj4(proj4)\n return crs", "docstring": "Load crs object from epsg code, via spatialreference.org.\nParses based on the proj4 representation.\n\nArguments:\n\n- *code*: The EPSG code as an integer.\n\nReturns:\n\n- A CS instance of the indicated type.", "source": "juraj-google-style"} {"code": "def from_value(value: Any, context: trace.TracingContext=None) -> trace.TraceType:\n if context is None:\n context = InternalTracingContext()\n if context.is_legacy_signature and isinstance(value, trace.TraceType):\n return value\n elif isinstance(value, trace.SupportsTracingProtocol):\n generated_type = value.__tf_tracing_type__(context)\n if not isinstance(generated_type, trace.TraceType):\n raise TypeError('Expected an instance of TraceType for Tracing Protocol call to ' + str(value) + ' but got ' + str(generated_type))\n return generated_type\n if isinstance(value, weakref.ref):\n raise TypeError(f'weakref input {value} not supported for tf.function.')\n if hasattr(value, '__wrapped__'):\n return from_value(value.__wrapped__, context)\n if isinstance(value, list):\n return default_types.List(*(from_value(c, context) for c in value))\n if isinstance(value, tuple):\n if util.is_namedtuple(value):\n named_tuple_type = type(value)\n return default_types.NamedTuple.from_type_and_attributes(named_tuple_type, tuple((from_value(c, context) for c in value)))\n else:\n return default_types.Tuple(*(from_value(c, context) for c in value))\n if isinstance(value, collections.abc.Mapping):\n mapping_type = type(value)\n return default_types.Dict({k: from_value(value[k], context) for k in value}, mapping_type)\n if util.is_attrs(value):\n return default_types.Attrs.from_type_and_attributes(type(value), tuple((from_value(getattr(value, a.name), context) for a in value.__attrs_attrs__)))\n if util.is_np_ndarray(value):\n ndarray = value.__array__()\n return default_types.TENSOR(ndarray.shape, ndarray.dtype)\n if isinstance(value, custom_nest_protocol.CustomNestProtocol):\n metadata, components = value.__tf_flatten__()\n return custom_nest_trace_type.CustomNestTraceType(type(value), metadata, tuple((from_value(c, context) for c in components)))\n try:\n ref = weakref.ref(value)\n if ref is None:\n raise TypeError(f'Deleted objects are not valid tf.function arguments, Got {value!r}')\n else:\n return default_types.Weakref(ref)\n except TypeError:\n try:\n return default_types.Literal(value)\n except:\n raise TypeError(f'Could not generate a generic TraceType for {value!r}.Please verify that it is immutable/hashable. 
Otherwise, consider implementing the Tracing Protocol for it.')", "docstring": "Returns a TraceType corresponding to the value based on the context.\n\nArgs:\nvalue: The value to generate a TraceType for.\ncontext: The TracingContext to be shared during protocol calls.\n\nReturns:\nA TraceType object representing the given value.", "source": "github-repos"} {"code": "def get_input_params(distribution_strategy, num_samples, steps, batch_size, mode=None):\n use_per_replica_batch = not dist_utils.global_batch_size_supported(distribution_strategy)\n if context.executing_eagerly():\n allow_partial_batch = mode != ModeKeys.TRAIN or not backend.is_tpu_strategy(distribution_strategy)\n else:\n allow_partial_batch = mode == ModeKeys.TRAIN or ((mode == ModeKeys.PREDICT or mode == ModeKeys.TEST) and backend.is_tpu_strategy(distribution_strategy))\n if steps is None:\n if batch_size is None:\n global_batch_size = min(num_samples, 32)\n else:\n global_batch_size = batch_size\n if use_per_replica_batch:\n global_batch_size *= distribution_strategy.num_replicas_in_sync\n if allow_partial_batch:\n steps = np.ceil(num_samples / global_batch_size).astype(int)\n else:\n if num_samples % global_batch_size:\n raise ValueError('The number of samples %s is not divisible by batch size %s.' % (num_samples, global_batch_size))\n steps = num_samples \n elif batch_size is None:\n if num_samples % steps:\n raise ValueError('The number of samples %s is not divisible by steps %s. Please change the number of steps to a value that can consume all the samples' % (num_samples, steps))\n global_batch_size = num_samples \n else:\n global_batch_size = batch_size\n if use_per_replica_batch:\n global_batch_size *= distribution_strategy.num_replicas_in_sync\n min_num_samples = global_batch_size * steps\n if allow_partial_batch:\n min_num_samples = global_batch_size * (steps - 1) + 1 if steps > 1 else 0\n if num_samples < min_num_samples:\n raise ValueError('Number of samples %s is less than samples required for specified batch_size %s and steps %s' % (num_samples, global_batch_size, steps))\n if use_per_replica_batch:\n if global_batch_size % distribution_strategy.num_replicas_in_sync:\n raise ValueError('The batch size (%s) could not be sharded evenly across the sync replicas (%s) in the distribution strategy.' % (global_batch_size, distribution_strategy.num_replicas_in_sync))\n batch_size = global_batch_size \n else:\n batch_size = global_batch_size\n return (steps, batch_size)", "docstring": "Calculate the number of batches and steps/steps_per_epoch.\n\nArgs:\ndistribution_strategy: The DistributionStrategy used to compile the model.\nnum_samples: The number of samples from which we determine the batch size\nand steps.\nsteps: The specified number of steps.\nbatch_size: The specified batch_size.\nmode: ModeKey representing whether input will be used for training,\nevaluation, or prediction. This is used to relax the constraints on\nconsuming all the training samples to keep compatibility till we support\npartial batches. If none, then partial batches are not allowed.\n\nReturns:\nsteps: The steps or steps_per_epoch argument depending on if a user is\ncalling `fit`, `evaluate` or `predict`. 
If the is_training flag is set\nwe don't require the number of samples to be used completely.\nbatch_size: The batch size to be used in model iterations.\n\nRaises:\nValueError: If the number of batches or steps evaluates to 0.", "source": "github-repos"} {"code": "def aliased_as(self, name):\n stream = copy.copy(self)\n stream._alias = name\n return stream", "docstring": "Create an alias of this stream.\n\nReturns an alias of this stream with name `name`.\nWhen invocation of an SPL operator requires an\n:py:class:`~streamsx.spl.op.Expression` against\nan input port this can be used to ensure expression\nmatches the input port alias regardless of the name\nof the actual stream.\n\nExample use where the filter expression for a ``Filter`` SPL operator\nuses ``IN`` to access input tuple attribute ``seq``::\n\ns = ...\ns = s.aliased_as('IN')\n\nparams = {'filter': op.Expression.expression('IN.seq % 4ul == 0ul')}\nf = op.Map('spl.relational::Filter', stream, params = params)\n\nArgs:\nname(str): Name for returned stream.\n\nReturns:\nStream: Alias of this stream with ``name`` equal to `name`.\n\n.. versionadded:: 1.9", "source": "codesearchnet"} {"code": "def clip_by_value(x, min, max):\n from .function_bases import maximum2 as maximum2_base\n from .function_bases import minimum2 as minimum2_base\n return minimum2_base(maximum2_base(x, min), max)", "docstring": "r\"\"\"Clip inputs by values.\n\n.. math::\n\ny = \\begin{cases}\nmax & (x > max) \\\\\nx & (otherwise) \\\\\nmin & (x < min)\n\\end{cases}.\n\nArgs:\nx (Variable): An input variable.\nmin (Variable): A min variable by which `x` is clipped. Note that the shape of `min` must be the same as `x`'s.\nmax (Variable): A max variable by which `x` is clipped. Note that the shape of `max` must be the same as `x`'s\n\nReturns:\n~nnabla.Variable: N-D array.", "source": "codesearchnet"} {"code": "def read_uint32(self, little_endian=True):\n \n if little_endian:\n endian = \"<\"\n else:\n endian = \">\"\n return self.unpack('%sI' % endian, 4)", "docstring": "Read 4 bytes as an unsigned integer value from the stream.\n\nArgs:\nlittle_endian (bool): specify the endianness. 
(Default) Little endian.\n\nReturns:\nint:", "source": "juraj-google-style"} {"code": "def StringEscape(self, string, match, **_):\n \n precondition.AssertType(string, Text)\n\n \n \n \n if self.current_expression.operator == \"regexp\":\n self.string += compatibility.UnescapeString(string)\n elif match.group(1) in \"\\\\'\\\"rnbt\":\n self.string += compatibility.UnescapeString(string)\n else:\n raise ParseError(\"Invalid escape character %s.\" % string)", "docstring": "Escape backslashes found inside a string quote.\n\nBackslashes followed by anything other than [\\'\"rnbt] will raise an Error.\n\nArgs:\nstring: The string that matched.\nmatch: The match object (m.group(1) is the escaped code)\n\nRaises:\nParseError: For strings other than those used to define a regexp, raise an\nerror if the escaped string is not one of [\\'\"rnbt].", "source": "juraj-google-style"} {"code": "def _self_suppression(iou, _, iou_sum, iou_threshold):\n batch_size = array_ops.shape(iou)[0]\n can_suppress_others = math_ops.cast(array_ops.reshape(math_ops.reduce_max(iou, 1) < iou_threshold, [batch_size, -1, 1]), iou.dtype)\n iou_after_suppression = array_ops.reshape(math_ops.cast(math_ops.reduce_max(can_suppress_others * iou, 1) < iou_threshold, iou.dtype), [batch_size, -1, 1]) * iou\n iou_sum_new = math_ops.reduce_sum(iou_after_suppression, [1, 2])\n return [iou_after_suppression, math_ops.reduce_any(iou_sum - iou_sum_new > iou_threshold), iou_sum_new, iou_threshold]", "docstring": "Suppress boxes in the same tile.\n\nCompute boxes that cannot be suppressed by others (i.e.,\ncan_suppress_others), and then use them to suppress boxes in the same tile.\n\nArgs:\niou: a tensor of shape [batch_size, num_boxes_with_padding] representing\nintersection over union.\niou_sum: a scalar tensor.\niou_threshold: a scalar tensor.\n\nReturns:\niou_suppressed: a tensor of shape [batch_size, num_boxes_with_padding].\niou_diff: a scalar tensor representing whether any box is supressed in\nthis step.\niou_sum_new: a scalar tensor of shape [batch_size] that represents\nthe iou sum after suppression.\niou_threshold: a scalar tensor.", "source": "github-repos"} {"code": "def path2route(path: SchemaPath) -> SchemaRoute:\n \n if path == \"/\" or path == \"\":\n return []\n nlist = path.split(\"/\")\n prevns = None\n res = []\n for n in (nlist[1:] if path[0] == \"/\" else nlist):\n p, s, loc = n.partition(\":\")\n if s:\n if p == prevns:\n raise InvalidSchemaPath(path)\n res.append((loc, p))\n prevns = p\n elif prevns:\n res.append((p, prevns))\n else:\n raise InvalidSchemaPath(path)\n return res", "docstring": "Translate a schema/data path to a schema/data route.\n\nArgs:\npath: Schema path.\n\nRaises:\nInvalidSchemaPath: Invalid path.", "source": "juraj-google-style"} {"code": "def __init__(self, description=None, default=None, required=False):\n \n self.__doc__ = description\n self._default = default\n self._value = default\n self._required = bool(required)", "docstring": "Initialize the option with some basic metadata.\n\nArgs:\ndescription (str, optional): A human readable description of what\nthe option represents.\ndefault (optional): The default value to use if unset.\nrequired (bool, optional): Whether or not the value must be set.", "source": "juraj-google-style"} {"code": "def raiseError(cls, message):\n \n error_message = \"[error] %s\" % message\n if cls.__raise_exception__:\n raise Exception(error_message)\n\n cls.colorprint(error_message, Fore.RED)\n sys.exit(1)", "docstring": "Print an error message\n\nArgs:\nmessage: 
the message to print", "source": "juraj-google-style"} {"code": "def setup_remoteckan(self, remoteckan=None, **kwargs):\n if (remoteckan is None):\n self._remoteckan = self.create_remoteckan(self.get_hdx_site_url(), full_agent=self.get_user_agent(), **kwargs)\n else:\n self._remoteckan = remoteckan", "docstring": "Set up remote CKAN from provided CKAN or by creating from configuration\n\nArgs:\nremoteckan (Optional[ckanapi.RemoteCKAN]): CKAN instance. Defaults to setting one up from configuration.\n\nReturns:\nNone", "source": "codesearchnet"} {"code": "def get_create_batch_env_fun(batch_env_fn, time_limit):\n \n\n def create_env_fun(game_name=None, sticky_actions=None):\n del game_name, sticky_actions\n batch_env = batch_env_fn(in_graph=False)\n batch_env = ResizeBatchObservation(batch_env) \n batch_env = DopamineBatchEnv(batch_env, max_episode_steps=time_limit)\n return batch_env\n\n return create_env_fun", "docstring": "Factory for dopamine environment initialization function.\n\nArgs:\nbatch_env_fn: function(in_graph: bool) -> batch environment.\ntime_limit: time steps limit for environment.\n\nReturns:\nfunction (with optional, unused parameters) initializing environment.", "source": "juraj-google-style"} {"code": "async def send(self, metric):\n \n message = json.dumps(metric).encode('utf-8')\n await self.loop.create_datagram_endpoint(\n lambda: UDPClientProtocol(message),\n remote_addr=(self.ip, self.port))", "docstring": "Transform metric to JSON bytestring and send to server.\n\nArgs:\nmetric (dict): Complete metric to send as JSON.", "source": "juraj-google-style"} {"code": "def ExpandUsersVariablePath(cls, path, path_separator, user_accounts):\n \n path_segments = path.split(path_separator)\n return cls._ExpandUsersVariablePathSegments(\n path_segments, path_separator, user_accounts)", "docstring": "Expands a path with a users variable, e.g. 
%%users.homedir%%.\n\nArgs:\npath (str): path with users variable.\npath_separator (str): path segment separator.\nuser_accounts (list[UserAccountArtifact]): user accounts.\n\nReturns:\nlist[str]: paths for which the users variables have been expanded.", "source": "juraj-google-style"} {"code": "def decompose_space(H, A):\n \n return OperatorTrace.create(\n OperatorTrace.create(A, over_space=H.operands[-1]),\n over_space=ProductSpace.create(*H.operands[:-1]))", "docstring": "Simplifies OperatorTrace expressions over tensor-product spaces by\nturning it into iterated partial traces.\n\nArgs:\nH (ProductSpace): The full space.\nA (Operator):\n\nReturns:\nOperator: Iterative partial trace expression", "source": "juraj-google-style"} {"code": "def selection_error_control(self, form_info):\n (keys, names) = self.return_selected_form_items(form_info['ChannelList'])\n chosen_channels_number = len(keys)\n if (form_info['new_channel'] and (chosen_channels_number < 2)):\n return (False, _(u'You should choose at least two channel to merge operation at a new channel.'))\n elif (form_info['existing_channel'] and (chosen_channels_number == 0)):\n return (False, _(u'You should choose at least one channel to merge operation with existing channel.'))\n elif (form_info['find_chosen_channel'] and (chosen_channels_number != 1)):\n return (False, _(u'You should choose one channel for split operation.'))\n return (True, None)", "docstring": "It controls the selection from the form according\nto the operations, and returns an error message\nif it does not comply with the rules.\n\nArgs:\nform_info: Channel or subscriber form from the user\n\nReturns: True or False\nerror message", "source": "codesearchnet"} {"code": "def require(self, entity_type, attribute_name=None):\n if (not attribute_name):\n attribute_name = entity_type\n self.requires += [(entity_type, attribute_name)]\n return self", "docstring": "The intent parser should require an entity of the provided type.\n\nArgs:\nentity_type(str): an entity type\nattribute_name(str): the name of the attribute on the parsed intent. Defaults to match entity_type.\n\nReturns:\nself: to continue modifications.", "source": "codesearchnet"} {"code": "def get_ilo_sso_url(self, ip=None):\n \n uri = \"{}/iloSsoUrl\".format(self.data[\"uri\"])\n\n if ip:\n uri = \"{}?ip={}\".format(uri, ip)\n\n return self._helper.do_get(uri)", "docstring": "Retrieves the URL to launch a Single Sign-On (SSO) session for the iLO web interface. 
If the server hardware is\nunsupported, the resulting URL will not use SSO and the iLO web interface will prompt for credentials.\nThis is not supported on G7/iLO3 or earlier servers.\n\nArgs:\nip: IP address or host name of the server's iLO management processor\n\nReturns:\nURL", "source": "juraj-google-style"} {"code": "def get_members(cls, session, team_or_id):\n \n if isinstance(team_or_id, Person):\n team_or_id = team_or_id.id\n return cls(\n '/teams/%d/members.json' % team_or_id,\n session=session,\n out_type=User,\n )", "docstring": "List the members for the team.\n\nArgs:\nteam_or_id (helpscout.models.Person or int): Team or the ID of\nthe team to get the folders for.\n\nReturns:\nRequestPaginator(output_type=helpscout.models.Users): Users\niterator.", "source": "juraj-google-style"} {"code": "def from_prev_calc(cls, prev_calc_dir, mode='gap', reciprocal_density=50, copy_chgcar=True, **kwargs):\n (vasprun, outcar) = get_vasprun_outcar(prev_calc_dir)\n prev_structure = get_structure_from_prev_run(vasprun, outcar, sym_prec=0)\n added_kpoints = []\n if (mode.lower() == 'gap'):\n bs = vasprun.get_band_structure()\n (vbm, cbm) = (bs.get_vbm()['kpoint'], bs.get_cbm()['kpoint'])\n if vbm:\n added_kpoints.append(vbm.frac_coords)\n if cbm:\n added_kpoints.append(cbm.frac_coords)\n files_to_transfer = {}\n if copy_chgcar:\n chgcars = sorted(glob.glob(str((Path(prev_calc_dir) / 'CHGCAR*'))))\n if chgcars:\n files_to_transfer['CHGCAR'] = str(chgcars[(- 1)])\n return cls(structure=prev_structure, added_kpoints=added_kpoints, reciprocal_density=reciprocal_density, mode=mode, files_to_transfer=files_to_transfer, **kwargs)", "docstring": "Generate a set of Vasp input files for HSE calculations from a\ndirectory of previous Vasp run. if mode==\"gap\", it explicitly adds VBM\nand CBM of the prev run to the k-point list of this run.\n\nArgs:\nprev_calc_dir (str): Directory containing the outputs\n(vasprun.xml and OUTCAR) of previous vasp run.\nmode (str): Either \"uniform\", \"gap\" or \"line\"\nreciprocal_density (int): density of k-mesh\ncopy_chgcar (bool): whether to copy CHGCAR of previous run\n\\\\*\\\\*kwargs: All kwargs supported by MPHSEBSStaticSet,\nother than prev_structure which is determined from the previous\ncalc dir.", "source": "codesearchnet"} {"code": "def _HashBlock(self, block, start, end):\n for finger in self.fingers:\n expected_range = finger.CurrentRange()\n if (expected_range is None):\n continue\n if ((start > expected_range.start) or ((start == expected_range.start) and (end > expected_range.end)) or ((start < expected_range.start) and (end > expected_range.start))):\n raise RuntimeError('Cutting across fingers.')\n if (start == expected_range.start):\n finger.HashBlock(block)", "docstring": "_HashBlock feeds data blocks into the hashers of fingers.\n\nThis function must be called before adjusting fingers for next\ninterval, otherwise the lack of remaining ranges will cause the\nblock not to be hashed for a specific finger.\n\nStart and end are used to validate the expected ranges, to catch\nunexpected use of that logic.\n\nArgs:\nblock: The data block.\nstart: Beginning offset of this block.\nend: Offset of the next byte after the block.\n\nRaises:\nRuntimeError: If the provided and expected ranges don't match.", "source": "codesearchnet"} {"code": "def testConcreteFunctionFlatSignatureError(self, conc_args=(), conc_kwargs=None, call_args=(), call_kwargs=None, error='.*', exception=TypeError):\n conc_args = conc_args() if callable(conc_args) else conc_args\n conc_kwargs = 
conc_kwargs() if callable(conc_kwargs) else conc_kwargs or {}\n call_args = call_args() if callable(call_args) else call_args\n call_kwargs = call_kwargs() if callable(call_kwargs) else call_kwargs or {}\n self.assertIsInstance(conc_args, tuple)\n self.assertIsInstance(call_args, tuple)\n self.assertIsInstance(conc_kwargs, dict)\n self.assertIsInstance(call_kwargs, dict)\n\n @polymorphic_function.function\n def func(x, y=5, *varargs, **kwargs):\n del y, varargs, kwargs\n return x\n conc = func.get_concrete_function(*conc_args, **conc_kwargs)\n with self.assertRaisesRegex(exception, error):\n self.evaluate(conc._call_with_flat_signature(call_args, call_kwargs))", "docstring": "Tests for errors in the flat signature.\n\nArgs:\nconc_args: Positional arguments used for get_concrete_function.\nconc_kwargs: Keyword arguments used for get_concrete_function.\ncall_args: Positional arguments used to call the function.\ncall_kwargs: Keyword arguments used to call the function.\nerror: Expected exception message.\nexception: Expected exception type.", "source": "github-repos"} {"code": "def replace_with_vptq_linear(model, quantization_config=None, modules_to_not_convert=None, current_key_name=None, has_been_replaced=False):\n modules_to_not_convert = ['lm_head'] if not modules_to_not_convert else modules_to_not_convert\n for name, module in model.named_children():\n if current_key_name is None:\n current_key_name = []\n current_key_name.append(name)\n layer_name = '.'.join(current_key_name)\n shared_layer_config = quantization_config.shared_layer_config\n config_for_layers = quantization_config.config_for_layers\n if isinstance(module, nn.Linear) and layer_name not in modules_to_not_convert and (layer_name in config_for_layers or current_key_name[-1] in shared_layer_config):\n layer_params = config_for_layers.get(layer_name, None) or shared_layer_config.get(current_key_name[-1], None)\n with init_empty_weights():\n in_features = module.in_features\n out_features = module.out_features\n model._modules[name] = VQuantLinear(in_features, out_features, vector_lens=layer_params['vector_lens'], num_centroids=layer_params['num_centroids'], num_res_centroids=layer_params['num_res_centroids'], group_num=layer_params['group_num'], group_size=layer_params['group_size'], outlier_size=layer_params['outlier_size'], indices_as_float=layer_params['indices_as_float'], enable_norm=layer_params['enable_norm'], enable_perm=layer_params['enable_perm'], is_indice_packed=True, enable_proxy_error=False, bias=module.bias is not None)\n has_been_replaced = True\n model._modules[name].requires_grad_(False)\n if len(list(module.children())) > 0:\n _, has_been_replaced = replace_with_vptq_linear(module, quantization_config=quantization_config, modules_to_not_convert=modules_to_not_convert, current_key_name=current_key_name, has_been_replaced=has_been_replaced)\n current_key_name.pop(-1)\n return (model, has_been_replaced)", "docstring": "Public method that recursively replaces the Linear layers of the given model with VPTQ quantized layers.\n`accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the\nconversion has been successful or not.\n\nArgs:\nmodel (`torch.nn.Module`):\nThe model to convert, can be any `torch.nn.Module` instance.\nquantization_config (`VptqConfig`):\nThe quantization config object that contains the quantization parameters.\nmodules_to_not_convert (`List[`str`]`, *optional*, defaults to `[\"lm_head\"]`):\nNames of the modules to not convert in `VQuantLinear`. 
In practice we keep the `lm_head` in full precision\nfor numerical stability reasons.\ncurrent_key_name (`list`, *optional*):\nA list that contains the current key name. This is used for recursion and should not be passed by the user.\nhas_been_replaced (`bool`, *optional*):\nA boolean that indicates if the conversion has been successful or not. This is used for recursion and\nshould not be passed by the user.", "source": "github-repos"} {"code": "def get_object(cls, api_token, droplet_id):\n droplet = cls(token=api_token, id=droplet_id)\n droplet.load()\n return droplet", "docstring": "Class method that will return a Droplet object by ID.\n\nArgs:\napi_token (str): token\ndroplet_id (int): droplet id", "source": "codesearchnet"} {"code": "def is_connected(self):\n if (self._client is not None):\n try:\n self._client.server_info()\n except ConnectionFailure:\n return False\n return True\n else:\n return False", "docstring": "Returns the connection status of the data store.\n\nReturns:\nbool: ``True`` if the data store is connected to the MongoDB server.", "source": "codesearchnet"} {"code": "def call_for_each_tower(\n towers, func, devices=None, use_vs=None):\n \n\n ret = []\n if devices is not None:\n assert len(devices) == len(towers)\n if use_vs is not None:\n assert len(use_vs) == len(towers)\n\n tower_names = ['tower{}'.format(idx) for idx in range(len(towers))]\n\n for idx, t in enumerate(towers):\n device = devices[idx] if devices is not None else '/gpu:{}'.format(t)\n usevs = use_vs[idx] if use_vs is not None else False\n reuse = not usevs and idx > 0\n with tfv1.device(device), _maybe_reuse_vs(reuse), TrainTowerContext(\n tower_names[idx],\n vs_name=tower_names[idx] if usevs else '',\n index=idx, total=len(towers)):\n if len(str(device)) < 10: \n logger.info(\"Building graph for training tower {} on device {} ...\".format(idx, device))\n else:\n logger.info(\"Building graph for training tower {} ...\".format(idx))\n\n \n \n with override_to_local_variable(enable=usevs):\n ret.append(func())\n return ret", "docstring": "Run `func` on all GPUs (towers) and return the results.\n\nArgs:\ntowers (list[int]): a list of GPU id.\nfunc: a lambda to be called inside each tower\ndevices: a list of devices to be used. 
By default will use '/gpu:{tower}'\nuse_vs (list[bool]): list of use_vs to passed to TowerContext\n\nReturns:\nList of outputs of ``func``, evaluated on each tower.", "source": "juraj-google-style"} {"code": "def __init__(self, layers=None, name=None):\n super(functional.Functional, self).__init__(name=name, autocast=False)\n self.supports_masking = True\n self._compute_output_and_mask_jointly = True\n self._auto_track_sub_layers = False\n self._inferred_input_shape = None\n self._has_explicit_input_shape = False\n self._input_dtype = None\n self._layer_call_argspecs = {}\n self._created_nodes = set()\n self._graph_initialized = False\n self._use_legacy_deferred_behavior = False\n if layers:\n if not isinstance(layers, (list, tuple)):\n layers = [layers]\n for layer in layers:\n self.add(layer)", "docstring": "Creates a `Sequential` model instance.\n\nArgs:\nlayers: Optional list of layers to add to the model.\nname: Optional name for the model.", "source": "github-repos"} {"code": "def has_neigh(tag_name, params=None, content=None, left=True):\n\n def has_neigh_closure(element):\n if ((not element.parent) or (not (element.isTag() and (not element.isEndTag())))):\n return False\n childs = element.parent.childs\n childs = filter((lambda x: ((x.isTag() and (not x.isEndTag())) or x.getContent().strip() or (x is element))), childs)\n if (len(childs) <= 1):\n return False\n ioe = childs.index(element)\n if (left and (ioe > 0)):\n return is_equal_tag(childs[(ioe - 1)], tag_name, params, content)\n if ((not left) and ((ioe + 1) < len(childs))):\n return is_equal_tag(childs[(ioe + 1)], tag_name, params, content)\n return False\n return has_neigh_closure", "docstring": "This function generates functions, which matches all tags with neighbours\ndefined by parameters.\n\nArgs:\ntag_name (str): Tag has to have neighbour with this tagname.\nparams (dict): Tag has to have neighbour with this parameters.\nparams (str): Tag has to have neighbour with this content.\nleft (bool, default True): Tag has to have neigbour on the left, or\nright (set to ``False``).\n\nReturns:\nbool: True for every matching tag.\n\nNote:\nThis function can be used as parameter for ``.find()`` method in\nHTMLElement.", "source": "codesearchnet"} {"code": "def inverse_stft_window_fn(frame_step, forward_window_fn=window_ops.hann_window, name=None):\n\n def inverse_stft_window_fn_inner(frame_length, dtype):\n \n with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):\n frame_step_ = ops.convert_to_tensor(frame_step, name='frame_step')\n frame_step_.shape.assert_has_rank(0)\n frame_length = ops.convert_to_tensor(frame_length, name='frame_length')\n frame_length.shape.assert_has_rank(0)\n forward_window = forward_window_fn(frame_length, dtype=dtype)\n denom = math_ops.square(forward_window)\n overlaps = -(-frame_length // frame_step_)\n denom = array_ops.pad(denom, [(0, overlaps * frame_step_ - frame_length)])\n denom = array_ops.reshape(denom, [overlaps, frame_step_])\n denom = math_ops.reduce_sum(denom, 0, keepdims=True)\n denom = array_ops.tile(denom, [overlaps, 1])\n denom = array_ops.reshape(denom, [overlaps * frame_step_])\n return forward_window / denom[:frame_length]\n return inverse_stft_window_fn_inner", "docstring": "Generates a window function that can be used in `inverse_stft`.\n\nConstructs a window that is equal to the forward window with a further\npointwise amplitude correction. 
`inverse_stft_window_fn` is equivalent to\n`forward_window_fn` in the case where it would produce an exact inverse.\n\nSee examples in `inverse_stft` documentation for usage.\n\nArgs:\nframe_step: An integer scalar `Tensor`. The number of samples to step.\nforward_window_fn: window_fn used in the forward transform, `stft`.\nname: An optional name for the operation.\n\nReturns:\nA callable that takes a window length and a `dtype` keyword argument and\nreturns a `[window_length]` `Tensor` of samples in the provided datatype.\nThe returned window is suitable for reconstructing original waveform in\ninverse_stft.", "source": "github-repos"} {"code": "def _get_max_page(dom):\n div = dom.find('div', {'class': 'razeniKnihListovani'})\n if (not div):\n return 1\n links = div[0].find('a')\n max_page = filter((lambda x: (('href' in x.params) and ('pageindex=' in x.params['href']))), links)\n max_page = map((lambda x: x.params['href'].split('pageindex=')[(- 1)]), max_page)\n max_page = filter((lambda x: x.isdigit()), max_page)\n max_page = map((lambda x: int(x)), max_page)\n if (not max_page):\n return 1\n return max(max_page)", "docstring": "Try to guess how much pages are in book listing.\n\nArgs:\ndom (obj): HTMLElement container of the page with book list.\n\nReturns:\nint: Number of pages for given category.", "source": "codesearchnet"} {"code": "def mark_backward(output_tensor, used_node_names):\n \n op = output_tensor.op\n if op.name in used_node_names:\n return\n used_node_names.add(op.name)\n for input_tensor in op.inputs:\n mark_backward(input_tensor, used_node_names)\n for control_input_op in op.control_inputs:\n used_node_names.add(control_input_op.name)\n for input_tensor in control_input_op.inputs:\n mark_backward(input_tensor, used_node_names)", "docstring": "Function to propagate backwards in the graph and mark nodes as used.\n\nTraverses recursively through the graph from the end tensor, through the op\nthat generates the tensor, and then to the input tensors that feed the op.\nNodes encountered are stored in used_node_names.\n\nArgs:\noutput_tensor: A Tensor which we start the propagation.\nused_node_names: A list of strings, stores the name of nodes we've marked as\nvisited.", "source": "juraj-google-style"} {"code": "def get_plugin(self, identifier, cls=None):\n \n if ((cls is None or cls == 'provider')\n and identifier in self.available_providers):\n return self.available_providers[identifier]\n elif ((cls is None or cls == 'checker')\n and identifier in self.available_checkers):\n return self.available_checkers[identifier]\n return Config.load_local_plugin(identifier)", "docstring": "Return the plugin corresponding to the given identifier and type.\n\nArgs:\nidentifier (str): identifier of the plugin.\ncls (str): one of checker / provider.\n\nReturns:\nChecker/Provider: plugin class.", "source": "juraj-google-style"} {"code": "def _write_credentials_file(credentials_file, credentials):\n \n data = {'file_version': 2, 'credentials': {}}\n\n for key, credential in iteritems(credentials):\n credential_json = credential.to_json()\n encoded_credential = _helpers._from_bytes(base64.b64encode(\n _helpers._to_bytes(credential_json)))\n data['credentials'][key] = encoded_credential\n\n credentials_file.seek(0)\n json.dump(data, credentials_file)\n credentials_file.truncate()", "docstring": "Writes credentials to a file.\n\nRefer to :func:`_load_credentials_file` for the format.\n\nArgs:\ncredentials_file: An open file handle, must be read/write.\ncredentials: A dictionary mapping 
user-defined keys to an instance of\n:class:`oauth2client.client.Credentials`.", "source": "juraj-google-style"} {"code": "def __init__(self, primals, tangents):\n self._accumulator = pywrap_tfe.TFE_Py_ForwardAccumulatorNew(False)\n self._recording = False\n primal_ids = set()\n for primal in nest.flatten(primals):\n if id(primal) in primal_ids:\n raise ValueError('Tensor {} was specified as a primal multiple times. This may indicate an error. If it was intended, please sum the corresponding tangents.')\n primal_ids.add(id(primal))\n self._watch(primals, tangents)", "docstring": "Specify tensors to watch and their Jacobian-vector products.\n\nMathematically, `tangents` is a vector right-multiplying the Jacobian matrix\n(a Jacobian-vector product) for the function computed while this accumulator\nis active. Since JVPs are computed in forward mode as the computation\nhappens, this vector must be supplied in advance.\n\nListing a single tensor multiple times in `primals` raises an\nexception. Excluding a tensor from `primals` is equivalent to watching it\nwith a tangent tensor of zeros.\n\nArgs:\nprimals: A tensor or nested structure of tensors to watch.\ntangents: A tensor or nested structure of tensors, with the same nesting\nstructure as `primals`, with each element being a vector with the same\nsize as the corresponding primal element.\n\nRaises:\nValueError: If the same tensor or variable is specified multiple times in\n`primals`.", "source": "github-repos"} {"code": "def pull(self, device_filename, dest_file=None, timeout_ms=None):\n \n should_return_data = dest_file is None\n if isinstance(dest_file, six.string_types):\n dest_file = open(dest_file, 'w')\n elif dest_file is None:\n dest_file = six.StringIO()\n self.filesync_service.recv(device_filename, dest_file,\n timeouts.PolledTimeout.from_millis(timeout_ms))\n if should_return_data:\n return dest_file.getvalue()", "docstring": "Pull file from device.\n\nArguments:\ndevice_filename: The filename on the device to pull.\ndest_file: If set, a filename or writable file-like object.\ntimeout_ms: Expected timeout for the pull.\n\nReturns:\nThe file data if dest_file is not set, None otherwise.", "source": "juraj-google-style"} {"code": "def load_fasta_file_as_dict_of_seqs(filename):\n \n\n results = {}\n records = load_fasta_file(filename)\n for r in records:\n results[r.id] = str(r.seq)\n\n return results", "docstring": "Load a FASTA file and return the sequences as a dict of {ID: sequence string}\n\nArgs:\nfilename (str): Path to the FASTA file to load\n\nReturns:\ndict: Dictionary of IDs to their sequence strings", "source": "juraj-google-style"} {"code": "def check(self, version):\n \n\n for disjunct in self._disjuncts:\n if self._check_insersection(version, disjunct):\n return True\n\n return False", "docstring": "Check that a version is inside this SemanticVersionRange\n\nArgs:\nversion (SemanticVersion): The version to check\n\nReturns:\nbool: True if the version is included in the range, False if not", "source": "juraj-google-style"} {"code": "def _clean_isbn(isbn):\n if isinstance(isbn, basestring):\n isbn = list(isbn.lower())\n isbn = filter((lambda x: (x.isdigit() or (x == 'x'))), isbn)\n return map((lambda x: (10 if (x == 'x') else int(x))), isbn)", "docstring": "Remove all non-digit and non \"x\" characters from given string.\n\nArgs:\nisbn (str): isbn string, which will be cleaned.\n\nReturns:\nlist: array of numbers (if \"x\" is found, it is converted to 10).", "source": "codesearchnet"} {"code": "def 
_register_callback(self, cb):\n \n if isinstance(cb, (list, tuple)):\n for x in cb:\n self._register_callback(x)\n return\n assert isinstance(cb, Callback), cb\n assert not isinstance(self._callbacks, Callbacks), \\\n \"Cannot register more callbacks after trainer was setup!\"\n if not self.is_chief and cb.chief_only:\n logger.warn(\"Callback {} is chief-only, skipped.\".format(str(cb)))\n return False\n else:\n self._callbacks.append(cb)\n return True", "docstring": "Register callbacks to the trainer.\nIt can only be called before :meth:`Trainer.train()`.\n\nArgs:\ncb (Callback or [Callback]): a callback or a list of callbacks\n\nReturns:\nsucceed or not", "source": "juraj-google-style"} {"code": "async def subscriptions(self, request):\n if (not self._accepting):\n return web.Response(status=503)\n web_sock = web.WebSocketResponse()\n (await web_sock.prepare(request))\n async for msg in web_sock:\n if (msg.type == aiohttp.WSMsgType.TEXT):\n (await self._handle_message(web_sock, msg.data))\n elif (msg.type == aiohttp.WSMsgType.ERROR):\n LOGGER.warning('Web socket connection closed with exception %s', web_sock.exception())\n (await web_sock.close())\n (await self._handle_unsubscribe(web_sock))\n return web_sock", "docstring": "Handles requests for new subscription websockets.\n\nArgs:\nrequest (aiohttp.Request): the incoming request\n\nReturns:\naiohttp.web.WebSocketResponse: the websocket response, when the\nresulting websocket is closed", "source": "codesearchnet"} {"code": "def create_d1_dn_subject(common_name_str):\n return cryptography.x509.Name([cryptography.x509.NameAttribute(cryptography.x509.oid.NameOID.COUNTRY_NAME, 'US'), cryptography.x509.NameAttribute(cryptography.x509.oid.NameOID.STATE_OR_PROVINCE_NAME, 'California'), cryptography.x509.NameAttribute(cryptography.x509.oid.NameOID.LOCALITY_NAME, 'San Francisco'), cryptography.x509.NameAttribute(cryptography.x509.oid.NameOID.ORGANIZATION_NAME, 'Root CA'), cryptography.x509.NameAttribute(cryptography.x509.oid.NameOID.COMMON_NAME, 'ca.ca.com')])", "docstring": "Create the DN Subject for certificate that will be used in a DataONE environment.\n\nThe DN is formatted into a DataONE subject, which is used in authentication,\nauthorization and event tracking.\n\nArgs:\ncommon_name_str: str\nDataONE uses simple DNs without physical location information, so only the\n``common_name_str`` (``CommonName``) needs to be specified.\n\nFor Member Node Client Side certificates or CSRs, ``common_name_str`` is the\n``node_id``, e.g., ``urn:node:ABCD`` for production, or\n``urn:node:mnTestABCD`` for the test environments.\n\nFor a local CA, something like ``localCA`` may be used.\n\nFor a locally trusted client side certificate, something like\n``localClient`` may be used.", "source": "codesearchnet"} {"code": "def define_both_methods(class_name, class_dict, old_name, new_name):\n assert ((old_name not in class_dict) or (new_name not in class_dict)), 'Class \"{}\" cannot define both \"{}\" and \"{}\" methods.'.format(class_name, old_name, new_name)\n if (old_name in class_dict):\n class_dict[new_name] = class_dict[old_name]\n elif (new_name in class_dict):\n class_dict[old_name] = class_dict[new_name]", "docstring": "Function to help CamelCase to PEP8 style class methods migration.\n\nFor any class definition:\n1. Assert it does not define both old and new methods,\notherwise it does not work.\n2. If it defines the old method, create the same new method.\n3. 
If it defines the new method, create the same old method.\n\nArgs:\nclass_name: the class name.\nclass_dict: the class dictionary.\nold_name: old method's name.\nnew_name: new method's name.\n\nRaises:\nAssertionError: raised when the class defines both the old_name and\nnew_name.", "source": "codesearchnet"} {"code": "def _GetNextLogCountPerToken(token):\n global _log_counter_per_token\n _log_counter_per_token[token] = 1 + _log_counter_per_token.get(token, -1)\n return _log_counter_per_token[token]", "docstring": "Wrapper for _log_counter_per_token.\n\nArgs:\ntoken: The token for which to look up the count.\n\nReturns:\nThe number of times this function has been called with\n*token* as an argument (starting at 0)", "source": "github-repos"} {"code": "def requires_grad(self) -> bool:\n if self._rot_mats is not None:\n return self._rot_mats.requires_grad\n elif self._quats is not None:\n return self._quats.requires_grad\n else:\n raise ValueError('Both rotations are None')", "docstring": "Returns the requires_grad property of the underlying rotation\n\nReturns:\nThe requires_grad property of the underlying tensor", "source": "github-repos"} {"code": "def copy2(src, dst, metadata=None, retry_params=None):\n common.validate_file_path(src)\n common.validate_file_path(dst)\n if (metadata is None):\n metadata = {}\n copy_meta = 'COPY'\n else:\n copy_meta = 'REPLACE'\n metadata.update({'x-goog-copy-source': src, 'x-goog-metadata-directive': copy_meta})\n api = storage_api._get_storage_api(retry_params=retry_params)\n (status, resp_headers, content) = api.put_object(api_utils._quote_filename(dst), headers=metadata)\n errors.check_status(status, [200], src, metadata, resp_headers, body=content)", "docstring": "Copy the file content from src to dst.\n\nArgs:\nsrc: /bucket/filename\ndst: /bucket/filename\nmetadata: a dict of metadata for this copy. If None, old metadata is copied.\nFor example, {'x-goog-meta-foo': 'bar'}.\nretry_params: An api_utils.RetryParams for this call to GCS. If None,\nthe default one is used.\n\nRaises:\nerrors.AuthorizationError: if authorization failed.\nerrors.NotFoundError: if an object that's expected to exist doesn't.", "source": "codesearchnet"} {"code": "def __init__(self, dataframe, map_info):\n \n self.df = dataframe\n self.map_info = map_info", "docstring": "Reads genotypes from a pandas DataFrame.\n\nArgs:\ndataframe (pandas.DataFrame): The data.\nmap_info (pandas.DataFrame): The mapping information.\n\nNote\n====\nThe index of the dataframe should be the sample IDs. The index of\nthe map_info should be the variant name, and there should be\ncolumns named chrom and pos.", "source": "juraj-google-style"} {"code": "def prepare_for_send(self, full_url=False):\n \n assert self.url\n assert self.method\n assert self.version\n\n url_info = self.url_info\n\n if 'Host' not in self.fields:\n self.fields['Host'] = url_info.hostname_with_port\n\n if not full_url:\n if url_info.query:\n self.resource_path = '{0}?{1}'.format(url_info.path, url_info.query)\n else:\n self.resource_path = url_info.path\n else:\n self.resource_path = url_info.url", "docstring": "Modify the request to be suitable for HTTP server.\n\nArgs:\nfull_url (bool): Use full URL as the URI. 
By default, only\nthe path of the URL is given to the server.", "source": "juraj-google-style"} {"code": "def open_required(func):\n \n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n \n if not self.opened():\n raise errors.JLinkException('J-Link DLL is not open.')\n elif not self.connected():\n raise errors.JLinkException('J-Link connection has been lost.')\n return func(self, *args, **kwargs)\n return wrapper", "docstring": "Decorator to specify that the J-Link DLL must be opened, and a\nJ-Link connection must be established.\n\nArgs:\nfunc (function): function being decorated\n\nReturns:\nThe wrapper function.", "source": "juraj-google-style"} {"code": "def create_database_view(self, view: views.View, view_name: str) -> None:\n dataset = f'{self._view_dataset.project}.{self._view_dataset.dataset_id}'\n view_sql = f'CREATE OR REPLACE VIEW `{dataset}.{view_name}` AS\\n{self.to_sql(view)}'\n self._client.query(view_sql).result()", "docstring": "Creates a BigQuery view with the given name in the runner's view_dataset.\n\nArgs:\nview: the FHIR view that creates\nview_name: the view name passed to the CREATE OR REPLACE VIEW statement.\n\nRaises:\ngoogle.cloud.exceptions.GoogleAPICallError if the job failed.", "source": "github-repos"} {"code": "def save(self, *, auto_commit=False):\n \n try:\n db.session.add(self.resource)\n if auto_commit:\n db.session.commit()\n except SQLAlchemyError as ex:\n self.log.exception('Failed updating resource: {}'.format(ex))\n db.session.rollback()", "docstring": "Save the resource to the database\n\nArgs:\nauto_commit (bool): Automatically commit the transaction. Default: `False`\n\nReturns:\n`None`", "source": "juraj-google-style"} {"code": "def check_denotation(target_values, predicted_values):\n \n \n if len(target_values) != len(predicted_values):\n return False\n \n for target in target_values:\n if not any(target.match(pred) for pred in predicted_values):\n return False\n return True", "docstring": "Return True if the predicted denotation is correct.\n\nArgs:\ntarget_values (list[Value])\npredicted_values (list[Value])\nReturns:\nbool", "source": "juraj-google-style"} {"code": "def get_list_index(lst, index_or_name):\n \n if isinstance(index_or_name, six.integer_types):\n return index_or_name\n\n return lst.index(index_or_name)", "docstring": "Return the index of an element in the list.\n\nArgs:\nlst (list): The list.\nindex_or_name (int or str): The value of the reference element, or directly its numeric index.\n\nReturns:\n(int) The index of the element in the list.", "source": "juraj-google-style"} {"code": "def _text_io_wrapper(stream, mode, encoding, errors, newline):\n \n \n \n if \"t\" in mode and not hasattr(stream, 'encoding'):\n text_stream = TextIOWrapper(\n stream, encoding=encoding, errors=errors, newline=newline)\n yield text_stream\n text_stream.flush()\n\n \n else:\n yield stream", "docstring": "Wrap a binary stream to Text stream.\n\nArgs:\nstream (file-like object): binary stream.\nmode (str): Open mode.\nencoding (str): Stream encoding.\nerrors (str): Decoding error handling.\nnewline (str): Universal newlines", "source": "juraj-google-style"} {"code": "async def inspect(self, name: str) -> Mapping:\n response = (await self.docker._query_json('images/{name}/json'.format(name=name)))\n return response", "docstring": "Return low-level information about an image\n\nArgs:\nname: name of the image", "source": "codesearchnet"} {"code": "def name_from_base(base, max_length=63, short=False):\n timestamp = 
(sagemaker_short_timestamp() if short else sagemaker_timestamp())\n trimmed_base = base[:((max_length - len(timestamp)) - 1)]\n return '{}-{}'.format(trimmed_base, timestamp)", "docstring": "Append a timestamp to the provided string.\n\nThis function assures that the total length of the resulting string is not\nlonger than the specified max length, trimming the input parameter if necessary.\n\nArgs:\nbase (str): String used as prefix to generate the unique name.\nmax_length (int): Maximum length for the resulting string.\nshort (bool): Whether or not to use a truncated timestamp.\n\nReturns:\nstr: Input parameter with appended timestamp.", "source": "codesearchnet"} {"code": "def setup_suite(self, config):\n pass", "docstring": "Function used to add test classes, has to be implemented by child class.\n\nArgs:\nconfig: config_parser.TestRunConfig, the config provided by google3 infra.\n\nRaises:\nError: when setup_suite is not implemented by child class.", "source": "github-repos"} {"code": "def load_orthologs(fo: IO, metadata: dict):\n version = metadata['metadata']['version']\n with timy.Timer('Load Orthologs') as timer:\n arango_client = arangodb.get_client()\n belns_db = arangodb.get_belns_handle(arango_client)\n arangodb.batch_load_docs(belns_db, orthologs_iterator(fo, version), on_duplicate='update')\n log.info('Load orthologs', elapsed=timer.elapsed, source=metadata['metadata']['source'])\n remove_old_ortholog_edges = f\n remove_old_ortholog_nodes = f\n arangodb.aql_query(belns_db, remove_old_ortholog_edges)\n arangodb.aql_query(belns_db, remove_old_ortholog_nodes)\n metadata['_key'] = f\"Orthologs_{metadata['metadata']['source']}\"\n try:\n belns_db.collection(arangodb.belns_metadata_name).insert(metadata)\n except ArangoError as ae:\n belns_db.collection(arangodb.belns_metadata_name).replace(metadata)", "docstring": "Load orthologs into ArangoDB\n\nArgs:\nfo: file obj - orthologs file\nmetadata: dict containing the metadata for orthologs", "source": "codesearchnet"} {"code": "def EWFGlobPathSpec(file_system, path_spec):\n if (not path_spec.HasParent()):\n raise errors.PathSpecError('Unsupported path specification without parent.')\n parent_path_spec = path_spec.parent\n parent_location = getattr(parent_path_spec, 'location', None)\n if (not parent_location):\n raise errors.PathSpecError('Unsupported parent path specification without location.')\n (parent_location, _, segment_extension) = parent_location.rpartition('.')\n segment_extension_start = segment_extension[0]\n segment_extension_length = len(segment_extension)\n if ((segment_extension_length not in [3, 4]) or (not segment_extension.endswith('01')) or ((segment_extension_length == 3) and (segment_extension_start not in ['E', 'e', 's'])) or ((segment_extension_length == 4) and (not segment_extension.startswith('Ex')))):\n raise errors.PathSpecError('Unsupported parent path specification invalid segment file extension: {0:s}'.format(segment_extension))\n segment_number = 1\n segment_files = []\n while True:\n segment_location = '{0:s}.{1:s}'.format(parent_location, segment_extension)\n kwargs = path_spec_factory.Factory.GetProperties(parent_path_spec)\n kwargs['location'] = segment_location\n if (parent_path_spec.parent is not None):\n kwargs['parent'] = parent_path_spec.parent\n segment_path_spec = path_spec_factory.Factory.NewPathSpec(parent_path_spec.type_indicator, **kwargs)\n if (not file_system.FileEntryExistsByPathSpec(segment_path_spec)):\n break\n segment_files.append(segment_path_spec)\n segment_number += 1\n if 
(segment_number <= 99):\n if (segment_extension_length == 3):\n segment_extension = '{0:s}{1:02d}'.format(segment_extension_start, segment_number)\n elif (segment_extension_length == 4):\n segment_extension = '{0:s}x{1:02d}'.format(segment_extension_start, segment_number)\n else:\n segment_index = (segment_number - 100)\n if (segment_extension_start in ['e', 's']):\n letter_offset = ord('a')\n else:\n letter_offset = ord('A')\n (segment_index, remainder) = divmod(segment_index, 26)\n third_letter = chr((letter_offset + remainder))\n (segment_index, remainder) = divmod(segment_index, 26)\n second_letter = chr((letter_offset + remainder))\n first_letter = chr((ord(segment_extension_start) + segment_index))\n if (first_letter in ['[', '{']):\n raise RuntimeError('Unsupported number of segment files.')\n if (segment_extension_length == 3):\n segment_extension = '{0:s}{1:s}{2:s}'.format(first_letter, second_letter, third_letter)\n elif (segment_extension_length == 4):\n segment_extension = '{0:s}x{1:s}{2:s}'.format(first_letter, second_letter, third_letter)\n return segment_files", "docstring": "Globs for path specifications according to the EWF naming schema.\n\nArgs:\nfile_system (FileSystem): file system.\npath_spec (PathSpec): path specification.\n\nReturns:\nlist[PathSpec]: path specifications that match the glob.\n\nRaises:\nPathSpecError: if the path specification is invalid.\nRuntimeError: if the maximum number of supported segment files is\nreached.", "source": "codesearchnet"} {"code": "def unwrap(self, value):\n return self._extended._local_results(value)", "docstring": "Returns the list of all local per-replica values contained in `value`.\n\nDEPRECATED: Please use `experimental_local_results` instead.\n\nNote: This only returns values on the workers initiated by this client.\nWhen using a `tf.distribute.Strategy` like\n`tf.distribute.experimental.MultiWorkerMirroredStrategy`, each worker\nwill be its own client, and this function will only return values\ncomputed on that worker.\n\nArgs:\nvalue: A value returned by `experimental_run()`,\n`extended.call_for_each_replica()`, or a variable created in `scope`.\n\nReturns:\nA tuple of values contained in `value`. 
If `value` represents a single\nvalue, this returns `(value,).`", "source": "github-repos"} {"code": "def _ShardTestEmbeddings(self, weights, biases, num_shards):\n with ops.Graph().as_default() as g:\n sharded_weights = variable_scope.get_variable('w', partitioner=partitioned_variables.fixed_size_partitioner(num_shards), initializer=constant_op.constant(weights))\n sharded_biases = variable_scope.get_variable('b', partitioner=partitioned_variables.fixed_size_partitioner(num_shards), initializer=constant_op.constant(biases))\n with self.session(graph=g) as sess:\n self.evaluate(variables.global_variables_initializer())\n return self.evaluate([list(sharded_weights), list(sharded_biases)])", "docstring": "Shards the weights and biases returned by _GenerateTestData.\n\nArgs:\nweights: The weights returned by _GenerateTestData.\nbiases: The biases returned by _GenerateTestData.\nnum_shards: The number of shards to create.\n\nReturns:\nsharded_weights: A list of size `num_shards` containing all the weights.\nsharded_biases: A list of size `num_shards` containing all the biases.", "source": "github-repos"} {"code": "def read_model_from_bytearray(model_bytearray):\n model = convert_bytearray_to_object(model_bytearray)\n if sys.byteorder == 'big':\n byte_swap_tflite_model_obj(model, 'little', 'big')\n for buffer in model.buffers:\n if buffer.offset:\n buffer.data = model_bytearray[buffer.offset:buffer.offset + buffer.size]\n buffer.offset = 0\n buffer.size = 0\n for subgraph in model.subgraphs:\n for op in subgraph.operators:\n if op.largeCustomOptionsOffset:\n op.customOptions = model_bytearray[op.largeCustomOptionsOffset:op.largeCustomOptionsOffset + op.largeCustomOptionsSize]\n op.largeCustomOptionsOffset = 0\n op.largeCustomOptionsSize = 0\n return model", "docstring": "Reads a tflite model as a python object.\n\nArgs:\nmodel_bytearray: TFLite model in bytearray format.\n\nReturns:\nA python object corresponding to the input tflite file.", "source": "github-repos"} {"code": "def ParseFloat(text):\n \n try:\n \n return float(text)\n except ValueError:\n \n if _FLOAT_INFINITY.match(text):\n if text[0] == '-':\n return float('-inf')\n else:\n return float('inf')\n elif _FLOAT_NAN.match(text):\n return float('nan')\n else:\n \n try:\n return float(text.rstrip('f'))\n except ValueError:\n raise ValueError('Couldn\\'t parse float: %s' % text)", "docstring": "Parse a floating point number.\n\nArgs:\ntext: Text to parse.\n\nReturns:\nThe number parsed.\n\nRaises:\nValueError: If a floating point number couldn't be parsed.", "source": "juraj-google-style"} {"code": "def get_branch(profile, name):\n \n ref = \"heads/\" + name\n data = refs.get_ref(profile, ref)\n return data", "docstring": "Fetch a branch.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nname\nThe name of the branch to fetch.\n\nReturns:\nA dict with data baout the branch.", "source": "juraj-google-style"} {"code": "def __init__(self, file_entry):\n \n super(LVMVolume, self).__init__(file_entry.name)\n self._file_entry = file_entry", "docstring": "Initializes a LVM volume.\n\nArgs:\nfile_entry (LVMFileEntry): a LVM file entry.", "source": "juraj-google-style"} {"code": "def SmartUnicode(string):\n \n if isinstance(string, Text):\n return string\n\n if isinstance(string, bytes):\n return string.decode(\"utf-8\", \"ignore\")\n\n \n \n if compatibility.PY2:\n return str(string).__native__()\n 
else:\n return str(string)", "docstring": "Returns a unicode object.\n\nThis function will always return a unicode object. It should be used to\nguarantee that something is always a unicode object.\n\nArgs:\nstring: The string to convert.\n\nReturns:\na unicode object.", "source": "juraj-google-style"} {"code": "def find_mapreduce_yaml(status_file=__file__):\n \n checked = set()\n yaml = _find_mapreduce_yaml(os.path.dirname(status_file), checked)\n if not yaml:\n yaml = _find_mapreduce_yaml(os.getcwd(), checked)\n return yaml", "docstring": "Traverse directory trees to find mapreduce.yaml file.\n\nBegins with the location of status.py and then moves on to check the working\ndirectory.\n\nArgs:\nstatus_file: location of status.py, overridable for testing purposes.\n\nReturns:\nthe path of mapreduce.yaml file or None if not found.", "source": "juraj-google-style"} {"code": "def __eq__(self, other):\n \n if type(self) is type(other) and \\\n self._name == other._name and \\\n self._params == other._params:\n return True\n return False", "docstring": "Two measurement options are the same if they are of the same type\nand have the same name and params.\n\nArgs:\nother (MeasOpts): Other Discriminator/Kernel.\n\nReturns:\nbool: are self and other equal.", "source": "juraj-google-style"} {"code": "def separated(self):\n separated_lls = collections.defaultdict(LabelList)\n for label in self.labels:\n separated_lls[label.value].add(label)\n for ll in separated_lls.values():\n ll.idx = self.idx\n return separated_lls", "docstring": "Create a separate Label-List for every distinct label-value.\n\nReturns:\ndict: A dictionary with distinct label-values as keys.\nEvery value is a LabelList containing only labels with the same value.\n\nExample:\n>>> ll = LabelList(idx='some', labels=[\n>>> Label('a', start=0, end=4),\n>>> Label('b', start=3.95, end=6.0),\n>>> Label('a', start=7.0, end=10.2),\n>>> Label('b', start=10.3, end=14.0)\n>>> ])\n>>> s = ll.separate()\n>>> s['a'].labels\n[Label('a', start=0, end=4), Label('a', start=7.0, end=10.2)]\n>>> s['b'].labels\n[Label('b', start=3.95, end=6.0), Label('b', start=10.3, end=14.0)]", "source": "codesearchnet"} {"code": "def _build_predicate_for_coding_in_value_set(expanded_value_set: value_set_pb2.ValueSet, coding_column: Optional[_sql_data_types.Identifier]=None) -> _sql_data_types.StandardSqlExpression:\n codes_per_system = {}\n for concept in expanded_value_set.expansion.contains:\n codes_per_system.setdefault(concept.system.value, []).append(concept.code.value)\n codes_per_system = list(codes_per_system.items())\n codes_per_system.sort(key=operator.itemgetter(0))\n for _, codes in codes_per_system:\n codes.sort()\n if coding_column is None:\n code_col = _sql_data_types.Identifier('code', _sql_data_types.String)\n system_col = _sql_data_types.Identifier('system', _sql_data_types.String)\n else:\n code_col = coding_column.dot('code', _sql_data_types.String)\n system_col = coding_column.dot('system', _sql_data_types.String)\n code_system_predicates = []\n for system, codes in codes_per_system:\n system = _sql_data_types.RawExpression('\"%s\"' % system, _sql_data_types.String)\n codes = [_sql_data_types.RawExpression('\"%s\"' % code, _sql_data_types.String) for code in codes]\n code_system_predicates.append(system_col.eq_(system).and_(code_col.in_(codes)))\n return functools.reduce(lambda acc, pred: acc.or_(pred), code_system_predicates)", "docstring": "Builds a predicate asserting the coding column is bound to the value_set.\n\nEnsures that the codings 
contained in `coding_column` are codings found in\n`expanded_value_set`.\nProduces SQL like:\n(`coding_column`.system = system1 AND `coding_column`.code IN (\ncode1, code2)) OR\n(`coding_column`.system = system2 AND `coding_column`.code IN (\ncode3, code4))\n\nArgs:\nexpanded_value_set: The expanded value set containing the coding values to\nassert membership against.\ncoding_column: The column containing the coding values. If given, columns\n`coding_column`.system and `coding_column`.code will be referenced in\nthe predicate. If not given, columns 'system' and 'code' will be\nreferenced.\n\nReturns:\nThe SQL for the value set binding predicate.", "source": "github-repos"} {"code": "def _build_dict(my_dict, keys, values):\n temp = my_dict\n for (depth, key) in enumerate(keys):\n if (depth < (len(keys) - 1)):\n if (key not in temp):\n temp[key] = dict()\n temp = temp[key]\n elif (key not in temp):\n temp[key] = values\n else:\n temp[key] = {**temp[key], **values}\n return my_dict", "docstring": "Build a dictionary from a set of redis hashes.\n\nkeys = ['a', 'b', 'c']\nvalues = {'value': 'foo'}\nmy_dict = {'a': {'b': {'c': {'value': 'foo'}}}}\n\nArgs:\nmy_dict (dict): Dictionary to add to\nkeys (list[str]): List of keys used to define hierarchy in my_dict\nvalues (dict): Values to add at to the dictionary at the key\nspecified by keys\n\nReturns:\ndict, new dictionary with values added at keys", "source": "codesearchnet"} {"code": "def _GetPropertyValue(self, parser_mediator, properties, property_name):\n property_value = properties.get(property_name, None)\n if isinstance(property_value, py2to3.BYTES_TYPE):\n try:\n property_value = property_value.decode('utf-8')\n except UnicodeDecodeError:\n parser_mediator.ProduceExtractionWarning('unable to decode property: {0:s}'.format(property_name))\n return property_value", "docstring": "Retrieves a property value.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nproperties (dict[str, object]): properties.\nproperty_name (str): name of the property.\n\nReturns:\nstr: property value.", "source": "codesearchnet"} {"code": "def run_inference(examples, serving_bundle):\n \n batch_size = 64\n if serving_bundle.estimator and serving_bundle.feature_spec:\n \n preds = serving_bundle.estimator.predict(\n lambda: tf.data.Dataset.from_tensor_slices(\n tf.parse_example([ex.SerializeToString() for ex in examples],\n serving_bundle.feature_spec)).batch(batch_size))\n\n if serving_bundle.use_predict:\n preds_key = serving_bundle.predict_output_tensor\n elif serving_bundle.model_type == 'regression':\n preds_key = 'predictions'\n else:\n preds_key = 'probabilities'\n\n values = []\n for pred in preds:\n values.append(pred[preds_key])\n return common_utils.convert_prediction_values(values, serving_bundle)\n elif serving_bundle.custom_predict_fn:\n \n \n values = serving_bundle.custom_predict_fn(examples)\n return common_utils.convert_prediction_values(values, serving_bundle)\n else:\n return platform_utils.call_servo(examples, serving_bundle)", "docstring": "Run inference on examples given model information\n\nArgs:\nexamples: A list of examples that matches the model spec.\nserving_bundle: A `ServingBundle` object that contains the information to\nmake the inference request.\n\nReturns:\nA ClassificationResponse or RegressionResponse proto.", "source": "juraj-google-style"} {"code": "def reciprocal_no_nan(x, name=None):\n with ops.name_scope(name, 'reciprocal_no_nan', [x]) as 
scope:\n x = ops.convert_to_tensor(x, name='x')\n one = constant_op.constant(1, dtype=x.dtype.base_dtype, name='one')\n return gen_math_ops.div_no_nan(one, x, name=scope)", "docstring": "Performs a safe reciprocal operation, element wise.\n\nIf a particular element is zero, the reciprocal for that element is\nalso set to zero.\n\nFor example:\n```python\nx = tf.constant([2.0, 0.5, 0, 1], dtype=tf.float32)\ntf.math.reciprocal_no_nan(x) # [ 0.5, 2, 0.0, 1.0 ]\n```\n\nArgs:\nx: A `Tensor` of type `float16`, `float32`, `float64` `complex64` or\n`complex128`.\nname: A name for the operation (optional).\n\nReturns:\nA `Tensor` of same shape and type as `x`.\n\nRaises:\nTypeError: x must be of a valid dtype.", "source": "github-repos"} {"code": "def _minimize_peak_memory_list(graph):\n schedule = []\n bytes_freed = {}\n users_of = collections.defaultdict(set)\n in_degree = collections.defaultdict(int)\n operation_id = {}\n priority_queue = []\n for (i, operation_name) in enumerate(graph.get_all_operation_names()):\n operation_id[operation_name] = i\n for input_name in graph.get_operation_input_names(operation_name):\n if (operation_name in users_of[input_name]):\n continue\n users_of[input_name].add(operation_name)\n in_degree[operation_name] += 1\n for operation_name in graph.get_all_operation_names():\n bytes_freed[operation_name] = 0\n for input_name in graph.get_operation_input_names(operation_name):\n if ((len(users_of[input_name]) == 1) and (not graph.is_tensor_final(input_name))):\n bytes_freed[operation_name] += graph.get_tensor_size(input_name)\n for output_name in graph.get_operation_output_names(operation_name):\n if (users_of[output_name] or graph.is_tensor_final(output_name)):\n bytes_freed[operation_name] -= graph.get_tensor_size(output_name)\n for operation_name in graph.get_all_operation_names():\n if (in_degree[operation_name] == 0):\n heapq.heappush(priority_queue, ((- bytes_freed[operation_name]), operation_name))\n while priority_queue:\n (neg_bytes_freed, operation_name) = heapq.heappop(priority_queue)\n if (bytes_freed[operation_name] != (- neg_bytes_freed)):\n continue\n schedule.append(operation_id[operation_name])\n bytes_freed[operation_name] = None\n for output_name in graph.get_operation_output_names(operation_name):\n for other_operation_name in users_of[output_name]:\n in_degree[other_operation_name] -= 1\n if (in_degree[other_operation_name] == 0):\n heapq.heappush(priority_queue, ((- bytes_freed[other_operation_name]), other_operation_name))\n for input_name in graph.get_operation_input_names(operation_name):\n if (operation_name not in users_of[input_name]):\n continue\n users_of[input_name].remove(operation_name)\n if ((len(users_of[input_name]) != 1) or graph.is_tensor_final(output_name)):\n continue\n (other_operation_name,) = users_of[input_name]\n bytes_freed[other_operation_name] += graph.get_tensor_size(input_name)\n if (in_degree[other_operation_name] > 0):\n continue\n heapq.heappush(priority_queue, ((- bytes_freed[other_operation_name]), other_operation_name))\n return schedule", "docstring": "Computes schedule according to the greedy list heuristic.\n\nGreedy list heuristic: schedule the operation which results in the most bytes\nof memory being (immediately) freed.\nTODO(joshuawang): Experiment with tiebreaking by preferring more successors.\n\nArgs:\ngraph: an mtf.auto_mtf.graph_interface.GraphInterface.\n\nReturns:\nan iterable of integers representing the schedule.", "source": "codesearchnet"} {"code": "def GetPluginObjects(cls, plugin_names):\n \n 
plugin_objects = {}\n for plugin_name, plugin_class in iter(cls._plugin_classes.items()):\n if plugin_name not in plugin_names:\n continue\n\n plugin_objects[plugin_name] = plugin_class()\n\n return plugin_objects", "docstring": "Retrieves the plugin objects.\n\nArgs:\nplugin_names (list[str]): names of plugins that should be retrieved.\n\nReturns:\ndict[str, AnalysisPlugin]: analysis plugins per name.", "source": "juraj-google-style"} {"code": "def _create_key_value_cache_tensors(self, shape: Tuple[int, ...], device: torch.device) -> Tuple[torch.Tensor, torch.Tensor]:\n is_cpu_device = device == torch.device('cpu')\n key_cache = torch.zeros(shape, dtype=self._dtype, device=device, pin_memory=is_cpu_device)\n value_cache = torch.zeros(shape, dtype=self._dtype, device=device, pin_memory=is_cpu_device)\n torch._dynamo.mark_static_address(key_cache)\n torch._dynamo.mark_static_address(value_cache)\n return (key_cache, value_cache)", "docstring": "Creates K/V cache tensors on a device. Pins memory for CPU tensors. Marks them as static\naddresses for non-CPU tensors.\n\nArgs:\nshape (`Tuple[int, ...]`): Shape.\ndevice (`torch.device`): Device.\n\nReturns:\nKey and value cache tensors as a tuple.", "source": "github-repos"} {"code": "def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):\n vision_data = {}\n if image_sizes is not None:\n images_kwargs = Idefics3ProcessorKwargs._defaults.get('images_kwargs', {})\n images_kwargs.update(kwargs)\n num_image_patches = [self.image_processor.get_number_of_image_patches(*image_size, images_kwargs) for image_size in image_sizes]\n base_image_length = self.image_seq_len + 3\n col_length = self.image_seq_len + 2\n num_image_tokens = []\n for num_patches in num_image_patches:\n num_cols = num_rows = int(math.sqrt(num_patches - 1))\n row_length = col_length * num_cols + 1\n num_image_tokens.append(base_image_length + row_length * num_rows)\n vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})\n return MultiModalData(**vision_data)", "docstring": "Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.\n\nArgs:\nimage_sizes (`List[List[int]]`, *optional*):\nThe input sizes formatted as (height, width) per each image.\n\nReturns:\n`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided\ninput modalities, along with other useful data.", "source": "github-repos"} {"code": "def wbmax(self, value=None):\n \n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float '\n 'for field `wbmax`'.format(value))\n\n self._wbmax = value", "docstring": "Corresponds to IDD Field `wbmax`\nExtreme maximum wet-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `wbmax`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"} {"code": "def Print(self, output_writer):\n \n if self._names:\n output_writer.Write('\\tnames: {0:s}\\n'.format(\n ', '.join(self._names)))", "docstring": "Prints a human readable version of the filter.\n\nArgs:\noutput_writer (CLIOutputWriter): output writer.", "source": "juraj-google-style"} {"code": "def _set_class_parser(self, init_parser, methods_to_parse, cls):\n \n top_level_parents = [init_parser] if init_parser else []\n description = self._description or cls.__doc__\n top_level_parser = 
argparse.ArgumentParser(description=description,\n parents=top_level_parents,\n add_help=False,\n conflict_handler=\"resolve\")\n top_level_parser.add_argument(\"-h\", \"--help\", action=FullHelpAction,\n help=\"Display this help message\")\n parser_to_method = self._add_sub_parsers(top_level_parser,\n methods_to_parse,\n cls.__name__)\n \n \n if init_parser:\n parser_to_method[\"__init__\"] = \"__init__\"\n top_level_parser.call = self._get_parser_call_method(parser_to_method)\n cls.parser = top_level_parser", "docstring": "Creates the complete argument parser for the decorated class.\n\nArgs:\ninit_parser: argument parser for the __init__ method or None\nmethods_to_parse: dict of method name pointing to their associated\nargument parser\ncls: the class we are decorating\n\nReturns:\nThe decorated class with an added attribute 'parser'", "source": "juraj-google-style"} {"code": "def parse_branch_ref(filename):\n data = open(filename).read().strip()\n items = data.split(' ')\n if len(items) == 1:\n return None\n elif len(items) == 2 and items[0] == 'ref:':\n return items[1].strip()\n else:\n raise RuntimeError('Git directory has unparseable HEAD')", "docstring": "Given a filename of a .git/HEAD file return ref path.\n\nIn particular, if git is in detached head state, this will\nreturn None. If git is in attached head, it will return\nthe branch reference. E.g. if on 'master', the HEAD will\ncontain 'ref: refs/heads/master' so 'refs/heads/master'\nwill be returned.\n\nExample: parse_branch_ref(\".git/HEAD\")\nArgs:\nfilename: file to treat as a git HEAD file\nReturns:\nNone if detached head, otherwise ref subpath\nRaises:\nRuntimeError: if the HEAD file is unparseable.", "source": "github-repos"} {"code": "def get_categorical_feature_names(example):\n \n features = get_example_features(example)\n return sorted([\n feature_name for feature_name in features\n if features[feature_name].WhichOneof('kind') == 'bytes_list'\n ])", "docstring": "Returns a list of feature names for byte type features.\n\nArgs:\nexample: An example.\n\nReturns:\nA list of categorical feature names (e.g. 
['education', 'marital_status'] )", "source": "juraj-google-style"} {"code": "def process_action(resource, action, action_issuer='unknown'):\n \n from cinq_collector_aws import AWSRegionCollector\n\n func_action = action_mapper[resource.resource_type][action]\n extra_info = {}\n action_status = ActionStatus.UNKNOWN\n\n if func_action:\n if action_mapper[resource.resource_type]['service_name'] == 'lambda':\n client = get_aws_session(\n AWSAccount.get(dbconfig.get('rds_collector_account', AWSRegionCollector.ns, ''))\n ).client(\n 'lambda',\n dbconfig.get('rds_collector_region', AWSRegionCollector.ns, '')\n )\n else:\n client = get_aws_session(AWSAccount(resource.account)).client(\n action_mapper[resource.resource_type]['service_name'],\n region_name=resource.location\n )\n try:\n logger.info(f'Trying to {action} resource {resource.id} for account {resource.account.account_name} / region {resource.location}')\n action_status, extra_info = func_action(client, resource)\n Enforcement.create(resource.account.account_id, resource.id, action, datetime.now(), extra_info)\n except Exception as ex:\n action_status = ActionStatus.FAILED\n logger.exception('Failed to apply action {} to {}: {}'.format(action, resource.id, ex))\n finally:\n auditlog(\n event='{}.{}.{}.{}'.format(action_issuer, resource.resource_type, action, action_status),\n actor=action_issuer,\n data={\n 'resource_id': resource.id,\n 'account_name': resource.account.account_name,\n 'location': resource.location,\n 'info': extra_info\n }\n )\n return action_status\n else:\n logger.error('Failed to apply action {} to {}: Not supported'.format(action, resource.id))\n return ActionStatus.FAILED", "docstring": "Process an audit action for a resource, if possible\n\nArgs:\nresource (:obj:`Resource`): A resource object to perform the action on\naction (`str`): Type of action to perform (`kill` or `stop`)\naction_issuer (`str`): The issuer of the action\nReturns:\n`ActionStatus`", "source": "juraj-google-style"} {"code": "def as_dict(self, verbosity: int=0) -> Dict:\n d = {'@module': self.__class__.__module__, '@class': self.__class__.__name__, 'matrix': self._matrix.tolist()}\n ((a, b, c), (alpha, beta, gamma)) = self.lengths_and_angles\n if (verbosity > 0):\n d.update({'a': a, 'b': b, 'c': c, 'alpha': alpha, 'beta': beta, 'gamma': gamma, 'volume': self.volume})\n return d", "docstring": "Json-serialization dict representation of the Lattice.\n\nArgs:\nverbosity (int): Verbosity level. Default of 0 only includes the\nmatrix representation. 
Set to 1 for more details.", "source": "codesearchnet"} {"code": "def from_millis(cls, timeout_ms):\n if hasattr(timeout_ms, 'has_expired'):\n return timeout_ms\n if (timeout_ms is None):\n return cls(None)\n return cls((timeout_ms / 1000.0))", "docstring": "Create a new PolledTimeout if needed.\n\nIf timeout_ms is already a PolledTimeout, just return it, otherwise create a\nnew PolledTimeout with the given timeout in milliseconds.\n\nArgs:\ntimeout_ms: PolledTimeout object, or number of milliseconds to use for\ncreating a new one.\n\nReturns:\nA PolledTimeout object that will expire in timeout_ms milliseconds, which\nmay be timeout_ms itself, or a newly allocated PolledTimeout.", "source": "codesearchnet"} {"code": "def _request(self, domain, type_name, search_command, db_method, body=None):\n headers = {'Content-Type': 'application/json', 'DB-Method': db_method}\n search_command = self._clean_datastore_path(search_command)\n url = '/v2/exchange/db/{}/{}/{}'.format(domain, type_name, search_command)\n r = self.tcex.session.post(url, data=body, headers=headers, params=self._params)\n data = []\n status = 'Failed'\n if ((not r.ok) or ('application/json' not in r.headers.get('content-type', ''))):\n self.tcex.handle_error(350, [r.status_code, r.text])\n data = r.json()\n status = 'Success'\n return {'data': data, 'response': r, 'status': status}", "docstring": "Make the API request for a Data Store CRUD operation\n\nArgs:\ndomain (string): One of 'local', 'organization', or 'system'.\ntype_name (string): This is a free form index type name. The ThreatConnect API will use\nthis resource verbatim.\nsearch_command (string): Search command to pass to ES.\ndb_method (string): The DB method 'DELETE', 'GET', 'POST', or 'PUT'\nbody (dict): JSON body", "source": "codesearchnet"} {"code": "def update_power_state(self, id_or_uri, power_state):\n uri = (self._client.build_uri(id_or_uri) + '/powerState')\n return self._client.update(power_state, uri)", "docstring": "Sets the power state of the specified power delivery device. 
The device must be an HP Intelligent Outlet.\n\nArgs:\nid_or_uri:\nCan be either the power device id or the uri\npower_state:\n{\"powerState\":\"On|Off\"}\n\nReturns:\nstr: The power state", "source": "codesearchnet"} {"code": "def has_unchecked_field(self, locator, **kwargs):\n \n\n kwargs[\"checked\"] = False\n return self.has_selector(\"field\", locator, **kwargs)", "docstring": "Checks if the page or current node has a radio button or checkbox with the given label,\nvalue, or id, that is currently unchecked.\n\nArgs:\nlocator (str): The label, name, or id of an unchecked field.\n**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.\n\nReturns:\nbool: Whether it exists.", "source": "juraj-google-style"} {"code": "def get_student_by_email(self, email, students=None):\n if (students is None):\n students = self.get_students()\n email = email.lower()\n for student in students:\n if (student['accountEmail'].lower() == email):\n return (student['studentId'], student)\n return (None, None)", "docstring": "Get a student based on an email address.\n\nCalls ``self.get_students()`` to get list of all students,\nif not passed as the ``students`` parameter.\n\nArgs:\nemail (str): student email\nstudents (list): dictionary of students to search, default: None\nWhen ``students`` is unspecified, all students in gradebook\nare retrieved.\n\nRaises:\nrequests.RequestException: Exception connection error\nValueError: Unable to decode response content\n\nReturns:\ntuple: tuple of student id and student dictionary.", "source": "codesearchnet"} {"code": "def _expand_terms(self, terms):\n \n ret = {\n 'keywords': list(),\n 'doc': list(),\n 'from': None,\n 'to': None}\n\n if not isinstance(terms, dict):\n stp = SearchTermParser()\n terms = stp.parse(terms, term_join=self.backend._and_join)\n\n if 'about' in terms:\n ret['doc'].append(terms['about'])\n\n if 'with' in terms:\n ret['doc'].append(terms['with'])\n\n if 'in' in terms:\n place_vids = self._expand_place_ids(terms['in'])\n ret['keywords'].append(place_vids)\n\n if 'by' in terms:\n ret['keywords'].append(terms['by'])\n ret['from'] = terms.get('from', None)\n ret['to'] = terms.get('to', None)\n return ret", "docstring": "Expands partition terms to the appropriate fields.\n\nArgs:\nterms (dict or str):\n\nReturns:\ndict: keys are field names, values are query strings", "source": "juraj-google-style"} {"code": "def _get_args_to_parse(args, sys_argv):\n \n arguments = args if args is not None else sys_argv[1:]\n _LOG.debug(\"Parsing arguments: %s\", arguments)\n return arguments", "docstring": "Return the given arguments if it is not None else sys.argv if it contains\nsomething, an empty list otherwise.\n\nArgs:\nargs: argument to be parsed\nsys_argv: arguments of the command line i.e. 
sys.argv", "source": "juraj-google-style"} {"code": "def get_args(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('--input', required=True, help='Input file to process.')\n parser.add_argument('--output', required=True, help='Output file to write results to.')\n return parser.parse_known_args(argv)", "docstring": "Determines user specified arguments from the given list of arguments.\n\nArgs:\nargv: all arguments.\n\nReturns:\nA pair of argument lists containing known and remaining arguments.", "source": "github-repos"} {"code": "def WriteSessionStart(self):\n self._RaiseIfNotWritable()\n if (self._storage_type != definitions.STORAGE_TYPE_SESSION):\n raise IOError('Unsupported storage type.')\n session_start = self._session.CreateSessionStart()\n self._storage_file.WriteSessionStart(session_start)", "docstring": "Writes session start information.\n\nRaises:\nIOError: if the storage type is not supported or\nwhen the storage writer is closed.\nOSError: if the storage type is not supported or\nwhen the storage writer is closed.", "source": "codesearchnet"} {"code": "def convert_tiktoken_to_fast(encoding: Any, output_dir: str):\n output_dir = Path(output_dir)\n output_dir.mkdir(exist_ok=True)\n save_file = output_dir / 'tiktoken' / TIKTOKEN_VOCAB_FILE\n tokenizer_file = output_dir / TOKENIZER_FILE\n save_file_absolute = str(save_file.absolute())\n output_file_absolute = str(tokenizer_file.absolute())\n try:\n from tiktoken import get_encoding\n from tiktoken.load import dump_tiktoken_bpe\n if isinstance(encoding, str):\n encoding = get_encoding(encoding)\n dump_tiktoken_bpe(encoding._mergeable_ranks, save_file_absolute)\n except ImportError:\n raise ValueError('`tiktoken` is required to save a `tiktoken` file. Install it with `pip install tiktoken`.')\n tokenizer = TikTokenConverter(vocab_file=save_file_absolute, pattern=encoding._pat_str, additional_special_tokens=encoding._special_tokens).converted()\n tokenizer.save(output_file_absolute)", "docstring": "Converts given `tiktoken` encoding to `PretrainedTokenizerFast` and saves the configuration of converted tokenizer\non disk.\n\nArgs:\nencoding (`str` or `tiktoken.Encoding`):\nTokenizer from `tiktoken` library. If `encoding` is `str`, the tokenizer will be loaded with\n`tiktoken.get_encoding(encoding)`.\noutput_dir (`str`):\nSave path for converted tokenizer configuration file.", "source": "github-repos"} {"code": "def _contains_tensor(sample: repr_dataset.RepresentativeSample) -> bool:\n return any(map(lambda value: isinstance(value, core.Tensor), sample.values()))", "docstring": "Determines whether `sample` contains any tf.Tensors.\n\nArgs:\nsample: A `RepresentativeSample`.\n\nReturns:\nTrue iff `sample` contains at least tf.Tensors.", "source": "github-repos"} {"code": "def get_room(self, id):\n if (id not in self._rooms):\n self._rooms[id] = Room(self, id)\n return self._rooms[id]", "docstring": "Get room.\n\nReturns:\n:class:`Room`. 
Room", "source": "codesearchnet"} {"code": "class WarmUp(schedules.LearningRateSchedule):\n\n def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float=1.0, name: Optional[str]=None):\n super().__init__()\n self.initial_learning_rate = initial_learning_rate\n self.warmup_steps = warmup_steps\n self.power = power\n self.decay_schedule_fn = decay_schedule_fn\n self.name = name\n\n def __call__(self, step):\n with tf.name_scope(self.name or 'WarmUp') as name:\n global_step_float = tf.cast(step, tf.float32)\n warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)\n warmup_percent_done = global_step_float / warmup_steps_float\n warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)\n return tf.cond(global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name)\n\n def get_config(self):\n return {'initial_learning_rate': self.initial_learning_rate, 'decay_schedule_fn': self.decay_schedule_fn, 'warmup_steps': self.warmup_steps, 'power': self.power, 'name': self.name}", "docstring": "Applies a warmup schedule on a given learning rate decay schedule.\n\nArgs:\ninitial_learning_rate (`float`):\nThe initial learning rate for the schedule after the warmup (so this will be the learning rate at the end\nof the warmup).\ndecay_schedule_fn (`Callable`):\nThe schedule function to apply after the warmup for the rest of training.\nwarmup_steps (`int`):\nThe number of steps for the warmup part of training.\npower (`float`, *optional*, defaults to 1.0):\nThe power to use for the polynomial warmup (defaults is a linear warmup).\nname (`str`, *optional*):\nOptional name prefix for the returned tensors during the schedule.", "source": "github-repos"} {"code": "def set_status_code(self, status_code_line):\n self._empty = False\n self.status_code = self._remove_structure_prefix(_InstrumentationStructurePrefixes.STATUS_CODE, status_code_line)\n if self.status_code == _InstrumentationStatusCodes.START:\n self.begin_time = utils.get_current_epoch_time()", "docstring": "Sets the status code for the instrumentation test method, used in\ndetermining the test result.\n\nArgs:\nstatus_code_line: string, the raw instrumentation output line that\ncontains the status code of the instrumentation block.", "source": "github-repos"} {"code": "def get_pair(self, term1, term2):\n key = self.key(term1, term2)\n return self.pairs.get(key, None)", "docstring": "Get the value for a pair of terms.\n\nArgs:\nterm1 (str)\nterm2 (str)\n\nReturns:\nThe stored value.", "source": "codesearchnet"} {"code": "def __init__(self, olecf_item):\n \n super(OLECFPropertySetStream, self).__init__()\n self._properties = {}\n self.date_time_properties = {}\n\n self._ReadPropertySet(olecf_item.set)", "docstring": "Initialize an OLECF property set stream.\n\nArgs:\nolecf_item (pyolecf.property_set_stream): OLECF item.", "source": "juraj-google-style"} {"code": "def true_num_genes(model, custom_spont_id=None):\n true_num = 0\n for gene in model.genes:\n if (not is_spontaneous(gene, custom_id=custom_spont_id)):\n true_num += 1\n return true_num", "docstring": "Return the number of genes in a model ignoring spontaneously labeled genes.\n\nArgs:\nmodel (Model):\ncustom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001``\n\nReturns:\nint: Number of genes excluding spontaneous genes", "source": "codesearchnet"} {"code": "def 
_prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):\n if attention_mask is not None and attention_mask.dim() == 4:\n causal_mask = attention_mask\n else:\n min_dtype = torch.finfo(dtype).min\n causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)\n if sequence_length != 1:\n causal_mask = torch.triu(causal_mask, diagonal=1)\n causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)\n causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)\n if attention_mask is not None:\n causal_mask = causal_mask.clone()\n mask_length = attention_mask.shape[-1]\n padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)\n padding_mask = padding_mask == 0\n causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)\n return causal_mask", "docstring": "Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.\n\nArgs:\nattention_mask (`torch.Tensor`):\nA 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape\n`(batch_size, 1, query_length, key_value_length)`.\nsequence_length (`int`):\nThe sequence length being processed.\ntarget_length (`int`):\nThe target length: when generating with static cache, the mask should be as long as the static cache,\nto account for the 0 padding, the part of the cache that is not filled yet.\ndtype (`torch.dtype`):\nThe dtype to use for the 4D attention mask.\ncache_position (`torch.Tensor`):\nIndices depicting the position of the input sequence tokens in the sequence.\nbatch_size (`torch.Tensor`):\nBatch size.", "source": "github-repos"} {"code": "def forward(self, outputs, targets):\n outputs_without_aux = {k: v for k, v in outputs.items() if k != 'auxiliary_outputs'}\n indices = self.matcher(outputs_without_aux, targets)\n num_boxes = sum((len(t['class_labels']) for t in targets))\n num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)\n world_size = 1\n if is_accelerate_available():\n if PartialState._shared_state != {}:\n num_boxes = reduce(num_boxes)\n world_size = PartialState().num_processes\n num_boxes = torch.clamp(num_boxes / world_size, min=1).item()\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))\n if 'auxiliary_outputs' in outputs:\n for i, auxiliary_outputs in enumerate(outputs['auxiliary_outputs']):\n indices = self.matcher(auxiliary_outputs, targets)\n for loss in self.losses:\n if loss == 'masks':\n continue\n l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)\n l_dict = {k + f'_{i}': v for k, v in l_dict.items()}\n losses.update(l_dict)\n return losses", "docstring": "This performs the loss computation.\n\nArgs:\noutputs (`dict`, *optional*):\nDictionary of tensors, see the output specification of the model for the format.\ntargets (`List[dict]`, *optional*):\nList of dicts, such that `len(targets) == batch_size`. 
The expected keys in each dict depends on the\nlosses applied, see each loss' doc.", "source": "github-repos"} {"code": "def _ReadEncryptedData(self, read_size):\n encrypted_data = self._file_object.read(read_size)\n read_count = len(encrypted_data)\n self._encrypted_data = b''.join([self._encrypted_data, encrypted_data])\n (self._decrypted_data, self._encrypted_data) = self._decrypter.Decrypt(self._encrypted_data)\n self._decrypted_data_size = len(self._decrypted_data)\n return read_count", "docstring": "Reads encrypted data from the file-like object.\n\nArgs:\nread_size (int): number of bytes of encrypted data to read.\n\nReturns:\nint: number of bytes of encrypted data read.", "source": "codesearchnet"} {"code": "def egg_info_writer(cmd, basename, filename):\n setupcfg = next((f for f in setuptools.findall() if (os.path.basename(f) == 'setup.cfg')), None)\n if (not setupcfg):\n return\n parser = six.moves.configparser.ConfigParser()\n parser.read(setupcfg)\n if ((not parser.has_section('rcli')) or (not parser.items('rcli'))):\n return\n config = dict(parser.items('rcli'))\n for (k, v) in six.iteritems(config):\n if (v.lower() in ('y', 'yes', 'true')):\n config[k] = True\n elif (v.lower() in ('n', 'no', 'false')):\n config[k] = False\n else:\n try:\n config[k] = json.loads(v)\n except ValueError:\n pass\n cmd.write_file(basename, filename, json.dumps(config))", "docstring": "Read rcli configuration and write it out to the egg info.\n\nArgs:\ncmd: An egg info command instance to use for writing.\nbasename: The basename of the file to write.\nfilename: The full path of the file to write into the egg info.", "source": "codesearchnet"} {"code": "def ExamineEvent(self, mediator, event):\n \n if self._tagging_rules is None:\n if self._autodetect_tag_file_attempt:\n \n \n return\n\n if not self._AttemptAutoDetectTagFile(mediator):\n logger.info(\n 'No tag definition file specified, and plaso was not able to '\n 'autoselect a tagging file. 
As no definitions were specified, '\n 'no events will be tagged.')\n return\n\n matched_label_names = []\n for label_name, filter_objects in iter(self._tagging_rules.items()):\n for filter_object in filter_objects:\n if filter_object.Match(event):\n matched_label_names.append(label_name)\n break\n\n if matched_label_names:\n event_tag = self._CreateEventTag(\n event, self._EVENT_TAG_COMMENT, matched_label_names)\n\n mediator.ProduceEventTag(event_tag)\n self._number_of_event_tags += 1", "docstring": "Analyzes an EventObject and tags it according to rules in the tag file.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between analysis\nplugins and other components, such as storage and dfvfs.\nevent (EventObject): event to examine.", "source": "juraj-google-style"} {"code": "def eval_adiabatic_limit(YABFGN, Ytilde, P0):\n \n Y, A, B, F, G, N = YABFGN\n\n Klim = (P0 * (B - A * Ytilde * A) * P0).expand().simplify_scalar()\n Hlim = ((Klim - Klim.dag())/2/I).expand().simplify_scalar()\n\n Ldlim = (P0 * (G - A * Ytilde * F) * P0).expand().simplify_scalar()\n\n dN = identity_matrix(N.shape[0]) + F.H * Ytilde * F\n Nlim = (P0 * N * dN * P0).expand().simplify_scalar()\n\n return SLH(Nlim.dag(), Ldlim.dag(), Hlim.dag())", "docstring": "Compute the limiting SLH model for the adiabatic approximation\n\nArgs:\nYABFGN: The tuple (Y, A, B, F, G, N)\nas returned by prepare_adiabatic_limit.\nYtilde: The pseudo-inverse of Y, satisfying Y * Ytilde = P0.\nP0: The projector onto the null-space of Y.\n\nReturns:\nSLH: Limiting SLH model", "source": "juraj-google-style"} {"code": "def cond(self, name='cond'):\n with self._name_scope(name):\n return self._cond()", "docstring": "Returns the condition number of this linear operator.\n\nArgs:\nname: A name for this `Op`.\n\nReturns:\nShape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.", "source": "github-repos"} {"code": "def end_run_group(group, session):\n from datetime import datetime\n group.end = datetime.now()\n group.status = 'completed'\n session.commit()", "docstring": "End the run_group successfully.\n\nArgs:\ngroup: The run_group we want to complete.\nsession: The database transaction we will finish.", "source": "codesearchnet"} {"code": "def delete(self, key):\n dct = self\n keys = key.split('.')\n last_key = keys[(- 1)]\n for k in keys:\n if (k == last_key):\n del dct[k]\n break\n if isinstance(dct, DotDict):\n dct = super(DotDict, dct).__getitem__(k)\n else:\n dct = dct.__getitem__(k)\n if (not isinstance(dct, (DotDict, dict))):\n raise KeyError('Subkey \"{}\" in \"{}\" invalid for deletion'.format(k, key))", "docstring": "Remove a value from the `DotDict`.\n\nThe `key` parameter can either be a regular string key,\ne.g. \"foo\", or it can be a string key with dot notation,\ne.g. 
\"foo.bar.baz\", to signify a nested element.\n\nIf the key does not exist in the `DotDict`, it will continue\nsilently.\n\nArgs:\nkey (str): The key to remove.", "source": "codesearchnet"} {"code": "def _mini_batch_training_op(self, inputs, cluster_idx_list, cluster_centers, total_counts):\n update_ops = []\n for inp, cluster_idx in zip(inputs, cluster_idx_list):\n with ops.colocate_with(inp, ignore_existing=True):\n assert total_counts is not None\n cluster_idx = array_ops.reshape(cluster_idx, [-1])\n unique_ids, unique_idx = array_ops.unique(cluster_idx)\n num_unique_cluster_idx = array_ops.size(unique_ids)\n with ops.colocate_with(total_counts, ignore_existing=True):\n old_counts = array_ops.gather(total_counts, unique_ids)\n with ops.colocate_with(cluster_centers, ignore_existing=True):\n old_cluster_centers = array_ops.gather(cluster_centers, unique_ids)\n count_updates = math_ops.unsorted_segment_sum(array_ops.ones_like(unique_idx, dtype=total_counts.dtype), unique_idx, num_unique_cluster_idx)\n cluster_center_updates = math_ops.unsorted_segment_sum(inp, unique_idx, num_unique_cluster_idx)\n broadcast_shape = array_ops.concat([array_ops.reshape(num_unique_cluster_idx, [1]), array_ops.ones(array_ops.reshape(array_ops.rank(inp) - 1, [1]), dtype=dtypes.int32)], 0)\n cluster_center_updates -= math_ops.cast(array_ops.reshape(count_updates, broadcast_shape), inp.dtype) * old_cluster_centers\n learning_rate = math_ops.reciprocal(math_ops.cast(old_counts + count_updates, inp.dtype))\n learning_rate = array_ops.reshape(learning_rate, broadcast_shape)\n cluster_center_updates *= learning_rate\n update_counts = state_ops.scatter_add(total_counts, unique_ids, count_updates)\n update_cluster_centers = state_ops.scatter_add(cluster_centers, unique_ids, cluster_center_updates)\n update_ops.extend([update_counts, update_cluster_centers])\n return control_flow_ops.group(*update_ops)", "docstring": "Creates an op for training for mini batch case.\n\nArgs:\ninputs: list of input Tensors.\ncluster_idx_list: A vector (or list of vectors). Each element in the\nvector corresponds to an input row in 'inp' and specifies the cluster id\ncorresponding to the input.\ncluster_centers: Tensor Ref of cluster centers.\ntotal_counts: Tensor Ref of cluster counts.\n\nReturns:\nAn op for doing an update of mini-batch k-means.", "source": "github-repos"} {"code": "def execute_command(self, args, parent_environ=None, **subprocess_kwargs):\n if (parent_environ in (None, os.environ)):\n target_environ = {}\n else:\n target_environ = parent_environ.copy()\n interpreter = Python(target_environ=target_environ)\n executor = self._create_executor(interpreter, parent_environ)\n self._execute(executor)\n return interpreter.subprocess(args, **subprocess_kwargs)", "docstring": "Run a command within a resolved context.\n\nThis applies the context to a python environ dict, then runs a\nsubprocess in that namespace. This is not a fully configured subshell -\nshell-specific commands such as aliases will not be applied. To execute\na command within a subshell instead, use execute_shell().\n\nWarning:\nThis runs a command in a configured environ dict only, not in a true\nshell. 
To do that, call `execute_shell` using the `command` keyword\nargument.\n\nArgs:\nargs: Command arguments, can be a string.\nparent_environ: Environment to interpret the context within,\ndefaults to os.environ if None.\nsubprocess_kwargs: Args to pass to subprocess.Popen.\n\nReturns:\nA subprocess.Popen object.\n\nNote:\nThis does not alter the current python session.", "source": "codesearchnet"} {"code": "def _AddHeader(self, fp):\n \n text = textwrap.wrap(\n textwrap.dedent(self.config_header), break_on_hyphens=False)\n fp.write('\\n'.join(['\n fp.write('\\n\\n')", "docstring": "Create a file header in the config.\n\nArgs:\nfp: int, a file pointer for writing the header.", "source": "juraj-google-style"} {"code": "def extract_numerics_alert(event):\n value = event.summary.value[0]\n debugger_plugin_metadata_content = None\n if value.HasField('metadata'):\n plugin_data = value.metadata.plugin_data\n if (plugin_data.plugin_name == constants.DEBUGGER_PLUGIN_NAME):\n debugger_plugin_metadata_content = plugin_data.content\n if (not debugger_plugin_metadata_content):\n raise ValueError('Event proto input lacks debugger plugin SummaryMetadata.')\n debugger_plugin_metadata_content = tf.compat.as_text(debugger_plugin_metadata_content)\n try:\n content_object = json.loads(debugger_plugin_metadata_content)\n device_name = content_object['device']\n except (KeyError, ValueError) as e:\n raise ValueError(('Could not determine device from JSON string %r, %r' % (debugger_plugin_metadata_content, e)))\n debug_op_suffix = ':DebugNumericSummary'\n if (not value.node_name.endswith(debug_op_suffix)):\n raise ValueError(('Event proto input does not have the expected debug op suffix %s' % debug_op_suffix))\n tensor_name = value.node_name[:(- len(debug_op_suffix))]\n elements = tf_debug.load_tensor_from_event(event)\n nan_count = elements[constants.NAN_NUMERIC_SUMMARY_OP_INDEX]\n neg_inf_count = elements[constants.NEG_INF_NUMERIC_SUMMARY_OP_INDEX]\n pos_inf_count = elements[constants.POS_INF_NUMERIC_SUMMARY_OP_INDEX]\n if ((nan_count > 0) or (neg_inf_count > 0) or (pos_inf_count > 0)):\n return NumericsAlert(device_name, tensor_name, event.wall_time, nan_count, neg_inf_count, pos_inf_count)\n return None", "docstring": "Determines whether a health pill event contains bad values.\n\nA bad value is one of NaN, -Inf, or +Inf.\n\nArgs:\nevent: (`Event`) A `tensorflow.Event` proto from `DebugNumericSummary`\nops.\n\nReturns:\nAn instance of `NumericsAlert`, if bad values are found.\n`None`, if no bad values are found.\n\nRaises:\nValueError: if the event does not have the expected tag prefix or the\ndebug op name is not the expected debug op name suffix.", "source": "codesearchnet"} {"code": "def limitReal(x, max_denominator=1000000):\n f = Fraction(x).limit_denominator(max_denominator)\n return Real((f.numerator, f.denominator))", "docstring": "Creates an pysmt Real constant from x.\n\nArgs:\nx (number): A number to be cast to a pysmt constant.\nmax_denominator (int, optional): The maximum size of the denominator.\nDefault 1000000.\n\nReturns:\nA Real constant with the given value and the denominator limited.", "source": "codesearchnet"} {"code": "def __call__(self, text, toLang, fromLang=None):\n \n return self.skype.conn(\"GET\", \"{0}/skype/translate\".format(SkypeConnection.API_TRANSLATE),\n params={\"from\": fromLang or \"\", \"to\": toLang, \"text\": text},\n auth=SkypeConnection.Auth.SkypeToken).json()", "docstring": "Attempt translation of a string. 
Supports automatic language detection if ``fromLang`` is not specified.\n\nArgs:\ntext (str): input text to be translated\ntoLang (str): country code of output language\nfromLang (str): country code of input language", "source": "juraj-google-style"} {"code": "def forward(self, hidden_states: torch.Tensor, level_index: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):\n if self.pre_norm:\n outputs = self.forward_pre(hidden_states=hidden_states, level_index=level_index, position_embeddings=position_embeddings, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions)\n else:\n outputs = self.forward_post(hidden_states=hidden_states, level_index=level_index, position_embeddings=position_embeddings, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions)\n return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`):\nInput to the layer of shape `(seq_len, batch, embed_dim)`.\nattention_mask (`torch.FloatTensor`):\nAttention mask of shape `(1, seq_len, tgt_len, src_len)`.\nposition_embeddings (`torch.FloatTensor`, *optional*):\nPosition embeddings that are added to the keys in the masked-attention layer.\nquery_position_embeddings (`torch.FloatTensor`, *optional*):\nPosition embeddings that are added to the queries and keys in the self-attention layer.\nencoder_hidden_states (`torch.FloatTensor`):\nCross attention input to the layer of shape `(seq_len, batch, embed_dim)`.\nencoder_attention_mask (`torch.FloatTensor`):\nEncoder attention mask of size`(1, seq_len, tgt_len, src_len)`.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"} {"code": "def enable_encryption(self):\n try:\n self.send_state_event('m.room.encryption', {'algorithm': 'm.megolm.v1.aes-sha2'})\n self.encrypted = True\n return True\n except MatrixRequestError:\n return False", "docstring": "Enables encryption in the room.\n\nNOTE: Once enabled, encryption cannot be disabled.\n\nReturns:\nTrue if successful, False if not", "source": "codesearchnet"} {"code": "def _download_items(db, last_id):\n \n MAX_RETRY = 20 \n MAX_DOC_ID = 10000000 \n\n not_found_cnt = 0 \n for doc_id in xrange(last_id, MAX_DOC_ID):\n doc_id += 1\n print \"Downloading %d..\" % (doc_id)\n\n if not_found_cnt >= MAX_RETRY:\n print \"It looks like this is an end:\", doc_id - MAX_RETRY\n break\n\n try:\n record = _download(doc_id)\n except (DocumentNotFoundException, InvalidAlephBaseException):\n print \"\\tnot found, skipping\"\n not_found_cnt += 1\n continue\n\n not_found_cnt = 0\n db[\"item_%d\" % doc_id] = record\n db[\"last_id\"] = doc_id - MAX_RETRY if doc_id > MAX_RETRY else 1\n\n if doc_id % 100 == 0:\n db.commit()", "docstring": "Download items from the aleph and store them in `db`. 
Start from `last_id`\nif specified.\n\nArgs:\ndb (obj): Dictionary-like object used as DB.\nlast_id (int): Start from this id.", "source": "juraj-google-style"} {"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]\n return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Funnel\nTransformer sequence pair mask has the following format:\n\n```\n2 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n| first sequence | second sequence |\n```\n\nIf `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).", "source": "github-repos"} {"code": "def occurrence(self, file_name=None, path=None, date=None):\n if (self._indicator_data.get('type') != 'File'):\n return None\n occurrence_obj = FileOccurrence(file_name, path, date)\n self._occurrences.append(occurrence_obj)\n return occurrence_obj", "docstring": "Add a file Occurrence.\n\nArgs:\nfile_name (str, optional): The file name for this occurrence.\npath (str, optional): The file path for this occurrence.\ndate (str, optional): The datetime expression for this occurrence.\n\nReturns:\nobj: An instance of Occurrence.", "source": "codesearchnet"} {"code": "def add_tasks_r(addon_module, package_module, package_name):\n module_dict = package_module.__dict__\n for (attr_name, attr_val) in module_dict.items():\n if isinstance(attr_val, fabric.tasks.WrappedCallableTask):\n addon_module.__dict__[attr_name] = attr_val\n elif ((attr_name != package_name) and isinstance(attr_val, types.ModuleType) and attr_val.__name__.startswith('fabsetup_') and (attr_name.split('.')[(- 1)] != package_name)):\n submodule_name = flo('{addon_module.__name__}.{attr_name}')\n submodule = get_or_create_module_r(submodule_name)\n package_module = attr_val\n add_tasks_r(submodule, package_module, package_name)\n addon_module.__dict__[attr_name] = submodule", "docstring": "Recursively iterate through 'package_module' and add every fabric task\nto the 'addon_module' keeping the task hierarchy.\n\nArgs:\naddon_module(types.ModuleType)\npackage_module(types.ModuleType)\npackage_name(str): Required, to avoid redundant addition of tasks\n\nReturn: None", "source": "codesearchnet"} {"code": "def copy(self, source_file_names, destination_file_names):\n if not len(source_file_names) == len(destination_file_names):\n message = 'Unable to copy unequal number of sources and destinations.'\n raise BeamIOError(message)\n src_dest_pairs = list(zip(source_file_names, destination_file_names))\n return self._blobstorageIO().copy_paths(src_dest_pairs)", "docstring": "Recursively copy the file tree from the source to the destination\n\nArgs:\nsource_file_names: list of source file objects that needs to be copied\ndestination_file_names: list of destination of the new object\n\nRaises:\n``BeamIOError``: if any of the copy operations fail", "source": "github-repos"} {"code": "def undetoured_new(cls, *args, **kwargs) -> Any:\n new_method = 
_global_detour_context.get_original_new(cls)\n if new_method is object.__new__:\n instance = new_method(cls)\n else:\n instance = new_method(cls, *args, **kwargs)\n instance.__init__(*args, **kwargs)\n return instance", "docstring": "Create a new instance of cls without detouring.\n\nIf cls.__init__ creates sub-objects, creation of sub-objects\nmaybe detoured based on current context. For example::\n\nclass A:\n\ndef __init__(self, x):\nif x < 0:\nself.child = A(x)\nelse:\nself.x = x\n\nwith pg.detour([A, B]):\na = A(-1)\nassert isinstance(a, A)\nassert isinstance(a.child, B)\n\nArgs:\ncls: The class whose instance will be created.\n*args: Positional arguments to be passed to class __init__ method.\n**kwargs: Keyword arguments to be passed to class __init__ method.\n\nReturns:\nA instance of `cls`.", "source": "github-repos"} {"code": "def GetAPFSFileEntryByPathSpec(self, path_spec):\n location = getattr(path_spec, 'location', None)\n identifier = getattr(path_spec, 'identifier', None)\n if (identifier is not None):\n fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_identifier(identifier)\n elif (location is not None):\n fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_path(location)\n else:\n raise errors.PathSpecError('Path specification missing location and identifier.')\n return fsapfs_file_entry", "docstring": "Retrieves the APFS file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\npyfsapfs.file_entry: file entry.\n\nRaises:\nPathSpecError: if the path specification is missing location and\nidentifier.", "source": "codesearchnet"} {"code": "def create_team(self, name):\n \n request = self._get_request()\n return request.post(self.TEAM_CREATE_URL, {\"name\": name})", "docstring": "Creates a new Team\n\nCreates a new Team and makes you a member. 
You must not currently belong to a team to invoke.\n\nArgs:\n\nname (str): The name of your team\n\nReturns:\nA Team object", "source": "juraj-google-style"} {"code": "def _createBitpattern(functioncode, value):\n \n _checkFunctioncode(functioncode, [5, 15])\n _checkInt(value, minvalue=0, maxvalue=1, description='inputvalue')\n\n if functioncode == 5:\n if value == 0:\n return '\\x00\\x00'\n else:\n return '\\xff\\x00'\n\n elif functioncode == 15:\n if value == 0:\n return '\\x00'\n else:\n return '\\x01'", "docstring": "Create the bit pattern that is used for writing single bits.\n\nThis is basically a storage of numerical constants.\n\nArgs:\n* functioncode (int): can be 5 or 15\n* value (int): can be 0 or 1\n\nReturns:\nThe bit pattern (string).\n\nRaises:\nTypeError, ValueError", "source": "juraj-google-style"} {"code": "def _ProcessPathSpec(self, extraction_worker, parser_mediator, path_spec):\n \n self._current_display_name = parser_mediator.GetDisplayNameForPathSpec(\n path_spec)\n\n try:\n extraction_worker.ProcessPathSpec(parser_mediator, path_spec)\n\n except KeyboardInterrupt:\n self._abort = True\n\n self._processing_status.aborted = True\n if self._status_update_callback:\n self._status_update_callback(self._processing_status)\n\n \n \n except dfvfs_errors.CacheFullError:\n \n self._abort = True\n logger.error((\n 'ABORT: detected cache full error while processing '\n 'path spec: {0:s}').format(self._current_display_name))\n\n \n \n except Exception as exception: \n parser_mediator.ProduceExtractionWarning((\n 'unable to process path specification with error: '\n '{0!s}').format(exception), path_spec=path_spec)\n\n if getattr(self._processing_configuration, 'debug_output', False):\n logger.warning(\n 'Unhandled exception while processing path spec: {0:s}.'.format(\n self._current_display_name))\n logger.exception(exception)\n\n pdb.post_mortem()", "docstring": "Processes a path specification.\n\nArgs:\nextraction_worker (worker.ExtractionWorker): extraction worker.\nparser_mediator (ParserMediator): parser mediator.\npath_spec (dfvfs.PathSpec): path specification.", "source": "juraj-google-style"} {"code": "def _call_method(self, method, req, resp_class):\n payload = req.SerializeToString()\n headers = {'Content-Type': 'application/x-protobuf', 'Content-Length': str(len(payload)), 'X-Goog-Api-Format-Version': '2'}\n (response, content) = self._http.request(('%s:%s' % (self._url, method)), method='POST', body=payload, headers=headers)\n if (response.status != 200):\n raise _make_rpc_error(method, response, content)\n resp = resp_class()\n resp.ParseFromString(content)\n return resp", "docstring": "_call_method call the given RPC method over HTTP.\n\nIt uses the given protobuf message request as the payload and\nreturns the deserialized protobuf message response.\n\nArgs:\nmethod: RPC method name to be called.\nreq: protobuf message for the RPC request.\nresp_class: protobuf message class for the RPC response.\n\nReturns:\nDeserialized resp_class protobuf message instance.\n\nRaises:\nRPCError: The rpc method call failed.", "source": "codesearchnet"} {"code": "def __init__(self, tlv_type=127, value=None):\n \n super().__init__()\n self.tlv_type = tlv_type\n self._value = BinaryData() if value is None else value", "docstring": "Create an instance and set its attributes.\n\nArgs:\ntlv_type (int): Type used by this class. 
Defaults to 127.\nvalue (:class:`~pyof.foundation.basic_types.BinaryData`):\nValue stored by GenericTLV.", "source": "juraj-google-style"} {"code": "def search(self, term):\n return self._result(self._get(self._url('/images/search'), params={'term': term}), True)", "docstring": "Search for images on Docker Hub. Similar to the ``docker search``\ncommand.\n\nArgs:\nterm (str): A term to search for.\n\nReturns:\n(list of dicts): The response of the search.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"} {"code": "def _fill_from_default(self, default_job_config):\n \n if self._job_type != default_job_config._job_type:\n raise TypeError(\n \"attempted to merge two incompatible job types: \"\n + repr(self._job_type)\n + \", \"\n + repr(default_job_config._job_type)\n )\n\n new_job_config = self.__class__()\n\n default_job_properties = copy.deepcopy(default_job_config._properties)\n for key in self._properties:\n if key != self._job_type:\n default_job_properties[key] = self._properties[key]\n\n default_job_properties[self._job_type].update(self._properties[self._job_type])\n new_job_config._properties = default_job_properties\n\n return new_job_config", "docstring": "Merge this job config with a default job config.\n\nThe keys in this object take precedence over the keys in the default\nconfig. The merge is done at the top-level as well as for keys one\nlevel below the job type.\n\nArguments:\ndefault_job_config (google.cloud.bigquery.job._JobConfig):\nThe default job config that will be used to fill in self.\n\nReturns:\ngoogle.cloud.bigquery.job._JobConfig A new (merged) job config.", "source": "juraj-google-style"} {"code": "def method(self, method):\n \n self._request.method = method\n self.add_matcher(matcher('MethodMatcher', method))", "docstring": "Defines the HTTP method to match.\nUse ``*`` to match any method.\n\nArguments:\nmethod (str): method value to match. 
E.g: ``GET``.\n\nReturns:\nself: current Mock instance.", "source": "juraj-google-style"} {"code": "def sigmoid_cross_entropy_one_hot(logits, labels, weights_fn=None):\n with tf.variable_scope('sigmoid_cross_entropy_one_hot', values=[logits, labels]):\n del weights_fn\n cross_entropy = tf.losses.sigmoid_cross_entropy(multi_class_labels=labels, logits=logits)\n return (cross_entropy, tf.constant(1.0))", "docstring": "Calculate sigmoid cross entropy for one-hot lanels and logits.\n\nArgs:\nlogits: Tensor of size [batch-size, o=1, p=1, num-classes]\nlabels: Tensor of size [batch-size, o=1, p=1, num-classes]\nweights_fn: Function that takes in labels and weighs examples (unused)\nReturns:\ncross_entropy (scalar), weights", "source": "codesearchnet"} {"code": "def exit(self, code=None, msg=None):\n \n \n if msg is not None:\n if code in [0, 3] or (code is None and self.exit_code in [0, 3]):\n self.log.info(msg)\n else:\n self.log.error(msg)\n self.message_tc(msg)\n\n if code is None:\n code = self.exit_code\n elif code in [0, 1, 3]:\n pass\n else:\n self.log.error(u'Invalid exit code')\n code = 1\n\n if self.default_args.tc_aot_enabled:\n \n self.playbook.aot_rpush(code)\n\n self.log.info(u'Exit Code: {}'.format(code))\n sys.exit(code)", "docstring": "Application exit method with proper exit code\n\nThe method will run the Python standard sys.exit() with the exit code\npreviously defined via :py:meth:`~tcex.tcex.TcEx.exit_code` or provided\nduring the call of this method.\n\nArgs:\ncode (Optional [integer]): The exit code value for the app.\nmsg (Optional [string]): A message to log and add to message tc output.", "source": "juraj-google-style"} {"code": "def num_employers(self, num_employers):\n \n\n if num_employers < 2:\n self._logger.log(\n 'warn',\n 'Two employers are needed: setting to two'\n )\n num_employers = 2\n self._num_employers = num_employers\n self._logger.log('debug', 'Number of employers set to {}'.format(\n num_employers\n ))\n self._limit = num_employers * len(self._value_ranges)\n self._logger.log('debug', 'Limit set to {}'.format(self._limit))", "docstring": "Sets the number of employer bees; at least two are required\n\nArgs:\nnum_employers (int): number of employer bees", "source": "juraj-google-style"} {"code": "def get_apod(cls, date=None, hd=False):\n instance = cls('planetary/apod')\n filters = {'date': date, 'hd': hd}\n return instance.get_resource(**filters)", "docstring": "Returns Astronomy Picture of the Day\n\nArgs:\ndate: date instance (default = today)\n\nhd: bool if high resolution should be included\n\nReturns:\njson", "source": "codesearchnet"} {"code": "def _set_route(self, ip_dest, next_hop, **kwargs):\n commands = self._build_commands(ip_dest, next_hop, **kwargs)\n delete = kwargs.get('delete', False)\n default = kwargs.get('default', False)\n if delete:\n commands = ('no ' + commands)\n elif default:\n commands = ('default ' + commands)\n return self.configure(commands)", "docstring": "Configure a static route\n\nArgs:\nip_dest (string): The ip address of the destination in the\nform of A.B.C.D/E\nnext_hop (string): The next hop interface or ip address\n**kwargs['next_hop_ip'] (string): The next hop address on\ndestination interface\n**kwargs['distance'] (string): Administrative distance for this\nroute\n**kwargs['tag'] (string): Route tag\n**kwargs['route_name'] (string): Route name\n**kwargs['delete'] (boolean): If true, deletes the specified route\ninstead of creating or setting values for the route\n**kwargs['default'] (boolean): If true, defaults 
the specified\nroute instead of creating or setting values for the route\n\nReturns:\nTrue if the operation succeeds, otherwise False.", "source": "codesearchnet"} {"code": "def advth(step):\n \n rbot, rtop = misc.get_rbounds(step)\n rmean = 0.5 * (rbot + rtop)\n rad = step.rprof['r'].values + rbot\n radio = step.timeinfo['H_int']\n if rbot != 0: \n th_adv = -(rtop**3 - rad**3) / rmean**2 / 3\n else:\n th_adv = rad - rtop\n th_adv *= radio\n th_adv += step.timeinfo['Nutop']\n return th_adv, None", "docstring": "Theoretical advection.\n\nThis compute the theoretical profile of total advection as function of\nradius.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nReturns:\ntuple of :class:`numpy.array` and None: the theoretical advection.\nThe second element of the tuple is None.", "source": "juraj-google-style"} {"code": "def _ProcessZipFileWithPlugins(self, parser_mediator, zip_file):\n archive_members = zip_file.namelist()\n for plugin in self._plugins:\n try:\n plugin.UpdateChainAndProcess(parser_mediator, zip_file=zip_file, archive_members=archive_members)\n except errors.WrongCompoundZIPPlugin as exception:\n logger.debug('[{0:s}] wrong plugin: {1!s}'.format(self.NAME, exception))", "docstring": "Processes a zip file using all compound zip files.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nzip_file (zipfile.ZipFile): the zip file. It should not be closed in\nthis method, but will be closed in ParseFileObject().", "source": "codesearchnet"} {"code": "def _popitem(self, indices=None, name=None):\n if name is None:\n name = '%s_get_nokey' % self._name\n indices, dtypes = self._get_indices_and_dtypes(indices)\n with ops.colocate_with(self._coloc_op):\n key, result = self._popitem_fn(shared_name=self._name, indices=indices, dtypes=dtypes, name=name, capacity=self._capacity, memory_limit=self._memory_limit)\n key = self._create_device_transfers(key)[0]\n result = self._get_return_value(result, indices)\n return (key, result)", "docstring": "If the staging area is ordered, the (key, value) with the smallest key will be returned.\n\nOtherwise, a random (key, value) will be returned.\nIf the staging area is empty when this operation executes,\nit will block until there is an element to dequeue.\n\nArgs:\nkey: Key associated with the required data\nindices: Partial list of tensors to retrieve (optional).\nA list of integer or string indices.\nString indices are only valid if the Staging Area\nhas names associated with it.\nname: A name for the operation (optional)\n\nReturns:\nThe created op", "source": "github-repos"} {"code": "def match_any(patterns, name):\n \n \n if not patterns:\n return True\n return any(match(pattern, name) for pattern in patterns)", "docstring": "Test if a name matches any of a list of patterns.\n\nWill return `True` if ``patterns`` is an empty list.\n\nArguments:\npatterns (list): A list of wildcard pattern, e.g ``[\"*.py\",\n\"*.pyc\"]``\nname (str): A filename.\n\nReturns:\nbool: `True` if the name matches at least one of the patterns.", "source": "juraj-google-style"} {"code": "def apply_range_set(self, hist: Hist) -> None:\n \n \n axis = self.axis(hist)\n \n \n assert not isinstance(self.min_val, float)\n assert not isinstance(self.max_val, float)\n \n min_val = self.min_val(axis)\n max_val = self.max_val(axis)\n \n \n \n self.axis(hist).SetRange(min_val, max_val)", "docstring": "Apply the associated range set to the axis of a given 
hist.\n\nNote:\nThe min and max values should be bins, not user ranges! For more, see the binning\nexplanation in ``apply_func_to_find_bin(...)``.\n\nArgs:\nhist: Histogram to which the axis range restriction should be applied.\nReturns:\nNone. The range is set on the axis.", "source": "juraj-google-style"} {"code": "def ShapeEquals(tensor_proto, shape):\n if not isinstance(tensor_proto, tensor_pb2.TensorProto):\n raise TypeError(f'`tensor_proto` must be a tensor_pb2.TensorProto object, but got type {type(tensor_proto)}.')\n if isinstance(shape, tensor_shape_pb2.TensorShapeProto):\n shape = [d.size for d in shape.dim]\n elif not isinstance(shape, (list, tuple)):\n raise TypeError(f'`shape` must be a list or tuple, but got type {type(shape)}.')\n tensor_shape_list = [d.size for d in tensor_proto.tensor_shape.dim]\n return all((x == y for x, y in zip(tensor_shape_list, shape)))", "docstring": "Returns True if \"tensor_proto\" has the given \"shape\".\n\nArgs:\ntensor_proto: A TensorProto.\nshape: A tensor shape, expressed as a TensorShape, list, or tuple.\n\nReturns:\nTrue if \"tensor_proto\" has the given \"shape\", otherwise False.\n\nRaises:\nTypeError: If \"tensor_proto\" is not a TensorProto, or shape is not a\nTensorShape, list, or tuple.", "source": "github-repos"} {"code": "def compute_batch_indices(batch_size, beam_size):\n \n batch_pos = tf.range(batch_size * beam_size) \n batch_pos = tf.reshape(batch_pos, [batch_size, beam_size])\n return batch_pos", "docstring": "Computes the i'th coordinate that contains the batch index for gathers.\n\nBatch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..]. It says which\nbatch the beam item is in. This will create the i of the i,j coordinate\nneeded for the gather.\n\nArgs:\nbatch_size: Batch size\nbeam_size: Size of the beam.\nReturns:\nbatch_pos: [batch_size, beam_size] tensor of ids", "source": "juraj-google-style"} {"code": "def check(self):\n for info in self.get_info():\n if (info.free < info.limit):\n return info", "docstring": "Check resource levels.\n\nReturns:\nNone, ResourceInfo: If None is provided, no levels are exceeded.\nOtherwise, the first ResourceInfo exceeding limits is returned.", "source": "codesearchnet"} {"code": "def get_bel_versions() -> List[str]:\n spec_dir = config['bel']['lang']['specifications']\n fn = f'{spec_dir}/versions.json'\n with open(fn, 'r') as f:\n versions = json.load(f)\n return versions", "docstring": "Get BEL Language versions supported\n\nGet the list of all BEL Language versions supported. The file this depends\non is generated by belspec_yaml2json and is kept up to date using\n`make update_ebnf` or `make update_parsers`. You can also run `belspec_yaml2json`\ndirectly as it's added as a command by pip install.\n\nReturns:\nList[str]: list of versions", "source": "codesearchnet"} {"code": "def parse_tensor_name_with_slicing(in_str):\n if in_str.count('[') == 1 and in_str.endswith(']'):\n tensor_name = in_str[:in_str.index('[')]\n tensor_slicing = in_str[in_str.index('['):]\n else:\n tensor_name = in_str\n tensor_slicing = ''\n return (tensor_name, tensor_slicing)", "docstring": "Parse tensor name, potentially suffixed by slicing string.\n\nArgs:\nin_str: (str) Input name of the tensor, potentially followed by a slicing\nstring. E.g.: Without slicing string: \"hidden/weights/Variable:0\", with\nslicing string: \"hidden/weights/Variable:0[1, :]\"\n\nReturns:\n(str) name of the tensor\n(str) slicing string, if any. 
If no slicing string is present, return \"\".", "source": "github-repos"} {"code": "def prepare_framework(estimator, s3_operations):\n \n if estimator.code_location is not None:\n bucket, key = fw_utils.parse_s3_url(estimator.code_location)\n key = os.path.join(key, estimator._current_job_name, 'source', 'sourcedir.tar.gz')\n else:\n bucket = estimator.sagemaker_session._default_bucket\n key = os.path.join(estimator._current_job_name, 'source', 'sourcedir.tar.gz')\n script = os.path.basename(estimator.entry_point)\n if estimator.source_dir and estimator.source_dir.lower().startswith('s3:\n code_dir = estimator.source_dir\n estimator.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script)\n else:\n code_dir = 's3:\n estimator.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script)\n s3_operations['S3Upload'] = [{\n 'Path': estimator.source_dir or script,\n 'Bucket': bucket,\n 'Key': key,\n 'Tar': True\n }]\n estimator._hyperparameters[sagemaker.model.DIR_PARAM_NAME] = code_dir\n estimator._hyperparameters[sagemaker.model.SCRIPT_PARAM_NAME] = script\n estimator._hyperparameters[sagemaker.model.CLOUDWATCH_METRICS_PARAM_NAME] = \\\n estimator.enable_cloudwatch_metrics\n estimator._hyperparameters[sagemaker.model.CONTAINER_LOG_LEVEL_PARAM_NAME] = estimator.container_log_level\n estimator._hyperparameters[sagemaker.model.JOB_NAME_PARAM_NAME] = estimator._current_job_name\n estimator._hyperparameters[sagemaker.model.SAGEMAKER_REGION_PARAM_NAME] = \\\n estimator.sagemaker_session.boto_region_name", "docstring": "Prepare S3 operations (specify where to upload `source_dir`) and environment variables\nrelated to framework.\n\nArgs:\nestimator (sagemaker.estimator.Estimator): The framework estimator to get information from and update.\ns3_operations (dict): The dict to specify s3 operations (upload `source_dir`).", "source": "juraj-google-style"} {"code": "def get_connection_count(self, id=None, endpoint=None):\n \n return self._call_endpoint(GET_CONNECTION_COUNT, id=id, endpoint=endpoint)", "docstring": "Gets the number of nodes connected to the endpoint\nArgs:\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"} {"code": "def ge(self, other, axis=\"columns\", level=None):\n \n return self._binary_op(\"ge\", other, axis=axis, level=level)", "docstring": "Checks element-wise that this is greater than or equal to other.\n\nArgs:\nother: A DataFrame or Series or scalar to compare to.\naxis: The axis to perform the gt over.\nlevel: The Multilevel index level to apply gt over.\n\nReturns:\nA new DataFrame filled with Booleans.", "source": "juraj-google-style"} {"code": "def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):\n countriesdata = cls.countriesdata(use_live=use_live)\n country = countriesdata['countries'].get(iso3.upper())\n if (country is not None):\n return country\n if (exception is not None):\n raise exception\n return None", "docstring": "Get country information from ISO3 code\n\nArgs:\niso3 (str): ISO3 code for which to get country information\nuse_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\nexception (Optional[ExceptionUpperBound]): An exception to raise if country not found. 
Defaults to None.\n\nReturns:\nOptional[Dict[str]]: country information", "source": "codesearchnet"} {"code": "def tag_versions(repo_path):\n repo = dulwich.repo.Repo(repo_path)\n tags = get_tags(repo)\n maj_version = 0\n feat_version = 0\n fix_version = 0\n last_maj_version = 0\n last_feat_version = 0\n result = []\n for (commit_sha, children) in reversed(get_children_per_first_parent(repo_path).items()):\n commit = get_repo_object(repo, commit_sha)\n (maj_version, feat_version, fix_version) = get_version(commit=commit, tags=tags, maj_version=maj_version, feat_version=feat_version, fix_version=fix_version, children=children)\n if ((last_maj_version != maj_version) or (last_feat_version != feat_version)):\n last_maj_version = maj_version\n last_feat_version = feat_version\n tag_name = ('refs/tags/v%d.%d' % (maj_version, feat_version))\n if ON_PYTHON3:\n repo[str.encode(tag_name)] = commit\n else:\n repo[tag_name] = commit\n result.append(('v%d.%d -> %s' % (maj_version, feat_version, commit_sha)))\n return '\\n'.join(result)", "docstring": "Given a repo will add a tag for each major version.\n\nArgs:\nrepo_path(str): path to the git repository to tag.", "source": "codesearchnet"} {"code": "def get_tool_context(self, tool_alias):\n \n tools_dict = self.get_tools()\n data = tools_dict.get(tool_alias)\n if data:\n return data[\"context_name\"]\n return None", "docstring": "Given a visible tool alias, return the name of the context it\nbelongs to.\n\nArgs:\ntool_alias (str): Tool alias to search for.\n\nReturns:\n(str): Name of the context that exposes a visible instance of this\ntool alias, or None if the alias is not available.", "source": "juraj-google-style"} {"code": "def stop(self, timeout_s=None):\n \n self._stopping.set()\n with self._current_phase_thread_lock:\n phase_thread = self._current_phase_thread\n if not phase_thread:\n return\n\n if phase_thread.is_alive():\n phase_thread.kill()\n\n _LOG.debug('Waiting for cancelled phase to exit: %s', phase_thread)\n timeout = timeouts.PolledTimeout.from_seconds(timeout_s)\n while phase_thread.is_alive() and not timeout.has_expired():\n time.sleep(0.1)\n _LOG.debug('Cancelled phase %s exit',\n \"didn't\" if phase_thread.is_alive() else 'did')\n \n self.test_state.stop_running_phase()", "docstring": "Stops execution of the current phase, if any.\n\nIt will raise a ThreadTerminationError, which will cause the test to stop\nexecuting and terminate with an ERROR state.\n\nArgs:\ntimeout_s: int or None, timeout in seconds to wait for the phase to stop.", "source": "juraj-google-style"} {"code": "def do_conneg(accept, supported):\n \n for result in parse_accept_header(accept):\n mime_type = result[0]\n if (mime_type in supported):\n return mime_type\n return None", "docstring": "Parse accept header and look for preferred type in supported list.\n\nArguments:\naccept - HTTP Accept header\nsupported - list of MIME type supported by the server\n\nReturns:\nsupported MIME type with highest q value in request, else None.\n\nFIXME - Should replace this with negotiator2", "source": "juraj-google-style"} {"code": "def merge(self, dataset):\n \n def merge_data(source, dest):\n for key, value in source.items():\n if isinstance(value, dict):\n merge_data(value, dest.setdefault(key, {}))\n else:\n dest[key] = value\n return dest\n\n merge_data(dataset.data, self._data)\n\n for h in dataset.task_history:\n if h not in self._task_history:\n self._task_history.append(h)", "docstring": "Merge the specified dataset on top of the existing data.\n\nThis replaces all 
values in the existing dataset with the values from the\ngiven dataset.\n\nArgs:\ndataset (TaskData): A reference to the TaskData object that should be merged\non top of the existing object.", "source": "juraj-google-style"} {"code": "def write_wav(path, samples, sr=16000):\n max_value = np.abs(np.iinfo(np.int16).min)\n data = (samples * max_value).astype(np.int16)\n scipy.io.wavfile.write(path, sr, data)", "docstring": "Write to given samples to a wav file.\nThe samples are expected to be floating point numbers\nin the range of -1.0 to 1.0.\n\nArgs:\npath (str): The path to write the wav to.\nsamples (np.array): A float array .\nsr (int): The sampling rate.", "source": "codesearchnet"} {"code": "def pose_inv(pose):\n \n\n \n \n\n \n \n \n \n \n \n\n pose_inv = np.zeros((4, 4))\n pose_inv[:3, :3] = pose[:3, :3].T\n pose_inv[:3, 3] = -pose_inv[:3, :3].dot(pose[:3, 3])\n pose_inv[3, 3] = 1.0\n return pose_inv", "docstring": "Computes the inverse of a homogenous matrix corresponding to the pose of some\nframe B in frame A. The inverse is the pose of frame A in frame B.\n\nArgs:\npose: numpy array of shape (4,4) for the pose to inverse\n\nReturns:\nnumpy array of shape (4,4) for the inverse pose", "source": "juraj-google-style"} {"code": "def pad(x, paddings, dim_name, name=None):\n \n return PadOperation(\n x, paddings, dim_name, name=name).outputs[0]", "docstring": "Slice operation.\n\nArgs:\nx: a list of Tensors\npaddings: list of integers of size 2, padding size before and after for dim.\ndim_name: string, name for the padding dim\nname: an optional string\nReturns:\na Tensor with shape extended by output_shape for the last axis.", "source": "juraj-google-style"} {"code": "def ParsePathItem(item, opts=None):\n \n if item == os.path.curdir:\n return CurrentComponent()\n\n if item == os.path.pardir:\n return ParentComponent()\n\n recursion = PATH_RECURSION_REGEX.search(item)\n if recursion is None:\n return GlobComponent(item, opts)\n\n start, end = recursion.span()\n if not (start == 0 and end == len(item)):\n raise ValueError(\"malformed recursive component\")\n\n if recursion.group(\"max_depth\"):\n max_depth = int(recursion.group(\"max_depth\"))\n else:\n max_depth = None\n\n return RecursiveComponent(max_depth=max_depth, opts=opts)", "docstring": "Parses string path component to an `PathComponent` instance.\n\nArgs:\nitem: A path component string to be parsed.\nopts: A `PathOpts` object.\n\nReturns:\n`PathComponent` instance corresponding to given path fragment.\n\nRaises:\nValueError: If the path item contains a recursive component fragment but\ncannot be parsed as such.", "source": "juraj-google-style"} {"code": "def _CountClientStatisticByLabel(self, day_buckets, extract_statistic_fn):\n counts = collections.defaultdict(int)\n now = rdfvalue.RDFDatetime.Now()\n for info in self.IterateAllClientsFullInfo(batch_size=db.MAX_COUNT):\n if (not info.metadata.ping):\n continue\n statistic_value = extract_statistic_fn(info)\n for client_label in info.GetLabelsNames(owner='GRR'):\n for day_bucket in day_buckets:\n time_boundary = (now - rdfvalue.Duration.FromDays(day_bucket))\n if (info.metadata.ping > time_boundary):\n counts[(statistic_value, client_label, day_bucket)] += 1\n return dict(counts)", "docstring": "Returns client-activity metrics for a particular statistic.\n\nArgs:\nday_buckets: A set of n-day-active buckets.\nextract_statistic_fn: A function that extracts the statistic's value from\na ClientFullInfo object.", "source": "codesearchnet"} {"code": "def translate(self, 
start=None, end=None, arch_mode=None):\n \n start_addr = start if start else self.binary.ea_start\n end_addr = end if end else self.binary.ea_end\n\n self.ir_translator.reset()\n\n for addr, asm, _ in self.disassemble(start=start_addr, end=end_addr, arch_mode=arch_mode):\n yield addr, asm, self.ir_translator.translate(asm)", "docstring": "Translate to REIL instructions.\n\nArgs:\nstart (int): Start address.\nend (int): End address.\narch_mode (int): Architecture mode.\n\nReturns:\n(int, Instruction, list): A tuple of the form (address, assembler instruction, REIL instructions).", "source": "juraj-google-style"} {"code": "def _handle_missing_parameters(parameter_values, all_params, required_params, existing_stack=None):\n missing_params = list((set(all_params) - set(parameter_values.keys())))\n if (existing_stack and ('Parameters' in existing_stack)):\n stack_parameters = [p['ParameterKey'] for p in existing_stack['Parameters']]\n for p in missing_params:\n if (p in stack_parameters):\n logger.debug('Using previous value for parameter %s from existing stack', p)\n parameter_values[p] = UsePreviousParameterValue\n final_missing = list((set(required_params) - set(parameter_values.keys())))\n if final_missing:\n raise MissingParameterException(final_missing)\n return list(parameter_values.items())", "docstring": "Handles any missing parameters.\n\nIf an existing_stack is provided, look up missing parameters there.\n\nArgs:\nparameter_values (dict): key/value dictionary of stack definition\nparameters\nall_params (list): A list of all the parameters used by the\ntemplate/blueprint.\nrequired_params (list): A list of all the parameters required by the\ntemplate/blueprint.\nexisting_stack (dict): A dict representation of the stack. If\nprovided, will be searched for any missing parameters.\n\nReturns:\nlist of tuples: The final list of key/value pairs returned as a\nlist of tuples.\n\nRaises:\nMissingParameterException: Raised if a required parameter is\nstill missing.", "source": "codesearchnet"} {"code": "def diff_prettyHtml(self, diffs):\n html = []\n for (op, data) in diffs:\n text = data.replace('&', '&').replace('<', '<').replace('>', '>').replace('\\n', '¶
')\n if (op == self.DIFF_INSERT):\n html.append(('%s' % text))\n return ''.join(html)", "docstring": "Convert a diff array into a pretty HTML report.\n\nArgs:\ndiffs: Array of diff tuples.\n\nReturns:\nHTML representation.", "source": "codesearchnet"} {"code": "def restrict_condition(node, var, condition):\n dnf = []\n restricted = False\n for b in var.bindings:\n match_result = _match_condition(b.data, condition)\n if match_result:\n dnf.append([b])\n else:\n restricted = True\n if not dnf:\n _restrict_counter.inc('unsatisfiable')\n return UNSATISFIABLE\n elif restricted:\n _restrict_counter.inc('restricted')\n return Condition(node, dnf)\n else:\n _restrict_counter.inc('unrestricted')\n return None", "docstring": "Return a restricted condition based on filtered bindings.\n\nArgs:\nnode: The CFGNode.\nvar: A variable.\ncondition: A value that we will check each binding for compatibility with.\n\nReturns:\nA Condition or None. Each binding of the variable is checked for\ncompatibility with the condition. If either no bindings match, or all\nbindings match, then None is returned. Otherwise a new Condition is built\nfrom the specified, compatible, bindings.", "source": "github-repos"} {"code": "def get_fba_flux(self, objective):\n \n flux_result = self.solve_fba(objective)\n fba_fluxes = {}\n\n \n for key in self._model.reactions:\n fba_fluxes[key] = flux_result.get_value(self._v_wt[key])\n return fba_fluxes", "docstring": "Return a dictionary of all the fluxes solved by FBA.\n\nDictionary of fluxes is used in :meth:`.lin_moma` and :meth:`.moma`\nto minimize changes in the flux distributions following model\nperturbation.\n\nArgs:\nobjective: The objective reaction that is maximized.\n\nReturns:\nDictionary of fluxes for each reaction in the model.", "source": "juraj-google-style"} {"code": "def get_variable_scope_name(value):\n \n \n value = getattr(value, \"variable_scope\", value)\n if isinstance(value, tf.VariableScope):\n return value.name\n elif isinstance(value, six.string_types):\n return value\n else:\n raise ValueError(\"Not a variable scope: {}\".format(value))", "docstring": "Returns the name of the variable scope indicated by the given value.\n\nArgs:\nvalue: String, variable scope, or object with `variable_scope` attribute\n(e.g., Sonnet module).\n\nReturns:\nThe name (a string) of the corresponding variable scope.\n\nRaises:\nValueError: If `value` does not identify a variable scope.", "source": "juraj-google-style"} {"code": "def _constrain_L2_grad(op, grad):\n \n inp = op.inputs[0]\n inp_norm = tf.norm(inp)\n unit_inp = inp / inp_norm\n\n grad_projection = dot(unit_inp, grad)\n parallel_grad = unit_inp * grad_projection\n\n is_in_ball = tf.less_equal(inp_norm, 1)\n is_pointed_inward = tf.less(grad_projection, 0)\n allow_grad = tf.logical_or(is_in_ball, is_pointed_inward)\n clip_grad = tf.logical_not(allow_grad)\n\n clipped_grad = tf.cond(clip_grad, lambda: grad - parallel_grad, lambda: grad)\n\n return clipped_grad", "docstring": "Gradient for constrained optimization on an L2 unit ball.\n\nThis function projects the gradient onto the ball if you are on the boundary\n(or outside!), but leaves it untouched if you are inside the ball.\n\nArgs:\nop: the tensorflow op we're computing the gradient for.\ngrad: gradient we need to backprop\n\nReturns:\n(projected if necessary) gradient.", "source": "juraj-google-style"} {"code": "def GetFileSystem(self, path_spec):\n \n identifier = self._GetFileSystemCacheIdentifier(path_spec)\n return 
self._file_system_cache.GetObject(identifier)", "docstring": "Retrieves a file system object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nFileSystem: a file system object or None if not cached.", "source": "juraj-google-style"} {"code": "def __call__(self, inputs: Union[str, List[str]], **kwargs: Any) -> Union[List[Dict[str, Any]], List[List[Dict[str, Any]]]]:\n outputs = super().__call__(inputs, **kwargs)\n if isinstance(inputs, list) and len(inputs) == 1:\n return outputs[0]\n return outputs", "docstring": "Fill the masked token in the text(s) given as inputs.\n\nArgs:\ninputs (`str` or `List[str]`):\nOne or several texts (or one list of prompts) with masked tokens.\ntargets (`str` or `List[str]`, *optional*):\nWhen passed, the model will limit the scores to the passed targets instead of looking up in the whole\nvocab. If the provided targets are not in the model vocab, they will be tokenized and the first\nresulting token will be used (with a warning, and that might be slower).\ntop_k (`int`, *optional*):\nWhen passed, overrides the number of predictions to return.\n\nReturn:\nA list or a list of list of `dict`: Each result comes as list of dictionaries with the following keys:\n\n- **sequence** (`str`) -- The corresponding input with the mask token prediction.\n- **score** (`float`) -- The corresponding probability.\n- **token** (`int`) -- The predicted token id (to replace the masked one).\n- **token_str** (`str`) -- The predicted token (to replace the masked one).", "source": "github-repos"} {"code": "def fts(self, segment):\n \n match = self.seg_regex.match(segment)\n if match:\n pre, base, post = match.group('pre'), match.group('base'), match.group('post')\n seg = copy.deepcopy(self.bases[base])\n for m in reversed(pre):\n seg = update_ft_set(seg, self.prefix_dias[m])\n for m in post:\n seg = update_ft_set(seg, self.postfix_dias[m])\n return set(seg)\n else:\n return None", "docstring": "Return features corresponding to segment as list of (value,\nfeature) tuples\n\nArgs:\nsegment (unicode): segment for which features are to be returned as\nUnicode string\n\nReturns:\nlist: None if `segment` cannot be parsed; otherwise, a list of the\nfeatures of `segment` as (value, feature) pairs", "source": "juraj-google-style"} {"code": "def add_metric(self, labels, count_value, sum_value, timestamp=None):\n \n self.samples.append(Sample(self.name + '_count', dict(zip(self._labelnames, labels)), count_value, timestamp))\n self.samples.append(Sample(self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value, timestamp))", "docstring": "Add a metric to the metric family.\n\nArgs:\nlabels: A list of label values\ncount_value: The count value of the metric.\nsum_value: The sum value of the metric.", "source": "juraj-google-style"} {"code": "def _padding_to_conv_op_padding(padding):\n if (not isinstance(padding, tuple)):\n raise ValueError('padding should be a tuple.')\n if all(((p == SAME) for p in padding)):\n return SAME\n else:\n return VALID", "docstring": "Whether to use SAME or VALID for the underlying convolution op.\n\nArgs:\npadding: A tuple of members of ALLOWED_PADDINGS, e.g. 
as returned from\n`_fill_and_verify_padding`.\n\nReturns:\nOne of CONV_OP_ALLOWED_PADDINGS, the padding method to use for the\nunderlying convolution op.\n\nRaises:\nValueError: If padding is not a tuple.", "source": "codesearchnet"} {"code": "def _get_entity(self):\n if self._is_ndb():\n return self._model.get_by_id(self._key_name)\n else:\n return self._model.get_by_key_name(self._key_name)", "docstring": "Retrieve entity from datastore.\n\nUses a different model method for db or ndb models.\n\nReturns:\nInstance of the model corresponding to the current storage object\nand stored using the key name of the storage object.", "source": "codesearchnet"} {"code": "async def ban_user(channel, user):\n data = datatools.get_data()\n server_id = channel.server.id\n try:\n (await client.ban(user))\n except discord.errors.Forbidden:\n (await client.send_typing(channel))\n embed = ui_embed.error(channel, 'Ban Error', 'I do not have the permissions to ban that person.')\n (await embed.send())\n return\n if ('warnings' in data['discord']['servers'][server_id][_data.modulename]):\n if (user.id in data['discord']['servers'][server_id][_data.modulename]['warnings']):\n data['discord']['servers'][server_id][_data.modulename]['warnings'][user.id] = 0\n datatools.write_data(data)\n (await client.send_typing(channel))\n embed = ui_embed.user_ban(channel, user)\n (await embed.send())\n try:\n response = \"You have been banned from the server '{}' contact the owners to resolve this issue.\".format(channel.server.name)\n (await client.send_message(user, response))\n except Exception as e:\n logger.exception(e)", "docstring": "Bans a user from a server\n\nArgs:\nchannel: The channel to send the warning message in\nuser: The user to give the warning to", "source": "codesearchnet"} {"code": "def vasp_version_from_outcar(filename='OUTCAR'):\n with open(filename) as f:\n line = f.readline().strip()\n return line", "docstring": "Returns the first line from a VASP OUTCAR file, to get the VASP source version string.\n\nArgs:\nfilename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'.\n\nReturns:\n(Str): The first line read from the OUTCAR file.", "source": "codesearchnet"} {"code": "def update_mp_firware_version(self, timeout=(- 1)):\n uri = '{}/mpFirmwareVersion'.format(self.data['uri'])\n return self._helper.do_put(uri, None, timeout, None)", "docstring": "Updates the iLO firmware on a physical server to a minimum ILO firmware version required by OneView to\nmanage the server.\n\nArgs:\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\nReturns:\nResource", "source": "codesearchnet"} {"code": "def configfile_from_path(path, strict=True):\n extension = path.split('.')[(- 1)]\n conf_type = FILE_TYPES.get(extension)\n if (not conf_type):\n raise exc.UnrecognizedFileExtension('Cannot parse file of type {0}. 
Choices are {1}.'.format(extension, FILE_TYPES.keys()))\n return conf_type(path=path, strict=strict)", "docstring": "Get a ConfigFile object based on a file path.\n\nThis method will inspect the file extension and return the appropriate\nConfigFile subclass initialized with the given path.\n\nArgs:\npath (str): The file path which represents the configuration file.\nstrict (bool): Whether or not to parse the file in strict mode.\n\nReturns:\nconfpy.loaders.base.ConfigurationFile: The subclass which is\nspecialized for the given file path.\n\nRaises:\nUnrecognizedFileExtension: If there is no loader for the path.", "source": "codesearchnet"} {"code": "def create_magic_packet(macaddress):\n if (len(macaddress) == 12):\n pass\n elif (len(macaddress) == 17):\n sep = macaddress[2]\n macaddress = macaddress.replace(sep, '')\n else:\n raise ValueError('Incorrect MAC address format')\n data = (b'FFFFFFFFFFFF' + (macaddress * 16).encode())\n send_data = b''\n for i in range(0, len(data), 2):\n send_data += struct.pack(b'B', int(data[i:(i + 2)], 16))\n return send_data", "docstring": "Create a magic packet.\n\nA magic packet is a packet that can be used with the for wake on lan\nprotocol to wake up a computer. The packet is constructed from the\nmac address given as a parameter.\n\nArgs:\nmacaddress (str): the mac address that should be parsed into a\nmagic packet.", "source": "codesearchnet"} {"code": "def get_reference_points(spatial_shapes_list, valid_ratios, device):\n reference_points_list = []\n for lvl, (height, width) in enumerate(spatial_shapes_list):\n ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, height - 0.5, height, dtype=valid_ratios.dtype, device=device), torch.linspace(0.5, width - 0.5, width, dtype=valid_ratios.dtype, device=device), indexing='ij')\n ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * height)\n ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * width)\n ref = torch.stack((ref_x, ref_y), -1)\n reference_points_list.append(ref)\n reference_points = torch.cat(reference_points_list, 1)\n reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n return reference_points", "docstring": "Get reference points for each feature map. 
Used in decoder.\n\nArgs:\nspatial_shapes_list (`list` of `tuple`):\nSpatial shapes of the backbone feature maps as a list of tuples.\nvalid_ratios (`torch.FloatTensor`):\nValid ratios of each feature map, has shape of `(batch_size, num_feature_levels, 2)`.\ndevice (`torch.device`):\nDevice on which to create the tensors.\nReturns:\n`torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`", "source": "github-repos"} {"code": "def check_missing_atoms(self, template=None, ha_only=True):\n \n \n missing_atoms = {}\n \n if not template:\n import protein_residues\n template = protein_residues.normal \n \n for residue in self.get_residues():\n \n if not template.has_key(residue.resname):\n \n raise ValueError('Residue name (%s) not in the template' %residue.resname )\n \n if ha_only:\n heavy_atoms = [ atom for atom in template[residue.resname]['atoms'].keys() \n if atom[0] != 'H' and not (atom[0].isdigit() and atom[1] == 'H')]\n reference_set = set(heavy_atoms)\n else:\n reference_set = set(template[residue.resname]['atoms'].keys())\n \n structure_set = set(residue.child_dict.keys())\n \n diff = reference_set.difference(structure_set)\n \n if diff:\n residue_uniq_id = (residue.parent.id, residue.resname, residue.get_id()[1]) \n missing_atoms[residue_uniq_id] = list(diff)\n \n return missing_atoms", "docstring": "Checks for missing atoms based on a template.\nDefault: Searches for missing heavy atoms (not Hydrogen) based on Bio.Struct.protein_residues\n\nArguments:\n- template, dictionary, keys are residue names, values list of atom names.\n- ha_only, boolean, default True, restrict check to heavy atoms.\n\nReturns a dictionary of tuples with the missing atoms per residue.", "source": "juraj-google-style"} {"code": "def _infer_type(str_val, na_value, prev_type):\n if str_val in ('', na_value):\n return prev_type\n type_list = [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64, dtypes.string]\n type_functions = [_is_valid_int32, _is_valid_int64, lambda str_val: _is_valid_float(str_val, dtypes.float32), lambda str_val: _is_valid_float(str_val, dtypes.float64), lambda str_val: True]\n for i in range(len(type_list)):\n validation_fn = type_functions[i]\n if validation_fn(str_val) and (prev_type is None or prev_type in type_list[:i + 1]):\n return type_list[i]", "docstring": "Given a string, infers its tensor type.\n\nInfers the type of a value by picking the least 'permissive' type possible,\nwhile still allowing the previous type inference for this column to be valid.\n\nArgs:\nstr_val: String value to infer the type of.\nna_value: Additional string to recognize as a NA/NaN CSV value.\nprev_type: Type previously inferred based on values of this column that\nwe've seen up till now.\nReturns:\nInferred dtype.", "source": "github-repos"} {"code": "def bash_complete(self, path, cmd, *cmds):\n path = pathlib.Path(path)\n subcmds = list(self.subcmds.keys())\n with path.open('w') as bcf:\n print('_{}() {{'.format(cmd), file=bcf)\n print('COMPREPLY=()', file=bcf)\n print('local cur=${COMP_WORDS[COMP_CWORD]}', end='\\n\\n', file=bcf)\n optstr = ' '.join(self._bash_comp_command(None))\n print('local options=\"{}\"'.format(optstr), end='\\n\\n', file=bcf)\n if subcmds:\n print('local commands=\"{}\"'.format(' '.join(subcmds)), file=bcf)\n print('declare -A suboptions', file=bcf)\n for sub in subcmds:\n optstr = ' '.join(self._bash_comp_command(sub))\n print('suboptions[{}]=\"{}\"'.format(sub, optstr), file=bcf)\n condstr = 'if'\n for sub in subcmds:\n print(condstr, '[[ 
\"${COMP_LINE}\" == *\"', sub, '\"* ]] ; then', file=bcf)\n print('COMPREPLY=( `compgen -W \"${suboptions[', sub, ']}\" -- ${cur}` )', sep='', file=bcf)\n condstr = 'elif'\n print(condstr, '[[ ${cur} == -* ]] ; then', file=bcf)\n print('COMPREPLY=( `compgen -W \"${options}\" -- ${cur}`)', file=bcf)\n if subcmds:\n print('else', file=bcf)\n print('COMPREPLY=( `compgen -W \"${commands}\" -- ${cur}`)', file=bcf)\n print('fi', file=bcf)\n print('}', end='\\n\\n', file=bcf)\n print('complete -F _{0} {0}'.format(cmd), *cmds, file=bcf)", "docstring": "Write bash complete script.\n\nArgs:\npath (path-like): desired path of the complete script.\ncmd (str): command name that should be completed.\ncmds (str): extra command names that should be completed.", "source": "codesearchnet"} {"code": "def has_attribute(self, attribute: str) -> bool:\n return any([(key_node.value == attribute) for (key_node, _) in self.yaml_node.value])", "docstring": "Whether the node has an attribute with the given name.\n\nUse only if is_mapping() returns True.\n\nArgs:\nattribute: The name of the attribute to check for.\n\nReturns:\nTrue iff the attribute is present.", "source": "codesearchnet"} {"code": "def setup_logger(config):\n logger = logging.getLogger(PROGRAM_NAME)\n num_level = getattr(logging, config.get('daemon', 'loglevel').upper(), None)\n logger.setLevel(num_level)\n lengths = []\n for section in config:\n lengths.append(len(section))\n width = (sorted(lengths)[(- 1)] + 1)\n\n def log_format():\n 'Produce a log format line.'\n supported_keys = ['asctime', 'levelname', 'process', 'threadName', 'message']\n return ' '.join(['%({0:s})'.format(i) for i in supported_keys])\n custom_format = log_format()\n json_formatter = CustomJsonFormatter(custom_format, prefix=(PROGRAM_NAME + ': '))\n formatter = logging.Formatter('%(asctime)s {program}[%(process)d] %(levelname)-8s %(threadName)-{width}s %(message)s'.format(program=PROGRAM_NAME, width=width))\n if config.has_option('daemon', 'log_file'):\n file_handler = logging.handlers.RotatingFileHandler(config.get('daemon', 'log_file'), maxBytes=config.getint('daemon', 'log_maxbytes'), backupCount=config.getint('daemon', 'log_backups'))\n if config.getboolean('daemon', 'json_log_file'):\n file_handler.setFormatter(json_formatter)\n else:\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n if config.has_option('daemon', 'log_server'):\n udp_handler = logging.handlers.SysLogHandler((config.get('daemon', 'log_server'), config.getint('daemon', 'log_server_port')))\n if config.getboolean('daemon', 'json_log_server'):\n udp_handler.setFormatter(json_formatter)\n else:\n udp_handler.setFormatter(formatter)\n logger.addHandler(udp_handler)\n if ((not config.has_option('daemon', 'log_file')) and (not config.has_option('daemon', 'log_server'))):\n stream_handler = logging.StreamHandler()\n if config.getboolean('daemon', 'json_stdout'):\n stream_handler.setFormatter(json_formatter)\n else:\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n if config.has_option('daemon', 'stderr_file'):\n sys.stderr = CustomRotatingFileLogger(filepath=config.get('daemon', 'stderr_file'), maxbytes=config.getint('daemon', 'log_maxbytes'), backupcount=config.getint('daemon', 'log_backups'))\n elif (config.has_option('daemon', 'stderr_log_server') and (not config.has_option('daemon', 'stderr_file'))):\n sys.stderr = CustomUdpLogger(server=config.get('daemon', 'log_server'), port=config.getint('daemon', 'log_server_port'))\n else:\n print('messages for 
unhandled exceptions will go to STDERR')\n return logger", "docstring": "Configure the logging environment.\n\nNotice:\nBy default logging will go to STDOUT and messages for unhandled\nexceptions or crashes will go to STDERR. If log_file and/or log_server\nis set then we don't log to STDOUT. Messages for unhandled exceptions\nor crashes can only go to either STDERR or to stderr_file or to\nstderr_log_server.\n\nArguments:\nconfig (obj): A configparser object which holds our configuration.\n\nReturns:\nA logger with all possible handlers configured.", "source": "codesearchnet"} {"code": "def apply_orderby(e: exp.Order, df: pd.DataFrame) -> pd.DataFrame:\n orderby_columns = []\n orderby_columns_order = []\n for el in e.expressions:\n orderby_column = el.find(exp.Column).find(exp.Identifier).this\n order = not bool(el.args['desc'])\n orderby_columns.append(orderby_column)\n orderby_columns_order.append(order)\n df = df.sort_values(orderby_columns, ascending=orderby_columns_order)\n return df", "docstring": "Apply ORDER BY clause to the DataFrame.\n\nArgs:\n- e (exp.Order): Parsed ORDER BY expression.\n- df (pd.DataFrame): DataFrame to be sorted.\n\nReturns:\n- pd.DataFrame: Sorted DataFrame based on the ORDER BY clause.", "source": "github-repos"} {"code": "def get_export_outputs(export_outputs, predictions):\n if export_outputs is None:\n default_output = export_output_lib.PredictOutput(predictions)\n export_outputs = {signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: default_output}\n if not isinstance(export_outputs, dict):\n raise TypeError(f'`export_outputs` must be dict, received: {export_outputs}.')\n for v in export_outputs.values():\n if not isinstance(v, export_output_lib.ExportOutput):\n raise TypeError(f'Values in `export_outputs` must be ExportOutput objects, received: {export_outputs}.')\n _maybe_add_default_serving_output(export_outputs)\n return export_outputs", "docstring": "Validate export_outputs or create default export_outputs.\n\nArgs:\nexport_outputs: Describes the output signatures to be exported to\n`SavedModel` and used during serving. Should be a dict or None.\npredictions: Predictions `Tensor` or dict of `Tensor`.\n\nReturns:\nValid export_outputs dict\n\nRaises:\nTypeError: if export_outputs is not a dict or its values are not\nExportOutput instances.", "source": "github-repos"} {"code": "def _ReportSameIdButNotMerged(self, entity_id, reason):\n self.feed_merger.problem_reporter.SameIdButNotMerged(self, entity_id, reason)", "docstring": "Report that two entities have the same id but could not be merged.\n\nArgs:\nentity_id: The id of the entities.\nreason: A string giving a reason why they could not be merged.", "source": "codesearchnet"} {"code": "def __init__(self, name, description=None, color=None):\n \n self._label_data = {'name': name}\n \n if description is not None:\n self._label_data['description'] = description\n if color is not None:\n self._label_data['color'] = color", "docstring": "Initialize Class Properties.\n\nArgs:\nname (str): The value for this security label.\ndescription (str): A description for this security label.\ncolor (str): A color (hex value) for this security label.", "source": "juraj-google-style"} {"code": "def seed(s):\n try:\n s = int(s)\n except TypeError:\n raise ValueError(f'Argument `s` got an invalid value {s}. 
Only integers are supported.')\n random_seed.set_seed(s)", "docstring": "Sets the seed for the random number generator.\n\nUses `tf.set_random_seed`.\n\nArgs:\ns: an integer.", "source": "github-repos"} {"code": "def showbox(self, force_rerun=False):\n log.debug('{}: running box maker...'.format(self.id))\n if (not self.sphsel_path):\n return ValueError('Please run sphere_selector_using_residues')\n boxfile = op.join(self.dock_dir, '{}_box.pdb'.format(self.id))\n boxscript = op.join(self.dock_dir, '{}_box.in'.format(self.id))\n if ssbio.utils.force_rerun(flag=force_rerun, outfile=boxfile):\n with open(boxscript, 'w') as f:\n f.write('Y\\n')\n f.write('0\\n')\n f.write('{}\\n'.format(op.basename(self.sphsel_path)))\n f.write('1\\n')\n f.write('{}'.format(op.basename(boxfile)))\n cmd = 'showbox < {}'.format(boxscript)\n os.chdir(self.dock_dir)\n os.system(cmd)\n if ssbio.utils.is_non_zero_file(boxfile):\n self.box_path = boxfile\n log.debug('{}: successful box creation'.format(self.box_path))\n else:\n log.critical('{}: showbox failed to run on selected spheres file'.format(self.sphsel_path))", "docstring": "Create the dummy PDB box around the selected spheres.\n\nArgs:\nforce_rerun (bool): If method should be rerun even if output file exists", "source": "codesearchnet"} {"code": "def filter_parts(cls, part_info):\n filtered = OrderedDict()\n for (part_name, info_list) in part_info.items():\n if ((info_list is None) or isinstance(info_list, Exception)):\n continue\n info_list = [i for i in info_list if isinstance(i, cls)]\n if info_list:\n filtered[part_name] = info_list\n return filtered", "docstring": "Filter the part_info dict looking for instances of our class\n\nArgs:\npart_info (dict): {part_name: [Info] or None} as returned from\nController.run_hook()\n\nReturns:\ndict: {part_name: [info]} where info is a subclass of cls", "source": "codesearchnet"} {"code": "def FindUnspentCoinsByAssetAndTotal(self, asset_id, amount, from_addr=None, use_standard=False, watch_only_val=0, reverse=False):\n coins = self.FindUnspentCoinsByAsset(asset_id, from_addr=from_addr, use_standard=use_standard, watch_only_val=watch_only_val)\n sum = Fixed8(0)\n for coin in coins:\n sum = (sum + coin.Output.Value)\n if (sum < amount):\n return None\n coins = sorted(coins, key=(lambda coin: coin.Output.Value.value))\n if reverse:\n coins.reverse()\n total = Fixed8(0)\n for coin in coins:\n if (coin.Output.Value == amount):\n return [coin]\n to_ret = []\n for coin in coins:\n total = (total + coin.Output.Value)\n to_ret.append(coin)\n if (total >= amount):\n break\n return to_ret", "docstring": "Finds unspent coin objects totalling a requested value in the wallet limited to those of a certain asset type.\n\nArgs:\nasset_id (UInt256): a bytearray (len 32) representing an asset on the blockchain.\namount (int): the amount of unspent coins that are being requested.\nfrom_addr (UInt160): a bytearray (len 20) representing an address.\nuse_standard (bool): whether or not to only include standard contracts ( i.e not a smart contract addr ).\nwatch_only_val (int): a flag ( 0 or 64 ) indicating whether or not to find coins that are in 'watch only' addresses.\n\nReturns:\nlist: a list of ``neo.Wallet.Coin`` in the wallet that are not spent. 
this list is empty if there are not enough coins to satisfy the request.", "source": "codesearchnet"} {"code": "def _maybe_load_initial_epoch_from_ckpt(self, initial_epoch):\n if self._training_state is not None:\n return self._training_state.maybe_load_initial_epoch_from_ckpt(initial_epoch, mode=ModeKeys.TRAIN)\n return initial_epoch", "docstring": "Maybe load initial epoch from ckpt considering possible worker recovery.\n\nRefer to tensorflow/python/keras/distribute/worker_training_state.py\nfor more information.\n\nArgs:\ninitial_epoch: The original initial_epoch user passes in in `fit()`.\n\nReturns:\nIf the training is recovering from previous failure under multi-worker\ntraining setting, return the epoch the training is supposed to continue\nat. Otherwise, return the `initial_epoch` the user passes in.", "source": "github-repos"} {"code": "def _serve_audio_metadata(self, request):\n tag = request.args.get('tag')\n run = request.args.get('run')\n sample = int(request.args.get('sample', 0))\n events = self._multiplexer.Tensors(run, tag)\n response = self._audio_response_for_run(events, run, tag, sample)\n return http_util.Respond(request, response, 'application/json')", "docstring": "Given a tag and list of runs, serve a list of metadata for audio.\n\nNote that the actual audio data are not sent; instead, we respond\nwith URLs to the audio. The frontend should treat these URLs as\nopaque and should not try to parse information about them or\ngenerate them itself, as the format may change.\n\nArgs:\nrequest: A werkzeug.wrappers.Request object.\n\nReturns:\nA werkzeug.Response application.", "source": "codesearchnet"} {"code": "def from_bytes(value):\n \n result = (value.decode('utf-8')\n if isinstance(value, six.binary_type) else value)\n if isinstance(result, six.text_type):\n return result\n else:\n raise ValueError(\n '{0!r} could not be converted to unicode'.format(value))", "docstring": "Converts bytes to a string value, if necessary.\n\nArgs:\nvalue (Union[str, bytes]): The value to be converted.\n\nReturns:\nstr: The original value converted to unicode (if bytes) or as passed in\nif it started out as unicode.\n\nRaises:\nValueError: If the value could not be converted to unicode.", "source": "juraj-google-style"} {"code": "def codify(combination):\n\n\t\n\n\tif (isinstance(combination, int) and\n\t\t(combination < 0 or combination >= LIMIT)):\n\t\traise errors.FlagError(\"Out-of-range flag-combination!\")\n\n\tcodes = []\n\n\tfor enum in (Style, Color, Fill):\n\t\tfor flag in enum:\n\t\t\tif combination & flag:\n\t\t\t\tcodes.append(str(flag))\n\n\treturn \";\".join(codes)", "docstring": "Gets escape-codes for flag combinations.\n\nArguments:\ncombination (int): Either a single integer-convertible flag\nor an OR'd flag-combination.\nReturns:\nA semi-colon-delimited string of appropriate escape sequences.\n\nRaises:\nerrors.FlagError if the combination is out-of-range.", "source": "juraj-google-style"} {"code": "def validate_context(context):\n \n \n if not context.service_registry.service_record(context.service_name):\n fail(\"service: {} not found in service registry: {}\".format(\n context.service_name, context.service_registry.filespec))\n service_type = context.service_registry.service_record(context.service_name)[\"type\"]\n\n \n if context.key not in EFConfig.VERSION_KEYS:\n fail(\"invalid key: {}; see VERSION_KEYS in ef_config for supported keys\".format(context.key))\n\n \n if \"allowed_types\" in EFConfig.VERSION_KEYS[context.key] and \\\n service_type not in 
EFConfig.VERSION_KEYS[context.key][\"allowed_types\"]:\n fail(\"service_type: {} is not allowed for key {}; see VERSION_KEYS[KEY]['allowed_types']\"\n \"in ef_config and validate service registry entry\".format(service_type, context.key))\n\n return True", "docstring": "Set the key for the current context.\nArgs:\ncontext: a populated EFVersionContext object", "source": "juraj-google-style"} {"code": "def generate(organization, package, destination):\n gen = ResourceGenerator(organization, package)\n tmp = tempfile.NamedTemporaryFile(mode='w+t', delete=False)\n try:\n tmp.write(gen.conf())\n finally:\n tmp.close()\n shutil.copy(tmp.name, os.path.join(destination, 'conf.py'))\n tmp = tempfile.NamedTemporaryFile(mode='w+t', delete=False)\n try:\n tmp.write(gen.makefile())\n finally:\n tmp.close()\n shutil.copy(tmp.name, os.path.join(destination, 'Makefile'))", "docstring": "Generates the Sphinx configuration and Makefile.\n\nArgs:\norganization (str): the organization name.\npackage (str): the package to be documented.\ndestination (str): the destination directory.", "source": "codesearchnet"} {"code": "def clear_events(self, event_name):\n self.lock.acquire()\n try:\n q = self.get_event_q(event_name)\n q.queue.clear()\n except queue.Empty:\n return\n finally:\n self.lock.release()", "docstring": "Clear all events of a particular name.\n\nArgs:\nevent_name: Name of the events to be popped.", "source": "codesearchnet"} {"code": "def to_bazelrc_lines(self, dpav: DiscoverablePathsAndVersions) -> list[str]:\n dpav.get_relevant_paths_and_versions(self)\n rc = []\n build_and_test_tag_filters = list(_DEFAULT_BUILD_AND_TEST_TAG_FILTERS)\n if self.os == OS.DARWIN:\n build_and_test_tag_filters.append('-no_mac')\n if self.host_compiler == HostCompiler.GCC:\n rc.append(f'build --action_env GCC_HOST_COMPILER_PATH={dpav.gcc_path}')\n elif self.host_compiler == HostCompiler.CLANG:\n rc.append(f'build --action_env CLANG_COMPILER_PATH={dpav.clang_path}')\n rc.append(f'build --repo_env CC={dpav.clang_path}')\n rc.append(f'build --repo_env BAZEL_COMPILER={dpav.clang_path}')\n self.compiler_options.append('-Wno-error=unused-command-line-argument')\n if dpav.lld_path:\n rc.append(f'build --linkopt --ld-path={dpav.lld_path}')\n if self.backend == Backend.CPU:\n build_and_test_tag_filters.append('-gpu')\n elif self.backend == Backend.CUDA:\n build_and_test_tag_filters.append('-rocm-only')\n build_and_test_tag_filters.append('-sycl-only')\n compiler_pair = (self.cuda_compiler, self.host_compiler)\n if compiler_pair == (CudaCompiler.CLANG, HostCompiler.CLANG):\n rc.append('build --config cuda_clang')\n rc.append(f'build --action_env CLANG_CUDA_COMPILER_PATH={dpav.clang_path}')\n elif compiler_pair == (CudaCompiler.NVCC, HostCompiler.CLANG):\n rc.append('build --config cuda_nvcc')\n rc.append(f'build --action_env CLANG_CUDA_COMPILER_PATH={dpav.clang_path}')\n elif compiler_pair == (CudaCompiler.NVCC, HostCompiler.GCC):\n rc.append('build --config cuda')\n else:\n raise NotImplementedError('CUDA clang with host compiler gcc not supported')\n if dpav.cuda_version:\n rc.append(f'build:cuda --repo_env HERMETIC_CUDA_VERSION={dpav.cuda_version}')\n rc.append(f'build:cuda --repo_env HERMETIC_CUDA_COMPUTE_CAPABILITIES={','.join(dpav.cuda_compute_capabilities)}')\n if dpav.cudnn_version:\n rc.append(f'build:cuda --repo_env HERMETIC_CUDNN_VERSION={dpav.cudnn_version}')\n if dpav.local_cuda_path:\n rc.append(f'build:cuda --repo_env LOCAL_CUDA_PATH={dpav.local_cuda_path}')\n if dpav.local_cudnn_path:\n rc.append(f'build:cuda 
--repo_env LOCAL_CUDNN_PATH={dpav.local_cudnn_path}')\n if dpav.local_nccl_path:\n rc.append(f'build:cuda --repo_env LOCAL_NCCL_PATH={dpav.local_nccl_path}')\n if not self.using_nccl:\n rc.append('build --config nonccl')\n elif self.backend == Backend.ROCM:\n build_and_test_tag_filters.append('-cuda-only')\n build_and_test_tag_filters.append('-sycl-only')\n compiler_pair = (self.rocm_compiler, self.host_compiler)\n if compiler_pair == (RocmCompiler.HIPCC, HostCompiler.CLANG):\n rc.append('build --config rocm')\n rc.append(f'build --action_env CLANG_COMPILER_PATH={dpav.clang_path}')\n elif compiler_pair == (RocmCompiler.HIPCC, HostCompiler.GCC):\n rc.append('build --config rocm')\n else:\n raise NotImplementedError('ROCm clang with host compiler not supported')\n elif self.backend == Backend.SYCL:\n build_and_test_tag_filters.append('-cuda-only')\n build_and_test_tag_filters.append('-rocm-only')\n build_and_test_tag_filters.append('-no-sycl')\n rc.append('build --config sycl')\n if dpav.ld_library_path:\n rc.append(f'build --action_env LD_LIBRARY_PATH={dpav.ld_library_path}')\n if dpav.clang_major_version in (16, 17, 18):\n self.compiler_options.append('-Wno-gnu-offsetof-extensions')\n if dpav.clang_major_version and dpav.clang_major_version >= 19:\n self.compiler_options.append('-Wno-c23-extensions')\n if dpav.clang_major_version is not None and dpav.clang_major_version < 16 or (dpav.gcc_major_version is not None and dpav.gcc_major_version < 13):\n rc.append('build --define=xnn_enable_avxvnniint8=false')\n if dpav.clang_major_version is not None and dpav.clang_major_version < 14 or (dpav.gcc_major_version is not None and dpav.gcc_major_version < 12):\n rc.append('build --define=xnn_enable_avx512fp16=false')\n rc.append(f'build --action_env PYTHON_BIN_PATH={self.python_bin_path}')\n rc.append(f'build --python_path {self.python_bin_path}')\n rc.append('test --test_env LD_LIBRARY_PATH')\n rc.append('test --test_size_filters small,medium')\n rc.extend([f'build --copt {compiler_option}' for compiler_option in self.compiler_options])\n build_and_test_tag_filters = ','.join(build_and_test_tag_filters)\n rc.append(f'build --build_tag_filters {build_and_test_tag_filters}')\n rc.append(f'build --test_tag_filters {build_and_test_tag_filters}')\n rc.append(f'test --build_tag_filters {build_and_test_tag_filters}')\n rc.append(f'test --test_tag_filters {build_and_test_tag_filters}')\n return rc", "docstring": "Creates a bazelrc given an XLAConfigOptions.\n\nNecessary paths are provided by the user, or retrieved via\n`self._get_relevant_paths`.\n\nArgs:\ndpav: DiscoverablePathsAndVersions that may hold user-specified paths and\nversions. 
The dpav will then read from `self` to determine what to try\nto auto-configure.\n\nReturns:\nThe lines of a bazelrc.", "source": "github-repos"} {"code": "def num_gpus():\n try:\n cmd = shlex.split('nvidia-smi --list-gpus')\n output = subprocess.check_output(cmd).decode('utf-8')\n return sum([1 for x in output.split('\\n') if x.startswith('GPU ')])\n except (OSError, subprocess.CalledProcessError):\n logger.info('No GPUs detected (normal if no gpus installed)')\n return 0", "docstring": "The number of gpus available in the current container.\n\nReturns:\nint: number of gpus available in the current container.", "source": "codesearchnet"} {"code": "def register_event(self, direction, verb, child_fn, priority=10):\n \n if verb not in self._event_handlers:\n self._event_handlers[verb] = []\n\n self._event_handlers[verb].append({\n 'handler': child_fn,\n 'direction': direction,\n 'priority': priority,\n })\n\n for name, server in self.servers.items():\n server.register_event(direction, verb, child_fn, priority=priority)", "docstring": "Register an event with all servers.\n\nArgs:\ndirection (str): `in`, `out`, `both`, `raw`.\nverb (str): Event name.\nchild_fn (function): Handler function.\npriority (int): Handler priority (lower priority executes first).", "source": "juraj-google-style"} {"code": "def absolute_coords(self):\n if (self.type != EventType.POINTER_MOTION_ABSOLUTE):\n raise AttributeError(_wrong_prop.format(self.type))\n abs_x = self._libinput.libinput_event_pointer_get_absolute_x(self._handle)\n abs_y = self._libinput.libinput_event_pointer_get_absolute_y(self._handle)\n return (abs_x, abs_y)", "docstring": "The current absolute coordinates of the pointer event,\nin mm from the top left corner of the device.\n\nTo get the corresponding output screen coordinate, use\n:meth:`transform_absolute_coords`.\n\nFor pointer events that are not of type\n:attr:`~libinput.constant.EventType.POINTER_MOTION_ABSOLUTE`,\nthis property raises :exc:`AttributeError`.\n\nReturns:\n(float, float): The current absolute coordinates.\nRaises:\nAttributeError", "source": "codesearchnet"} {"code": "def recover_last_checkpoints(self, checkpoint_paths):\n checkpoints_with_mtimes = []\n for checkpoint_path in checkpoint_paths:\n try:\n mtime = checkpoint_management.get_checkpoint_mtimes([checkpoint_path])\n except errors.NotFoundError:\n continue\n if mtime:\n checkpoints_with_mtimes.append((checkpoint_path, mtime[0]))\n self.set_last_checkpoints_with_time(checkpoints_with_mtimes)", "docstring": "Recovers the internal saver state after a crash.\n\nThis method is useful for recovering the \"self._last_checkpoints\" state.\n\nGlobs for the checkpoints pointed to by `checkpoint_paths`. If the files\nexist, use their mtime as the checkpoint timestamp.\n\nArgs:\ncheckpoint_paths: a list of checkpoint paths.", "source": "github-repos"} {"code": "def on_epoch_end(self, epoch, logs=None):", "docstring": "Called at the end of an epoch.\n\nSubclasses should override for any actions to run. This function should only\nbe called during TRAIN mode.\n\nArgs:\nepoch: Integer, index of epoch.\nlogs: Dict, metric results for this training epoch, and for the\nvalidation epoch if validation is performed. Validation result keys\nare prefixed with `val_`. For training epoch, the values of the\n`Model`'s metrics are returned. 
Example : `{'loss': 0.2, 'accuracy':\n0.7}`.", "source": "github-repos"} {"code": "def convert_attribute_tag_to_name(value):\n if (not isinstance(value, Tags)):\n raise ValueError('The attribute tag must be a Tags enumeration.')\n for entry in attribute_name_tag_table:\n if (value == entry[1]):\n return entry[0]\n raise ValueError('Unrecognized attribute tag: {}'.format(value))", "docstring": "A utility function that converts an attribute tag into the corresponding\nattribute name string.\n\nFor example: enums.Tags.STATE -> 'State'\n\nArgs:\nvalue (enum): The Tags enumeration value of the attribute.\n\nReturns:\nstring: The attribute name string that corresponds to the attribute\ntag.\n\nRaises:\nValueError: if the attribute tag is not a Tags enumeration or if it\nis unrecognized attribute tag", "source": "codesearchnet"} {"code": "def html_for_env_var(key):\n value = os.getenv(key)\n return KEY_VALUE_TEMPLATE.format(key, value)", "docstring": "Returns an HTML snippet for an environment variable.\n\nArgs:\nkey: A string representing an environment variable name.\n\nReturns:\nString HTML representing the value and variable.", "source": "codesearchnet"} {"code": "def create_vnet(access_token, subscription_id, resource_group, name, location, address_prefix='10.0.0.0/16', subnet_prefix='10.0.0.0/16', nsg_id=None):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/virtualNetworks/', name, '?api-version=', NETWORK_API])\n vnet_body = {'location': location}\n properties = {'addressSpace': {'addressPrefixes': [address_prefix]}}\n subnet = {'name': 'subnet'}\n subnet['properties'] = {'addressPrefix': subnet_prefix}\n if (nsg_id is not None):\n subnet['properties']['networkSecurityGroup'] = {'id': nsg_id}\n properties['subnets'] = [subnet]\n vnet_body['properties'] = properties\n body = json.dumps(vnet_body)\n return do_put(endpoint, body, access_token)", "docstring": "Create a VNet with specified name and location. Optional subnet address prefix..\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nname (str): Name of the new VNet.\nlocation (str): Azure data center location. E.g. westus.\naddress_prefix (str): Optional VNet address prefix. Default '10.0.0.0/16'.\nsubnet_prefix (str): Optional subnet address prefix. Default '10.0.0.0/16'.\nnsg_id (str): Optional Netwrok Security Group resource Id. Default None.\n\nReturns:\nHTTP response. 
VNet JSON body.", "source": "codesearchnet"} {"code": "def pages(self):\n (avro_schema, column_names) = _avro_schema(self._read_session)\n for block in self._reader:\n self._status = block.status\n (yield ReadRowsPage(avro_schema, column_names, block))", "docstring": "A generator of all pages in the stream.\n\nReturns:\ntypes.GeneratorType[google.cloud.bigquery_storage_v1beta1.ReadRowsPage]:\nA generator of pages.", "source": "codesearchnet"} {"code": "def consume(self, expect_class=None):\n if (expect_class and (not isinstance(self.current_token, expect_class))):\n raise InvalidTokenError(('Unexpected token at %d: got %r, expected %s' % (self.current_pos, self.current_token, expect_class.__name__)))\n current_token = self.current_token\n self._forward()\n return current_token", "docstring": "Retrieve the current token, then advance the parser.\n\nIf an expected class is provided, it will assert that the current token\nmatches that class (is an instance).\n\nNote that when calling a token's nud() or led() functions, the \"current\"\ntoken is the token following the token whose method has been called.\n\nReturns:\nToken: the previous current token.\n\nRaises:\nInvalidTokenError: If an expect_class is provided and the current\ntoken doesn't match that class.", "source": "codesearchnet"} {"code": "class DepthAnythingPreActResidualLayer(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.activation1 = nn.ReLU()\n self.convolution1 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=True)\n self.activation2 = nn.ReLU()\n self.convolution2 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=True)\n\n def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:\n residual = hidden_state\n hidden_state = self.activation1(hidden_state)\n hidden_state = self.convolution1(hidden_state)\n hidden_state = self.activation2(hidden_state)\n hidden_state = self.convolution2(hidden_state)\n return hidden_state + residual", "docstring": "ResidualConvUnit, pre-activate residual unit.\n\nArgs:\nconfig (`[DepthAnythingConfig]`):\nModel configuration class defining the model architecture.", "source": "github-repos"} {"code": "def __init__(self, daemon=False, *args, **kwargs):\n \n super(ThreadReturn, self).__init__(*args, **kwargs)\n self.daemon = daemon\n self._return = None", "docstring": "Initializes the thread.\n\nArgs:\nself (ThreadReturn): the ``ThreadReturn`` instance\ndaemon (bool): if the thread should be spawned as a daemon\nargs: optional list of arguments\nkwargs: optional key-word arguments\n\nReturns:\n``None``", "source": "juraj-google-style"} {"code": "def normalise(self, to_currency):\n out = Money(currency=to_currency)\n for money in self._money_obs:\n out += converter.convert(money, to_currency)\n return Balance([out])", "docstring": "Normalise this balance into a single currency\n\nArgs:\nto_currency (str): Destination currency\n\nReturns:\n(Balance): A new balance object containing a single Money value in the specified currency", "source": "codesearchnet"} {"code": "def _make_assets_key_collection(saved_model_proto, export_path):\n asset_filenames = {}\n used_asset_filenames = set()\n\n def _make_asset_filename(original_filename):\n 'Returns the asset filename to use for the file.'\n if (original_filename in asset_filenames):\n return asset_filenames[original_filename]\n basename = os.path.basename(original_filename)\n suggestion = basename\n index = 0\n while 
(suggestion in used_asset_filenames):\n suggestion = ('%s%d' % (basename, index))\n index += 1\n asset_filenames[original_filename] = suggestion\n used_asset_filenames.add(suggestion)\n return suggestion\n for meta_graph in saved_model_proto.meta_graphs:\n collection_def = meta_graph.collection_def.get(tf_v1.GraphKeys.ASSET_FILEPATHS)\n if (collection_def is None):\n continue\n if (collection_def.WhichOneof('kind') != 'node_list'):\n raise ValueError('MetaGraph collection ASSET_FILEPATHS is not a list of tensors.')\n for tensor in collection_def.node_list.value:\n if (not tensor.endswith(':0')):\n raise ValueError('Unexpected tensor in ASSET_FILEPATHS collection.')\n asset_nodes = set([_get_node_name_from_tensor(tensor) for tensor in collection_def.node_list.value])\n tensor_filename_map = {}\n for node in meta_graph.graph_def.node:\n if (node.name in asset_nodes):\n _check_asset_node_def(node)\n filename = node.attr['value'].tensor.string_val[0]\n tensor_filename_map[(node.name + ':0')] = filename\n node.attr['value'].tensor.string_val[0] = tf.compat.as_bytes('SAVEDMODEL-ASSET')\n if tensor_filename_map:\n assets_key_collection = meta_graph.collection_def[tf_v1.saved_model.constants.ASSETS_KEY]\n for (tensor, filename) in sorted(tensor_filename_map.items()):\n asset_proto = meta_graph_pb2.AssetFileDef()\n asset_proto.filename = _make_asset_filename(filename)\n asset_proto.tensor_info.name = tensor\n assets_key_collection.any_list.value.add().Pack(asset_proto)\n return {original_filename: _get_asset_filename(export_path, asset_filename) for (original_filename, asset_filename) in asset_filenames.items()}", "docstring": "Creates an ASSETS_KEY collection in the GraphDefs in saved_model_proto.\n\nAdds an ASSETS_KEY collection to the GraphDefs in the SavedModel and returns\na map from original asset filename to filename when exporting the SavedModel\nto `export_path`.\n\nThis is roughly the inverse operation of `_merge_assets_key_collection`.\n\nArgs:\nsaved_model_proto: SavedModel proto to be modified.\nexport_path: string with path where the saved_model_proto will be exported.\n\nReturns:\nA map from original asset filename to asset filename when exporting the\nSavedModel to path.\n\nRaises:\nValueError: on unsuported/unexpected SavedModel.", "source": "codesearchnet"} {"code": "def goto(directory, create=False):\n current = os.getcwd()\n directory = os.path.abspath(directory)\n if (os.path.isdir(directory) or (create and mkdir(directory))):\n logger.info('goto -> %s', directory)\n os.chdir(directory)\n try:\n (yield True)\n finally:\n logger.info('goto <- %s', directory)\n os.chdir(current)\n else:\n logger.info('goto(%s) - directory does not exist, or cannot be created.', directory)\n (yield False)", "docstring": "Context object for changing directory.\n\nArgs:\ndirectory (str): Directory to go to.\ncreate (bool): Create directory if it doesn't exists.\n\nUsage::\n\n>>> with goto(directory) as ok:\n... if not ok:\n... print 'Error'\n... else:\n... 
print 'All OK'", "source": "codesearchnet"} {"code": "def BuildChecks(self, request):\n result = []\n if (request.HasField('start_time') or request.HasField('end_time')):\n\n def FilterTimestamp(file_stat, request=request):\n return (file_stat.HasField('st_mtime') and ((file_stat.st_mtime < request.start_time) or (file_stat.st_mtime > request.end_time)))\n result.append(FilterTimestamp)\n if (request.HasField('min_file_size') or request.HasField('max_file_size')):\n\n def FilterSize(file_stat, request=request):\n return (file_stat.HasField('st_size') and ((file_stat.st_size < request.min_file_size) or (file_stat.st_size > request.max_file_size)))\n result.append(FilterSize)\n if request.HasField('perm_mode'):\n\n def FilterPerms(file_stat, request=request):\n return ((file_stat.st_mode & request.perm_mask) != request.perm_mode)\n result.append(FilterPerms)\n if request.HasField('uid'):\n\n def FilterUID(file_stat, request=request):\n return (file_stat.st_uid != request.uid)\n result.append(FilterUID)\n if request.HasField('gid'):\n\n def FilterGID(file_stat, request=request):\n return (file_stat.st_gid != request.gid)\n result.append(FilterGID)\n if request.HasField('path_regex'):\n regex = request.path_regex\n\n def FilterPath(file_stat, regex=regex):\n 'Suppress any filename not matching the regular expression.'\n return (not regex.Search(file_stat.pathspec.Basename()))\n result.append(FilterPath)\n if request.HasField('data_regex'):\n\n def FilterData(file_stat, **_):\n 'Suppress files that do not match the content.'\n return (not self.TestFileContent(file_stat))\n result.append(FilterData)\n return result", "docstring": "Parses request and returns a list of filter callables.\n\nEach callable will be called with the StatEntry and returns True if the\nentry should be suppressed.\n\nArgs:\nrequest: A FindSpec that describes the search.\n\nReturns:\na list of callables which return True if the file is to be suppressed.", "source": "codesearchnet"} {"code": "def BSearchRound(a, x, lo=0, hi=None):\n \n if len(a) == 0: return -1\n hi = hi if hi is not None else len(a)\n pos = bisect_left(a, x, lo, hi)\n\n if pos >= hi:\n return hi - 1\n elif a[pos] == x or pos == lo:\n return pos\n else:\n return pos - 1 if x - a[pos - 1] <= a[pos] - x else pos", "docstring": "Returns index of a that is closest to x.\n\nArguments:\na -- ordered numeric sequence\nx -- element to search within a\nlo -- lowest index to consider in search*\nhi -- highest index to consider in search*\n\n*bisect.bisect_left capability that we don't need to loose.", "source": "juraj-google-style"} {"code": "def list_runs(self, project, entity=None):\n query = gql('\\n query Buckets($model: String!, $entity: String!) {\\n model(name: $model, entityName: $entity) {\\n buckets(first: 10) {\\n edges {\\n node {\\n id\\n name\\n description\\n }\\n }\\n }\\n }\\n }\\n ')\n return self._flatten_edges(self.gql(query, variable_values={'entity': (entity or self.settings('entity')), 'model': (project or self.settings('project'))})['model']['buckets'])", "docstring": "Lists runs in W&B scoped by project.\n\nArgs:\nproject (str): The project to scope the runs to\nentity (str, optional): The entity to scope this project to. 
Defaults to public models\n\nReturns:\n[{\"id\",\"name\",\"description\"}]", "source": "codesearchnet"} {"code": "def get_config_value(name, path_to_file='config.txt'):\n \n\n \n if not os.path.isfile(path_to_file):\n path_to_file = os.path.join('../instruments/', path_to_file)\n\n path_to_file = os.path.abspath(path_to_file)\n\n if not os.path.isfile(path_to_file):\n print(('path_to_file', path_to_file))\n \n return None\n\n f = open(path_to_file, 'r')\n string_of_file_contents = f.read()\n\n if name[-1] != ':':\n name += ':'\n\n if name not in string_of_file_contents:\n return None\n else:\n config_value = [line.split(name)[1] for line in string_of_file_contents.split('\\n')\n if len(line.split(name)) > 1][0].strip()\n return config_value", "docstring": "gets the value for \"name\" from \"path_to_file\" config file\nArgs:\nname: name of variable in config file\npath_to_file: path to config file\n\nReturns: path to dll if name exists in the file; otherwise, returns None", "source": "juraj-google-style"} {"code": "def to_html(self):\n \n\n text = self.text\n if text is None:\n text = self.uri\n return '<a href=\"%s\"%s>%s</a>' % (\n self.uri, self.html_attributes(), text)", "docstring": "Render as html\n\nArgs:\nNone\n\nReturns:\nStr the html representation\n\nRaises:\nErrors are propagated", "source": "juraj-google-style"} {"code": "def verify_no_new_dims(input_shapes, output_shape):\n \n all_input_dims = set(sum([s.dims for s in input_shapes], []))\n all_output_dims = set(output_shape.dims)\n if not all_output_dims.issubset(all_input_dims):\n raise ValueError(\n \"No new dimensions allowed in output\"\n \" input_shapes = %s output_shape= %s\"\n % ([s.dims for s in input_shapes], output_shape.dims))", "docstring": "Verifies that all dimensions in the output are in at least one input.\n\nArgs:\ninput_shapes: a list of Shapes\noutput_shape: a Shape\nRaises:\nValueError: if there are new dimensions in the output.", "source": "juraj-google-style"} {"code": "def update_caseid(self, case_obj, family_id):\n new_case = deepcopy(case_obj)\n new_case['_id'] = family_id\n for case_variants in ['suspects', 'causatives']:\n new_variantids = []\n for variant_id in case_obj.get(case_variants, []):\n case_variant = self.variant(variant_id)\n if (not case_variant):\n continue\n new_variantid = get_variantid(case_variant, family_id)\n new_variantids.append(new_variantid)\n new_case[case_variants] = new_variantids\n for acmg_obj in self.acmg_collection.find({'case_id': case_obj['_id']}):\n LOG.info('update ACMG classification: %s', acmg_obj['classification'])\n acmg_variant = self.variant(acmg_obj['variant_specific'])\n new_specific_id = get_variantid(acmg_variant, family_id)\n self.acmg_collection.find_one_and_update({'_id': acmg_obj['_id']}, {'$set': {'case_id': family_id, 'variant_specific': new_specific_id}})\n institute_obj = self.institute(case_obj['owner'])\n for event_obj in self.events(institute_obj, case=case_obj):\n LOG.info('update event: %s', event_obj['verb'])\n self.event_collection.find_one_and_update({'_id': event_obj['_id']}, {'$set': {'case': family_id}})\n self.case_collection.insert_one(new_case)\n self.case_collection.find_one_and_delete({'_id': case_obj['_id']})\n return new_case", "docstring": "Update case id for a case across the database.\n\nThis function is used when a case is a rerun or updated for another reason.\n\nArgs:\ncase_obj(dict)\nfamily_id(str): The new family id\n\nReturns:\nnew_case(dict): The updated case object", "source": "codesearchnet"} {"code": "def diff_fromDelta(self, text1, 
delta):\n diffs = []\n pointer = 0\n tokens = delta.split('\\t')\n for token in tokens:\n if (token == ''):\n continue\n param = token[1:]\n if (token[0] == '+'):\n param = urllib.parse.unquote(param)\n diffs.append((self.DIFF_INSERT, param))\n elif ((token[0] == '-') or (token[0] == '=')):\n try:\n n = int(param)\n except ValueError:\n raise ValueError(('Invalid number in diff_fromDelta: ' + param))\n if (n < 0):\n raise ValueError(('Negative number in diff_fromDelta: ' + param))\n text = text1[pointer:(pointer + n)]\n pointer += n\n if (token[0] == '='):\n diffs.append((self.DIFF_EQUAL, text))\n else:\n diffs.append((self.DIFF_DELETE, text))\n else:\n raise ValueError(('Invalid diff operation in diff_fromDelta: ' + token[0]))\n if (pointer != len(text1)):\n raise ValueError(('Delta length (%d) does not equal source text length (%d).' % (pointer, len(text1))))\n return diffs", "docstring": "Given the original text1, and an encoded string which describes the\noperations required to transform text1 into text2, compute the full diff.\n\nArgs:\ntext1: Source string for the diff.\ndelta: Delta text.\n\nReturns:\nArray of diff tuples.\n\nRaises:\nValueError: If invalid input.", "source": "codesearchnet"} {"code": "def CleanseComments(line):\n \n commentpos = line.find('//')\n if commentpos != -1 and not IsCppString(line[:commentpos]):\n line = line[:commentpos].rstrip()\n \n return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)", "docstring": "Removes //-comments and single-line C-style /* */ comments.\n\nArgs:\nline: A line of C++ source.\n\nReturns:\nThe line with single-line comments removed.", "source": "juraj-google-style"} {"code": "def _hexvalue_to_rgb(hexvalue):\n \n r = int(hexvalue[0:2], 16)\n g = int(hexvalue[2:4], 16)\n b = int(hexvalue[4:6], 16)\n\n return (r, g, b)", "docstring": "Converts the hexvalue used by tuya for colour representation into\nan RGB value.\n\nArgs:\nhexvalue(string): The hex representation generated by BulbDevice._rgb_to_hexvalue()", "source": "juraj-google-style"} {"code": "def create_box_reminder(self, box_key, message, remind_date, remind_follwers, **kwargs):\n\t\t\n\t\turi = '/'.join([\n\t\t\t\t\t\tself.api_uri,\n\t\t\t\t\t\tself.boxes_suffix, \n\t\t\t\t\t\tbox_key,\n\t\t\t\t\t\tself.reminders_suffix\n\t\t\t\t\t\t])\n\t\tkwargs.update({\t'message':message, \n\t\t\t\t\t\t'remindDate':remind_date, \n\t\t\t\t\t\t'remindFollowers': remind_follwers})\n\n\t\tnew_rem = StreakReminder(**kwargs)\n\t\t\n\t\tcode, data = self._req('put', uri, new_rem.to_dict(rw = True))\n\t\t\n\t\treturn code, data", "docstring": "Creates a reminder with the provided attributes.\nArgs:\nbox_key \t\t\tspecifying the box to add the field to\nmessage\t\t\t\tmessage for the reminder\nremind_date\t\t\tdate to remind on in ticks.\nremind_followers\ttrue/false\nkwargs\t\t\t\t{..} see StreakReminder object for details\nreturn\t\t\t\t(status code, reminder dict)", "source": "juraj-google-style"} {"code": "def get_list(self, name, default=None):\n if (name not in self):\n if (default is not None):\n return default\n raise EnvironmentError.not_found(self._prefix, name)\n return list(self[name])", "docstring": "Retrieves an environment variable as a list.\n\nNote that while implicit access of environment variables\ncontaining tuples will return tuples, using this method will\ncoerce tuples to lists.\n\nArgs:\nname (str): The case-insensitive, unprefixed variable name.\ndefault: If provided, a default value will be returned\ninstead of throwing ``EnvironmentError``.\n\nReturns:\nlist: The 
environment variable's value as a list.\n\nRaises:\nEnvironmentError: If the environment variable does not\nexist, and ``default`` was not provided.\nValueError: If the environment variable value is not an\ninteger with base 10.", "source": "codesearchnet"} {"code": "def reduce(cls, requirements: Iterable['FetchRequirement']) \\\n -> 'FetchRequirement':\n \n return reduce(lambda x, y: x | y, requirements, cls.NONE)", "docstring": "Reduce a set of fetch requirements into a single requirement.\n\nArgs:\nrequirements: The set of fetch requirements.", "source": "juraj-google-style"} {"code": "class UpsampleOneStep(nn.Module):\n\n def __init__(self, scale, in_channels, out_channels):\n super().__init__()\n self.conv = nn.Conv2d(in_channels, scale ** 2 * out_channels, 3, 1, 1)\n self.pixel_shuffle = nn.PixelShuffle(scale)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.pixel_shuffle(x)\n return x", "docstring": "UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)\n\nUsed in lightweight SR to save parameters.\n\nArgs:\nscale (int):\nScale factor. Supported scales: 2^n and 3.\nin_channels (int):\nChannel number of intermediate features.\nout_channels (int):\nChannel number of output features.", "source": "github-repos"} {"code": "def connect(self, db_uri, debug=False):\n \n kwargs = {'echo': debug, 'convert_unicode': True}\n \n if 'mysql' in db_uri:\n kwargs['pool_recycle'] = 3600\n elif ':\n logger.debug(\"detected sqlite path URI: {}\".format(db_uri))\n db_path = os.path.abspath(os.path.expanduser(db_uri))\n db_uri = \"sqlite:\n\n self.engine = create_engine(db_uri, **kwargs)\n logger.debug('connection established successfully')\n \n BASE.metadata.bind = self.engine\n \n self.session = scoped_session(sessionmaker(bind=self.engine))\n \n self.query = self.session.query\n return self", "docstring": "Configure connection to a SQL database.\n\nArgs:\ndb_uri (str): path/URI to the database to connect to\ndebug (Optional[bool]): whether to output logging information", "source": "juraj-google-style"} {"code": "def _reduced_stack(istart=3, iend=5, ipython=True):\n \n import inspect\n return [i[istart:iend] for i in inspect.stack() if _decorated_path(i[1])]", "docstring": "Returns the reduced function call stack that includes only relevant\nfunction calls (i.e., ignores any that are not part of the specified package\nor acorn.\n\nArgs:\npackage (str): name of the package that the logged method belongs to.", "source": "juraj-google-style"} {"code": "def lines(start=None, end=None, reverse=False, selection=False):\n \n if selection:\n start, end = get_selection()\n\n else:\n start, end = fix_addresses(start, end)\n\n if not reverse:\n item = idaapi.get_item_head(start)\n while item < end:\n yield Line(item)\n item += idaapi.get_item_size(item)\n\n else: \n item = idaapi.get_item_head(end - 1)\n while item >= start:\n yield Line(item)\n item = idaapi.get_item_head(item - 1)", "docstring": "Iterate lines in range.\n\nArgs:\nstart: Starting address, start of IDB if `None`.\nend: End address, end of IDB if `None`.\nreverse: Set to true to iterate in reverse order.\nselection: If set to True, replaces start and end with current selection.\n\nReturns:\niterator of `Line` objects.", "source": "juraj-google-style"} {"code": "def CreateShapeFromDtypeAndTuple(dtype, shape_tuple):\n element_type = types_.MAP_DTYPE_TO_RECORD[str(dtype)].primitive_type\n return Shape(element_type, shape_tuple)", "docstring": "Create a shape from a Numpy dtype and a sequence of 
nonnegative integers.\n\nArgs:\ndtype: a numpy dtype, e.g. np.dtype('int32').\nshape_tuple: a sequence of nonnegative integers.\n\nReturns:\nA Shape object.", "source": "github-repos"} {"code": "def get_alexa_rankings(self, domains):\n \n api_name = 'alexa_rankings'\n\n (all_responses, domains) = self._bulk_cache_lookup(api_name, domains)\n responses = self._request_reports(domains)\n\n for domain, response in zip(domains, responses):\n xml_response = self._extract_response_xml(domain, response)\n if self._cache:\n self._cache.cache_value(api_name, domain, response)\n all_responses[domain] = xml_response\n\n return all_responses", "docstring": "Retrieves the most recent VT info for a set of domains.\n\nArgs:\ndomains: list of string domains.\nReturns:\nA dict with the domain as key and the VT report as value.", "source": "juraj-google-style"} {"code": "def parse_client_table(redis_client):\n \n NIL_CLIENT_ID = ray.ObjectID.nil().binary()\n message = redis_client.execute_command(\"RAY.TABLE_LOOKUP\",\n ray.gcs_utils.TablePrefix.CLIENT,\n \"\", NIL_CLIENT_ID)\n\n \n \n if message is None:\n return []\n\n node_info = {}\n gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(message, 0)\n\n ordered_client_ids = []\n\n \n \n for i in range(gcs_entry.EntriesLength()):\n client = (ray.gcs_utils.ClientTableData.GetRootAsClientTableData(\n gcs_entry.Entries(i), 0))\n\n resources = {\n decode(client.ResourcesTotalLabel(i)):\n client.ResourcesTotalCapacity(i)\n for i in range(client.ResourcesTotalLabelLength())\n }\n client_id = ray.utils.binary_to_hex(client.ClientId())\n\n \n \n \n if not client.IsInsertion():\n assert client_id in node_info, \"Client removed not found!\"\n assert node_info[client_id][\"IsInsertion\"], (\n \"Unexpected duplicate removal of client.\")\n else:\n ordered_client_ids.append(client_id)\n\n node_info[client_id] = {\n \"ClientID\": client_id,\n \"IsInsertion\": client.IsInsertion(),\n \"NodeManagerAddress\": decode(\n client.NodeManagerAddress(), allow_none=True),\n \"NodeManagerPort\": client.NodeManagerPort(),\n \"ObjectManagerPort\": client.ObjectManagerPort(),\n \"ObjectStoreSocketName\": decode(\n client.ObjectStoreSocketName(), allow_none=True),\n \"RayletSocketName\": decode(\n client.RayletSocketName(), allow_none=True),\n \"Resources\": resources\n }\n \n \n \n \n \n \n return [node_info[client_id] for client_id in ordered_client_ids]", "docstring": "Read the client table.\n\nArgs:\nredis_client: A client to the primary Redis shard.\n\nReturns:\nA list of information about the nodes in the cluster.", "source": "juraj-google-style"} {"code": "def _render_batch(self, non_fluents: NonFluents, states: Fluents, actions: Fluents, interms: Fluents, rewards: np.array, horizon: Optional[int]=None) -> None:\n if (horizon is None):\n horizon = len(states[0][1])\n self._render_round_init(horizon, non_fluents)\n for t in range(horizon):\n s = [(s[0], s[1][t]) for s in states]\n f = [(f[0], f[1][t]) for f in interms]\n a = [(a[0], a[1][t]) for a in actions]\n r = rewards[t]\n self._render_timestep(t, s, a, f, r)\n self._render_round_end(rewards)", "docstring": "Prints `non_fluents`, `states`, `actions`, `interms` and `rewards`\nfor given `horizon`.\n\nArgs:\nstates (Sequence[Tuple[str, np.array]]): A state trajectory.\nactions (Sequence[Tuple[str, np.array]]): An action trajectory.\ninterms (Sequence[Tuple[str, np.array]]): An interm state trajectory.\nrewards (np.array): Sequence of rewards (1-dimensional array).\nhorizon (Optional[int]): Number of timesteps.", 
"source": "codesearchnet"} {"code": "def validate(cls, mapper_spec):\n \n reader_spec = cls.get_params(mapper_spec, allow_old=False)\n\n \n if cls.BUCKET_NAME_PARAM not in reader_spec:\n raise errors.BadReaderParamsError(\n \"%s is required for Google Cloud Storage\" %\n cls.BUCKET_NAME_PARAM)\n try:\n cloudstorage.validate_bucket_name(\n reader_spec[cls.BUCKET_NAME_PARAM])\n except ValueError, error:\n raise errors.BadReaderParamsError(\"Bad bucket name, %s\" % (error))\n\n \n if cls.OBJECT_NAMES_PARAM not in reader_spec:\n raise errors.BadReaderParamsError(\n \"%s is required for Google Cloud Storage\" %\n cls.OBJECT_NAMES_PARAM)\n filenames = reader_spec[cls.OBJECT_NAMES_PARAM]\n if not isinstance(filenames, list):\n raise errors.BadReaderParamsError(\n \"Object name list is not a list but a %s\" %\n filenames.__class__.__name__)\n for filename in filenames:\n if not isinstance(filename, basestring):\n raise errors.BadReaderParamsError(\n \"Object name is not a string but a %s\" %\n filename.__class__.__name__)\n if cls.DELIMITER_PARAM in reader_spec:\n delimiter = reader_spec[cls.DELIMITER_PARAM]\n if not isinstance(delimiter, basestring):\n raise errors.BadReaderParamsError(\n \"%s is not a string but a %s\" %\n (cls.DELIMITER_PARAM, type(delimiter)))", "docstring": "Validate mapper specification.\n\nArgs:\nmapper_spec: an instance of model.MapperSpec\n\nRaises:\nBadReaderParamsError: if the specification is invalid for any reason such\nas missing the bucket name or providing an invalid bucket name.", "source": "juraj-google-style"} {"code": "def cache_memlimit(self, memlimit):\n self._fetch_cmd(b'cache_memlimit', [str(int(memlimit))], False)\n return True", "docstring": "The memcached \"cache_memlimit\" command.\n\nArgs:\nmemlimit: int, the number of megabytes to set as the new cache memory\nlimit.\n\nReturns:\nIf no exception is raised, always returns True.", "source": "codesearchnet"} {"code": "def load_nips2011_papers(path):\n path = os.path.expanduser(path)\n filename = 'NIPS_1987-2015.csv'\n filepath = os.path.join(path, filename)\n if (not os.path.exists(filepath)):\n url = 'https:\n if (not tf.io.gfile.exists(path)):\n tf.io.gfile.makedirs(path)\n print(('Downloading %s to %s' % (url, filepath)))\n urllib.request.urlretrieve(url, filepath)\n with open(filepath) as f:\n iterator = csv.reader(f)\n documents = next(iterator)[1:]\n words = []\n x_train = []\n for row in iterator:\n words.append(row[0])\n x_train.append(row[1:])\n x_train = np.array(x_train, dtype=np.int)\n doc_idx = [i for (i, document) in enumerate(documents) if document.startswith('2011')]\n documents = [documents[doc] for doc in doc_idx]\n x_train = x_train[(:, doc_idx)]\n word_idx = np.logical_and((np.sum((x_train != 0), 1) >= 2), (np.sum(x_train, 1) >= 10))\n words = [word for (word, idx) in zip(words, word_idx) if idx]\n bag_of_words = x_train[(word_idx, :)].T\n return (bag_of_words, words)", "docstring": "Loads NIPS 2011 conference papers.\n\nThe NIPS 1987-2015 data set is in the form of a 11,463 x 5,812 matrix of\nper-paper word counts, containing 11,463 words and 5,811 NIPS conference\npapers (Perrone et al., 2016). We subset to papers in 2011 and words appearing\nin at least two documents and having a total word count of at least 10.\n\nBuilt from the Observations Python package.\n\nArgs:\npath: str.\nPath to directory which either stores file or otherwise file will\nbe downloaded and extracted there. 
Filename is `NIPS_1987-2015.csv`.\n\nReturns:\nbag_of_words: np.ndarray of shape [num_documents, num_words]. Each element\ndenotes the number of occurrences of a specific word in a specific\ndocument.\nwords: List of strings, denoting the words for `bag_of_words`'s columns.", "source": "codesearchnet"} {"code": "def pretty_dnf(dnf):\n if not dnf:\n return 'false'\n else:\n return ' | '.join((pretty_conjunction(c) for c in dnf))", "docstring": "Pretty-print a disjunctive normal form (disjunction of conjunctions).\n\nE.g. [[\"a\", \"b\"], [\"c\"]] -> \"(a & b) | c\".\n\nArgs:\ndnf: A list of list of strings. (Disjunction of conjunctions of strings)\n\nReturns:\nA pretty-printed string.", "source": "github-repos"} {"code": "def parse_config_output(self, output):\n \n regexp = re.compile('^(config |edit |set |end$|next$)(.*)')\n current_block = self\n\n if isinstance(output, py23_compat.string_types):\n output = output.splitlines()\n\n for line in output:\n if 'uuid' in line:\n continue\n if 'snmp-index' in line:\n continue\n line = line.strip()\n result = regexp.match(line)\n\n if result is not None:\n action = result.group(1).strip()\n data = result.group(2).strip()\n\n if action == 'config' or action == 'edit':\n data = data.replace('\"', '')\n if data not in current_block.get_block_names():\n config_block = FortiConfig(data, action, current_block)\n current_block[data] = config_block\n else:\n config_block = current_block[data]\n current_block = config_block\n elif action == 'end' or action == 'next':\n current_block = current_block.get_parent()\n elif action == 'set':\n split_data = data.split(' ')\n parameter = split_data[0]\n data = split_data[1:]\n current_block.set_param(parameter, ' '.join(data))", "docstring": "This method will parse a string containing FortiOS config and will load it into the current\n:class:`~pyFG.forticonfig.FortiConfig` object.\n\nArgs:\n- **output** (string) - A string containing a supported version of FortiOS config", "source": "juraj-google-style"} {"code": "def __init__(self, name=None, description=None, providers=None,\n checkers=None):\n \n self.name = name\n self.description = description\n self.providers = providers or []\n self.checkers = checkers or []\n self.results = []", "docstring": "Initialization method.\n\nArgs:\nname (str): the group name.\ndescription (str): the group description.\nproviders (list): the list of providers.\ncheckers (list): the list of checkers.", "source": "juraj-google-style"} {"code": "def CheckRedundantVirtual(filename, clean_lines, linenum, error):\n line = clean_lines.elided[linenum]\n virtual = Match('^(.*)(\\\\bvirtual\\\\b)(.*)$', line)\n if (not virtual):\n return\n if (Search('\\\\b(public|protected|private)\\\\s+$', virtual.group(1)) or Match('^\\\\s+(public|protected|private)\\\\b', virtual.group(3))):\n return\n if Match('^.*[^:]:[^:].*$', line):\n return\n end_col = (- 1)\n end_line = (- 1)\n start_col = len(virtual.group(2))\n for start_line in xrange(linenum, min((linenum + 3), clean_lines.NumLines())):\n line = clean_lines.elided[start_line][start_col:]\n parameter_list = Match('^([^(]*)\\\\(', line)\n if parameter_list:\n (_, end_line, end_col) = CloseExpression(clean_lines, start_line, (start_col + len(parameter_list.group(1))))\n break\n start_col = 0\n if (end_col < 0):\n return\n for i in xrange(end_line, min((end_line + 3), clean_lines.NumLines())):\n line = clean_lines.elided[i][end_col:]\n match = Search('\\\\b(override|final)\\\\b', line)\n if match:\n error(filename, linenum, 
'readability/inheritance', 4, ('\"virtual\" is redundant since function is already declared as \"%s\"' % match.group(1)))\n end_col = 0\n if Search('[^\\\\w]\\\\s*$', line):\n break", "docstring": "Check if line contains a redundant \"virtual\" function-specifier.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"} {"code": "def sheets_batch_update(config, auth, sheet_url_or_name, data):\n sheet_id = sheets_id(config, auth, sheet_url_or_name)\n API_Sheets(config, auth).spreadsheets().batchUpdate(spreadsheetId=sheet_id, body=data).execute()", "docstring": "Helper for performing batch operations.\n\nArgs:\nconfig - see starthinker/util/configuration.py\nauth - user or service\nsheet_url_or_name - one of: URL, document title, or id\ndata - JSON data for sending to batch request\n\nNo Return", "source": "github-repos"} {"code": "def get_extrema(self, normalize_rxn_coordinate=True):\n \n x = np.arange(0, np.max(self.r), 0.01)\n y = self.spline(x) * 1000\n\n scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]\n min_extrema = []\n max_extrema = []\n for i in range(1, len(x) - 1):\n if y[i] < y[i-1] and y[i] < y[i+1]:\n min_extrema.append((x[i] * scale, y[i]))\n elif y[i] > y[i-1] and y[i] > y[i+1]:\n max_extrema.append((x[i] * scale, y[i]))\n return min_extrema, max_extrema", "docstring": "Returns the positions of the extrema along the MEP. Both local\nminimums and maximums are returned.\n\nArgs:\nnormalize_rxn_coordinate (bool): Whether to normalize the\nreaction coordinate to between 0 and 1. Defaults to True.\n\nReturns:\n(min_extrema, max_extrema), where the extrema are given as\n[(x1, y1), (x2, y2), ...].", "source": "juraj-google-style"} {"code": "def guess_strategy_type(file_name_or_ext):\n \n if '.' not in file_name_or_ext:\n ext = file_name_or_ext\n else:\n name, ext = os.path.splitext(file_name_or_ext)\n ext = ext.lstrip('.')\n file_type_map = get_file_type_map()\n return file_type_map.get(ext, None)", "docstring": "Guess strategy type to use for file by extension.\n\nArgs:\nfile_name_or_ext: Either a file name with an extension or just\nan extension\n\nReturns:\nStrategy: Type corresponding to extension or None if there's no\ncorresponding strategy type", "source": "juraj-google-style"} {"code": "def interact_GxE_1dof(snps, pheno, env, K=None, covs=None, test='lrt'):\n N = snps.shape[0]\n if (K is None):\n K = SP.eye(N)\n if (covs is None):\n covs = SP.ones((N, 1))\n assert ((env.shape[0] == N) and (pheno.shape[0] == N) and (K.shape[0] == N) and (K.shape[1] == N) and (covs.shape[0] == N)), 'shapes missmatch'\n Inter0 = SP.ones((N, 1))\n pv = SP.zeros((env.shape[1], snps.shape[1]))\n print(('starting %i interaction scans for %i SNPs each.' % (env.shape[1], snps.shape[1])))\n t0 = time.time()\n for i in range(env.shape[1]):\n t0_i = time.time()\n cov_i = SP.concatenate((covs, env[(:, i:(i + 1))]), 1)\n lm_i = simple_interaction(snps=snps, pheno=pheno, covs=cov_i, Inter=env[(:, i:(i + 1))], Inter0=Inter0, test=test)\n pv[(i, :)] = lm_i.getPv()[(0, :)]\n t1_i = time.time()\n print(('Finished %i out of %i interaction scans in %.2f seconds.' % ((i + 1), env.shape[1], (t1_i - t0_i))))\n t1 = time.time()\n print(('-----------------------------------------------------------\\nFinished all %i interaction scans in %.2f seconds.' 
% (env.shape[1], (t1 - t0))))\n return pv", "docstring": "Univariate GxE fixed effects interaction linear mixed model test for all\npairs of SNPs and environmental variables.\n\nArgs:\nsnps: [N x S] SP.array of S SNPs for N individuals\npheno: [N x 1] SP.array of 1 phenotype for N individuals\nenv: [N x E] SP.array of E environmental variables for N individuals\nK: [N x N] SP.array of LMM-covariance/kinship koefficients (optional)\nIf not provided, then linear regression analysis is performed\ncovs: [N x D] SP.array of D covariates for N individuals\ntest: 'lrt' for likelihood ratio test (default) or 'f' for F-test\n\nReturns:\npv: [E x S] SP.array of P values for interaction tests between all\nE environmental variables and all S SNPs", "source": "codesearchnet"} {"code": "def parse_cluster_spec(cluster_spec, cluster, verbose=False):\n job_strings = cluster_spec.split(',')\n if not cluster_spec:\n raise ValueError('Empty cluster_spec string')\n for job_string in job_strings:\n job_def = cluster.job.add()\n if job_string.count('|') != 1:\n raise ValueError(\"Not exactly one instance of '|' in cluster_spec\")\n job_name = job_string.split('|')[0]\n if not job_name:\n raise ValueError('Empty job_name in cluster_spec')\n job_def.name = job_name\n if verbose:\n logging.info('Added job named \"%s\"', job_name)\n job_tasks = job_string.split('|')[1].split(';')\n for i in range(len(job_tasks)):\n if not job_tasks[i]:\n raise ValueError('Empty task string at position %d' % i)\n job_def.tasks[i] = job_tasks[i]\n if verbose:\n logging.info(' Added task \"%s\" to job \"%s\"', job_tasks[i], job_name)", "docstring": "Parse content of cluster_spec string and inject info into cluster protobuf.\n\nArgs:\ncluster_spec: cluster specification string, e.g.,\n\"local|localhost:2222;localhost:2223\"\ncluster: cluster protobuf.\nverbose: If verbose logging is requested.\n\nRaises:\nValueError: if the cluster_spec string is invalid.", "source": "github-repos"} {"code": "def match_unknown_against_protocol(self, matcher, solver, unknown, complete):\n assert is_unknown(unknown)\n assert is_complete(complete)\n type_params = {p.type_param: matcher.type_parameter(unknown, complete, p) for p in complete.template}\n subst = type_params.copy()\n implication = matcher.match_Protocol_against_Unknown(complete, unknown, subst)\n if implication is not booleq.FALSE and type_params:\n for param in type_params.values():\n solver.register_variable(param.name)\n solver.implies(booleq.Eq(unknown.name, complete.name), implication)", "docstring": "Given an ~unknown, match it against a class.\n\nArgs:\nmatcher: An instance of pytd.type_match.TypeMatch.\nsolver: An instance of pytd.booleq.Solver.\nunknown: The unknown class to match\ncomplete: A complete class to match against. (E.g. 
a built-in or a user\ndefined class)\n\nReturns:\nAn instance of pytd.booleq.BooleanTerm.", "source": "github-repos"} {"code": "def compress_pdf(filepath, output_path, ghostscript_binary):\n \n if not filepath.endswith(PDF_EXTENSION):\n raise ValueError(\"Filename must end with .pdf!\\n%s does not.\" % filepath)\n try:\n file_size = os.stat(filepath).st_size\n if file_size < FILE_SIZE_LOWER_LIMIT:\n LOGGER.info(NOT_COMPRESSING.format(filepath, file_size, FILE_SIZE_LOWER_LIMIT))\n process = subprocess.Popen(['cp', filepath, output_path])\n else:\n LOGGER.info(COMPRESSING.format(filepath))\n process = subprocess.Popen(\n [ghostscript_binary, \"-sDEVICE=pdfwrite\",\n \"-dCompatabilityLevel=1.4\", \"-dPDFSETTINGS=/ebook\",\n \"-dNOPAUSE\", \"-dQUIET\", \"-dBATCH\",\n \"-sOutputFile=%s\" % output_path, filepath]\n )\n except FileNotFoundError:\n msg = GS_NOT_INSTALLED.format(ghostscript_binary)\n raise FileNotFoundError(msg)\n process.communicate()\n LOGGER.info(FILE_DONE.format(output_path))", "docstring": "Compress a single PDF file.\n\nArgs:\nfilepath (str): Path to the PDF file.\noutput_path (str): Output path.\nghostscript_binary (str): Name/alias of the Ghostscript binary.\n\nRaises:\nValueError\nFileNotFoundError", "source": "juraj-google-style"} {"code": "def constant(cls, value: Value, dtype: tf.DType=tf.float32) -> 'TensorFluent':\n t = tf.constant(value, dtype=dtype)\n scope = []\n batch = False\n return TensorFluent(t, scope, batch=batch)", "docstring": "Returns a constant `value` TensorFluent with given `dtype`.\n\nArgs:\nvalue: The constant value.\ndtype: The output's data type.\n\nReturns:\nA constant TensorFluent.", "source": "codesearchnet"} {"code": "def add_enum(name=None, index=None, flags=idaapi.hexflag(), bitfield=False):\n \n if name is not None:\n with ignored(exceptions.EnumNotFound):\n _get_enum(name)\n raise exceptions.EnumAlreadyExists()\n\n if index is None or index < 0:\n index = idaapi.get_enum_qty()\n\n eid = idaapi.add_enum(index, name, flags)\n\n if eid == idaapi.BADADDR:\n raise exceptions.EnumCreationFailed('Failed creating enum \"{}\"'.format(name))\n\n if bitfield:\n idaapi.set_enum_bf(eid, bitfield)\n\n return Enum(eid=eid)", "docstring": "Create a new enum.\n\nArgs:\nname: Name of the enum to create.\nindex: The index of the enum. 
Leave at default to append the enum as the last enum.\nflags: Enum type flags.\nbitfield: Is the enum a bitfield.\n\nReturns:\nAn `Enum` object.", "source": "juraj-google-style"} {"code": "def is53(msg):\n \n\n if allzeros(msg):\n return False\n\n d = hex2bin(data(msg))\n\n \n\n if wrongstatus(d, 1, 3, 12):\n return False\n\n if wrongstatus(d, 13, 14, 23):\n return False\n\n if wrongstatus(d, 24, 25, 33):\n return False\n\n if wrongstatus(d, 34, 35, 46):\n return False\n\n if wrongstatus(d, 47, 49, 56):\n return False\n\n ias = ias53(msg)\n if ias is not None and ias > 500:\n return False\n\n mach = mach53(msg)\n if mach is not None and mach > 1:\n return False\n\n tas = tas53(msg)\n if tas is not None and tas > 500:\n return False\n\n vr = vr53(msg)\n if vr is not None and abs(vr) > 8000:\n return False\n\n return True", "docstring": "Check if a message is likely to be BDS code 5,3\n(Air-referenced state vector)\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "juraj-google-style"} {"code": "def mols_to_file(mols, path):\n \n with open(path, 'w') as f:\n f.write(mols_to_text(mols))", "docstring": "Save molecules to the SDFile format file\n\nArgs:\nmols: list of molecule objects\npath: file path to save", "source": "juraj-google-style"} {"code": "def delete(filename, retry_params=None, _account_id=None):\n api = storage_api._get_storage_api(retry_params=retry_params, account_id=_account_id)\n common.validate_file_path(filename)\n filename = api_utils._quote_filename(filename)\n (status, resp_headers, content) = api.delete_object(filename)\n errors.check_status(status, [204], filename, resp_headers=resp_headers, body=content)", "docstring": "Delete a Google Cloud Storage file.\n\nArgs:\nfilename: A Google Cloud Storage filename of form '/bucket/filename'.\nretry_params: An api_utils.RetryParams for this call to GCS. 
If None,\nthe default one is used.\n_account_id: Internal-use only.\n\nRaises:\nerrors.NotFoundError: if the file doesn't exist prior to deletion.", "source": "codesearchnet"} {"code": "def sync(coro, timeout=None):\n \n loop = initloop()\n return asyncio.run_coroutine_threadsafe(coro, loop).result(timeout)", "docstring": "Schedule a coroutine to run on the global loop and return it's result.\n\nArgs:\ncoro (coroutine): The coroutine instance.\n\nNotes:\nThis API is thread safe and should only be called by non-loop threads.", "source": "juraj-google-style"} {"code": "def feed(self, data_len, feed_time=None):\n self._bytes_transferred += data_len\n self._collected_bytes_transferred += data_len\n time_now = (feed_time or time.time())\n time_diff = (time_now - self._last_feed_time)\n if (time_diff < self._sample_min_time):\n return\n self._last_feed_time = time.time()\n if ((data_len == 0) and (time_diff >= self._stall_time)):\n self._stalled = True\n return\n self._samples.append((time_diff, self._collected_bytes_transferred))\n self._collected_bytes_transferred = 0", "docstring": "Update the bandwidth meter.\n\nArgs:\ndata_len (int): The number of bytes transfered since the last\ncall to :func:`feed`.\nfeed_time (float): Current time.", "source": "codesearchnet"} {"code": "def convert(self):\n if self.experimental_lower_to_saved_model:\n saved_model_convert_result = self._convert_as_saved_model()\n if saved_model_convert_result:\n return saved_model_convert_result\n graph_def, input_tensors, output_tensors, frozen_func = self._freeze_concrete_function()\n graph_def = self._optimize_tf_model(graph_def, input_tensors, output_tensors, frozen_func)\n return super(TFLiteFrozenGraphConverterV2, self).convert(graph_def, input_tensors, output_tensors)", "docstring": "Converts a TensorFlow GraphDef based on instance variables.\n\nReturns:\nThe converted data in serialized format.\n\nRaises:\nValueError:\nNo concrete function is specified.\nMultiple concrete functions are specified.\nInput shape is not specified.\nInvalid quantization parameters.", "source": "github-repos"} {"code": "def pull_datapackage(descriptor, name, backend, **backend_options):\n \n\n \n warnings.warn(\n 'Functions \"push/pull_datapackage\" are deprecated. 
'\n 'Please use \"Package\" class',\n UserWarning)\n\n \n datapackage_name = name\n\n \n plugin = import_module('jsontableschema.plugins.%s' % backend)\n storage = plugin.Storage(**backend_options)\n\n \n resources = []\n for table in storage.buckets:\n\n \n schema = storage.describe(table)\n base = os.path.dirname(descriptor)\n path, name = _restore_path(table)\n fullpath = os.path.join(base, path)\n\n \n helpers.ensure_dir(fullpath)\n with io.open(fullpath, 'wb') as file:\n model = Schema(deepcopy(schema))\n data = storage.iter(table)\n writer = csv.writer(file, encoding='utf-8')\n writer.writerow(model.headers)\n for row in data:\n writer.writerow(row)\n\n \n resource = {'schema': schema, 'path': path}\n if name is not None:\n resource['name'] = name\n resources.append(resource)\n\n \n mode = 'w'\n encoding = 'utf-8'\n if six.PY2:\n mode = 'wb'\n encoding = None\n resources = _restore_resources(resources)\n helpers.ensure_dir(descriptor)\n with io.open(descriptor,\n mode=mode,\n encoding=encoding) as file:\n descriptor = {\n 'name': datapackage_name,\n 'resources': resources,\n }\n json.dump(descriptor, file, indent=4)\n return storage", "docstring": "Pull Data Package from storage.\n\nAll parameters should be used as keyword arguments.\n\nArgs:\ndescriptor (str): path where to store descriptor\nname (str): name of the pulled datapackage\nbackend (str): backend name like `sql` or `bigquery`\nbackend_options (dict): backend options mentioned in backend docs", "source": "juraj-google-style"} {"code": "def launch_minecraft(ports = [], wait_timeout = 360):\n \n if \"MALMO_XSD_PATH\" not in os.environ:\n print(\"Please set the MALMO_XSD_PATH environment variable.\")\n return\n cwd = os.getcwd()\n try:\n os.chdir(malmo_install_dir + \"/Minecraft\")\n launch_minecraft_in_background(os.getcwd(), ports, wait_timeout)\n finally:\n os.chdir(cwd)", "docstring": "Launch Malmo Minecraft Mod in one or more clients from\nthe Minecraft directory on the (optionally) given ports.\nArgs:\nports: an optionsl list of ports to start minecraft clients on.\nDefaults to a single Minecraft client on port 10000.\nwait_timeout: optional time in seconds to wait (defaults to 3 mins).", "source": "juraj-google-style"} {"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n prefix_ones = [1] * len(self.prefix_tokens)\n suffix_ones = [1] * len(self.suffix_tokens)\n if token_ids_1 is None:\n return prefix_ones + [0] * len(token_ids_0) + suffix_ones\n return prefix_ones + [0] * len(token_ids_0) + [0] * len(token_ids_1) + suffix_ones", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"} {"code": "def update_instance(self, data):\n \n for key, val in iteritems(data):\n if not hasattr(self, key):\n raise AttributeError(\n \"No field named {key} for model {model}\".format(\n key=key,\n model=self.__class__.__name__\n )\n )\n\n setattr(self, key, val)\n\n self.save()\n\n return self", "docstring": "Update a single record by id with the provided data.\n\nArgs:\ndata (dict): The new data to update the record with.\n\nReturns:\nself: This is an instance of itself with the updated data.\n\nRaises:\nAttributeError: This is raised if a key in the ``data`` isn't\na field on the model.", "source": "juraj-google-style"} {"code": "def read_message(self, timeout=None):\n raw_data = self.stream.read(struct.calcsize(self.RECV_MSG_TYPE.struct_format), timeout)\n try:\n raw_message = struct.unpack(self.RECV_MSG_TYPE.struct_format, raw_data)\n except struct.error:\n raise usb_exceptions.AdbProtocolError('%s expected format \"%s\", got data %s', self, self.RECV_MSG_TYPE.struct_format, raw_data)\n if (raw_message[0] not in self.WIRE_TO_CMD):\n raise usb_exceptions.AdbProtocolError('Unrecognized command id: %s', raw_message)\n raw_message = ((self.WIRE_TO_CMD[raw_message[0]],) + raw_message[1:])\n if (self.RECV_MSG_TYPE.has_data and raw_message[(- 1)]):\n data_len = raw_message[(- 1)]\n raw_message = (raw_message[:(- 1)] + (self.stream.read(data_len, timeout),))\n if (raw_message[0] not in self.VALID_RESPONSES):\n raise usb_exceptions.AdbProtocolError('%s not a valid response for %s', raw_message[0], self)\n if (raw_message[0] == 'FAIL'):\n raise usb_exceptions.AdbRemoteError('Remote ADB failure: %s', raw_message)\n return self.RECV_MSG_TYPE(*raw_message)", "docstring": "Read a message from this transport and return it.\n\nReads a message of RECV_MSG_TYPE and returns it. 
Note that this method\nabstracts the data length and data read so that the caller simply gets the\ndata along with the header in the returned message.\n\nArgs:\ntimeout: timeouts.PolledTimeout to use for the operation.\n\nReturns:\nAn instance of self.RECV_MSG_TYPE that was read from self.stream.\n\nRaises:\nAdbProtocolError: If an invalid response is received.\nAdbRemoteError: If a FAIL response is received.", "source": "codesearchnet"} {"code": "def single_qubit_matrix_to_pauli_rotations(mat: np.ndarray, atol: float=0) -> List[Tuple[(ops.Pauli, float)]]:\n\n def is_clifford_rotation(half_turns):\n return near_zero_mod(half_turns, 0.5, atol=atol)\n\n def to_quarter_turns(half_turns):\n return (round((2 * half_turns)) % 4)\n\n def is_quarter_turn(half_turns):\n return (is_clifford_rotation(half_turns) and ((to_quarter_turns(half_turns) % 2) == 1))\n\n def is_half_turn(half_turns):\n return (is_clifford_rotation(half_turns) and (to_quarter_turns(half_turns) == 2))\n\n def is_no_turn(half_turns):\n return (is_clifford_rotation(half_turns) and (to_quarter_turns(half_turns) == 0))\n (z_rad_before, y_rad, z_rad_after) = linalg.deconstruct_single_qubit_matrix_into_angles(mat)\n z_ht_before = ((z_rad_before / np.pi) - 0.5)\n m_ht = (y_rad / np.pi)\n m_pauli = ops.pauli_gates.X\n z_ht_after = ((z_rad_after / np.pi) + 0.5)\n if is_clifford_rotation(z_ht_before):\n if ((is_quarter_turn(z_ht_before) or is_quarter_turn(z_ht_after)) ^ (is_half_turn(m_ht) and is_no_turn((z_ht_before - z_ht_after)))):\n z_ht_before += 0.5\n z_ht_after -= 0.5\n m_pauli = ops.pauli_gates.Y\n if (is_half_turn(z_ht_before) or is_half_turn(z_ht_after)):\n z_ht_before -= 1\n z_ht_after += 1\n m_ht = (- m_ht)\n if is_no_turn(m_ht):\n z_ht_before += z_ht_after\n z_ht_after = 0\n elif is_half_turn(m_ht):\n z_ht_after -= z_ht_before\n z_ht_before = 0\n rotation_list = [(ops.pauli_gates.Z, z_ht_before), (m_pauli, m_ht), (ops.pauli_gates.Z, z_ht_after)]\n return [(pauli, ht) for (pauli, ht) in rotation_list if (not is_no_turn(ht))]", "docstring": "Implements a single-qubit operation with few rotations.\n\nArgs:\nmat: The 2x2 unitary matrix of the operation to implement.\natol: A limit on the amount of absolute error introduced by the\nconstruction.\n\nReturns:\nA list of (Pauli, half_turns) tuples that, when applied in order,\nperform the desired operation.", "source": "codesearchnet"} {"code": "def fill_wildcards(self, field=None, value=0):\n if ((field in [None, 'wildcards']) or isinstance(value, Pad)):\n return\n default_value = getattr(Match, field)\n if isinstance(default_value, IPAddress):\n if (field == 'nw_dst'):\n shift = FlowWildCards.OFPFW_NW_DST_SHIFT\n base_mask = FlowWildCards.OFPFW_NW_DST_MASK\n else:\n shift = FlowWildCards.OFPFW_NW_SRC_SHIFT\n base_mask = FlowWildCards.OFPFW_NW_SRC_MASK\n self.wildcards &= (FlowWildCards.OFPFW_ALL ^ base_mask)\n wildcard = ((value.max_prefix - value.netmask) << shift)\n self.wildcards |= wildcard\n else:\n wildcard_field = 'OFPFW_{}'.format(field.upper())\n wildcard = getattr(FlowWildCards, wildcard_field)\n if (((value == default_value) and (not (self.wildcards & wildcard))) or ((value != default_value) and (self.wildcards & wildcard))):\n self.wildcards ^= wildcard", "docstring": "Update wildcards attribute.\n\nThis method update a wildcards considering the attributes of the\ncurrent instance.\n\nArgs:\nfield (str): Name of the updated field.\nvalue (GenericType): New value used in the field.", "source": "codesearchnet"} {"code": "def uninstall(device: AndroidDevice, package_name: 
str) -> None:\n if is_apk_installed(device, package_name):\n try:\n device.adb.uninstall([package_name])\n except adb.AdbError as e1:\n if ADB_UNINSTALL_INTERNAL_ERROR_MSG in str(e1):\n device.log.debug('Encountered uninstall internal error, try pm remove with UID 0.')\n try:\n device.adb.shell(['pm', 'uninstall', '-k', '--user', '0', package_name])\n return\n except adb.AdbError as e2:\n device.log.exception('Second attempt to uninstall failed: %s', e2)\n raise e1", "docstring": "Uninstall an apk on an Android device if it is installed.\n\nWorks for regular app and OEM pre-installed non-system app.\n\nArgs:\ndevice: AndroidDevice, Mobly's Android controller object.\npackage_name: string, package name of the app.", "source": "github-repos"} {"code": "def __init__(self, scope, parent, id_, name, definition=True):\n \n CodeEntity.__init__(self, scope, parent)\n self.id = id_\n self.name = name\n self.members = []\n self.superclasses = []\n self.member_of = None\n self.references = []\n self._definition = self if definition else None", "docstring": "Constructor for classes.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.\nid: An unique identifier for this class.\nname (str): The name of the class in the program.", "source": "juraj-google-style"} {"code": "def from_fhir_path_expression(fhir_path_expression: str, fhir_context: context.FhirPathContext, structdef_type: _fhir_path_data_types.StructureDataType, handler: primitive_handler.PrimitiveHandler, root_node_context: Optional[Builder]=None) -> 'Builder':\n ast = _ast.build_fhir_path_ast(fhir_path_expression)\n new_context = root_node_context.node if root_node_context else None\n visitor = _evaluation.FhirPathCompilerVisitor(handler, fhir_context, structdef_type, new_context)\n root = visitor.visit(ast)\n return Builder(root, handler)", "docstring": "Function to create an expression builder from a fhir path string.\n\nArgs:\nfhir_path_expression: The FHIRPath expression to parse.\nfhir_context: The context containing the FHIR resources.\nstructdef_type: The root structure definition for the expression.\nhandler: The primitive handler.\nroot_node_context: Optional root expression that fhir_path_expression may\nreference.\n\nReturns:\nThe expression Builder equivalent of the fhir_path_expression.", "source": "github-repos"} {"code": "def unreduce_array(array, shape, axis, keepdims):\n \n \n \n if axis is not None and (not keepdims or keepdims is numpy._NoValue): \n if isinstance(axis, int):\n axis = axis,\n for ax in sorted(axis):\n array = numpy.expand_dims(array, ax)\n return numpy.broadcast_to(array, shape)", "docstring": "Reverse summing over a dimension, NumPy implementation.\n\nArgs:\narray: The array that was reduced.\nshape: The original shape of the array before reduction.\naxis: The axis or axes that were summed.\nkeepdims: Whether these axes were kept as singleton axes.\n\nReturns:\nAn array with axes broadcast to match the shape of the original array.", "source": "juraj-google-style"} {"code": "def translate_config(self, profile, merge=None, replace=None):\n result = []\n for (k, v) in self:\n other_merge = (getattr(merge, k) if merge else None)\n other_replace = (getattr(replace, k) if replace else None)\n translator = Translator(v, profile, merge=other_merge, replace=other_replace)\n result.append(translator.translate())\n return '\\n'.join(result)", "docstring": "Translate the object to native configuration.\n\nIn this context, merge and 
replace means the following:\n\n* **Merge** - Elements that exist in both ``self`` and ``merge`` will use by default the\nvalues in ``merge`` unless ``self`` specifies a new one. Elements that exist only\nin ``self`` will be translated as they are and elements present only in ``merge``\nwill be removed.\n* **Replace** - All the elements in ``replace`` will either be removed or replaced by\nelements in ``self``.\n\nYou can specify one of ``merge``, ``replace`` or none of them. If none of them are set we\nwill just translate configuration.\n\nArgs:\nprofile (list): Which profiles to use.\nmerge (Root): Object we want to merge with.\nreplace (Root): Object we want to replace.", "source": "codesearchnet"} {"code": "def torch_extract_patches(image_tensor, patch_height, patch_width):\n requires_backends(torch_extract_patches, ['torch'])\n image_tensor = image_tensor.unsqueeze(0)\n patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))\n patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)\n patches = patches.permute(0, 4, 2, 3, 1).reshape(image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width)\n return patches.unsqueeze(0)", "docstring": "Utility function to extract patches from a given image tensor. Returns a tensor of shape (1, `patch_height`,\n`patch_width`, `num_channels`x `patch_height` x `patch_width`)\n\nArgs:\nimage_tensor (torch.Tensor):\nThe image tensor to extract patches from.\npatch_height (int):\nThe height of the patches to extract.\npatch_width (int):\nThe width of the patches to extract.", "source": "github-repos"} {"code": "def listen_loop(self):\n \n\n while self.listening:\n try:\n data, address = self.sock.recvfrom(self.bufsize)\n self.receive_datagram(data, address)\n if self.stats_enabled:\n self.stats['bytes_recieved'] += len(data)\n except socket.error as error:\n if error.errno == errno.WSAECONNRESET:\n logger.info(\"connection reset\")\n else:\n raise\n\n logger.info(\"Shutting down the listener...\")", "docstring": "Starts the listen loop and executes the receive_datagram method\nwhenever a packet is received.\n\nArgs:\nNone\n\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def generate_nb_data(P, R, n_cells, assignments=None):\n (genes, clusters) = P.shape\n output = np.zeros((genes, n_cells))\n if (assignments is None):\n cluster_probs = (np.ones(clusters) / clusters)\n labels = []\n for i in range(n_cells):\n if (assignments is None):\n c = np.random.choice(range(clusters), p=cluster_probs)\n else:\n c = assignments[i]\n labels.append(c)\n output[:, i] = np.random.negative_binomial(R[:, c], (1.0 - P[:, c]))\n return (output, np.array(labels))", "docstring": "Generates negative binomial data\n\nArgs:\nP (array): genes x clusters\nR (array): genes x clusters\nn_cells (int): number of cells\nassignments (list): cluster assignment of each cell. 
Default:\nrandom uniform\n\nReturns:\ndata array with shape genes x cells\nlabels - array of cluster labels", "source": "codesearchnet"} {"code": "def load_intent(self, name, file_name, reload_cache=False):\n \n self.intents.load(name, file_name, reload_cache)\n with open(file_name) as f:\n self.padaos.add_intent(name, f.read().split('\\n'))\n self.must_train = True", "docstring": "Loads an intent, optionally checking the cache first\n\nArgs:\nname (str): The associated name of the intent\nfile_name (str): The location of the intent file\nreload_cache (bool): Whether to refresh all of cache", "source": "juraj-google-style"} {"code": "def extract_derivative_feature(feature):\n \n first_derivative_feature = processing.derivative_extraction(\n feature, DeltaWindows=2)\n second_derivative_feature = processing.derivative_extraction(\n first_derivative_feature, DeltaWindows=2)\n\n \n feature_cube = np.concatenate(\n (feature[:, :, None], first_derivative_feature[:, :, None],\n second_derivative_feature[:, :, None]),\n axis=2)\n return feature_cube", "docstring": "This function extracts temporal derivative features which are\nfirst and second derivatives.\n\nArgs:\nfeature (array): The feature vector which its size is: N x M\n\nReturn:\narray: The feature cube vector which contains the static, first and second derivative features of size: N x M x 3", "source": "juraj-google-style"} {"code": "def get_plan(self, plan_code):\n return self.client._get((self.url + 'plans/{}'.format(plan_code)), headers=self.get_headers())", "docstring": "Check all the information of a plan for subscriptions associated with the merchant.\n\nArgs:\nplan_code: Plan’s identification code for the merchant.\n\nReturns:", "source": "codesearchnet"} {"code": "def _ParseMFTEntry(self, parser_mediator, mft_entry):\n \n for attribute_index in range(0, mft_entry.number_of_attributes):\n try:\n mft_attribute = mft_entry.get_attribute(attribute_index)\n self._ParseMFTAttribute(parser_mediator, mft_entry, mft_attribute)\n\n except IOError as exception:\n parser_mediator.ProduceExtractionWarning((\n 'unable to parse MFT attribute: {0:d} with error: {1!s}').format(\n attribute_index, exception))", "docstring": "Extracts data from a NFTS $MFT entry.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmft_entry (pyfsntfs.file_entry): MFT entry.", "source": "juraj-google-style"} {"code": "def get_pixel(self, x: int, y: int) -> Tuple[(int, int, int)]:\n color = lib.TCOD_image_get_pixel(self.image_c, x, y)\n return (color.r, color.g, color.b)", "docstring": "Get the color of a pixel in this Image.\n\nArgs:\nx (int): X pixel of the Image. Starting from the left at 0.\ny (int): Y pixel of the Image. 
Starting from the top at 0.\n\nReturns:\nTuple[int, int, int]:\nAn (r, g, b) tuple containing the pixels color value.\nValues are in a 0 to 255 range.", "source": "codesearchnet"} {"code": "def change_window(self, size_window):\n \n self.size_window = size_window\n self.window = self.lambert_window(\n self.size_window, self.lat0, self.lon0)", "docstring": "Change the region of interest\n\nArgs:\nsize_window (float): Radius of the region of interest (km)\n\nNotes:\nChange the attributes ``size_window`` and ``window`` to\ncorrespond to the new region of interest.", "source": "juraj-google-style"} {"code": "def members(name, members_list, **kwargs):\n members_list = [salt.utils.win_functions.get_sam_name(m) for m in members_list.split(',')]\n if (not isinstance(members_list, list)):\n log.debug('member_list is not a list')\n return False\n try:\n obj_group = _get_group_object(name)\n except pywintypes.com_error as exc:\n msg = 'Failed to access group {0}. {1}'.format(name, win32api.FormatMessage(exc.excepinfo[5]))\n log.error(msg)\n return False\n existing_members = [_get_username(x) for x in obj_group.members()]\n existing_members.sort()\n members_list.sort()\n if (existing_members == members_list):\n log.info('%s membership is correct', name)\n return True\n success = True\n for member in members_list:\n if (member not in existing_members):\n try:\n obj_group.Add(('WinNT:\n log.info('User added: %s', member)\n except pywintypes.com_error as exc:\n msg = 'Failed to add {0} to {1}. {2}'.format(member, name, win32api.FormatMessage(exc.excepinfo[5]))\n log.error(msg)\n success = False\n for member in existing_members:\n if (member not in members_list):\n try:\n obj_group.Remove(('WinNT:\n log.info('User removed: %s', member)\n except pywintypes.com_error as exc:\n msg = 'Failed to remove {0} from {1}. {2}'.format(member, name, win32api.FormatMessage(exc.excepinfo[5]))\n log.error(msg)\n success = False\n return success", "docstring": "Ensure a group contains only the members in the list\n\nArgs:\n\nname (str):\nThe name of the group to modify\n\nmembers_list (str):\nA single user or a comma separated list of users. The group will\ncontain only the users specified in this list.\n\nReturns:\nbool: ``True`` if successful, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' group.members foo 'user1,user2,user3'", "source": "codesearchnet"} {"code": "def get_student_item_dict(self, anonymous_user_id=None):\n item_id = self._serialize_opaque_key(self.scope_ids.usage_id)\n if hasattr(self, 'xmodule_runtime'):\n course_id = self.get_course_id()\n if anonymous_user_id:\n student_id = anonymous_user_id\n else:\n student_id = self.xmodule_runtime.anonymous_student_id\n else:\n course_id = 'edX/Enchantment_101/April_1'\n if (self.scope_ids.user_id is None):\n student_id = ''\n else:\n student_id = unicode(self.scope_ids.user_id)\n student_item_dict = dict(student_id=student_id, item_id=item_id, course_id=course_id, item_type='ubcpi')\n return student_item_dict", "docstring": "Create a student_item_dict from our surrounding context.\n\nSee also: submissions.api for details.\n\nArgs:\nanonymous_user_id(str): A unique anonymous_user_id for (user, course) pair.\nReturns:\n(dict): The student item associated with this XBlock instance. 
This\nincludes the student id, item id, and course id.", "source": "codesearchnet"} {"code": "def load_default_japanese_parser() -> Parser:\n with open(os.path.join(MODEL_DIR, 'ja.json'), encoding='utf-8') as f:\n model = json.load(f)\n return Parser(model)", "docstring": "Loads a parser equipped with the default Japanese model.\n\nReturns:\nA parser (:obj:`budoux.Parser`).", "source": "github-repos"} {"code": "def refs(self, type='all', **kwargs):\n path = ('%s/%s/refs' % (self.manager.path, self.get_id()))\n data = {'type': type}\n return self.manager.gitlab.http_get(path, query_data=data, **kwargs)", "docstring": "List the references the commit is pushed to.\n\nArgs:\ntype (str): The scope of references ('branch', 'tag' or 'all')\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabGetError: If the references could not be retrieved\n\nReturns:\nlist: The references the commit is pushed to.", "source": "codesearchnet"} {"code": "def AddValue(self, name, number, aliases=None, description=None):\n if (name in self.values_per_name):\n raise KeyError('Value with name: {0:s} already exists.'.format(name))\n if (number in self.values_per_number):\n raise KeyError('Value with number: {0!s} already exists.'.format(number))\n for alias in (aliases or []):\n if (alias in self.values_per_alias):\n raise KeyError('Value with alias: {0:s} already exists.'.format(alias))\n enumeration_value = EnumerationValue(name, number, aliases=aliases, description=description)\n self.values.append(enumeration_value)\n self.values_per_name[name] = enumeration_value\n self.values_per_number[number] = enumeration_value\n for alias in (aliases or []):\n self.values_per_alias[alias] = enumeration_value", "docstring": "Adds an enumeration value.\n\nArgs:\nname (str): name.\nnumber (int): number.\naliases (Optional[list[str]]): aliases.\ndescription (Optional[str]): description.\n\nRaises:\nKeyError: if the enumeration value already exists.", "source": "codesearchnet"} {"code": "def calculate_checksum_on_bytes(\n b, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM\n):\n \n checksum_calc = get_checksum_calculator_by_dataone_designator(algorithm)\n checksum_calc.update(b)\n return checksum_calc.hexdigest()", "docstring": "Calculate the checksum of ``bytes``.\n\nWarning: This method requires the entire object to be buffered in (virtual) memory,\nwhich should normally be avoided in production code.\n\nArgs:\nb: bytes\nRaw bytes\n\nalgorithm: str\nChecksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.\n\nReturns:\nstr : Checksum as a hexadecimal string, with length decided by the algorithm.", "source": "juraj-google-style"} {"code": "def wait_for_boot_completion(\n self, timeout=DEFAULT_TIMEOUT_BOOT_COMPLETION_SECOND):\n \n timeout_start = time.time()\n\n self.adb.wait_for_device(timeout=timeout)\n while time.time() < timeout_start + timeout:\n try:\n if self.is_boot_completed():\n return\n except adb.AdbError:\n \n \n pass\n time.sleep(5)\n raise DeviceError(self, 'Booting process timed out')", "docstring": "Waits for Android framework to broadcast ACTION_BOOT_COMPLETED.\n\nThis function times out after 15 minutes.\n\nArgs:\ntimeout: float, the number of seconds to wait before timing out.\nIf not specified, no timeout takes effect.", "source": "juraj-google-style"} {"code": "def update_restore_inputs(self, checkpoint_key: str, shape_and_slice_spec: str) -> tuple[Sequence[str], Sequence[str]]:\n logging.vlog(1, 'Updating restore v2 inputs 
for %s[%s]: %s', checkpoint_key, self._object_local_name, shape_and_slice_spec)\n slices = []\n first_layout = self._from_shard_layouts[0]\n full_vocab_size = first_layout.total_rows_per_sparse_core_shard * first_layout.num_sparse_cores\n stack_dim = first_layout.unsharded_padded_shape[1]\n full_shape = [full_vocab_size, stack_dim]\n logging.vlog(1, 'Read checkpoint_key %s: %s', checkpoint_key, full_shape)\n slices.append(_shard_info_str(full_shape, trackable_base.ShardInfo(offset=[0, 0], shape=full_shape)))\n return ([checkpoint_key], slices)", "docstring": "Return the full shape of the stacked that is passed into restore_v2.\n\nThis shape information is required by the restore_v2 process to ensure it\nloads the complete tensor from the checkpoint. The full tensor is required\nto perform resharding operations.\n\nArgs:\ncheckpoint_key: The input checkpoint key to be read.\nshape_and_slice_spec: The shape and slice spec of the checkpoint key to be\nread.\n\nReturns:\nA tuple of (keys, slices) that should be passed to restore_v2 in order to\nreshard according to the resharding plan. The restored tensors from\nrestore_v2 op will usually be passed to reshard method of this class to\nget the final resharded value.", "source": "github-repos"} {"code": "def end_episode(self, agent_indices):\n with tf.name_scope('end_episode/'):\n return tf.cond(self._is_training, (lambda : self._define_end_episode(agent_indices)), str)", "docstring": "Add episodes to the memory and perform update steps if memory is full.\n\nDuring training, add the collected episodes of the batch indices that\nfinished their episode to the memory. If the memory is full, train on it,\nand then clear the memory. A summary string is returned if requested at\nthis step.\n\nArgs:\nagent_indices: Tensor containing current batch indices.\n\nReturns:\nSummary tensor.", "source": "codesearchnet"} {"code": "def _WriteSerializedAttributeContainerList(self, container_type):\n if (container_type == self._CONTAINER_TYPE_EVENT):\n if (not self._serialized_event_heap.data_size):\n return\n number_of_attribute_containers = self._serialized_event_heap.number_of_events\n else:\n container_list = self._GetSerializedAttributeContainerList(container_type)\n if (not container_list.data_size):\n return\n number_of_attribute_containers = container_list.number_of_attribute_containers\n if self._serializers_profiler:\n self._serializers_profiler.StartTiming('write')\n if (container_type == self._CONTAINER_TYPE_EVENT):\n query = 'INSERT INTO event (_timestamp, _data) VALUES (?, ?)'\n else:\n query = 'INSERT INTO {0:s} (_data) VALUES (?)'.format(container_type)\n values_tuple_list = []\n for _ in range(number_of_attribute_containers):\n if (container_type == self._CONTAINER_TYPE_EVENT):\n (timestamp, serialized_data) = self._serialized_event_heap.PopEvent()\n else:\n serialized_data = container_list.PopAttributeContainer()\n if (self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB):\n compressed_data = zlib.compress(serialized_data)\n serialized_data = sqlite3.Binary(compressed_data)\n else:\n compressed_data = ''\n if self._storage_profiler:\n self._storage_profiler.Sample('write', container_type, len(serialized_data), len(compressed_data))\n if (container_type == self._CONTAINER_TYPE_EVENT):\n values_tuple_list.append((timestamp, serialized_data))\n else:\n values_tuple_list.append((serialized_data,))\n self._cursor.executemany(query, values_tuple_list)\n if self._serializers_profiler:\n self._serializers_profiler.StopTiming('write')\n if 
(container_type == self._CONTAINER_TYPE_EVENT):\n self._serialized_event_heap.Empty()\n else:\n container_list.Empty()", "docstring": "Writes a serialized attribute container list.\n\nArgs:\ncontainer_type (str): attribute container type.", "source": "codesearchnet"} {"code": "def get_key_pair(self, alias_name):\n \n uri = self.URI + \"/keypair/\" + alias_name\n return self._client.get(uri)", "docstring": "Retrieves the public and private key pair associated with the specified alias name.\n\nArgs:\nalias_name: Key pair associated with the RabbitMQ\n\nReturns:\ndict: RabbitMQ certificate", "source": "juraj-google-style"} {"code": "def has_arg(fn, name, accept_all=False):\n arg_spec = tf_inspect.getfullargspec(fn)\n if accept_all and arg_spec.varkw is not None:\n return True\n return name in arg_spec.args or name in arg_spec.kwonlyargs", "docstring": "Checks if a callable accepts a given keyword argument.\n\nArgs:\nfn: Callable to inspect.\nname: Check if `fn` can be called with `name` as a keyword argument.\naccept_all: What to return if there is no parameter called `name` but the\nfunction accepts a `**kwargs` argument.\n\nReturns:\nbool, whether `fn` accepts a `name` keyword argument.", "source": "github-repos"} {"code": "def compstat(sdat, tstart=None, tend=None):\n data = sdat.tseries_between(tstart, tend)\n time = data['t'].values\n delta_time = (time[(- 1)] - time[0])\n data = data.iloc[:, 1:].values\n mean = (np.trapz(data, x=time, axis=0) / delta_time)\n rms = np.sqrt((np.trapz(((data - mean) ** 2), x=time, axis=0) / delta_time))\n with open(misc.out_name('statistics.dat'), 'w') as out_file:\n mean.tofile(out_file, sep=' ', format='%10.5e')\n out_file.write('\\n')\n rms.tofile(out_file, sep=' ', format='%10.5e')\n out_file.write('\\n')", "docstring": "Compute statistics from series output by StagYY.\n\nCreate a file 'statistics.dat' containing the mean and standard deviation\nof each series on the requested time span.\n\nArgs:\nsdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.\ntstart (float): starting time. Set to None to start at the beginning of\navailable data.\ntend (float): ending time. 
Set to None to stop at the end of available\ndata.", "source": "codesearchnet"} {"code": "def AddTokenToState(self, newline, dry_run, must_split=False):\n self._PushParameterListState(newline)\n penalty = 0\n if newline:\n penalty = self._AddTokenOnNewline(dry_run, must_split)\n else:\n self._AddTokenOnCurrentLine(dry_run)\n penalty += self._CalculateComprehensionState(newline)\n penalty += self._CalculateParameterListState(newline)\n return self.MoveStateToNextToken() + penalty", "docstring": "Add a token to the format decision state.\n\nAllow the heuristic to try out adding the token with and without a newline.\nLater on, the algorithm will determine which one has the lowest penalty.\n\nArguments:\nnewline: (bool) Add the token on a new line if True.\ndry_run: (bool) Don't commit whitespace changes to the FormatToken if\nTrue.\nmust_split: (bool) A newline was required before this token.\n\nReturns:\nThe penalty of splitting after the current token.", "source": "github-repos"} {"code": "def get_tokens(condition):\n try:\n ast_tokens = list(ast.walk(ast.parse(condition.strip())))\n except SyntaxError as exception:\n Logger.get_logger(__name__).error('Syntax error: %s', exception)\n ast_tokens = []\n return ast_tokens", "docstring": "Get AST tokens for Python condition.\n\nReturns:\nlist: list of AST tokens", "source": "codesearchnet"} {"code": "async def _async_forward(async_chunks: collections.AsyncIterable,\n out: Optional[Union[TeeCapture, IO[str]]]\n ) -> Optional[str]:\n \n capture = isinstance(out, TeeCapture)\n out_pipe = out.out_pipe if isinstance(out, TeeCapture) else out\n\n chunks = [] if capture else None \n async for chunk in async_chunks:\n if not isinstance(chunk, str):\n chunk = chunk.decode()\n if out_pipe:\n print(chunk, file=out_pipe, end='')\n if chunks is not None:\n chunks.append(chunk)\n\n return ''.join(chunks) if chunks is not None else None", "docstring": "Prints/captures output from the given asynchronous iterable.\n\nArgs:\nasync_chunks: An asynchronous source of bytes or str.\nout: Where to put the chunks.\n\nReturns:\nThe complete captured output, or else None if the out argument wasn't a\nTeeCapture instance.", "source": "juraj-google-style"} {"code": "def _create_hunt(self, name, args):\n runner_args = self.grr_api.types.CreateHuntRunnerArgs()\n runner_args.description = self.reason\n hunt = self.grr_api.CreateHunt(flow_name=name, flow_args=args, hunt_runner_args=runner_args)\n print('{0!s}: Hunt created'.format(hunt.hunt_id))\n self._check_approval_wrapper(hunt, hunt.Start)\n return hunt", "docstring": "Create specified hunt.\n\nArgs:\nname: string containing hunt name.\nargs: proto (*FlowArgs) for type of hunt, as defined in GRR flow proto.\n\nReturns:\nThe newly created GRR hunt object.\n\nRaises:\nValueError: if approval is needed and approvers were not specified.", "source": "codesearchnet"} {"code": "def topics(self, exclude_internal_topics=True):\n \n topics = set(self._partitions.keys())\n if exclude_internal_topics:\n return topics - self.internal_topics\n else:\n return topics", "docstring": "Get set of known topics.\n\nArguments:\nexclude_internal_topics (bool): Whether records from internal topics\n(such as offsets) should be exposed to the consumer. If set to\nTrue the only way to receive records from an internal topic is\nsubscribing to it. 
Default True\n\nReturns:\nset: {topic (str), ...}", "source": "juraj-google-style"} {"code": "def scp_file_remote_to_local(self, remote_path, local_path):\n \n\n scp_command = [\n 'scp',\n '-o',\n 'StrictHostKeyChecking=no',\n '-i',\n self.browser_config.get('ssh_key_path'),\n '%s@%s:\"%s\"' %\n (\n self.browser_config.get('username'),\n self.get_ip(),\n remote_path\n ),\n local_path\n ]\n self.info_log(\n \"executing command: %s\" %\n ' '.join(scp_command)\n )\n p = Popen(scp_command)\n p.wait()", "docstring": "Scp a remote file to local\n\nArgs:\nremote_path (str)\nlocal_path (str)", "source": "juraj-google-style"} {"code": "def typing(self, room: Room, timeout: int = 5000):\n \n path = f'/rooms/{quote(room.room_id)}/typing/{quote(self.user_id)}'\n return self.api._send('PUT', path, {'typing': True, 'timeout': timeout})", "docstring": "Send typing event directly to api\n\nArgs:\nroom: room to send typing event to\ntimeout: timeout for the event, in ms", "source": "juraj-google-style"} {"code": "def _ConvertListToObject(cls, json_list):\n \n list_value = []\n for json_list_element in json_list:\n if isinstance(json_list_element, dict):\n list_value.append(cls._ConvertDictToObject(json_list_element))\n\n elif isinstance(json_list_element, list):\n list_value.append(cls._ConvertListToObject(json_list_element))\n\n else:\n list_value.append(json_list_element)\n\n return list_value", "docstring": "Converts a JSON list into an object.\n\nArgs:\njson_list (list[object]): JSON serialized objects.\n\nReturns:\nlist[object]: a deserialized list.", "source": "juraj-google-style"} {"code": "def sentencecase(string):\n \n joiner = ' '\n string = re.sub(r\"[\\-_\\.\\s]\", joiner, str(string))\n if not string:\n return string\n return capitalcase(trimcase(\n re.sub(r\"[A-Z]\", lambda matched: joiner +\n lowercase(matched.group(0)), string)\n ))", "docstring": "Convert string into sentence case.\nFirst letter capped and each punctuations are joined with space.\n\nArgs:\nstring: String to convert.\n\nReturns:\nstring: Sentence cased string.", "source": "juraj-google-style"} {"code": "def frequency_to_probability(frequency_map, decorator=(lambda f: f)):\n total = sum(frequency_map.values())\n return {k: decorator((v / total)) for (k, v) in frequency_map.items()}", "docstring": "Transform a ``frequency_map`` into a map of probability using the sum of all frequencies as the total.\n\nExample:\n>>> frequency_to_probability({'a': 2, 'b': 2})\n{'a': 0.5, 'b': 0.5}\n\nArgs:\nfrequency_map (dict): The dictionary to transform\ndecorator (function): A function to manipulate the probability\n\nReturns:\nDictionary of ngrams to probability", "source": "codesearchnet"} {"code": "def add(self, handler):\n \n self._handlers.append(handler)\n\n \n static_paths = set(h.static_path() for h in self.handlers)\n static_paths.discard(None)\n if len(static_paths) > 1:\n raise RuntimeError(\"More than one static path requested for app: %r\" % list(static_paths))\n elif len(static_paths) == 1:\n self._static_path = static_paths.pop()\n else:\n self._static_path = None", "docstring": "Add a handler to the pipeline used to initialize new documents.\n\nArgs:\nhandler (Handler) : a handler for this Application to use to\nprocess Documents", "source": "juraj-google-style"} {"code": "def _load_checkpoints(self, checkpointDirs):\n memo_lookup_table = {}\n for checkpoint_dir in checkpointDirs:\n logger.info('Loading checkpoints from {}'.format(checkpoint_dir))\n checkpoint_file = os.path.join(checkpoint_dir, 'tasks.pkl')\n try:\n with 
open(checkpoint_file, 'rb') as f:\n while True:\n try:\n data = pickle.load(f)\n memo_fu = Future()\n if data['exception']:\n memo_fu.set_exception(data['exception'])\n else:\n memo_fu.set_result(data['result'])\n memo_lookup_table[data['hash']] = memo_fu\n except EOFError:\n break\n except FileNotFoundError:\n reason = 'Checkpoint file was not found: {}'.format(checkpoint_file)\n logger.error(reason)\n raise BadCheckpoint(reason)\n except Exception:\n reason = 'Failed to load checkpoint: {}'.format(checkpoint_file)\n logger.error(reason)\n raise BadCheckpoint(reason)\n logger.info('Completed loading checkpoint:{0} with {1} tasks'.format(checkpoint_file, len(memo_lookup_table.keys())))\n return memo_lookup_table", "docstring": "Load a checkpoint file into a lookup table.\n\nThe data being loaded from the pickle file mostly contains input\nattributes of the task: func, args, kwargs, env...\nTo simplify the check of whether the exact task has been completed\nin the checkpoint, we hash these input params and use it as the key\nfor the memoized lookup table.\n\nArgs:\n- checkpointDirs (list) : List of filepaths to checkpoints\nEg. ['runinfo/001', 'runinfo/002']\n\nReturns:\n- memoized_lookup_table (dict)", "source": "codesearchnet"} {"code": "def __init__(self, topic_path, add_uuids=None, expansion_service=None):\n super().__init__()\n self._source = _WriteExternal(topic_path=topic_path, add_uuids=add_uuids, expansion_service=expansion_service)", "docstring": "Initializes ``WriteToPubSubLite``.\n\nArgs:\ntopic_path: A Pub/Sub Lite Topic path.\nadd_uuids: Whether to add uuids to the 'x-goog-pubsublite-dataflow-uuid'\nuuid attribute. Defaults to False.", "source": "github-repos"} {"code": "def _get_required_param(self, param_name):\n value = self.request.get(param_name)\n if (not value):\n raise errors.NotEnoughArgumentsError((param_name + ' not specified'))\n return value", "docstring": "Get a required request parameter.\n\nArgs:\nparam_name: name of request parameter to fetch.\n\nReturns:\nparameter value\n\nRaises:\nerrors.NotEnoughArgumentsError: if parameter is not specified.", "source": "codesearchnet"} {"code": "def traverse_by(self, fixers, traversal):\n \n if not fixers:\n return\n for node in traversal:\n for fixer in fixers[node.type]:\n results = fixer.match(node)\n if results:\n new = fixer.transform(node, results)\n if new is not None:\n node.replace(new)\n node = new", "docstring": "Traverse an AST, applying a set of fixers to each node.\n\nThis is a helper method for refactor_tree().\n\nArgs:\nfixers: a list of fixer instances.\ntraversal: a generator that yields AST nodes.\n\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def is_slice_but_not_on_extension(element_definition: message.Message) -> bool:\n return is_slice_element(element_definition) and (not is_slice_on_extension(element_definition))", "docstring": "Returns `True` if the `element_definition` is a slice not on extension.\n\nArgs:\nelement_definition: The element definition that we are checking.", "source": "github-repos"} {"code": "def get_range(self, start, end):", "docstring": "Retrieve a given byte range [start, end) from this download.\n\nRange must be in this form:\n0 <= start < end: Fetch the bytes from start to end.\n\nArgs:\nstart: (int) Initial byte offset.\nend: (int) Final byte offset, exclusive.\n\nReturns:\n(string) A buffer containing the requested data.", "source": "github-repos"} {"code": "def dumps(self):\n old_stream = self.stream\n try:\n self.stream = six.StringIO()\n 
self.write_table()\n tabular_text = self.stream.getvalue()\n finally:\n self.stream = old_stream\n return tabular_text", "docstring": "Get rendered tabular text from the table data.\n\nOnly available for text format table writers.\n\nReturns:\nstr: Rendered tabular text.", "source": "codesearchnet"} {"code": "def __add__(self, other):\n \n sum_rel = DistributedReliability(self.thresholds, self.obs_threshold)\n sum_rel.frequencies = self.frequencies + other.frequencies\n return sum_rel", "docstring": "Add two DistributedReliability objects together and combine their values.\n\nArgs:\nother: a DistributedReliability object\n\nReturns:\nA DistributedReliability Object", "source": "juraj-google-style"} {"code": "def detect_framebuffer(self, glo=None) -> 'Framebuffer':\n res = Framebuffer.__new__(Framebuffer)\n (res.mglo, res._size, res._samples, res._glo) = self.mglo.detect_framebuffer(glo)\n res._color_attachments = None\n res._depth_attachment = None\n res.ctx = self\n res.extra = None\n return res", "docstring": "Detect framebuffer.\n\nArgs:\nglo (int): Framebuffer object.\n\nReturns:\n:py:class:`Framebuffer` object", "source": "codesearchnet"} {"code": "def convertDate(self, date, prefix='', weekday=False):\n dayString = self.convertDay(date, prefix=prefix, weekday=weekday)\n timeString = self.convertTime(date)\n return ((dayString + ' at ') + timeString)", "docstring": "Convert a datetime object representing into a human-ready\nstring that can be read, spoken aloud, etc. In effect, runs\nboth convertDay and convertTime on the input, merging the results.\n\nArgs:\ndate (datetime.date): A datetime object to be converted into text.\nprefix (str): An optional argument that prefixes the converted\nstring. For example, if prefix=\"in\", you'd receive \"in two\ndays\", rather than \"two days\", while the method would still\nreturn \"tomorrow\" (rather than \"in tomorrow\").\nweekday (bool): An optional argument that returns \"Monday, Oct. 1\"\nif True, rather than \"Oct. 1\".\n\nReturns:\nA string representation of the input day and time.", "source": "codesearchnet"} {"code": "def GetFileEntryByPathSpec(self, path_spec):\n \n volume_index = lvm.LVMPathSpecGetVolumeIndex(path_spec)\n\n \n \n if volume_index is None:\n location = getattr(path_spec, 'location', None)\n if location is None or location != self.LOCATION_ROOT:\n return None\n\n return lvm_file_entry.LVMFileEntry(\n self._resolver_context, self, path_spec, is_root=True,\n is_virtual=True)\n\n if (volume_index < 0 or\n volume_index >= self._vslvm_volume_group.number_of_logical_volumes):\n return None\n\n return lvm_file_entry.LVMFileEntry(self._resolver_context, self, path_spec)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nLVMFileEntry: a file entry or None if not available.", "source": "juraj-google-style"} {"code": "def _summary(tag, hparams_plugin_data):\n summary = tf.compat.v1.Summary()\n summary.value.add(tag=tag, metadata=metadata.create_summary_metadata(hparams_plugin_data))\n return summary", "docstring": "Returns a summary holding the given HParamsPluginData message.\n\nHelper function.\n\nArgs:\ntag: string. 
The tag to use.\nhparams_plugin_data: The HParamsPluginData message to use.", "source": "codesearchnet"} {"code": "def quadratic_2d(data):\n arg_data_max = np.argmax(data)\n (i, j) = np.unravel_index(arg_data_max, data.shape)\n z_ = data[((i - 1):(i + 2), (j - 1):(j + 2))]\n try:\n a = ((((((((((- z_[(0, 0)]) + (2 * z_[(0, 1)])) - z_[(0, 2)]) + (2 * z_[(1, 0)])) + (5 * z_[(1, 1)])) + (2 * z_[(1, 2)])) - z_[(2, 0)]) + (2 * z_[(2, 1)])) - z_[(2, 2)]) / 9)\n b = (((((((- z_[(0, 0)]) - z_[(0, 1)]) - z_[(0, 2)]) + z_[(2, 0)]) + z_[(2, 1)]) + z_[(2, 2)]) / 6)\n c = (((((((- z_[(0, 0)]) + z_[(0, 2)]) - z_[(1, 0)]) + z_[(1, 2)]) - z_[(2, 0)]) + z_[(2, 2)]) / 6)\n d = (((((((((z_[(0, 0)] + z_[(0, 1)]) + z_[(0, 2)]) - (z_[(1, 0)] * 2)) - (z_[(1, 1)] * 2)) - (z_[(1, 2)] * 2)) + z_[(2, 0)]) + z_[(2, 1)]) + z_[(2, 2)]) / 6)\n e = ((((z_[(0, 0)] - z_[(0, 2)]) - z_[(2, 0)]) + z_[(2, 2)]) * 0.25)\n f = (((((((((z_[(0, 0)] - (2 * z_[(0, 1)])) + z_[(0, 2)]) + z_[(1, 0)]) - (2 * z_[(1, 1)])) + z_[(1, 2)]) + z_[(2, 0)]) - (2 * z_[(2, 1)])) + z_[(2, 2)]) / 6)\n except IndexError:\n return (i, j)\n det = (((4 * d) * f) - (e ** 2))\n xm = ((- (((2 * f) * b) - (c * e))) / det)\n ym = ((- (((2 * d) * c) - (b * e))) / det)\n return ((i + xm), (j + ym))", "docstring": "Compute the quadratic estimate of the centroid in a 2d-array.\n\nArgs:\ndata (2darray): two dimensional data array\n\nReturns\ncenter (tuple): centroid estimate on the row and column directions,\nrespectively", "source": "codesearchnet"} {"code": "def yaml_dump(data, stream=None):\n \n \n return yaml.dump(\n data,\n stream=stream,\n Dumper=Dumper,\n default_flow_style=False\n )", "docstring": "Dump data to a YAML string/file.\n\nArgs:\ndata (YamlData):\nThe data to serialize as YAML.\nstream (TextIO):\nThe file-like object to save to. 
If given, this function will write\nthe resulting YAML to that stream.\n\nReturns:\nstr: The YAML string.", "source": "juraj-google-style"} {"code": "def get(self, rid, data_callback=None, raise_on_error=True):\n cached_data = None\n ds_data = self.ds.get(rid, raise_on_error=False)\n if (ds_data is not None):\n expired = True\n if (ds_data.get('found') is True):\n if (self.ttl < int(ds_data.get('_source', {}).get('cache-date', 0))):\n cached_data = ds_data.get('_source', {}).get('cache-data')\n expired = False\n self.tcex.log.debug('Using cached data for ({}).'.format(rid))\n else:\n self.tcex.log.debug('Cached data is expired for ({}).'.format(rid))\n if (expired or (ds_data.get('found') is False)):\n if callable(data_callback):\n cached_data = data_callback(rid)\n self.tcex.log.debug('Using callback data for ({}).'.format(rid))\n if cached_data:\n self.update(rid, cached_data, raise_on_error)\n return cached_data", "docstring": "Get cached data from the data store.\n\nArgs:\nrid (str): The record identifier.\ndata_callback (callable): A method that will return the data.\nraise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.\n\nReturns:\nobject : Python request response.", "source": "codesearchnet"} {"code": "def vr60baro(msg):\n d = hex2bin(data(msg))\n if (d[34] == '0'):\n return None\n sign = int(d[35])\n value = bin2int(d[36:45])\n if ((value == 0) or (value == 511)):\n return 0\n value = ((value - 512) if sign else value)\n roc = (value * 32)\n return roc", "docstring": "Vertical rate from barometric measurement, this value may be very noisy.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message (BDS60) string\n\nReturns:\nint: vertical rate in feet/minutes", "source": "codesearchnet"} {"code": "def __init__(self, storage_writer):\n \n super(StorageFileMergeReader, self).__init__(storage_writer)\n self._serializer = json_serializer.JSONAttributeContainerSerializer\n self._serializers_profiler = None", "docstring": "Initializes a storage merge reader.\n\nArgs:\nstorage_writer (StorageWriter): storage writer.", "source": "juraj-google-style"} {"code": "def encode(cls, command):\n \n\n args = []\n for arg in command.args:\n if not isinstance(arg, str):\n arg = str(arg)\n\n if \",\" in arg or arg.startswith(\" \") or arg.endswith(\" \") or arg.startswith(\"hex:\"):\n arg = \"hex:{}\".format(hexlify(arg.encode('utf-8')).decode('utf-8'))\n\n args.append(arg)\n\n argstr = \"\"\n\n if len(args) > 0:\n argstr = \" {\" + \",\".join(args) + \"}\"\n\n return command.name + argstr", "docstring": "Encode a command as an unambiguous string.\n\nArgs:\ncommand (Command): The command to encode.\n\nReturns:\nstr: The encoded command", "source": "juraj-google-style"} {"code": "def __init__(self, table_id=Meter.OFPM_ALL):\n \n super().__init__(InstructionType.OFPIT_GOTO_TABLE)\n self.table_id = table_id", "docstring": "Create a InstructionGotoTable with the optional parameters below.\n\nArgs:\nlength (int): Length of this struct in bytes.\ntable_id (int): set next table in the lookup pipeline.", "source": "juraj-google-style"} {"code": "def validate_allowed_values(allowed_values, value):\n if ((not allowed_values) or isinstance(value, CFNParameter)):\n return True\n return (value in allowed_values)", "docstring": "Support a variable defining which values it allows.\n\nArgs:\nallowed_values (Optional[list]): A list of allowed values from the\nvariable definition\nvalue (obj): The object representing the value provided for the\nvariable\n\nReturns:\nbool: Boolean for whether or 
not the value is valid.", "source": "codesearchnet"} {"code": "def to_value_list(original_strings, corenlp_values=None):\n \n assert isinstance(original_strings, (list, tuple, set))\n if corenlp_values is not None:\n assert isinstance(corenlp_values, (list, tuple, set))\n assert len(original_strings) == len(corenlp_values)\n return list(set(to_value(x, y) for (x, y)\n in zip(original_strings, corenlp_values)))\n else:\n return list(set(to_value(x) for x in original_strings))", "docstring": "Convert a list of strings to a list of Values\n\nArgs:\noriginal_strings (list[basestring])\ncorenlp_values (list[basestring or None])\nReturns:\nlist[Value]", "source": "juraj-google-style"} {"code": "def is_stateful(self) -> bool:\n return False", "docstring": "Indicates whether this ThresholdFn is stateful.\n\nReturns:\nbool: Always False for `FixedThreshold` as it is stateless.", "source": "github-repos"} {"code": "def create(self, emails, displayName=None, firstName=None, lastName=None, avatar=None, orgId=None, roles=None, licenses=None, **request_parameters):\n check_type(emails, list, may_be_none=False)\n check_type(displayName, basestring)\n check_type(firstName, basestring)\n check_type(lastName, basestring)\n check_type(avatar, basestring)\n check_type(orgId, basestring)\n check_type(roles, list)\n check_type(licenses, list)\n post_data = dict_from_items_with_values(request_parameters, emails=emails, displayName=displayName, firstName=firstName, lastName=lastName, avatar=avatar, orgId=orgId, roles=roles, licenses=licenses)\n json_data = self._session.post(API_ENDPOINT, json=post_data)\n return self._object_factory(OBJECT_TYPE, json_data)", "docstring": "Create a new user account for a given organization\n\nOnly an admin can create a new user account.\n\nArgs:\nemails(`list`): Email address(es) of the person (list of strings).\ndisplayName(basestring): Full name of the person.\nfirstName(basestring): First name of the person.\nlastName(basestring): Last name of the person.\navatar(basestring): URL to the person's avatar in PNG format.\norgId(basestring): ID of the organization to which this\nperson belongs.\nroles(`list`): Roles of the person (list of strings containing\nthe role IDs to be assigned to the person).\nlicenses(`list`): Licenses allocated to the person (list of\nstrings - containing the license IDs to be allocated to the\nperson).\n**request_parameters: Additional request parameters (provides\nsupport for parameters that may be added in the future).\n\nReturns:\nPerson: A Person object with the details of the created person.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"} {"code": "def AddSerializedFile(self, serialized_file_desc_proto):\n \n\n \n from google.protobuf import descriptor_pb2\n file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(\n serialized_file_desc_proto)\n self.Add(file_desc_proto)", "docstring": "Adds the FileDescriptorProto and its types to this pool.\n\nArgs:\nserialized_file_desc_proto: A bytes string, serialization of the\nFileDescriptorProto to add.", "source": "juraj-google-style"} {"code": "def random_square_mask(shape, fraction):\n \n\n mask = np.ones(shape)\n\n patch_area = shape[0]*shape[1]*fraction\n patch_dim = np.int(math.floor(math.sqrt(patch_area)))\n if patch_area == 0 or patch_dim == 0:\n return mask\n\n x = np.random.randint(shape[0] - patch_dim)\n y = np.random.randint(shape[1] - patch_dim)\n\n mask[x:(x + patch_dim), y:(y + patch_dim), 
:] = 0\n\n return mask", "docstring": "Create a numpy array with specified shape and masked fraction.\n\nArgs:\nshape: tuple, shape of the mask to create.\nfraction: float, fraction of the mask area to populate with `mask_scalar`.\n\nReturns:\nnumpy.array: A numpy array storing the mask.", "source": "juraj-google-style"} {"code": "def save_data(self, filename):\n \n with zopen(filename, \"wt\") as f:\n json.dump(list(self._data), f, cls=MontyEncoder)", "docstring": "Save the assimilated data to a file.\n\nArgs:\nfilename (str): filename to save the assimilated data to. Note\nthat if the filename ends with gz or bz2, the relevant gzip\nor bz2 compression will be applied.", "source": "juraj-google-style"} {"code": "class AllVote(LabelAggregation):\n\n def __init__(self, **kwargs):\n\n def inner(predictions: Iterable[int]) -> int:\n return self._outlier_label if all(map(lambda p: p == self._outlier_label, predictions)) else self._normal_label\n super().__init__(agg_func=inner, **kwargs)", "docstring": "Aggregates anomaly labels using an \"all vote\" (AND) scheme.\n\nThis `AggregationFn` implements an \"all vote\" strategy. It aggregates\nanomaly labels such that the result is considered an outlier only if all\ninput `AnomalyPrediction` objects are labeled as outliers.\n\nExample:\nIf input labels are [outlier, outlier, outlier], and outlier_label=1,\nthen the aggregated label will be outlier (1).\nIf input labels are [outlier, normal, outlier], and outlier_label=1,\nthen the aggregated label will be normal (0).\n\nArgs:\nnormal_label (int): The integer label for normal predictions. Defaults to 0.\noutlier_label (int): The integer label for outlier predictions. Defaults to\n1.\n**kwargs: Additional keyword arguments to pass to the base\n`LabelAggregation` class.", "source": "github-repos"} {"code": "def _find_and_cache_best_function(self, dispatch_type):\n result = self._dispatch_table.get(dispatch_type)\n if result:\n return result\n with self._write_lock:\n try:\n dispatch_mro = dispatch_type.mro()\n except TypeError:\n dispatch_mro = ()\n best_match = None\n result_type = None\n for (candidate_type, candidate_func) in self.implementations:\n if (not issubclass(dispatch_type, candidate_type)):\n continue\n try:\n match = dispatch_mro.index(candidate_type)\n except ValueError:\n match = None\n if (best_match is None):\n if (result and (match is None)):\n if self._preferred(candidate_type, over=result_type):\n result = candidate_func\n result_type = candidate_type\n elif self._preferred(result_type, over=candidate_type):\n pass\n else:\n raise TypeError(('Two candidate implementations found for multimethod function %s (dispatch type %s) and neither is preferred.' % (self.func_name, dispatch_type)))\n else:\n result = candidate_func\n result_type = candidate_type\n best_match = match\n if ((match or 0) < (best_match or 0)):\n result = candidate_func\n result_type = candidate_type\n best_match = match\n self._dispatch_table[dispatch_type] = result\n return result", "docstring": "Finds the best implementation of this function given a type.\n\nThis function caches the result, and uses locking for thread safety.\n\nReturns:\nImplementing function, in below order of preference:\n1. Explicitly registered implementations (through\nmultimethod.implement) for types that 'dispatch_type' either is\nor inherits from directly.\n2. 
Explicitly registered implementations accepting an abstract type\n(interface) in which dispatch_type participates (through\nabstract_type.register() or the convenience methods).\n3. Default behavior of the multimethod function. This will usually\nraise a NotImplementedError, by convention.\n\nRaises:\nTypeError: If two implementing functions are registered for\ndifferent abstract types, and 'dispatch_type' participates in\nboth, and no order of preference was specified using\nprefer_type.", "source": "codesearchnet"} {"code": "def _process_skipability(self, feed_item, item):\n if feed_item.get(FieldMap.PLACEMENT_SKIPPABLE, False):\n if not 'videoSettings' in item:\n item['videoSettings'] = {}\n item['videoSettings']['skippableSettings'] = {'skippable': feed_item.get(FieldMap.PLACEMENT_SKIPPABLE, False), 'skipOffset': {}, 'progressOffset': {}}\n skippable_settings = item['videoSettings']['skippableSettings']\n if feed_item.get(FieldMap.PLACEMENT_SKIP_OFFSET_SECONDS, None):\n skippable_settings['skipOffset']['offsetSeconds'] = feed_item.get(FieldMap.PLACEMENT_SKIP_OFFSET_SECONDS, None)\n if feed_item.get(FieldMap.PLACEMENT_SKIP_OFFSET_PERCENTAGE, None):\n skippable_settings['skipOffset']['offsetPercentage'] = feed_item.get(FieldMap.PLACEMENT_SKIP_OFFSET_PERCENTAGE, None)\n if feed_item.get(FieldMap.PLACEMENT_PROGRESS_OFFSET_SECONDS, None):\n skippable_settings['progressOffset']['offsetSeconds'] = feed_item.get(FieldMap.PLACEMENT_SKIP_OFFSET_SECONDS, None)\n if feed_item.get(FieldMap.PLACEMENT_PROGRESS_OFFSET_PERCENTAGE, None):\n skippable_settings['progressOffset']['offsetPercentage'] = feed_item.get(FieldMap.PLACEMENT_SKIP_OFFSET_PERCENTAGE, None)\n elif 'skippableSettings' in item and 'videoSettings' in item:\n del item['videoSettings']['skippableSettings']", "docstring": "Process skipability settings.\n\nArgs:\nfeed_item: A feed item representing a placement from the bulkdozer feed;\nitem: A campaign manager placement object to be updated with the\nskipability settings defined in the feed item", "source": "github-repos"} {"code": "def _append_defects(self, part, part_content_type):\n \n\n part_defects = {}\n\n for e in part.defects:\n defects = \"{}: {}\".format(e.__class__.__name__, e.__doc__)\n self._defects_categories.add(e.__class__.__name__)\n part_defects.setdefault(part_content_type, []).append(defects)\n log.debug(\"Added defect {!r}\".format(defects))\n\n \n if part_defects:\n self._has_defects = True\n\n \n self._defects.append(part_defects)", "docstring": "Add new defects and defects categories to object attributes.\n\nThe defects are a list of all the problems found\nwhen parsing this message.\n\nArgs:\npart (string): mail part\npart_content_type (string): content type of part", "source": "juraj-google-style"} {"code": "def triggered(self):\n trigs = [x[1].triggered(x[0]) for x in self.inputs]\n if (self.trigger_combiner == self.OrTriggerCombiner):\n return (True in trigs)\n return (False not in trigs)", "docstring": "Test if we should trigger our operation.\n\nWe test the trigger condition on each of our inputs and then\ncombine those triggers using our configured trigger combiner\nto get an overall result for whether this node is triggered.\n\nReturns:\nbool: True if we should trigger and False otherwise", "source": "codesearchnet"} {"code": "def _wrap_result(self, response):\n \n if isinstance(response, int):\n response = self._wrap_response(response)\n\n return HandlerResult(\n status=HandlerStatus.RETURN,\n message_out=self._response_proto(**response),\n 
message_type=self._response_type)", "docstring": "Wraps child's response in a HandlerResult to be sent back to client.\n\nArgs:\nresponse (enum or dict): Either an integer status enum, or a dict\nof attributes to be added to the protobuf response.", "source": "juraj-google-style"} {"code": "def create_explicit(bounds):\n \n safe_bounds = sorted(float(x) for x in bounds)\n if len(safe_bounds) != len(set(safe_bounds)):\n raise ValueError(u'Detected two elements of bounds that are the same')\n return sc_messages.Distribution(\n bucketCounts=[0] * (len(safe_bounds) + 1),\n explicitBuckets=sc_messages.ExplicitBuckets(bounds=safe_bounds))", "docstring": "Creates a new instance of distribution with explicit buckets.\n\nbounds is an iterable of ordered floats that define the explicit buckets\n\nArgs:\nbounds (iterable[float]): initializes the bounds\n\nReturn:\n:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`\n\nRaises:\nValueError: if the args are invalid for creating an instance", "source": "juraj-google-style"} {"code": "def _VerifyValues(self, input_sizes=None, filter_sizes=None, strides=None, dilations=None, padding=None, data_format_src='NDHWC', data_format_dst='NDHWC', expected=None, op_name='Conv3D'):\n total_size_1 = np.prod(input_sizes)\n total_size_2 = np.prod(filter_sizes)\n x1 = np.reshape([f * 1.0 / total_size_1 for f in range(1, total_size_1 + 1)], input_sizes)\n x2 = np.reshape([f * 1.0 / total_size_2 for f in range(1, total_size_2 + 1)], filter_sizes)\n strides = [1] + strides + [1]\n if dilations is None:\n dilations = [1, 1, 1]\n dilations = [1] + dilations + [1]\n expected = test_utils.ConvertBetweenDataFormats(expected, data_format_src, data_format_dst)\n x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src, data_format_dst)\n input_sizes = test_utils.PermuteDimsBetweenDataFormats(input_sizes, data_format_src, data_format_dst)\n strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src, data_format_dst)\n dilations = test_utils.PermuteDimsBetweenDataFormats(dilations, data_format_src, data_format_dst)\n with self.session() as sess:\n t1 = array_ops.placeholder(dtypes.bfloat16, shape=input_sizes)\n t2 = array_ops.placeholder(dtypes.bfloat16, shape=filter_sizes)\n with self.test_scope():\n if op_name == 'Conv':\n conv_format = 'CHANNELS_LAST' if data_format_dst == 'NDHWC' else 'CHANNELS_FIRST'\n out = gen_nn_ops.conv(t1, t2, strides=strides, padding=padding, data_format=conv_format, dilations=dilations)\n elif op_name == 'Conv3D':\n out = nn_ops.conv3d(t1, t2, strides=strides, padding=padding, data_format=data_format_dst, dilations=dilations)\n else:\n raise ValueError('Invalid op name: %s' % op_name)\n value = sess.run(out, {t1: x1, t2: x2})\n self.assertAllCloseAccordingToType(expected, value)", "docstring": "Tests that tf.nn.conv3d produces the expected value.\n\nArgs:\ninput_sizes: Input tensor dimensions in [batch, input_rows, input_cols,\ninput_depth].\nfilter_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,\ninput_depth, output_depth].\nstrides: Strides.\ndilations: RHS dilations.\npadding: Padding type.\ndata_format_src: Data format input is in.\ndata_format_dst: Data format verification will run and input is converted\nto.\nexpected: Expected output.\nop_name: Name of operation to test (Conv/Conv2D)", "source": "github-repos"} {"code": "def auditlog(*, event, actor, data, level=logging.INFO):\n \n try:\n entry = AuditLog()\n entry.event = event\n entry.actor = actor\n entry.data = data\n\n 
db.session.add(entry)\n db.session.commit()\n\n _AUDIT_LOGGER.log(\n logging.getLevelName(level) if type(level) == str else level,\n {\n 'event': event,\n 'actor': actor,\n 'data': data,\n }\n )\n\n except Exception:\n logging.getLogger(__name__).exception('Failed adding audit log event')\n db.session.rollback()", "docstring": "Generate and insert a new event\n\nArgs:\nevent (`str`): Action performed\nactor (`str`): Actor (user or subsystem) triggering the event\ndata (`dict`): Any extra data necessary for describing the event\nlevel (`str` or `int`): Log level for the message. Uses standard python logging level names / numbers\n\nReturns:\n`None`", "source": "juraj-google-style"} {"code": "def ParseMessageRow(self, parser_mediator, query, row, **unused_kwargs):\n \n query_hash = hash(query)\n\n event_data = KikIOSMessageEventData()\n event_data.body = self._GetRowValue(query_hash, row, 'ZBODY')\n event_data.displayname = self._GetRowValue(query_hash, row, 'ZDISPLAYNAME')\n event_data.message_status = self._GetRowValue(query_hash, row, 'ZSTATE')\n event_data.message_type = self._GetRowValue(query_hash, row, 'ZTYPE')\n event_data.offset = self._GetRowValue(query_hash, row, 'id')\n event_data.query = query\n event_data.username = self._GetRowValue(query_hash, row, 'ZUSERNAME')\n\n timestamp = self._GetRowValue(query_hash, row, 'ZRECEIVEDTIMESTAMP')\n \n timestamp = int(timestamp)\n date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_CREATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a message row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "juraj-google-style"} {"code": "def convert(model_path: str, out_file: str):\n print('Converting', model_path, 'to', out_file, '...')\n import tensorflow as tf\n from precise.model import load_precise_model\n from keras import backend as K\n (out_dir, filename) = split(out_file)\n out_dir = (out_dir or '.')\n os.makedirs(out_dir, exist_ok=True)\n K.set_learning_phase(0)\n model = load_precise_model(model_path)\n out_name = 'net_output'\n tf.identity(model.output, name=out_name)\n print('Output node name:', out_name)\n print('Output folder:', out_dir)\n sess = K.get_session()\n tf.train.write_graph(sess.graph.as_graph_def(), out_dir, (filename + 'txt'), as_text=True)\n print('Saved readable graph to:', (filename + 'txt'))\n from tensorflow.python.framework import graph_util\n from tensorflow.python.framework import graph_io\n cgraph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), [out_name])\n graph_io.write_graph(cgraph, out_dir, filename, as_text=False)\n if isfile((model_path + '.params')):\n copyfile((model_path + '.params'), (out_file + '.params'))\n print('Saved graph to:', filename)\n del sess", "docstring": "Converts an HD5F file from Keras to a .pb for use with TensorFlow\n\nArgs:\nmodel_path: location of Keras model\nout_file: location to write protobuf", "source": "codesearchnet"} {"code": "def reminder_date(self, reminder_date):\n \n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n\n reminder_date = self._utils.format_datetime(reminder_date, date_format='%Y-%m-%dT%H:%M:%SZ')\n self._data['reminderDate'] = reminder_date\n request = {'reminderDate': reminder_date}\n return 
self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)", "docstring": "Sets the task reminder_date\nArgs:\nreminder_date: Converted to %Y-%m-%dT%H:%M:%SZ date format", "source": "juraj-google-style"} {"code": "def get_new_doctest_files(repo, base_commit, branching_commit) -> List[str]:\n for diff_obj in branching_commit.diff(base_commit):\n if diff_obj.a_path != 'utils/not_doctested.txt':\n continue\n folder = Path(repo.working_dir)\n with checkout_commit(repo, branching_commit):\n with open(folder / 'utils/not_doctested.txt', 'r', encoding='utf-8') as f:\n old_content = f.read()\n with open(folder / 'utils/not_doctested.txt', 'r', encoding='utf-8') as f:\n new_content = f.read()\n removed_content = {x.split(' ')[0] for x in old_content.split('\\n')} - {x.split(' ')[0] for x in new_content.split('\\n')}\n return sorted(removed_content)\n return []", "docstring": "Get the list of files that were removed from \"utils/not_doctested.txt\", between `base_commit` and\n`branching_commit`.\n\nReturns:\n`List[str]`: List of files that were removed from \"utils/not_doctested.txt\".", "source": "github-repos"} {"code": "def get_point(\n self, x: float = 0, y: float = 0, z: float = 0, w: float = 0\n ) -> float:\n \n return float(lib.NoiseGetSample(self._tdl_noise_c, (x, y, z, w)))", "docstring": "Return the noise value at the (x, y, z, w) point.\n\nArgs:\nx (float): The position on the 1st axis.\ny (float): The position on the 2nd axis.\nz (float): The position on the 3rd axis.\nw (float): The position on the 4th axis.", "source": "juraj-google-style"} {"code": "def decode_list(cls, obj, element_type):\n \n \n if not isinstance(obj, list):\n raise Exception(\"expected a python list\")\n\n return list(map(lambda x: cls.do_decode(x, element_type), obj))", "docstring": "Decodes json into a list, handling conversion of the elements.\n\nArgs:\nobj: the json object to decode\nelement_type: a class object which is the conjure type of\nthe elements in this list.\nReturns:\nA python list where the elements are instances of type\nelement_type.", "source": "juraj-google-style"} {"code": "def ParseEnum(field, value):\n \n enum_descriptor = field.enum_type\n try:\n number = int(value, 0)\n except ValueError:\n \n enum_value = enum_descriptor.values_by_name.get(value, None)\n if enum_value is None:\n raise ValueError('Enum type \"%s\" has no value named %s.' %\n (enum_descriptor.full_name, value))\n else:\n \n enum_value = enum_descriptor.values_by_number.get(number, None)\n if enum_value is None:\n raise ValueError('Enum type \"%s\" has no value with number %d.' 
%\n (enum_descriptor.full_name, number))\n return enum_value.number", "docstring": "Parse an enum value.\n\nThe value can be specified by a number (the enum value), or by\na string literal (the enum name).\n\nArgs:\nfield: Enum field descriptor.\nvalue: String value.\n\nReturns:\nEnum value number.\n\nRaises:\nValueError: If the enum value could not be parsed.", "source": "juraj-google-style"} {"code": "def _write_init_models(self, filenames):\n self.write(destination=self.output_directory, filename='__init__.py', template_name='__init_model__.py.tpl', filenames=self._prepare_filenames(filenames), class_prefix=self._class_prefix, product_accronym=self._product_accronym, header=self.header_content)", "docstring": "Write init file\n\nArgs:\nfilenames (dict): dict of filename and classes", "source": "codesearchnet"} {"code": "def run_inside_wrap_function_in_eager_mode(graph_function):\n\n def wrap_and_execute(self):\n if context.executing_eagerly():\n wrapped = wrap_function.wrap_function(graph_function, [self])\n wrapped()\n else:\n graph_function(self)\n return wrap_and_execute", "docstring": "Decorator to execute the same graph code in eager and graph modes.\n\nIn graph mode, we just execute the graph_function passed as argument. In eager\nmode, we wrap the function using wrap_function and then execute the wrapped\nresult.\n\nArgs:\ngraph_function: python function containing graph code to be wrapped\n\nReturns:\ndecorated function", "source": "github-repos"} {"code": "def _handle_captcha(captcha_data, message=''):\n from tempfile import NamedTemporaryFile\n tmpf = NamedTemporaryFile(suffix='.png')\n tmpf.write(captcha_data)\n tmpf.flush()\n captcha_text = input(('Please take a look at the captcha image \"%s\" and provide the code:' % tmpf.name))\n tmpf.close()\n return captcha_text", "docstring": "Called when a captcha must be solved\nWrites the image to a temporary file and asks the user to enter the code.\n\nArgs:\ncaptcha_data: Bytestring of the PNG captcha image.\nmessage: Optional. A message from Steam service.\n\nReturns:\nA string containing the solved captcha code.", "source": "codesearchnet"} {"code": "def get_filename_by_suffixes(dir_src, suffixes):\n list_files = os.listdir(dir_src)\n re_files = list()\n if is_string(suffixes):\n suffixes = [suffixes]\n if (not isinstance(suffixes, list)):\n return None\n for (i, suf) in enumerate(suffixes):\n if ((len(suf) >= 1) and (suf[0] != '.')):\n suffixes[i] = ('.' 
+ suf)\n for f in list_files:\n (name, ext) = os.path.splitext(f)\n if StringClass.string_in_list(ext, suffixes):\n re_files.append(f)\n return re_files", "docstring": "get file names with the given suffixes in the given directory\n\nArgs:\ndir_src: directory path\nsuffixes: wanted suffixes list, the suffix in suffixes can with or without '.'\n\nReturns:\nfile names with the given suffixes as list", "source": "codesearchnet"} {"code": "def change_kernel(self, kernel, return_dict=True):\n \n if type(kernel) != Kernel:\n raise BadKernelObject(\"Use Kernel object\")\n\n return self._perform_action(\n {'type': 'change_kernel', 'kernel': kernel.id},\n return_dict\n )", "docstring": "Change the kernel to a new one\n\nArgs:\nkernel : instance of digitalocean.Kernel.Kernel\n\nOptional Args:\nreturn_dict (bool): Return a dict when True (default),\notherwise return an Action.\n\nReturns dict or Action", "source": "juraj-google-style"} {"code": "def init_cache(self, batch_size, max_length):\n input_ids = jnp.ones((batch_size, max_length), dtype='i4')\n attention_mask = jnp.ones_like(input_ids, dtype='i4')\n position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)\n init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True)\n return unfreeze(init_variables['cache'])", "docstring": "Args:\nbatch_size (`int`):\nbatch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.\nmax_length (`int`):\nmaximum possible length for auto-regressive decoding. Defines the sequence length of the initialized\ncache.", "source": "github-repos"} {"code": "def __init__(self, actions=None):\n \n super().__init__(InstructionType.OFPIT_APPLY_ACTIONS)\n self.actions = actions if actions else []", "docstring": "Create a InstructionApplyAction with the optional parameters below.\n\nArgs:\nactions (:class:`~.actions.ListOfActions`):\nActions associated with OFPIT_APPLY_ACTIONS.", "source": "juraj-google-style"} {"code": "def _extract_files(archive_path):\n extract_dir = tempfile.mkdtemp()\n\n def _extract_files_cleanup():\n shutil.rmtree(extract_dir, ignore_errors=True)\n atexit.register(_extract_files_cleanup)\n _log('\n zip_file = zipfile.ZipFile(archive_path, mode='r')\n zip_file.extractall(extract_dir)\n zip_file.close()\n return extract_dir", "docstring": "Extract the contents of this .par file to disk.\n\nThis creates a temporary directory, and registers an atexit\nhandler to clean that directory on program exit. 
Extraction and\ncleanup will potentially use significant time and disk space.\n\nReturns:\nDirectory where contents were extracted to.", "source": "github-repos"} {"code": "def basis(sample_paths: types.RealTensor, time_index: types.IntTensor) -> types.RealTensor:\n sample_paths = tf.convert_to_tensor(sample_paths, name='sample_paths')\n if sample_paths.shape.rank == 3:\n sample_paths = tf.expand_dims(sample_paths, axis=0)\n shape = tf.shape(sample_paths)\n num_samples = shape[1]\n batch_size = shape[0]\n dim = sample_paths.shape[-1]\n slice_samples = tf.slice(sample_paths, [0, 0, time_index, 0], [batch_size, num_samples, 1, dim])\n samples_centered = slice_samples - tf.math.reduce_mean(slice_samples, axis=1, keepdims=True)\n grid = tf.range(degree + 1, dtype=samples_centered.dtype)\n grid = tf.meshgrid(*dim * [grid])\n grid = tf.reshape(tf.stack(grid, -1), [-1, dim])\n basis_expansion = tf.reduce_prod(samples_centered ** grid, axis=-1)\n return tf.transpose(basis_expansion, [0, 2, 1])", "docstring": "Computes polynomial basis expansion at the given sample points.\n\nArgs:\nsample_paths: A `Tensor` of either `flaot32` or `float64` dtype and of\neither shape `[num_samples, num_times, dim]` or\n`[batch_size, num_samples, num_times, dim]`.\ntime_index: An integer scalar `Tensor` that corresponds to the time\ncoordinate at which the basis function is computed.\n\nReturns:\nA `Tensor`s of shape `[batch_size, (degree + 1)**dim, num_samples]`.", "source": "github-repos"} {"code": "def __init__(self, solution_size, population_size=20):\n \n super(StandardOptimizer, self).__init__()\n\n \n self._solution_size = solution_size\n self._population_size = population_size\n\n \n self._hyperparameters['_population_size'] = {\n 'type': 'int',\n 'min': 2,\n 'max': 1026\n }", "docstring": "Initialize general optimization attributes and bookkeeping\n\nArgs:\nsolution_size: The number of values in each solution.\npopulation_size: The number of solutions in every generation.", "source": "juraj-google-style"} {"code": "def _EuclidianDistances(self, slist):\n e_dists2 = [transitfeed.ApproximateDistanceBetweenStops(stop, tail) for (stop, tail) in itertools.izip(slist, slist[1:])]\n return e_dists2", "docstring": "Calculate euclidian distances between stops.\n\nUses the stoplists long/lats to approximate distances\nbetween stations and build a list with y-coordinates for the\nhorizontal lines in the graph.\n\nArgs:\n# Class Stop is defined in transitfeed.py\nstoplist: [Stop, Stop, ...]\n\nReturns:\n# One integer for each pair of stations\n# indicating the approximate distance\n[0,33,140, ... ,X]", "source": "codesearchnet"} {"code": "def push(self, filename, data):\n \n self._queue.put(Chunk(filename, data))", "docstring": "Push a chunk of a file to the streaming endpoint.\n\nArgs:\nfilename: Name of file that this is a chunk of.\nchunk_id: TODO: change to 'offset'\nchunk: File data.", "source": "juraj-google-style"} {"code": "def to_timestamp(dt, timestamp):\n if dt.tzinfo:\n raise TypeError('Cannot store a timezone aware datetime. 
Convert to UTC and store the naive datetime.')\n timestamp.seconds = calendar.timegm(dt.timetuple())\n timestamp.nanos = (dt.microsecond * _NANOS_PER_MICRO)", "docstring": "Convert datetime to google.protobuf.Timestamp.\n\nArgs:\ndt: a timezone naive datetime.\ntimestamp: a google.protobuf.Timestamp to populate.\n\nRaises:\nTypeError: if a timezone aware datetime was provided.", "source": "codesearchnet"} {"code": "def AddFile(self, fd, external=True):\n files_for_write = []\n for sub_store in self.GetChildrenByPriority(allow_external=external):\n new_file = sub_store.AddFile(fd)\n if new_file:\n files_for_write.append(new_file)\n fd.Seek(0)\n while files_for_write:\n data = fd.Read(self.CHUNK_SIZE)\n if (not data):\n break\n for child in files_for_write:\n child.Write(data)\n for child in files_for_write:\n child.Close()", "docstring": "Create a new file in the file store.\n\nWe delegate the actual file addition to our contained\nimplementations. Implementations can either implement the AddFile() method,\nreturning a file like object which will be written on, or directly support\nthe AddBlobToStore() method which can copy the VFSBlobImage efficiently.\n\nArgs:\nfd: An AFF4 object open for read/write.\nexternal: If true, attempt to add files to stores defined as EXTERNAL.", "source": "codesearchnet"} {"code": "def sort(self, cmp=None, key=None, reverse=False):\n\n def _DefaultKey(value):\n 'Default key func is to create a list of all fields.'\n result = []\n for key in self.header:\n try:\n result.append(float(value[key]))\n except ValueError:\n result.append(value[key])\n return result\n key = (key or _DefaultKey)\n new_table = self._table[1:]\n if (cmp is not None):\n key = cmp_to_key(cmp)\n new_table.sort(key=key, reverse=reverse)\n self._table = [self.header]\n self._table.extend(new_table)\n for (index, row) in enumerate(self._table):\n row.row = index", "docstring": "Sorts rows in the texttable.\n\nArgs:\ncmp: func, non default sort algorithm to use.\nkey: func, applied to each element before sorting.\nreverse: bool, reverse order of sort.", "source": "codesearchnet"} {"code": "def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n \n super(Certificate, self).read(istream, kmip_version=kmip_version)\n tstream = BytearrayStream(istream.read(self.length))\n\n self.certificate_type = CertificateType()\n self.certificate_value = CertificateValue()\n\n self.certificate_type.read(tstream, kmip_version=kmip_version)\n self.certificate_value.read(tstream, kmip_version=kmip_version)\n\n self.is_oversized(tstream)", "docstring": "Read the data encoding the Certificate object and decode it into its\nconstituent parts.\n\nArgs:\nistream (Stream): A data stream containing encoded object data,\nsupporting a read method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"} {"code": "def _letter_map(word):\n lmap = {}\n for letter in word:\n try:\n lmap[letter] += 1\n except KeyError:\n lmap[letter] = 1\n return lmap", "docstring": "Creates a map of letter use in a word.\n\nArgs:\nword: a string to create a letter map from\n\nReturns:\na dictionary of {letter: integer count of letter in word}", "source": "codesearchnet"} {"code": "def __init__(self, options, log):\n self.options = options\n self.log = log\n self.compile_pattern()", "docstring": "Initializer. 
Subclass may override.\n\nArgs:\noptions: a dict containing the options passed to RefactoringTool\nthat could be used to customize the fixer through the command line.\nlog: a list to append warnings and other messages to.", "source": "github-repos"} {"code": "def get_phonopy_structure(pmg_structure):\n \n\n symbols = [site.specie.symbol for site in pmg_structure]\n\n return PhonopyAtoms(symbols=symbols, cell=pmg_structure.lattice.matrix,\n scaled_positions=pmg_structure.frac_coords)", "docstring": "Convert a pymatgen Structure object to a PhonopyAtoms object.\n\nArgs:\npmg_structure (pymatgen Structure): A Pymatgen structure object.", "source": "juraj-google-style"} {"code": "def retry_auth_check(exception):\n if isinstance(exception, apiclient.errors.HttpError):\n if (exception.resp.status in HTTP_AUTH_ERROR_CODES):\n _print_error('Retrying...')\n return True\n return False", "docstring": "Specific check for auth error codes.\n\nReturn True if we should retry.\n\nFalse otherwise.\nArgs:\nexception: An exception to test for transience.\n\nReturns:\nTrue if we should retry. False otherwise.", "source": "codesearchnet"} {"code": "def generate(self, cache_root):\n generator_cwd = os.path.join(cache_root, 'generated', self.vlnv.sanitized_name)\n generator_input_file = os.path.join(generator_cwd, (self.name + '_input.yml'))\n logger.info(('Generating ' + str(self.vlnv)))\n if (not os.path.exists(generator_cwd)):\n os.makedirs(generator_cwd)\n with open(generator_input_file, 'w') as f:\n f.write(yaml.dump(self.generator_input))\n args = [os.path.join(os.path.abspath(self.generator.root), self.generator.command), generator_input_file]\n if self.generator.interpreter:\n args[0:0] = [self.generator.interpreter]\n Launcher(args[0], args[1:], cwd=generator_cwd).run()\n cores = []\n logger.debug(('Looking for generated cores in ' + generator_cwd))\n for (root, dirs, files) in os.walk(generator_cwd):\n for f in files:\n if f.endswith('.core'):\n try:\n cores.append(Core(os.path.join(root, f)))\n except SyntaxError as e:\n w = ((('Failed to parse generated core file ' + f) + ': ') + e.msg)\n raise RuntimeError(w)\n logger.debug(('Found ' + ', '.join((str(c.name) for c in cores))))\n return cores", "docstring": "Run a parametrized generator\n\nArgs:\ncache_root (str): The directory where to store the generated cores\n\nReturns:\nlist: Cores created by the generator", "source": "codesearchnet"} {"code": "def _normalize_direction(heading: int) -> int:\n \n while heading > 359:\n heading = int(heading - 359)\n while heading < 0:\n heading = int(heading + 359)\n return heading", "docstring": "Make sure that 0 < heading < 360\n\nArgs:\nheading: base heading\n\nReturns: corrected heading", "source": "juraj-google-style"} {"code": "def compute_actor_handle_id(actor_handle_id, num_forks):\n \n assert isinstance(actor_handle_id, ActorHandleID)\n handle_id_hash = hashlib.sha1()\n handle_id_hash.update(actor_handle_id.binary())\n handle_id_hash.update(str(num_forks).encode(\"ascii\"))\n handle_id = handle_id_hash.digest()\n return ActorHandleID(handle_id)", "docstring": "Deterministically compute an actor handle ID.\n\nA new actor handle ID is generated when it is forked from another actor\nhandle. 
The new handle ID is computed as hash(old_handle_id || num_forks).\n\nArgs:\nactor_handle_id (common.ObjectID): The original actor handle ID.\nnum_forks: The number of times the original actor handle has been\nforked so far.\n\nReturns:\nAn ID for the new actor handle.", "source": "juraj-google-style"} {"code": "def wait_key(keys=None):\n if is_a_tty():\n if keys:\n if (not isinstance(keys, tuple)):\n keys = (keys,)\n while True:\n key = _getch()\n if (key in keys):\n return key\n else:\n return _getch()", "docstring": "Waits for a keypress at the console and returns it.\n\"Where's the any key?\"\n\nArguments:\nkeys - if passed, wait for this specific key, e.g. ESC.\nmay be a tuple.\nReturns:\nchar or ESC - depending on key hit.\nNone - immediately under i/o redirection, not an interactive tty.", "source": "codesearchnet"} {"code": "def _encode(self, tokens: List[str], mean: bool) -> Union[(List[np.ndarray], np.ndarray)]:\n embedded_tokens = []\n for t in tokens:\n try:\n emb = self.tok2emb[t]\n except KeyError:\n try:\n emb = self._get_word_vector(t)\n except KeyError:\n emb = np.zeros(self.dim, dtype=np.float32)\n self.tok2emb[t] = emb\n embedded_tokens.append(emb)\n if (mean is None):\n mean = self.mean\n if mean:\n filtered = [et for et in embedded_tokens if np.any(et)]\n if filtered:\n return np.mean(filtered, axis=0)\n return np.zeros(self.dim, dtype=np.float32)\n return embedded_tokens", "docstring": "Embed one text sample\n\nArgs:\ntokens: tokenized text sample\nmean: whether to return mean embedding of tokens per sample\n\nReturns:\nlist of embedded tokens or array of mean values", "source": "codesearchnet"} {"code": "def send_emote(self, room_id, text_content, timestamp=None):\n \n return self.send_message_event(\n room_id, \"m.room.message\",\n self.get_emote_body(text_content),\n timestamp=timestamp\n )", "docstring": "Perform PUT /rooms/$room_id/send/m.room.message with m.emote msgtype\n\nArgs:\nroom_id (str): The room ID to send the event in.\ntext_content (str): The m.emote body to send.\ntimestamp (int): Set origin_server_ts (For application services only)", "source": "juraj-google-style"} {"code": "def _EncodeString(self, string):\n try:\n encoded_string = string.encode(self._encoding, errors=self._errors)\n except UnicodeEncodeError:\n if (self._errors == 'strict'):\n logging.error('Unable to properly write output due to encoding error. Switching to error tolerant encoding which can result in non Basic Latin (C0) characters to be replaced with \"?\" or \"\\\\ufffd\".')\n self._errors = 'replace'\n encoded_string = string.encode(self._encoding, errors=self._errors)\n return encoded_string", "docstring": "Encodes the string.\n\nArgs:\nstring (str): string to encode.\n\nReturns:\nbytes: encoded string.", "source": "codesearchnet"} {"code": "def representative_batch_size(self):\n return self.batch_size()", "docstring": "Return a representative size for batches in the dataset.\n\nThis is not guaranteed to be the batch size for all batches in the\ndataset. 
It just needs to be a rough approximation for batch sizes in\nthe dataset.\n\nReturns:\nint, a representative size for batches found in the dataset,\nor None if it is unknown.", "source": "github-repos"} {"code": "def get_model_objects(model, wfi_role=None, **kwargs):\n query_dict = {}\n for (k, v) in kwargs.items():\n if isinstance(v, list):\n query_dict[k] = [str(x) for x in v]\n else:\n parse = str(v).split('.')\n if ((parse[0] == 'role') and wfi_role):\n query_dict[k] = wfi_role\n for i in range(1, len(parse)):\n query_dict[k] = query_dict[k].__getattribute__(parse[i])\n else:\n query_dict[k] = parse[0]\n return model.objects.all(**query_dict)", "docstring": "Fetches model objects by filtering with kwargs\n\nIf wfi_role is specified, then we expect kwargs contains a\nfilter value starting with role,\n\ne.g. {'user': 'role.program.user'}\n\nWe replace this `role` key with role instance parameter `wfi_role` and try to get\nobject that filter value 'role.program.user' points by iterating `getattribute`. At\nthe end filter argument becomes {'user': user}.\n\nArgs:\nmodel (Model): Model class\nwfi_role (Role): role instance of wf instance\n**kwargs: filter arguments\n\nReturns:\n(list): list of model object instances", "source": "codesearchnet"} {"code": "def _validate_number_sequence(self, seq, n):\n if (seq is None):\n return np.zeros(n)\n if (len(seq) is n):\n try:\n l = [float(e) for e in seq]\n except ValueError:\n raise ValueError((('One or more elements in sequence <' + repr(seq)) + '> cannot be interpreted as a real number'))\n else:\n return np.asarray(l)\n elif (len(seq) is 0):\n return np.zeros(n)\n else:\n raise ValueError((((('Unexpected number of elements in sequence. Got: ' + str(len(seq))) + ', Expected: ') + str(n)) + '.'))", "docstring": "Validate a sequence to be of a certain length and ensure it's a numpy array of floats.\n\nRaises:\nValueError: Invalid length or non-numeric value", "source": "codesearchnet"} {"code": "def write_json(self, path, contents, message):\n log.debug(message.format(path=path))\n makedirs(os.path.dirname(path))\n with open(path, 'w') as fh:\n json.dump(contents, fh, indent=2, sort_keys=True)", "docstring": "Write json to disk.\n\nArgs:\npath (str): the path to write to\ncontents (dict): the contents of the json blob\nmessage (str): the message to log", "source": "codesearchnet"} {"code": "def _GetLoadConfigTimestamp(self, pefile_object):\n \n if not hasattr(pefile_object, 'DIRECTORY_ENTRY_LOAD_CONFIG'):\n return None\n timestamp = getattr(\n pefile_object.DIRECTORY_ENTRY_LOAD_CONFIG.struct, 'TimeDateStamp', 0)\n return timestamp", "docstring": "Retrieves the timestamp from the Load Configuration directory.\n\nArgs:\npefile_object (pefile.PE): pefile object.\n\nReturns:\nint: load configuration timestamps or None if there are none present.", "source": "juraj-google-style"} {"code": "def diet_adam_optimizer_params():\n return hparam.HParams(quantize=True, quantization_scale=(10.0 / tf.int16.max), optimizer='DietAdam', learning_rate=1.0, learning_rate_warmup_steps=2000, learning_rate_decay_scheme='noam', epsilon=1e-10, beta1=0.0, beta2=0.98, factored_second_moment_accumulator=True)", "docstring": "Default hyperparameters for a DietAdamOptimizer.\n\nReturns:\na hyperparameters object.", "source": "codesearchnet"} {"code": "def use_gradient(grad_f):\n \n grad_f_name = register_to_random_name(grad_f)\n\n def function_wrapper(f):\n def inner(*inputs):\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n state = {\"out_value\": None}\n\n \n\n out = 
f(*inputs)\n\n def store_out(out_value):\n \n state[\"out_value\"] = out_value\n\n store_name = \"store_\" + f.__name__\n store = tf.py_func(store_out, [out], (), stateful=True, name=store_name)\n\n \n \n \n\n def mock_f(*inputs):\n \n return state[\"out_value\"]\n\n with tf.control_dependencies([store]):\n with gradient_override_map({\"PyFunc\": grad_f_name}):\n mock_name = \"mock_\" + f.__name__\n mock_out = tf.py_func(mock_f, inputs, out.dtype, stateful=True,\n name=mock_name)\n mock_out.set_shape(out.get_shape())\n\n \n\n return mock_out\n return inner\n return function_wrapper", "docstring": "Decorator for easily setting custom gradients for TensorFlow functions.\n\n* DO NOT use this function if you need to serialize your graph.\n* This function will cause the decorated function to run slower.\n\nExample:\n\ndef _foo_grad(op, grad): ...\n\n@use_gradient(_foo_grad)\ndef foo(x1, x2, x3): ...\n\nArgs:\ngrad_f: function to use as gradient.\n\nReturns:\nA decorator to apply to the function you wish to override the gradient of.", "source": "juraj-google-style"} {"code": "def write_input(self, output_dir='.', make_dir_if_not_present=True):\n if (make_dir_if_not_present and (not os.path.exists(output_dir))):\n os.makedirs(output_dir)\n for (k, v) in self.items():\n with zopen(os.path.join(output_dir, k), 'wt') as f:\n f.write(v.__str__())", "docstring": "Write VASP input to a directory.\n\nArgs:\noutput_dir (str): Directory to write to. Defaults to current\ndirectory (\".\").\nmake_dir_if_not_present (bool): Create the directory if not\npresent. Defaults to True.", "source": "codesearchnet"} {"code": "def get_patient_reference_element_paths(structdef: StructureDefinition) -> List[str]:\n results = []\n struct_id = cast(Any, structdef).id.value\n for elem in cast(Any, structdef).snapshot.element:\n for t in elem.type:\n for tp in t.target_profile:\n if tp.value.endswith('Patient'):\n results.append(elem.id.value[len(struct_id) + 1:])\n return results", "docstring": "Returns all the top level patient elements for a given Reference.\n\nArgs:\nstructdef: a FHIR StructureDefinition proto.\n\nReturns:\nA list of patients.", "source": "github-repos"} {"code": "def minhash(self, v):\n if (not isinstance(v, collections.Iterable)):\n raise TypeError('Input vector must be an iterable')\n if (not (len(v) == self.dim)):\n raise ValueError(('Input dimension mismatch, expecting %d' % self.dim))\n if (not isinstance(v, np.ndarray)):\n v = np.array(v, dtype=np.float32)\n elif (v.dtype != np.float32):\n v = v.astype(np.float32)\n hashvalues = np.zeros((self.sample_size, 2), dtype=np.int)\n vzeros = (v == 0)\n if vzeros.all():\n raise ValueError('Input is all zeros')\n v[vzeros] = np.nan\n vlog = np.log(v)\n for i in range(self.sample_size):\n t = np.floor(((vlog / self.rs[i]) + self.betas[i]))\n ln_y = ((t - self.betas[i]) * self.rs[i])\n ln_a = ((self.ln_cs[i] - ln_y) - self.rs[i])\n k = np.nanargmin(ln_a)\n (hashvalues[i][0], hashvalues[i][1]) = (k, int(t[k]))\n return WeightedMinHash(self.seed, hashvalues)", "docstring": "Create a new weighted MinHash given a weighted Jaccard vector.\nEach dimension is an integer\nfrequency of the corresponding element in the multi-set represented\nby the vector.\n\nArgs:\nv (numpy.array): The Jaccard vector.", "source": "codesearchnet"} {"code": "def __eval_validator_check(self, check_item, resp_obj):\n if (isinstance(check_item, (dict, list)) or isinstance(check_item, parser.LazyString)):\n check_value = self.eval_content(check_item)\n else:\n check_value = 
resp_obj.extract_field(check_item)\n return check_value", "docstring": "evaluate check item in validator.\n\nArgs:\ncheck_item: check_item should only be the following 5 formats:\n1, variable reference, e.g. $token\n2, function reference, e.g. ${is_status_code_200($status_code)}\n3, dict or list, maybe containing variable/function reference, e.g. {\"var\": \"$abc\"}\n4, string joined by delimiter. e.g. \"status_code\", \"headers.content-type\"\n5, regex string, e.g. \"LB[\\d]*(.*)RB[\\d]*\"\n\nresp_obj: response object", "source": "codesearchnet"} {"code": "def __init__(self, sample_rate: int, delay_other_parts: bool=True):\n self._sample_rate = sample_rate\n self._delay_other_parts = delay_other_parts", "docstring": "Initializes the rate limiter.\n\nArgs:\nsample_rate: The sample rate of the audio. A typical value is 24000\n(24KHz)\ndelay_other_parts: If true, other parts will be delayed until the audio is\nplayed out. If false, other parts will be passed through as soon as\npossible, overtaking audio if needed.", "source": "github-repos"} {"code": "def SendSerializedMessage(self, message):\n \n try:\n ba = Helper.ToArray(message)\n ba2 = binascii.unhexlify(ba)\n self.bytes_out += len(ba2)\n self.transport.write(ba2)\n except Exception as e:\n logger.debug(f\"Could not send serialized message {e}\")", "docstring": "Send the `message` to the remote client.\n\nArgs:\nmessage (neo.Network.Message):", "source": "juraj-google-style"} {"code": "def _create_table_and_update_context(node, context):\n schema_type_name = sql_context_helpers.get_schema_type_name(node, context)\n table = context.compiler_metadata.get_table(schema_type_name).alias()\n context.query_path_to_selectable[node.query_path] = table\n return table", "docstring": "Create an aliased table for a SqlNode.\n\nUpdates the relevant Selectable global context.\n\nArgs:\nnode: SqlNode, the current node.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nTable, the newly aliased SQLAlchemy table.", "source": "codesearchnet"} {"code": "def get_alarms(zone=None):\n if (zone is None):\n zone = discovery.any_soco()\n response = zone.alarmClock.ListAlarms()\n alarm_list = response['CurrentAlarmList']\n tree = XML.fromstring(alarm_list.encode('utf-8'))\n alarms = tree.findall('Alarm')\n result = set()\n for alarm in alarms:\n values = alarm.attrib\n alarm_id = values['ID']\n if Alarm._all_alarms.get(alarm_id):\n instance = Alarm._all_alarms.get(alarm_id)\n else:\n instance = Alarm(None)\n instance._alarm_id = alarm_id\n Alarm._all_alarms[instance._alarm_id] = instance\n instance.start_time = datetime.strptime(values['StartTime'], '%H:%M:%S').time()\n instance.duration = (None if (values['Duration'] == '') else datetime.strptime(values['Duration'], '%H:%M:%S').time())\n instance.recurrence = values['Recurrence']\n instance.enabled = (values['Enabled'] == '1')\n instance.zone = next((z for z in zone.all_zones if (z.uid == values['RoomUUID'])), None)\n if (instance.zone is None):\n continue\n instance.program_uri = (None if (values['ProgramURI'] == 'x-rincon-buzzer:0') else values['ProgramURI'])\n instance.program_metadata = values['ProgramMetaData']\n instance.play_mode = values['PlayMode']\n instance.volume = values['Volume']\n instance.include_linked_zones = (values['IncludeLinkedZones'] == '1')\n result.add(instance)\n return result", "docstring": "Get a set of all alarms known to the Sonos system.\n\nArgs:\nzone (`SoCo`, optional): a SoCo instance to query. If None, a random\ninstance is used. 
Defaults to `None`.\n\nReturns:\nset: A set of `Alarm` instances\n\nNote:\nAny existing `Alarm` instance will have its attributes updated to those\ncurrently stored on the Sonos system.", "source": "codesearchnet"} {"code": "def pascalcase(text, acronyms=None):\n \n words, _case, _sep = case_parse.parse_case(text, acronyms)\n return ''.join(words)", "docstring": "Return text in PascalCase style (aka MixedCase).\n\nArgs:\ntext: input string to convert case\ndetect_acronyms: should attempt to detect acronyms\nacronyms: a list of acronyms to detect\n\n>>> pascalcase(\"hello world\")\n'HelloWorld'\n>>> pascalcase(\"HELLO_HTML_WORLD\", True, [\"HTML\"])\n'HelloHTMLWorld'", "source": "juraj-google-style"} {"code": "def ensuredir(path_, verbose=None, info=False, mode=0o1777):\n if verbose is None:\n verbose = VERYVERBOSE\n if isinstance(path_, (list, tuple)):\n path_ = join(*path_)\n if HAVE_PATHLIB and isinstance(path_, pathlib.Path):\n path_ = str(path_)\n if not checkpath(path_, verbose=verbose, info=info):\n if verbose:\n print('[util_path] mkdir(%r)' % path_)\n try:\n os.makedirs(normpath(path_), mode=mode)\n except OSError as ex:\n util_dbg.printex(\n ex,\n 'check that the longest existing path '\n 'is not a bad windows symlink.', keys=['path_'])\n raise\n return path_", "docstring": "Ensures that directory will exist. Creates a new dir with sticky bits by\ndefault.\n\nArgs:\npath_ (str): directory path to ensure. Can also be a tuple to send to join\ninfo (bool): if True prints extra information\nmode (int): octal mode of directory (default 0o1777)\n\nReturns:\nstr: path - the ensured directory", "source": "juraj-google-style"} {"code": "def _update_watermarks_for_side_input_and_unblock_tasks(self, side_input, watermark) -> List[Tuple[TransformExecutor, Timestamp]]:\n with self._lock:\n view = self._views[side_input]\n view.watermark = watermark\n unblocked_tasks = []\n tasks_just_unblocked = []\n for task, block_until in view.blocked_tasks:\n if watermark.output_watermark >= block_until:\n view.value = self._pvalue_to_value(side_input, view.elements)\n unblocked_tasks.append(task)\n tasks_just_unblocked.append((task, block_until))\n task.blocked = False\n for task in tasks_just_unblocked:\n view.blocked_tasks.remove(task)\n return unblocked_tasks", "docstring": "Helps update _SideInputsContainer after a watermark update.\n\nFor each view of the side input, it updates the value of the watermark\nrecorded when the watermark moved and unblocks tasks accordingly.\n\nArgs:\nside_input: ``_UnpickledSideInput`` value.\nwatermark: Value of the watermark after an update for a PTransform.\n\nReturns:\nTasks that get unblocked as a result of the watermark advancing.", "source": "github-repos"} {"code": "def _process_update(self, item, feed_item):\n if feed_item.get(FieldMap.CAMPAIGN_ID, '') == '':\n feed_item[FieldMap.CAMPAIGN_ID] = item['campaignId']\n campaign = self.campaign_dao.get(feed_item, required=True)\n placement_group = self.placement_group_dao.get(feed_item, required=True)\n feed_item[FieldMap.CAMPAIGN_ID] = campaign['id']\n feed_item[FieldMap.CAMPAIGN_NAME] = campaign['name']\n if placement_group:\n feed_item[FieldMap.PLACEMENT_GROUP_ID] = placement_group['id']\n feed_item[FieldMap.PLACEMENT_GROUP_NAME] = placement_group['name']\n item['placementGroupId'] = placement_group['id']\n else:\n item['placementGroupId'] = None\n self._process_skipability(feed_item, item)\n item['pricingSchedule']['startDate'] = 
StringExtensions.convertDateTimeStrToDateStr(feed_item.get(FieldMap.PLACEMENT_START_DATE, None)) if feed_item.get(FieldMap.PLACEMENT_START_DATE, '') else item['pricingSchedule']['startDate']\n item['pricingSchedule']['endDate'] = StringExtensions.convertDateTimeStrToDateStr(feed_item.get(FieldMap.PLACEMENT_END_DATE, None)) if feed_item.get(FieldMap.PLACEMENT_END_DATE, '') else item['pricingSchedule']['endDate']\n item['pricingSchedule']['pricingType'] = feed_item.get(FieldMap.PLACEMENT_PRICING_SCHEDULE_COST_STRUCTURE, None) if feed_item.get(FieldMap.PLACEMENT_PRICING_SCHEDULE_COST_STRUCTURE, '') else item['pricingSchedule']['pricingType']\n if feed_item.get(FieldMap.PLACEMENT_PRICING_TESTING_START, None):\n item['pricingSchedule']['testingStartDate'] = feed_item.get(FieldMap.PLACEMENT_PRICING_TESTING_START, None)\n item['name'] = feed_item.get(FieldMap.PLACEMENT_NAME, None) if feed_item.get(FieldMap.PLACEMENT_NAME, '') else item['name']\n item['archived'] = feed_item.get(FieldMap.PLACEMENT_ARCHIVED, None) if feed_item.get(FieldMap.PLACEMENT_ARCHIVED, '') else item['archived']\n item['adBlockingOptOut'] = feed_item.get(FieldMap.PLACEMENT_AD_BLOCKING, False)\n self._process_transcode(item, feed_item)\n self._process_active_view_and_verification(item, feed_item)\n self._process_pricing_schedule(item, feed_item)\n key_values = feed_item.get(FieldMap.PLACEMENT_ADDITIONAL_KEY_VALUES, None)\n if key_values == '':\n if item.get('tagSetting', {}).get('additionalKeyValues'):\n del item['tagSetting']['additionalKeyValues']\n elif key_values != None:\n if not 'tagSetting' in item:\n item['tagSetting'] = {}\n item['tagSetting']['additionalKeyValues'] = key_values", "docstring": "Updates an placement based on the values from the feed.\n\nArgs:\nitem: Object representing the placement to be updated, this object is\nupdated directly.\nfeed_item: Feed item representing placement values from the Bulkdozer\nfeed.", "source": "github-repos"} {"code": "def get_column(self, X, column):\n \n if isinstance(X, pd.DataFrame):\n return X[column].values\n\n return X[:, column]", "docstring": "Return a column of the given matrix.\n\nArgs:\nX: `numpy.ndarray` or `pandas.DataFrame`.\ncolumn: `int` or `str`.\n\nReturns:\nnp.ndarray: Selected column.", "source": "juraj-google-style"} {"code": "def __init__(self, validate_args=False, name=\"reciprocal\"):\n \n self._name = name\n super(Reciprocal, self).__init__(\n forward_min_event_ndims=0,\n validate_args=validate_args,\n name=name)", "docstring": "Instantiates the `Reciprocal`.\n\nArgs:\nvalidate_args: Python `bool` indicating whether arguments should be\nchecked for correctness.\nname: Python `str` name given to ops managed by this object.", "source": "juraj-google-style"} {"code": "def _convert_to_json(self, response):\n \n try:\n return response.json()\n except ValueError:\n logging.warning('Expected response in JSON format from {0} but the actual response text is: {1}'.format(\n response.request.url, response.text,\n ))\n return None", "docstring": "Converts response to JSON.\nIf the response cannot be converted to JSON then `None` is returned.\n\nArgs:\nresponse - An object of type `requests.models.Response`\nReturns:\nResponse in JSON format if the response can be converted to JSON. 
`None` otherwise.", "source": "juraj-google-style"} {"code": "def run(self, dag):\n \n new_dag = DAGCircuit()\n\n if self.initial_layout is None:\n if self.property_set[\"layout\"]:\n self.initial_layout = self.property_set[\"layout\"]\n else:\n self.initial_layout = Layout.generate_trivial_layout(*dag.qregs.values())\n\n if len(dag.qubits()) != len(self.initial_layout):\n raise TranspilerError('The layout does not match the amount of qubits in the DAG')\n\n if len(self.coupling_map.physical_qubits) != len(self.initial_layout):\n raise TranspilerError(\n \"Mappers require to have the layout to be the same size as the coupling map\")\n\n current_layout = self.initial_layout.copy()\n\n for layer in dag.serial_layers():\n subdag = layer['graph']\n\n for gate in subdag.twoQ_gates():\n physical_q0 = current_layout[gate.qargs[0]]\n physical_q1 = current_layout[gate.qargs[1]]\n if self.coupling_map.distance(physical_q0, physical_q1) != 1:\n \n swap_layer = DAGCircuit()\n\n path = self.coupling_map.shortest_undirected_path(physical_q0, physical_q1)\n for swap in range(len(path) - 2):\n connected_wire_1 = path[swap]\n connected_wire_2 = path[swap + 1]\n\n qubit_1 = current_layout[connected_wire_1]\n qubit_2 = current_layout[connected_wire_2]\n\n \n for qreg in current_layout.get_registers():\n if qreg not in swap_layer.qregs.values():\n swap_layer.add_qreg(qreg)\n\n \n swap_layer.apply_operation_back(SwapGate(),\n qargs=[qubit_1, qubit_2],\n cargs=[])\n\n \n edge_map = current_layout.combine_into_edge_map(self.initial_layout)\n new_dag.compose_back(swap_layer, edge_map)\n\n \n for swap in range(len(path) - 2):\n current_layout.swap(path[swap], path[swap + 1])\n\n edge_map = current_layout.combine_into_edge_map(self.initial_layout)\n new_dag.extend_back(subdag, edge_map)\n\n return new_dag", "docstring": "Runs the BasicSwap pass on `dag`.\nArgs:\ndag (DAGCircuit): DAG to map.\n\nReturns:\nDAGCircuit: A mapped DAG.\n\nRaises:\nTranspilerError: if the coupling map or the layout are not\ncompatible with the DAG", "source": "juraj-google-style"} {"code": "async def get(self, request):\n ticket = (await self.get_ticket(request))\n if (ticket is None):\n return None\n try:\n now = time.time()\n fields = self._ticket.validate(ticket, self._get_ip(request), now)\n if ((self._reissue_time is not None) and (now >= (fields.valid_until - self._reissue_time))):\n request[_REISSUE_KEY] = self._new_ticket(request, fields.user_id)\n return fields.user_id\n except TicketError as e:\n return None", "docstring": "Gets the user_id for the request.\n\nGets the ticket for the request using the get_ticket() function, and\nauthenticates the ticket.\n\nArgs:\nrequest: aiohttp Request object.\n\nReturns:\nThe userid for the request, or None if the ticket is not\nauthenticated.", "source": "codesearchnet"} {"code": "def _AttemptAutoDetectTagFile(self, analysis_mediator):\n \n self._autodetect_tag_file_attempt = True\n if not analysis_mediator.data_location:\n return False\n\n operating_system = analysis_mediator.operating_system.lower()\n filename = self._OS_TAG_FILES.get(operating_system, None)\n if not filename:\n return False\n\n logger.info('Using auto detected tag file: {0:s}'.format(filename))\n tag_file_path = os.path.join(analysis_mediator.data_location, filename)\n self.SetAndLoadTagFile(tag_file_path)\n return True", "docstring": "Detects which tag file is most appropriate.\n\nArgs:\nanalysis_mediator (AnalysisMediator): analysis mediator.\n\nReturns:\nbool: True if a tag file is autodetected.", "source": 
"juraj-google-style"} {"code": "def target_encode(self, answer: str, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy, TapexTruncationStrategy]=None, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> List[int]:\n encoded_outputs = self.target_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, **kwargs)\n return encoded_outputs['input_ids']", "docstring": "Prepare the answer string for the model. This method does not return token type IDs, attention masks, etc.\nwhich are necessary for the model to work correctly. Use this method if you want to build your processing on\nyour own, otherwise refer to `__call__`.\n\nArgs:\nanswer `str`:\nCorresponding answer supervision to the queries for training the model", "source": "github-repos"} {"code": "def preload_check():\n if os.name == 'nt':\n if MSVCP_DLL_NAMES in build_info.build_info:\n missing = []\n for dll_name in build_info.build_info[MSVCP_DLL_NAMES].split(','):\n try:\n ctypes.WinDLL(dll_name)\n except OSError:\n missing.append(dll_name)\n if missing:\n raise ImportError('Could not find the DLL(s) %r. TensorFlow requires that these DLLs be installed in a directory that is named in your %%PATH%% environment variable. You may install these DLLs by downloading \"Microsoft C++ Redistributable for Visual Studio 2015, 2017 and 2019\" for your platform from this URL: https:\n else:\n from tensorflow.python.platform import _pywrap_cpu_feature_guard\n _pywrap_cpu_feature_guard.InfoAboutUnusedCPUFeatures()", "docstring": "Raises an exception if the environment is not correctly configured.\n\nRaises:\nImportError: If the check detects that the environment is not correctly\nconfigured, and attempting to load the TensorFlow runtime will fail.", "source": "github-repos"} {"code": "def annotated(func, name=None):\n if hasattr(func, 'metadata'):\n if (name is not None):\n func.metadata = AnnotatedMetadata(func, name)\n return func\n func.metadata = AnnotatedMetadata(func, name)\n func.finalizer = False\n func.takes_cmdline = False\n func.decorated = False\n func.context = False\n return func", "docstring": "Mark a function as callable from the command line.\n\nThis function is meant to be called as decorator. 
This function\nalso initializes metadata about the function's arguments that is\nbuilt up by the param decorator.\n\nArgs:\nfunc (callable): The function that we wish to mark as callable\nfrom the command line.\nname (str): Optional string that will override the function's\nbuilt-in name.", "source": "codesearchnet"} {"code": "def xeval(source, optimize=True):\n native = xcompile(source, optimize=optimize)\n return native()", "docstring": "Compiles to native Python bytecode and runs program, returning the\ntopmost value on the stack.\n\nArgs:\noptimize: Whether to optimize the code after parsing it.\n\nReturns:\nNone: If the stack is empty\nobj: If the stack contains a single value\n[obj, obj, ...]: If the stack contains many values", "source": "codesearchnet"} {"code": "def zero_state(self, sample_batch_shape=()):\n h0 = tf.zeros([1, self.hidden_size])\n c0 = tf.zeros([1, self.hidden_size])\n combined_shape = tf.concat((tf.convert_to_tensor(value=sample_batch_shape, dtype=tf.int32), [self.dimensions]), axis=(- 1))\n previous_output = tf.zeros(combined_shape)\n return (previous_output, (h0, c0))", "docstring": "Returns an initial state for the LSTM cell.\n\nArgs:\nsample_batch_shape: A 0D or 1D tensor of the combined sample and\nbatch shape.\n\nReturns:\nA tuple of the initial previous output at timestep 0 of shape\n[sample_batch_shape, dimensions], and the cell state.", "source": "codesearchnet"} {"code": "def GetNotificationsForAllShards(self, queue):\n notifications_by_session_id = {}\n for queue_shard in self.GetAllNotificationShards(queue):\n self._GetUnsortedNotifications(queue_shard, notifications_by_session_id=notifications_by_session_id)\n return notifications_by_session_id.values()", "docstring": "Returns notifications for all shards of a queue at once.\n\nUsed by worker_test_lib.MockWorker to cover all shards with a single worker.\n\nArgs:\nqueue: usually rdfvalue.RDFURN(\"aff4:/W\")\n\nReturns:\nList of rdf_flows.GrrNotification objects", "source": "codesearchnet"} {"code": "def decode_iter_request(data: dict) -> Optional[Union[str, int]]:\n \n if \"response_metadata\" in data:\n return data[\"response_metadata\"].get(\"next_cursor\")\n elif \"paging\" in data:\n current_page = int(data[\"paging\"].get(\"page\", 1))\n max_page = int(data[\"paging\"].get(\"pages\", 1))\n\n if current_page < max_page:\n return current_page + 1\n elif \"has_more\" in data and data[\"has_more\"] and \"latest\" in data:\n return data[\"messages\"][-1][\"ts\"]\n\n return None", "docstring": "Decode incoming response from an iteration request\n\nArgs:\ndata: Response data\n\nReturns:\nNext itervalue", "source": "juraj-google-style"} {"code": "def stack_call(self, *args):\n self.pipelined_args.append(args)\n self.number_of_stacked_calls = (self.number_of_stacked_calls + 1)", "docstring": "Stacks a redis command inside the object.\n\nThe syntax is the same than the call() method a Client class.\n\nArgs:\n*args: full redis command as variable length argument list.\n\nExamples:\n>>> pipeline = Pipeline()\n>>> pipeline.stack_call(\"HSET\", \"key\", \"field\", \"value\")\n>>> pipeline.stack_call(\"PING\")\n>>> pipeline.stack_call(\"INCR\", \"key2\")", "source": "codesearchnet"} {"code": "def GetEntries(self, parser_mediator, data=None, **unused_kwargs):\n seeding_time = data.get('seeding-time-seconds', None)\n event_data = TransmissionEventData()\n event_data.destination = data.get('destination', None)\n (event_data.seedtime, _) = divmod(seeding_time, 60)\n timestamp = data.get('added-date', None)\n if 
timestamp:\n date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ADDED)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n timestamp = data.get('done-date', None)\n if timestamp:\n date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n timestamp = data.get('activity-date', None)\n if timestamp:\n date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extract data from Transmission's resume folder files.\n\nThis is the main parsing engine for the parser. It determines if\nthe selected file is the proper file to parse and extracts current\nrunning torrents.\n\nTransmission stores an individual Bencoded file for each active download\nin a folder named resume under the user's application data folder.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ndata (Optional[dict[str, object]]): bencode data values.", "source": "codesearchnet"} {"code": "def begin_disconnection(self, conn_or_internal_id, callback, timeout):\n \n\n data = {\n 'id': conn_or_internal_id,\n 'callback': callback\n }\n\n action = ConnectionAction('begin_disconnection', data, timeout=timeout, sync=False)\n self._actions.put(action)", "docstring": "Begin a disconnection attempt\n\nArgs:\nconn_or_internal_id (string, int): Either an integer connection id or a string\ninternal_id\ncallback (callable): Callback to call when this disconnection attempt either\nsucceeds or fails\ntimeout (float): How long to allow this connection attempt to proceed\nwithout timing it out (in seconds)", "source": "juraj-google-style"} {"code": "def inputs(eval_data, data_dir, batch_size):\n if (not eval_data):\n filenames = [os.path.join(data_dir, ('data_batch_%d.bin' % i)) for i in xrange(1, 6)]\n num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\n else:\n filenames = [os.path.join(data_dir, 'test_batch.bin')]\n num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n for f in filenames:\n if (not tf.gfile.Exists(f)):\n raise ValueError(('Failed to find file: ' + f))\n filename_queue = tf.train.string_input_producer(filenames)\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image, height, width)\n float_image = tf.image.per_image_standardization(resized_image)\n float_image.set_shape([height, width, 3])\n read_input.label.set_shape([1])\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int((num_examples_per_epoch * min_fraction_of_examples_in_queue))\n return _generate_image_and_label_batch(float_image, read_input.label, min_queue_examples, batch_size, shuffle=False)", "docstring": "Construct input for CIFAR evaluation using the Reader ops.\n\nArgs:\neval_data: bool, indicating if one should use the train or eval data set.\ndata_dir: Path to the CIFAR-10 data directory.\nbatch_size: Number of images per batch.\n\nReturns:\nimages: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\nlabels: Labels. 
1D tensor of [batch_size] size.", "source": "codesearchnet"} {"code": "def __init__(self, app):\n \n\n super(SMTPEmailAdapter, self).__init__(app)\n\n \n try:\n from flask_mail import Mail\n except ImportError:\n raise ConfigError(\n \"The Flask-Mail package is missing. Install Flask-Mail with 'pip install Flask-Mail'.\")\n self.mail = Mail(app)", "docstring": "Check config settings and setup Flask-Mail.\n\nArgs:\napp(Flask): The Flask application instance.", "source": "juraj-google-style"} {"code": "def get_checkpoint_mtimes(checkpoint_prefixes):\n mtimes = []\n\n def match_maybe_append(pathname):\n fnames = file_io.get_matching_files(pathname)\n if fnames:\n mtimes.append(file_io.stat(fnames[0]).mtime_nsec / 1000000000.0)\n return True\n return False\n for checkpoint_prefix in checkpoint_prefixes:\n pathname = _prefix_to_checkpoint_path(checkpoint_prefix, saver_pb2.SaverDef.V2)\n if match_maybe_append(pathname):\n continue\n match_maybe_append(checkpoint_prefix)\n return mtimes", "docstring": "Returns the mtimes (modification timestamps) of the checkpoints.\n\nGlobs for the checkpoints pointed to by `checkpoint_prefixes`. If the files\nexist, collect their mtime. Both V2 and V1 checkpoints are considered, in\nthat priority.\n\nThis is the recommended way to get the mtimes, since it takes into account\nthe naming difference between V1 and V2 formats.\n\nNote: If not all checkpoints exist, the length of the returned mtimes list\nwill be smaller than the length of `checkpoint_prefixes` list, so mapping\ncheckpoints to corresponding mtimes will not be possible.\n\nArgs:\ncheckpoint_prefixes: a list of checkpoint paths, typically the results of\n`Saver.save()` or those of `tf.train.latest_checkpoint()`, regardless of\nsharded/non-sharded or V1/V2.\nReturns:\nA list of mtimes (in microseconds) of the found checkpoints.", "source": "github-repos"} {"code": "def hexagonal(a: float, c: float):\n \n return Lattice.from_parameters(a, a, c, 90, 90, 120)", "docstring": "Convenience constructor for a hexagonal lattice.\n\nArgs:\na (float): *a* lattice parameter of the hexagonal cell.\nc (float): *c* lattice parameter of the hexagonal cell.\n\nReturns:\nHexagonal lattice of dimensions a x a x c.", "source": "juraj-google-style"} {"code": "def __init__(self, obj, saveables):\n self._obj = obj\n self._saveables = saveables", "docstring": "Constructor.\n\nArgs:\nobj: A Trackable object.\nsaveables: A list of saveables for `obj`.", "source": "github-repos"} {"code": "def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)", "docstring": "Rescale the image by the given factor. image = image * rescale_factor.\n\nArgs:\nimage (`np.ndarray`):\nImage to rescale.\nrescale_factor (`float`):\nThe value to use for rescaling.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format for the output image. If unset, the channel dimension format of the input\nimage is used. Can be one of:\n- `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n- `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\ninput_data_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format for the input image. If unset, is inferred from the input image. 
Can be\none of:\n- `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n- `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.", "source": "github-repos"} {"code": "def save(self, internal=False, meta=None, index_fields=None):\n for f in self.on_save:\n f(self)\n if (not (internal or self._pre_save_hook_called)):\n self._pre_save_hook_called = True\n self.pre_save()\n if (not self.deleted):\n self._handle_uniqueness()\n if (not self.exist):\n self.pre_creation()\n old_data = self._data.copy()\n if (self.just_created is None):\n self.setattrs(just_created=(not self.exist))\n if (self._just_created is None):\n self.setattrs(_just_created=self.just_created)\n self.objects.save_model(self, meta_data=meta, index_fields=index_fields)\n self._handle_changed_fields(old_data)\n self._process_relations(internal)\n if (not (internal or self._post_save_hook_called)):\n self._post_save_hook_called = True\n self.post_save()\n if self._just_created:\n self.setattrs(just_created=self._just_created, _just_created=False)\n self.post_creation()\n self._pre_save_hook_called = False\n self._post_save_hook_called = False\n if (not internal):\n self._initial_data = self.clean_value()\n return self", "docstring": "Save's object to DB.\n\nDo not override this method, use pre_save and post_save methods.\n\nArgs:\ninternal (bool): True if called within model.\nUsed to prevent unneccessary calls to pre_save and\npost_save methods.\nmeta (dict): JSON serializable meta data for logging of save operation.\n{'lorem': 'ipsum', 'dolar': 5}\nindex_fields (list): Tuple list for indexing keys in riak (with 'bin' or 'int').\nbin is used for string fields, int is used for integer fields.\n[('lorem','bin'),('dolar','int')]\n\nReturns:\nSaved model instance.", "source": "codesearchnet"} {"code": "def apply(self, data, path=None):\n \n\n if path is None:\n path = []\n\n if not isinstance(data, dict):\n return data\n\n def _enumerate(value):\n if isinstance(value, list):\n for k, v in enumerate(value):\n yield k, v\n elif isinstance(value, dict):\n for k, v in value.items():\n yield k, v\n\n def _set(container, key, value):\n if isinstance(container, list):\n container.append(value)\n else:\n container[key] = value\n\n def _apply(ramap, value, status=False, wc=False, path=[]):\n\n\n if not isinstance(value, dict) and not isinstance(value, list):\n if status:\n return value\n else:\n return None\n\n if not wc:\n status = ramap.get(\"__\", status)\n\n handler = None\n key_handler = None\n if path and self.handlers:\n namespace = Namespace(path)\n for _handler in self.handlers.values():\n if namespace.match(_handler.get(\"namespace\").keys, partial=False):\n handler = _handler\n key_handler = handler.get(\"key\")\n break\n\n if isinstance(value, list):\n if not key_handler:\n key_handler = list_key_handler\n rv = []\n else:\n rv = {}\n\n for k, v in _enumerate(value):\n if key_handler:\n k = key_handler(v, k)\n k = str(k)\n if isinstance(v, dict) or isinstance(v, list):\n if k in ramap:\n r = _apply(ramap[k], v, status=status, path=path+[k])\n if r:\n _set(rv, k, r)\n elif \"*\" in ramap:\n r = _apply(ramap[\"*\"], v, status=status, wc=True, path=path+[k])\n if r:\n _set(rv, k, r)\n elif status:\n _set(rv, k, v)\n else:\n if k in ramap:\n if ramap[k].get(\"__\", True):\n _set(rv, k, v)\n elif \"*\" in ramap and ramap[\"*\"].get(\"__\", True):\n _set(rv, k, v)\n elif status:\n _set(rv, k, v)\n\n return rv\n\n \n \n tmpns = {}\n for ns, handler in 
self.handlers.items():\n if handler.get(\"explicit\"):\n p = self.pset.get_permissions(ns)\n if p & const.PERM_READ:\n exists = False\n for _ns in self.pset.namespaces:\n if Namespace(_ns).match(Namespace(ns).keys, partial=False):\n exists = True\n break\n if exists:\n continue\n tmpns[ns] = p\n self.pset[ns] = const.PERM_DENY\n\n\n \n rv = _apply(self.pset.read_access_map, data)\n\n \n for ns, p in tmpns.items():\n if p is None:\n del self.pset[ns]\n else:\n self.pset[ns] = p\n\n return rv", "docstring": "Apply permissions in this set to the provided data, effectively\nremoving all keys from it are not permissioned to be viewed\n\nArguments:\n\ndata -- dict of data\n\nReturns:\n\nCleaned data", "source": "juraj-google-style"} {"code": "def print_versions(file: typing.TextIO=None) -> None:\n print('** QuantumFlow dependencies (> python -m quantumflow.meta) **')\n print('quantumflow \\t', qf.__version__, file=file)\n print('python \\t', sys.version[0:5], file=file)\n print('numpy \\t', np.__version__, file=file)\n print('networkx \\t', nx.__version__, file=file)\n print('cvxpy \\t', cvx.__version__, file=file)\n print('pyquil \\t', pyquil.__version__, file=file)\n print(bk.name, ' \\t', bk.version, '(BACKEND)', file=file)", "docstring": "Print version strings of currently installed dependencies\n\n``> python -m quantumflow.meta``\n\n\nArgs:\nfile: Output stream. Defaults to stdout.", "source": "codesearchnet"} {"code": "def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]]=None):\n logits = outputs.logits\n if target_sizes is not None:\n if len(logits) != len(target_sizes):\n raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n if is_torch_tensor(target_sizes):\n target_sizes = target_sizes.numpy()\n semantic_segmentation = []\n for idx in range(len(logits)):\n resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)\n semantic_map = resized_logits[0].argmax(dim=0)\n semantic_segmentation.append(semantic_map)\n else:\n semantic_segmentation = logits.argmax(dim=1)\n semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]\n return semantic_segmentation", "docstring": "Converts the output of [`MobileViTForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.\n\nArgs:\noutputs ([`MobileViTForSemanticSegmentation`]):\nRaw outputs of the model.\ntarget_sizes (`List[Tuple]` of length `batch_size`, *optional*):\nList of tuples corresponding to the requested final size (height, width) of each prediction. If unset,\npredictions will not be resized.\n\nReturns:\nsemantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic\nsegmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is\nspecified). 
Each entry of each `torch.Tensor` corresponds to a semantic class id.", "source": "github-repos"} {"code": "async def apply_async(processor: Processor | PartProcessor, content: Iterable[ProcessorPart]) -> list[ProcessorPart]:\n async with context():\n content_processor = processor.to_processor()\n as_async = stream_content(content)\n return await gather_stream(content_processor(as_async))", "docstring": "Applies a Processor asynchronously.\n\nWhen a part processor is given as input, this method will first turn it into\na processor and then will process the content asynchronously.\n\nArgs:\nprocessor: the Processor to apply to the content.\ncontent: a collection of ProcessorParts on which to apply the Processor.\n\nReturns:\nthe content, with the Processor applied to each content part.", "source": "github-repos"} {"code": "def join(self, timeout: Optional[float]=None):\n if self._generation_thread is not None:\n self._generation_thread.join(timeout=timeout)\n if self._generation_thread.is_alive():\n logger.warning('Generation thread did not exit after join timeout.')\n else:\n logger.info('Continuous Batching Manager stopped.')\n self._generation_thread = None", "docstring": "Wait for the background thread to finish.\n\nArgs:\ntimeout: Maximum time to wait for the thread to stop", "source": "github-repos"} {"code": "def _pack_fn(self):\n if (not self.packed_length):\n return None\n\n def my_fn(records):\n 'Function from list of TFRecords to list of TFRecords.'\n examples = []\n for record in records:\n x = tf.train.Example()\n x.ParseFromString(record)\n example_dict = {}\n if self.has_inputs:\n example_dict['inputs'] = [int(i) for i in x.features.feature['inputs'].int64_list.value]\n example_dict['targets'] = [int(i) for i in x.features.feature['targets'].int64_list.value]\n examples.append(example_dict)\n examples = list(self._maybe_pack_examples(examples))\n return [generator_utils.to_example(x).SerializeToString() for x in examples]\n return my_fn", "docstring": "For packed datasets, returns a function to pack examples.\n\nReturns:\nNone or a function from list of TFRecords to list of TFRecords", "source": "codesearchnet"} {"code": "def merge_context(self, tag, metadata):\n self.entities.append(tag)\n for k in metadata.keys():\n if (k not in self.metadata):\n self.metadata[k] = k", "docstring": "Merge a new entity and metadata into the contextManagerFrame.\n\nAppends tag as new entity and adds keys in metadata to keys in\nself.metadata.\n\nArgs:\ntag(str): entity to be added to self.entities\nmetadata(object): metadata contains keys to be added to self.metadata", "source": "codesearchnet"} {"code": "def fit_freq_min_max(self, training_signal):\n \n\n window_length = len(self.window)\n window_weight = sum(self.window)\n max_mask = np.zeros(int(window_length / 2) + 1)\n min_mask = np.zeros(int(window_length / 2) + 1)\n\n for i in range(0, len(training_signal) - window_length - 1):\n rfft = np.fft.rfft(training_signal[i:i + window_length] * self.window)\n temp = np.abs(rfft) / window_weight\n max_mask = np.maximum(max_mask, temp)\n min_mask = np.minimum(min_mask, temp)\n\n self.mask_top = self.gain * max_mask\n self.mask_bottom = min_mask / self.gain", "docstring": "Defines a spectral mask based on training data using min and max values of each\nfrequency component.\n\nArgs:\ntraining_signal: Training data", "source": "juraj-google-style"} {"code": "def flatten_output_collection_property(cls, name: str, field: Iterable[Any]) -> Dict[str, Any]:\n from itertools import chain\n return 
{f'{name}.{idx}': item for idx, item in enumerate(chain.from_iterable(field))}", "docstring": "Flatten any potential nested structure expanding the name of the field with the index of the element within the\nstructure.\n\nArgs:\nname: The name of the nested structure\nfield: The structure to, potentially, be flattened\n\nReturns:\n(Dict[str, Any]): Outputs with flattened structure and key mapping this new structure.", "source": "github-repos"} {"code": "def plugin_method(*plugin_names):\n\n def wrapper(callable_obj):\n for plugin_name in plugin_names:\n if (not hasattr(callable_obj, plugin_name)):\n setattr(callable_obj, plugin_name, True)\n return callable_obj\n return wrapper", "docstring": "Plugin Method decorator.\nSigns a web handler function with the plugins to be applied as attributes.\n\nArgs:\nplugin_names (list): A list of plugin callable names\n\nReturns:\nA wrapped handler callable.\n\nExamples:\n>>> @plugin_method('json', 'bill')\n... def method():\n... return \"Hello!\"\n...\n>>> print method.json\nTrue\n>>> print method.bill\nTrue", "source": "codesearchnet"} {"code": "def get_train_hooks(name_list, **kwargs):\n \n\n if not name_list:\n return []\n\n train_hooks = []\n for name in name_list:\n hook_name = HOOKS.get(name.strip().lower())\n if hook_name is None:\n raise ValueError('Unrecognized training hook requested: {}'.format(name))\n else:\n train_hooks.append(hook_name(**kwargs))\n\n return train_hooks", "docstring": "Factory for getting a list of TensorFlow hooks for training by name.\n\nArgs:\nname_list: a list of strings to name desired hook classes. Allowed:\nLoggingTensorHook, ProfilerHook, ExamplesPerSecondHook, which are defined\nas keys in HOOKS\n**kwargs: a dictionary of arguments to the hooks.\n\nReturns:\nlist of instantiated hooks, ready to be used in a classifier.train call.\n\nRaises:\nValueError: if an unrecognized name is passed.", "source": "juraj-google-style"} {"code": "def _ReadDefinition(self, definitions_registry, definition_values):\n \n if not definition_values:\n error_message = 'missing definition values'\n raise errors.DefinitionReaderError(None, error_message)\n\n name = definition_values.get('name', None)\n if not name:\n error_message = 'missing name'\n raise errors.DefinitionReaderError(None, error_message)\n\n type_indicator = definition_values.get('type', None)\n if not type_indicator:\n error_message = 'invalid definition missing type'\n raise errors.DefinitionReaderError(name, error_message)\n\n data_type_callback = self._DATA_TYPE_CALLBACKS.get(type_indicator, None)\n if data_type_callback:\n data_type_callback = getattr(self, data_type_callback, None)\n if not data_type_callback:\n error_message = 'unuspported data type definition: {0:s}.'.format(\n type_indicator)\n raise errors.DefinitionReaderError(name, error_message)\n\n return data_type_callback(definitions_registry, definition_values, name)", "docstring": "Reads a data type definition.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.\ndefinition_values (dict[str, object]): definition values.\n\nReturns:\nDataTypeDefinition: data type definition or None.\n\nRaises:\nDefinitionReaderError: if the definitions values are missing or if\nthe format is incorrect.", "source": "juraj-google-style"} {"code": "def GetEntries(\n self, parser_mediator, cookie_data=None, url=None, **kwargs):\n \n fields = cookie_data.split('.')\n number_of_fields = len(fields)\n\n if number_of_fields not in (1, 6):\n parser_mediator.ProduceExtractionWarning(\n 
'unsupported number of fields: {0:d} in cookie: {1:s}'.format(\n number_of_fields, self.COOKIE_NAME))\n return\n\n if number_of_fields == 1:\n domain_hash = None\n visitor_identifier = None\n first_visit_posix_time = None\n previous_visit_posix_time = None\n\n try:\n \n last_visit_posix_time = int(fields[0], 10) / 10000000\n except ValueError:\n last_visit_posix_time = None\n\n number_of_sessions = None\n\n elif number_of_fields == 6:\n domain_hash = fields[0]\n visitor_identifier = fields[1]\n\n \n try:\n first_visit_posix_time = int(fields[2], 10)\n except ValueError:\n first_visit_posix_time = None\n\n try:\n previous_visit_posix_time = int(fields[3], 10)\n except ValueError:\n previous_visit_posix_time = None\n\n try:\n last_visit_posix_time = int(fields[4], 10)\n except ValueError:\n last_visit_posix_time = None\n\n try:\n number_of_sessions = int(fields[5], 10)\n except ValueError:\n number_of_sessions = None\n\n event_data = GoogleAnalyticsEventData('utma')\n event_data.cookie_name = self.COOKIE_NAME\n event_data.domain_hash = domain_hash\n event_data.sessions = number_of_sessions\n event_data.url = url\n event_data.visitor_id = visitor_identifier\n\n if first_visit_posix_time is not None:\n date_time = dfdatetime_posix_time.PosixTime(\n timestamp=first_visit_posix_time)\n event = time_events.DateTimeValuesEvent(\n date_time, 'Analytics Creation Time')\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n if previous_visit_posix_time is not None:\n date_time = dfdatetime_posix_time.PosixTime(\n timestamp=previous_visit_posix_time)\n event = time_events.DateTimeValuesEvent(\n date_time, 'Analytics Previous Time')\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n date_time = None\n if last_visit_posix_time is not None:\n date_time = dfdatetime_posix_time.PosixTime(\n timestamp=last_visit_posix_time)\n timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED\n elif first_visit_posix_time is None and previous_visit_posix_time is None:\n \n \n date_time = dfdatetime_semantic_time.SemanticTime('Not set')\n timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME\n\n if date_time is not None:\n event = time_events.DateTimeValuesEvent(date_time, timestamp_description)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extracts event objects from the cookie.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\ncookie_data (str): cookie data.\nurl (str): URL or path where the cookie got set.", "source": "juraj-google-style"} {"code": "def Prob(self, x):\n \n if x < self.xs[0]: return 0.0\n index = bisect.bisect(self.xs, x)\n p = self.ps[index - 1]\n return p", "docstring": "Returns CDF(x), the probability that corresponds to value x.\n\nArgs:\nx: number\n\nReturns:\nfloat probability", "source": "juraj-google-style"} {"code": "def get_unsharded_shape(self, shapes):\n self._fill_default_values()\n if len(shapes) != self.number_of_shards:\n raise ValueError(f'Shapes {shapes} is length {len(shapes)} but must be a list of length number_of_shards={self.number_of_shards}')\n unsharded_shapes = [self._unshard_shape(s) for s in shapes]\n for i in range(self.number_of_shards - 1):\n if not unsharded_shapes[i].is_compatible_with(unsharded_shapes[self.number_of_shards - 1]):\n raise ValueError(f'Sharded shapes {shapes} are not consistent shards of a full shape sharded {self.number_of_shards} ways along dimension {self.shard_dimension}.')\n return unsharded_shapes[0]", "docstring": "Returns the shape of an unsharded Tensor 
given a list of shards.\n\nWhen given a list of shapes of shards, returns the shape of the\nunsharded Tensor that would generate the shards. Sets defaults for the\npolicy if number_of_shards or shard_dimension is None.\n\nArgs:\nshapes: The shapes of the Tensor shards to be combined.\n\nReturns:\nThe shape of the unsharded version of the Tensor.\n\nRaises:\nValueError: if shapes is not a list of length\nself.number_of_shards; or any element of shapes is not a valid\nshape consistent with the sharding policy; or the list of\nshapes is not a valid sharding of a full shape.\nTypeError: if an element of shapes is not convertible to a\nTensorShape", "source": "github-repos"} {"code": "def _get_linear_trajectory(x0, velocity, t):\n \n x0 = tf.convert_to_tensor(x0)\n velocity = tf.convert_to_tensor(velocity)\n t = tf.convert_to_tensor(t)\n if x0.shape.ndims != 1:\n raise ValueError(\"x0 must be a rank 1 tensor\")\n if velocity.shape.ndims != 1:\n raise ValueError(\"velocity must be a rank 1 tensor\")\n if t.shape.ndims != 1:\n raise ValueError(\"t must be a rank 1 tensor\")\n x0 = tf.expand_dims(x0, axis=0)\n velocity = tf.expand_dims(velocity, axis=0)\n dx = velocity * tf.expand_dims(t, axis=-1)\n linear_trajectories = x0 + dx\n assert linear_trajectories.shape.ndims == 2, \\\n \"linear_trajectories should be a rank 2 tensor\"\n return linear_trajectories", "docstring": "Construct a linear trajectory from x0.\n\nArgs:\nx0: N-D float tensor.\nvelocity: N-D float tensor\nt: [sequence_length]-length float tensor\n\nReturns:\nx: [sequence_length, ndims] float tensor.", "source": "juraj-google-style"} {"code": "def __init__(self, affine_transformation_matrix, tol=0.01):\n \n affine_transformation_matrix = np.array(affine_transformation_matrix)\n if affine_transformation_matrix.shape != (4, 4):\n raise ValueError(\"Affine Matrix must be a 4x4 numpy array!\")\n self.affine_matrix = affine_transformation_matrix\n self.tol = tol", "docstring": "Initializes the SymmOp from a 4x4 affine transformation matrix.\nIn general, this constructor should not be used unless you are\ntransferring rotations. Use the static constructors instead to\ngenerate a SymmOp from proper rotations and translation.\n\nArgs:\naffine_transformation_matrix (4x4 array): Representing an\naffine transformation.\ntol (float): Tolerance for determining if matrices are equal.", "source": "juraj-google-style"} {"code": "def get_dataset_date(self, date_format=None):\n \n \n dataset_date = self.get_dataset_date_as_datetime()\n return self._get_formatted_date(dataset_date, date_format)", "docstring": "Get dataset date as string in specified format. For range returns start date.\nIf no format is supplied, an ISO 8601 string is returned.\n\nArgs:\ndate_format (Optional[str]): Date format. None is taken to be ISO 8601. 
Defaults to None.\n\nReturns:\nOptional[str]: Dataset date string or None if no date is set", "source": "juraj-google-style"} {"code": "def angle_to_name(angle, segments=8, abbr=False):\n \n if segments == 4:\n string = COMPASS_NAMES[int((angle + 45) / 90) % 4 * 2]\n elif segments == 8:\n string = COMPASS_NAMES[int((angle + 22.5) / 45) % 8 * 2]\n elif segments == 16:\n string = COMPASS_NAMES[int((angle + 11.25) / 22.5) % 16]\n else:\n raise ValueError('Segments parameter must be 4, 8 or 16 not %r'\n % segments)\n if abbr:\n return ''.join(i[0].capitalize() for i in string.split('-'))\n else:\n return string", "docstring": "Convert angle in to direction name.\n\nArgs:\nangle (float): Angle in degrees to convert to direction name\nsegments (int): Number of segments to split compass in to\nabbr (bool): Whether to return abbreviated direction string\n\nReturns:\nstr: Direction name for ``angle``", "source": "juraj-google-style"} {"code": "def register(\n self,\n pattern: str,\n handler: Any,\n flags: int = 0,\n channel: str = \"*\",\n subtype: Optional[str] = None,\n ) -> None:\n \n LOG.debug('Registering message endpoint \"%s: %s\"', pattern, handler)\n match = re.compile(pattern, flags)\n\n if subtype not in self._routes[channel]:\n self._routes[channel][subtype] = dict()\n\n if match in self._routes[channel][subtype]:\n self._routes[channel][subtype][match].append(handler)\n else:\n self._routes[channel][subtype][match] = [handler]", "docstring": "Register a new handler for a specific :class:`slack.events.Message`.\n\nThe routing is based on regex pattern matching the message text and the incoming slack channel.\n\nArgs:\npattern: Regex pattern matching the message text.\nhandler: Callback\nflags: Regex flags.\nchannel: Slack channel ID. Use * for any.\nsubtype: Message subtype", "source": "juraj-google-style"} {"code": "def to_grayscale(img, num_output_channels=1):\n \n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. 
Got {}'.format(type(img)))\n\n if num_output_channels == 1:\n img = img.convert('L')\n elif num_output_channels == 3:\n img = img.convert('L')\n np_img = np.array(img, dtype=np.uint8)\n np_img = np.dstack([np_img, np_img, np_img])\n img = Image.fromarray(np_img, 'RGB')\n else:\n raise ValueError('num_output_channels should be either 1 or 3')\n\n return img", "docstring": "Convert image to grayscale version of image.\n\nArgs:\nimg (PIL Image): Image to be converted to grayscale.\n\nReturns:\nPIL Image: Grayscale version of the image.\nif num_output_channels = 1 : returned image is single channel\n\nif num_output_channels = 3 : returned image is 3 channel with r = g = b", "source": "juraj-google-style"} {"code": "def get_examples_from_dataset(self, dataset, evaluate=False):\n if evaluate:\n dataset = dataset['validation']\n else:\n dataset = dataset['train']\n examples = []\n for tensor_dict in tqdm(dataset):\n examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate))\n return examples", "docstring": "Creates a list of [`~data.processors.squad.SquadExample`] using a TFDS dataset.\n\nArgs:\ndataset: The tfds dataset loaded from *tensorflow_datasets.load(\"squad\")*\nevaluate: Boolean specifying if in evaluation mode or in training mode\n\nReturns:\nList of SquadExample\n\nExamples:\n\n```python\n>>> import tensorflow_datasets as tfds\n\n>>> dataset = tfds.load(\"squad\")\n\n>>> training_examples = get_examples_from_dataset(dataset, evaluate=False)\n>>> evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)\n```", "source": "github-repos"} {"code": "def zoomlevel(self):\n resources = self.get_resource()\n zoomlevel = namedtuple('zoomlevel', 'zoomLevel')\n try:\n return [zoomlevel(resource['zoomLevel']) for resource in resources]\n except TypeError:\n try:\n if isinstance(resources['ElevationData'], dict):\n return zoomlevel(resources['ElevationData']['ZoomLevel'])\n except KeyError:\n try:\n if isinstance(resources['SeaLevelData'], dict):\n zoom = resources['SeaLevelData']['ZoomLevel']\n return zoomlevel(zoom)\n except KeyError:\n print(KeyError)", "docstring": "Retrieves zoomlevel from the output response\n\nReturns:\nzoomlevel (namedtuple): A namedtuple of zoomlevel from the output\nresponse", "source": "codesearchnet"} {"code": "async def send_heartbeat(self, short_name):\n if (short_name not in self.services):\n raise ArgumentError('Unknown service name', short_name=short_name)\n self.services[short_name]['state'].heartbeat()\n (await self._notify_update(short_name, 'heartbeat'))", "docstring": "Post a heartbeat for a service.\n\nArgs:\nshort_name (string): The short name of the service to query", "source": "codesearchnet"} {"code": "def get_ax_fig_plt(ax=None, **kwargs):\n import matplotlib.pyplot as plt\n if (ax is None):\n fig = plt.figure(**kwargs)\n ax = fig.add_subplot(1, 1, 1)\n else:\n fig = plt.gcf()\n return (ax, fig, plt)", "docstring": "Helper function used in plot functions supporting an optional Axes argument.\nIf ax is None, we build the `matplotlib` figure and create the Axes else\nwe return the current active figure.\n\nArgs:\nkwargs: keyword arguments are passed to plt.figure if ax is not None.\n\nReturns:\nax: :class:`Axes` object\nfigure: matplotlib figure\nplt: matplotlib pyplot module.", "source": "codesearchnet"} {"code": "def list_worker_processes(apppool):\n \n ps_cmd = ['Get-ChildItem',\n r\"'IIS:\\AppPools\\{0}\\WorkerProcesses'\".format(apppool)]\n\n cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)\n\n try:\n items = 
salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n except ValueError:\n raise CommandExecutionError('Unable to parse return data as Json.')\n\n ret = dict()\n for item in items:\n ret[item['processId']] = item['appPoolName']\n\n if not ret:\n log.warning('No backups found in output: %s', cmd_ret)\n\n return ret", "docstring": "Returns a list of worker processes that correspond to the passed\napplication pool.\n\n.. versionadded:: 2017.7.0\n\nArgs:\napppool (str): The application pool to query\n\nReturns:\ndict: A dictionary of worker processes with their process IDs\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.list_worker_processes 'My App Pool'", "source": "juraj-google-style"} {"code": "def end_block(self, request_end_block):\n self.abort_if_abci_chain_is_not_synced()\n chain_shift = (0 if (self.chain is None) else self.chain['height'])\n height = (request_end_block.height + chain_shift)\n self.new_height = height\n logger.debug(f'Updating pre-commit state: {self.new_height}')\n pre_commit_state = dict(height=self.new_height, transactions=self.block_txn_ids)\n self.bigchaindb.store_pre_commit_state(pre_commit_state)\n block_txn_hash = calculate_hash(self.block_txn_ids)\n block = self.bigchaindb.get_latest_block()\n if self.block_txn_ids:\n self.block_txn_hash = calculate_hash([block['app_hash'], block_txn_hash])\n else:\n self.block_txn_hash = block['app_hash']\n validator_update = Election.process_block(self.bigchaindb, self.new_height, self.block_transactions)\n return ResponseEndBlock(validator_updates=validator_update)", "docstring": "Calculate block hash using transaction ids and previous block\nhash to be stored in the next block.\n\nArgs:\nheight (int): new height of the chain.", "source": "codesearchnet"} {"code": "def incomplete_size(self, name=None):\n if name is None:\n name = '%s_incomplete_size' % self._name\n return self._incomplete_size_fn(shared_name=self._name, name=name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit)", "docstring": "Returns the number of incomplete elements in the staging area.\n\nArgs:\nname: A name for the operation (optional)\n\nReturns:\nThe created op", "source": "github-repos"} {"code": "def import_module(module_or_filename):\n if os.path.exists(module_or_filename):\n if not module_or_filename.endswith('.py'):\n try:\n return import_from_module_name(module_or_filename)\n except ImportError:\n raise ValueError('Fire can only be called on .py files.')\n return import_from_file_path(module_or_filename)\n if os.path.sep in module_or_filename:\n raise OSError('Fire was passed a filename which could not be found.')\n return import_from_module_name(module_or_filename)", "docstring": "Imports a given module or filename.\n\nIf the module_or_filename exists in the file system and ends with .py, we\nattempt to import it. 
If that import fails, try to import it as a module.\n\nArgs:\nmodule_or_filename (str): string name of path or module.\n\nRaises:\nValueError: if the given file is invalid.\nIOError: if the file or module can not be found or imported.\n\nReturns:\nTuple[ModuleType, str]: returns the imported module and the module name,\nusually extracted from the path itself.", "source": "github-repos"} {"code": "def amend_commit(\n self,\n append_to_msg: typing.Optional[str] = None,\n new_message: typing.Optional[str] = None,\n files_to_add: typing.Optional[typing.Union[typing.List[str], str]] = None,\n ):", "docstring": "Amends last commit\n\nArgs:\nappend_to_msg: string to append to previous message\nnew_message: new commit message\nfiles_to_add: optional list of files to commit", "source": "juraj-google-style"} {"code": "def generate_output_header(self, query_type='RDAP'):\n \n\n output = '\\n{0}{1}{2} query for {3}:{4}\\n\\n'.format(\n ANSI['ul'],\n ANSI['b'],\n query_type,\n self.obj.address_str,\n ANSI['end']\n )\n\n return output", "docstring": "The function for generating the CLI output header.\n\nArgs:\nquery_type (:obj:`str`): The IPWhois query type. Defaults to\n'RDAP'.\n\nReturns:\nstr: The generated output.", "source": "juraj-google-style"} {"code": "def get_database_info(db_uri):\n \n if not db_uri:\n return None, None\n scheme = urlparse.urlparse(db_uri).scheme\n if scheme == 'sqlite':\n return sqlite3, create_sqlite_connection_provider(db_uri)\n else:\n raise ValueError('Only sqlite DB URIs are supported now: ' + db_uri)", "docstring": "Returns TBContext fields relating to SQL database.\n\nArgs:\ndb_uri: A string URI expressing the DB file, e.g. \"sqlite:~/tb.db\".\n\nReturns:\nA tuple with the db_module and db_connection_provider TBContext fields. 
If\ndb_uri was empty, then (None, None) is returned.\n\nRaises:\nValueError: If db_uri scheme is not supported.", "source": "juraj-google-style"} {"code": "def get_urls_from_onetab(onetab):\n html = requests.get(onetab).text\n soup = BeautifulSoup(html, 'lxml')\n divs = soup.findAll('div', {'style': 'padding-left: 24px; padding-top: 8px; position: relative; font-size: 13px;'})\n return [div.find('a').attrs['href'] for div in divs]", "docstring": "Get video urls from a link to the onetab shared page.\n\nArgs:\nonetab (str): Link to a onetab shared page.\n\nReturns:\nlist: List of links to the videos.", "source": "codesearchnet"} {"code": "def forward(self, inputs: torch.Tensor):\n inputs = self.dropout1(nn.functional.gelu(self.fc1(inputs)))\n inputs = self.fc2(inputs)\n inputs = self.dropout2(inputs)\n return inputs", "docstring": "Args:\ninputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`):\nInput to the MLP layer.\nReturns:\n`torch.Tensor` of the same shape as `inputs`", "source": "github-repos"} {"code": "def restore(self, request):\n self._connection.connection.rpush(self._request_key, pickle.dumps(request))", "docstring": "Push the request back onto the queue.\n\nArgs:\nrequest (Request): Reference to a request object that should be pushed back\nonto the request queue.", "source": "codesearchnet"} {"code": "def create_binary_array(self, key, value):\n data = None\n if ((key is not None) and (value is not None)):\n value_encoded = []\n for v in value:\n try:\n value_encoded.append(base64.b64encode(bytes(v)).decode('utf-8'))\n except TypeError:\n value_encoded.append(base64.b64encode(bytes(v, 'utf-8')).decode('utf-8'))\n data = self.db.create(key.strip(), json.dumps(value_encoded))\n else:\n self.tcex.log.warning(u'The key or value field was None.')\n return data", "docstring": "Create method of CRUD operation for binary array data.\n\nArgs:\nkey (string): The variable to write to the DB.\nvalue (any): The data to write to the DB.\n\nReturns:\n(string): Result of DB write.", "source": "codesearchnet"} {"code": "def move(self, queue_item, status):\n \n\n items = self.__get_var(\"items_\" + queue_item.status)\n\n del items[queue_item.get_hash()]\n self.count_total -= 1\n\n queue_item.status = status\n self.add(queue_item)", "docstring": "Move a request/response pair to another status.\n\nArgs:\nqueue_item (:class:`nyawc.QueueItem`): The queue item to move\nstatus (str): The new status of the queue item.", "source": "juraj-google-style"} {"code": "def _infer_binary_broadcast_shape(shape1, shape2, given_output_shape=None):\n \n shape1 = convert_to_shape(shape1)\n shape2 = convert_to_shape(shape2)\n given_output_shape = convert_to_shape(given_output_shape)\n if given_output_shape is not None:\n return given_output_shape\n if is_subsequence(shape1.dims, shape2.dims):\n return shape2\n if is_subsequence(shape2.dims, shape1.dims):\n return shape1\n return Shape(\n shape1.dims + [d for d in shape2.dims if d not in shape1.dims])", "docstring": "Infer shape of the output of a binary op with broadcasting.\n\nIf the output shape is not given with given_output_shape, then we check\nto see if one of the shapes is a subsequence of the other one, and we\nreturn the one that is the supersequence. 
Otherwise, we list the dimensions\nof shape1, followed by all new dimensions in shape2.\n\nArgs:\nshape1: a Shape\nshape2: a Shape\ngiven_output_shape: an optional Shape\nReturns:\na Shape", "source": "juraj-google-style"} {"code": "def _new(self, name, **kwargs):\n super(Package, self)._new(name, **kwargs)\n ElementTree.SubElement(self, 'filename').text = name", "docstring": "Create a new Package from scratch.\n\nArgs:\nname: String filename of the package to use for the\nPackage object's Display Name (here, \"name\").\nWill also be used as the \"filename\" value. Casper will\nlet you specify different values, but it is not\nrecommended.\nkwargs:\nAccepted keyword args include all top-level keys.\nValues will be cast to string. (Int 10, bool False\nbecome string values \"10\" and \"false\").", "source": "codesearchnet"} {"code": "def new(self, name, *args, **kwargs):\n \n if name in self._instance_map:\n raise ValueError('Instance {0} is already initialized'\n .format(name))\n\n instance = self._class_map[name](*args, **kwargs)\n self._instance_map[name] = instance\n return instance", "docstring": "Create an instance.\n\nArgs:\nname (str): The name of the class\nargs: The arguments to pass to the class.\nkwargs: The keyword arguments to pass to the class.\n\nReturns:\ninstance", "source": "juraj-google-style"} {"code": "def gini(y, p):\n assert (y.shape == p.shape)\n n_samples = y.shape[0]\n arr = np.array([y, p]).transpose()\n true_order = arr[arr[(:, 0)].argsort()][(::(- 1), 0)]\n pred_order = arr[arr[(:, 1)].argsort()][(::(- 1), 0)]\n l_true = (np.cumsum(true_order) / np.sum(true_order))\n l_pred = (np.cumsum(pred_order) / np.sum(pred_order))\n l_ones = np.linspace((1 / n_samples), 1, n_samples)\n g_true = np.sum((l_ones - l_true))\n g_pred = np.sum((l_ones - l_pred))\n return (g_pred / g_true)", "docstring": "Normalized Gini Coefficient.\n\nArgs:\ny (numpy.array): target\np (numpy.array): prediction\n\nReturns:\ne (numpy.float64): normalized Gini coefficient", "source": "codesearchnet"} {"code": "def to_file(self, path):\n \n with open(os.path.expanduser(path), \"w\") as ofile:\n ofile.write(self.__repr__())", "docstring": "Write object XML to path.\n\nArgs:\npath: String file path to the file you wish to (over)write.\nPath will have ~ expanded prior to opening.", "source": "juraj-google-style"} {"code": "def content_metadata_uploads(self, mirror=False):\n excludes_str = ''\n includes_cmds = []\n cmd_base = self._get_upload_cmd(mirror=mirror)\n for content in self.s3props.get('content_metadata'):\n full_path = os.path.join(self.artifact_path, content['path'])\n if (not os.listdir(full_path)):\n raise S3ArtifactNotFound\n excludes_str += '--exclude \"{}/*\" '.format(content['path'])\n include_cmd = '{} --exclude \"*\", --include \"{}/*\"'.format(cmd_base, content['path'])\n include_cmd += ' --content-encoding {} --metadata-directive REPLACE'.format(content['content-encoding'])\n includes_cmds.append(include_cmd)\n exclude_cmd = '{} {}'.format(cmd_base, excludes_str)\n result = subprocess.run(exclude_cmd, check=True, shell=True, stdout=subprocess.PIPE)\n LOG.info('Uploaded files without metadata with command: %s', exclude_cmd)\n LOG.debug('Upload Command Output: %s', result.stdout)\n for include_cmd in includes_cmds:\n result = subprocess.run(include_cmd, check=True, shell=True, stdout=subprocess.PIPE)\n LOG.info('Uploaded files with metadata with command: %s', include_cmd)\n LOG.debug('Upload Command Output: %s', result.stdout)\n return True", "docstring": "Finds all specified encoded 
directories and uploads in multiple parts,\nsetting metadata for objects.\n\nArgs:\nmirror (bool): If true, uses a flat directory structure instead of nesting under a version.\n\nReturns:\nbool: True if uploaded", "source": "codesearchnet"} {"code": "def get_parameter(self, name):\n i = self.get_parameter_names(include_frozen=True).index(name)\n return self.get_parameter_vector(include_frozen=True)[i]", "docstring": "Get a parameter value by name\n\nArgs:\nname: The name of the parameter", "source": "codesearchnet"} {"code": "def user_func(func, arg_types=None, return_type=None):\n\n class UserFunction(std_core.TypedFunction):\n name = func.__name__\n\n def __call__(self, *args, **kwargs):\n return func(*args, **kwargs)\n\n @classmethod\n def reflect_static_args(cls):\n return arg_types\n\n @classmethod\n def reflect_static_return(cls):\n return return_type\n return UserFunction()", "docstring": "Create an EFILTER-callable version of function 'func'.\n\nAs a security precaution, EFILTER will not execute Python callables\nunless they implement the IApplicative protocol. There is a perfectly good\nimplementation of this protocol in the standard library and user functions\ncan inherit from it.\n\nThis will declare a subclass of the standard library TypedFunction and\nreturn an instance of it that EFILTER will happily call.\n\nArguments:\nfunc: A Python callable that will serve as the implementation.\narg_types (optional): A tuple of argument types. If the function takes\nkeyword arguments, they must still have a defined order.\nreturn_type (optional): The type the function returns.\n\nReturns:\nAn instance of a custom subclass of efilter.stdlib.core.TypedFunction.\n\nExamples:\ndef my_callback(tag):\nprint(\"I got %r\" % tag)\n\napi.apply(\"if True then my_callback('Hello World!')\",\nvars={\n\"my_callback\": api.user_func(my_callback)\n})\n\n# This should print \"I got 'Hello World!'\".", "source": "codesearchnet"} {"code": "def HandleClockSync(self, response):\n \n self.logger.info('Clock drift token has changed: %s.', response)\n self.distro_utils.HandleClockSync(self.logger)", "docstring": "Called when clock drift token changes.\n\nArgs:\nresponse: string, the metadata response with the new drift token value.", "source": "juraj-google-style"} {"code": "def nr_cases(self, snv_cases=None, sv_cases=None):\n query = {}\n if snv_cases:\n query = {'vcf_path': {'$exists': True}}\n if sv_cases:\n query = {'vcf_sv_path': {'$exists': True}}\n if (snv_cases and sv_cases):\n query = None\n return self.db.case.count_documents(query)", "docstring": "Return the number of cases in the database\n\nArgs:\nsnv_cases(bool): If only snv cases should be searched\nsv_cases(bool): If only sv cases should be searched\n\nReturns:\nnr_cases (int): the number of matching cases", "source": "codesearchnet"} {"code": "def __getitem__(self, key):\n\t\t\n\t\tif key in self._nodes:\treturn self._nodes[key]\n\t\telse:\t\t\t\t\traise KeyError(key)", "docstring": "Get Item (__getitem__)\n\nReturns a specific key from the parent\n\nArguments:\nkey {str} -- The key to get\n\nRaises:\nKeyError\n\nReturns:\nmixed", "source": "juraj-google-style"} {"code": "def calculate_keys_by_mapreduce_state(cls, mapreduce_state):\n \n if mapreduce_state is None:\n return []\n\n keys = []\n for i in range(mapreduce_state.mapreduce_spec.mapper.shard_count):\n shard_id = cls.shard_id_from_number(mapreduce_state.key().name(), i)\n keys.append(cls.get_key_by_shard_id(shard_id))\n return keys", "docstring": "Calculate all shard states keys 
for given mapreduce.\n\nArgs:\nmapreduce_state: MapreduceState instance\n\nReturns:\nA list of keys for shard states, sorted by shard id.\nThe corresponding shard states may not exist.", "source": "juraj-google-style"} {"code": "def is_struct(declaration):\n if (not is_class(declaration)):\n return False\n decl = class_traits.get_declaration(declaration)\n return (decl.class_type == class_declaration.CLASS_TYPES.STRUCT)", "docstring": "Returns True if declaration represents a C++ struct\n\nArgs:\ndeclaration (declaration_t): the declaration to be checked.\n\nReturns:\nbool: True if declaration represents a C++ struct", "source": "codesearchnet"} {"code": "def adopt_module_key_flags(module, flag_values=_flagvalues.FLAGS):\n \n if not isinstance(module, types.ModuleType):\n raise _exceptions.Error('Expected a module object, not %r.' % (module,))\n _internal_declare_key_flags(\n [f.name for f in flag_values.get_key_flags_for_module(module.__name__)],\n flag_values=flag_values)\n \n if module == _helpers.FLAGS_MODULE:\n _internal_declare_key_flags(\n \n \n \n \n \n [_helpers.SPECIAL_FLAGS[name].name for name in _helpers.SPECIAL_FLAGS],\n flag_values=_helpers.SPECIAL_FLAGS,\n key_flag_values=flag_values)", "docstring": "Declares that all flags key to a module are key to the current module.\n\nArgs:\nmodule: module, the module object from which all key flags will be declared\nas key flags to the current module.\nflag_values: FlagValues, the FlagValues instance in which the flags will\nbe declared as key flags. This should almost never need to be\noverridden.\n\nRaises:\nError: Raised when given an argument that is a module name (a string),\ninstead of a module object.", "source": "juraj-google-style"} {"code": "def __init__(self, learning_rate, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0, use_locking=False, name='ProximalAdagrad'):\n if initial_accumulator_value <= 0.0:\n raise ValueError('initial_accumulator_value must be positive: %s' % initial_accumulator_value)\n super(ProximalAdagradOptimizer, self).__init__(use_locking, name)\n self._learning_rate = learning_rate\n self._initial_accumulator_value = initial_accumulator_value\n self._l1_regularization_strength = l1_regularization_strength\n self._l2_regularization_strength = l2_regularization_strength\n self._l1_regularization_strength_tensor = None\n self._l2_regularization_strength_tensor = None\n self._learning_rate_tensor = None", "docstring": "Construct a new ProximalAdagrad optimizer.\n\nArgs:\nlearning_rate: A `Tensor` or a floating point value. The learning rate.\ninitial_accumulator_value: A floating point value.\nStarting value for the accumulators, must be positive.\nl1_regularization_strength: A float value, must be greater than or\nequal to zero.\nl2_regularization_strength: A float value, must be greater than or\nequal to zero.\nuse_locking: If `True` use locks for update operations.\nname: Optional name prefix for the operations created when applying\ngradients. 
Defaults to \"Adagrad\".\n\nRaises:\nValueError: If the `initial_accumulator_value` is invalid.", "source": "github-repos"} {"code": "def populate_development(version):\n \n with open(DEVELOPMENT_TEMPLATE, \"r\") as file_obj:\n template = file_obj.read()\n contents = template.format(revision=version, rtd_version=version)\n with open(DEVELOPMENT_FILE, \"w\") as file_obj:\n file_obj.write(contents)", "docstring": "Populates ``DEVELOPMENT.rst`` with release-specific data.\n\nThis is because ``DEVELOPMENT.rst`` is used in the Sphinx documentation.\n\nArgs:\nversion (str): The current version.", "source": "juraj-google-style"} {"code": "def is_unknown(input, model_file=None, model_proto=None, name=None):\n return _gen_sentencepiece_processor_op.sentencepiece_get_piece_type(input, model_file=model_file, model_proto=model_proto, name=name, piece_type=0)", "docstring": "Returns true if input id is unknown piece.\n\nArgs:\ninput: An arbitrary tensor of int32.\nmodel_file: The sentencepiece model file path.\nmodel_proto: The sentencepiece model serialized proto.\nEither `model_file` or `model_proto` must be set.\nname: The name argument that is passed to the op function.\nReturns:\nA tensor of bool with the same shape as input.", "source": "codesearchnet"} {"code": "async def get_match(self, m_id, force_update=False) -> Match:\n \n found_m = self._find_match(m_id)\n if force_update or found_m is None:\n await self.get_matches()\n found_m = self._find_match(m_id)\n return found_m", "docstring": "get a single match by id\n\n|methcoro|\n\nArgs:\nm_id: match id\nforce_update (default=False): True to force an update to the Challonge API\n\nReturns:\nMatch\n\nRaises:\nAPIException", "source": "juraj-google-style"} {"code": "def Start(self, hostname, port):\n \n if not self._Open(hostname, port):\n return False\n\n self._rpc_thread = threading.Thread(\n name=self._THREAD_NAME, target=self._xmlrpc_server.serve_forever)\n self._rpc_thread.start()\n return True", "docstring": "Starts the process status RPC server.\n\nArgs:\nhostname (str): hostname or IP address to connect to for requests.\nport (int): port to connect to for requests.\n\nReturns:\nbool: True if the RPC server was successfully started.", "source": "juraj-google-style"} {"code": "def _rpc(self, rpc_func_name, *args, **kwargs):\n try:\n self.check_server_proc_running()\n except Exception:\n self.log.error('Server process running check failed, skip sending RPC method(%s).', rpc_func_name)\n raise\n with self._lock:\n rpc_id = next(self._counter)\n request = self._gen_rpc_request(rpc_id, rpc_func_name, *args, **kwargs)\n self.log.debug('Sending RPC request %s.', request)\n response = self.send_rpc_request(request)\n self.log.debug('RPC request sent.')\n if self.verbose_logging or _MAX_RPC_RESP_LOGGING_LENGTH >= len(response):\n self.log.debug('Snippet received: %s', response)\n else:\n self.log.debug('Snippet received: %s... 
%d chars are truncated', response[:_MAX_RPC_RESP_LOGGING_LENGTH], len(response) - _MAX_RPC_RESP_LOGGING_LENGTH)\n response_decoded = self._decode_response_string_and_validate_format(rpc_id, response)\n return self._handle_rpc_response(rpc_func_name, response_decoded)", "docstring": "Sends an RPC to the server.\n\nArgs:\nrpc_func_name: str, the name of the snippet function to execute on the\nserver.\n*args: any, the positional arguments of the RPC request.\n**kwargs: any, the keyword arguments of the RPC request.\n\nReturns:\nThe result of the RPC.\n\nRaises:\nerrors.ProtocolError: something went wrong when exchanging data with the\nserver.\nerrors.ApiError: the RPC went through, however executed with errors.", "source": "github-repos"} {"code": "def lstm_with_recurrent_dropout(hidden_size, keep_prob=0.5, **kwargs):\n \n\n lstm = LSTM(hidden_size, **kwargs)\n return RecurrentDropoutWrapper(lstm, LSTMState(keep_prob, None)), lstm", "docstring": "LSTM with recurrent dropout.\n\nArgs:\nhidden_size: the LSTM hidden size.\nkeep_prob: the probability to keep an entry when applying dropout.\n**kwargs: Extra keyword arguments to pass to the LSTM.\n\nReturns:\nA tuple (train_lstm, test_lstm) where train_lstm is an LSTM with\nrecurrent dropout enabled to be used for training and test_lstm\nis the same LSTM without recurrent dropout.", "source": "juraj-google-style"} {"code": "def _write_to_file(self, fileinfo, filename):\n \n txt = to_text_string(fileinfo.editor.get_text_with_eol())\n fileinfo.encoding = encoding.write(txt, filename, fileinfo.encoding)", "docstring": "Low-level function for writing text of editor to file.\n\nArgs:\nfileinfo: FileInfo object associated to editor to be saved\nfilename: str with filename to save to\n\nThis is a low-level function that only saves the text to file in the\ncorrect encoding without doing any error handling.", "source": "juraj-google-style"} {"code": "def get_pending_domain_join():\n base_key = 'SYSTEM\\\\CurrentControlSet\\\\Services\\\\Netlogon'\n avoid_key = '{0}\\\\AvoidSpnSet'.format(base_key)\n join_key = '{0}\\\\JoinDomain'.format(base_key)\n if __utils__['reg.key_exists']('HKLM', avoid_key):\n log.debug('Key exists: %s', avoid_key)\n return True\n else:\n log.debug('Key does not exist: %s', avoid_key)\n if __utils__['reg.key_exists']('HKLM', join_key):\n log.debug('Key exists: %s', join_key)\n return True\n else:\n log.debug('Key does not exist: %s', join_key)\n return False", "docstring": "Determine whether there is a pending domain join action that requires a\nreboot.\n\n.. versionadded:: 2016.11.0\n\nReturns:\nbool: ``True`` if there is a pending domain join action, otherwise\n``False``\n\nCLI Example:\n\n.. 
code-block:: bash\n\nsalt '*' system.get_pending_domain_join", "source": "codesearchnet"} {"code": "def loss_scale(self):\n return self._loss_scale", "docstring": "Returns the loss scale of this Policy.\n\nReturns:\nA `tf.compat.v1.mixed_precision.experimental.LossScale`, or None.", "source": "github-repos"} {"code": "def _prep_crypto_msg(message):\n signature = message['signature']\n certificate = message['certificate']\n (sliced_signature, sliced_certificate) = ([], [])\n for x in range(0, len(signature), 76):\n sliced_signature.append(signature[x:(x + 76)])\n for x in range(0, len(certificate), 76):\n sliced_certificate.append(certificate[x:(x + 76)])\n message['signature'] = (u'\\n'.join(sliced_signature) + u'\\n')\n message['certificate'] = (u'\\n'.join(sliced_certificate) + u'\\n')\n return message", "docstring": "Split the signature and certificate in the same way M2Crypto does.\n\nM2Crypto is dropping newlines into its signature and certificate. This\nexists purely to maintain backwards compatibility.\n\nArgs:\nmessage (dict): A message with the ``signature`` and ``certificate`` keywords.\nThe values of these two keys must be byte strings.\n\nReturns:\ndict: The same message, but with the values of ``signature`` and ``certificate``\nsplit every 76 characters with a newline and a final newline at the end.", "source": "codesearchnet"} {"code": "def trainable_initial_state(batch_size, state_size, dtype, initializers=None, regularizers=None, name=None):\n flat_state_size = nest.flatten(state_size)\n if (not initializers):\n flat_initializer = tuple((tf.zeros_initializer() for _ in flat_state_size))\n else:\n nest.assert_same_structure(initializers, state_size)\n flat_initializer = nest.flatten(initializers)\n if (not all([callable(init) for init in flat_initializer])):\n raise ValueError('Not all the passed initializers are callable objects.')\n if (not regularizers):\n flat_regularizer = tuple(({} for _ in flat_state_size))\n else:\n nest.assert_same_structure(regularizers, state_size)\n flat_regularizer = nest.flatten(regularizers)\n if (not all([callable(regularizer) for regularizer in flat_regularizer])):\n raise ValueError('Not all the passed regularizers are callable objects.')\n name_prefix = (name or 'initial_state')\n try:\n name_suffixes = [state_size._fields[i] for i in xrange(len(flat_state_size))]\n except (AttributeError, IndexError):\n name_suffixes = range(len(flat_state_size))\n flat_initial_state = []\n for (name_suffix, size, init, regularizer) in zip(name_suffixes, flat_state_size, flat_initializer, flat_regularizer):\n shape_with_batch_dim = ([1] + tf.TensorShape(size).as_list())\n variable_name = '{}_{}'.format(name_prefix, name_suffix)\n initial_state_module = basic.TrainableVariable(shape_with_batch_dim, dtype=dtype, initializers={'w': init}, regularizers={'w': regularizer}, name=variable_name)\n initial_state_variable = initial_state_module()\n tiled_name = 'state_{}_tiled'.format(name_suffix)\n initial_state_variable_dims = initial_state_variable.get_shape().ndims\n tile_dims = ([batch_size] + ([1] * (initial_state_variable_dims - 1)))\n flat_initial_state.append(tf.tile(initial_state_variable, tile_dims, name=tiled_name))\n return nest.pack_sequence_as(structure=state_size, flat_sequence=flat_initial_state)", "docstring": "Creates an initial state consisting of trainable variables.\n\nThe trainable variables are created with the same shapes as the elements of\n`state_size` and are tiled to produce an initial state.\n\nArgs:\nbatch_size: An int, or scalar 
int32 Tensor representing the batch size.\nstate_size: A `TensorShape` or nested tuple of `TensorShape`s to use for the\nshape of the trainable variables.\ndtype: The data type used to create the variables and thus initial state.\ninitializers: An optional container of the same structure as `state_size`\ncontaining initializers for the variables.\nregularizers: An optional container of the same structure as `state_size`\ncontaining regularizers for the variables.\nname: optional string used to prefix the initial state variable names.\n\nReturns:\nA `Tensor` or nested tuple of `Tensor`s with the same size and structure\nas `state_size`, where each `Tensor` is a tiled trainable `Variable`.\n\nRaises:\nValueError: if the user passes initializers that are not functions.\nValueError: if the user passes regularizers that are not functions.", "source": "codesearchnet"} {"code": "def memoise(cls, func):\n \n\n @functools.wraps(func)\n def f(*a):\n\n for arg in a:\n if isinstance(arg, User):\n user = arg\n break\n else:\n raise ValueError(\"One position argument must be a User\")\n\n func_key = (func, tuple(a))\n cache = cls.get_cache(user)\n\n if func_key not in cache:\n cache[func_key] = func(*a)\n\n return cache[func_key]\n\n return f", "docstring": "Decorator that stores the result of the stored function in the\nuser's results cache until the batch completes. Keyword arguments are\nnot yet supported.\n\nArguments:\nfunc (callable(*a)): The function whose results we want\nto store. The positional arguments, ``a``, are used as cache\nkeys.\n\nReturns:\ncallable(*a): The memosing version of ``func``.", "source": "juraj-google-style"} {"code": "def indexed_slices_union_indices_and_values(x1, x2_indices, x2_values=None):\n dim_0 = x1.dense_shape[0]\n x1_indices_expanded = tf.expand_dims(x1.indices, axis=1)\n x2_indices_expanded = tf.expand_dims(x2_indices, axis=1)\n x1_indices_count = tf.shape(x1_indices_expanded)[0]\n x2_indices_count = tf.shape(x2_indices_expanded)[0]\n x1_indices_one_hot = tf.scatter_nd(x1_indices_expanded, ones_bool((x1_indices_count,)), (dim_0,))\n x2_indices_one_hot = tf.scatter_nd(x2_indices_expanded, ones_bool((x2_indices_count,)), (dim_0,))\n union_indices = tf.squeeze(tf.where(tf.math.logical_or(x1_indices_one_hot, x2_indices_one_hot)), axis=-1)\n union_indices_count = tf.shape(union_indices)[0]\n\n def values_for_union(indices_expanded, indices_count, values):\n indices_indices = tf.scatter_nd(indices_expanded, tf.range(1, indices_count + 1), (dim_0,))\n to_union_indices = tf.gather(indices_indices, union_indices)\n values_with_leading_zeros = tf.concat([tf.zeros_like(values[0:1]), values], axis=0)\n return tf.gather(values_with_leading_zeros, to_union_indices)\n x1_values_for_union_indices = tf.cond(tf.equal(x1_indices_count, union_indices_count), lambda: x1.values, lambda: values_for_union(x1_indices_expanded, x1_indices_count, x1.values))\n if x2_values is not None:\n x2_values_for_union_indices = tf.cond(tf.equal(x2_indices_count, union_indices_count), lambda: x2_values, lambda: values_for_union(x2_indices_expanded, x2_indices_count, x2_values))\n else:\n x2_values_for_union_indices = None\n return (union_indices, x1_values_for_union_indices, x2_values_for_union_indices)", "docstring": "Compute the indices for the union of two `tf.IndexedSlices` and modify\nthe values for these indices.\n\nArgs:\nx1: the first `tf.IndexedSlices`.\nx2_indices: the indices for the second `tf.IndexedSlices`.\nx2_value: (optional) the values for the second `tf.IndexedSlices`.\nReturns: A 
tuple containing:\n- the indices for the union\n- `x1` values for the union indices (some zeros were added)\n- `x2` values for the union indices (some zeros were added) or `None` if\n`x2_values` was `None`.", "source": "github-repos"} {"code": "def fulfill(self):\n (is_fulfilled, result) = self._check_fulfilled()\n if is_fulfilled:\n return result\n else:\n raise BrokenPromise(self)", "docstring": "Evaluate the promise and return the result.\n\nReturns:\nThe result of the `Promise` (second return value from the `check_func`)\n\nRaises:\nBrokenPromise: the `Promise` was not satisfied within the time or attempt limits.", "source": "codesearchnet"} {"code": "def connected_site_pairs( self ):\n \n site_connections = {}\n for initial_site in self.sites:\n if not initial_site.label in site_connections:\n site_connections[ initial_site.label ] = []\n for final_site in initial_site.p_neighbours:\n if final_site.label not in site_connections[ initial_site.label ]:\n site_connections[ initial_site.label ].append( final_site.label )\n return site_connections", "docstring": "Returns a dictionary of all connections between pair of sites (by site label).\ne.g. for a linear lattice A-B-C will return::\n\n{ 'A' : [ 'B' ], 'B' : [ 'A', 'C' ], 'C' : [ 'B' ] }\n\nArgs:\nNone\n\nReturns:\nsite_connections (Dict{Str List[Str]}): A dictionary of neighbouring site types in the lattice.", "source": "juraj-google-style"} {"code": "def create_media_service_rg(access_token, subscription_id, rgname, location, stoname, msname):\n \n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/resourceGroups/', rgname,\n '/providers/microsoft.media/mediaservices/', msname,\n '?api-version=', MEDIA_API])\n ms_body = {'name': msname}\n ms_body['location'] = location\n sub_id_str = '/subscriptions/' + subscription_id + '/resourceGroups/' + rgname + \\\n '/providers/Microsoft.Storage/storageAccounts/' + stoname\n storage_account = {'id': sub_id_str}\n storage_account['isPrimary'] = True\n properties = {'storageAccounts': [storage_account]}\n ms_body['properties'] = properties\n body = json.dumps(ms_body)\n return do_put(endpoint, body, access_token)", "docstring": "Create a media service in a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\nlocation (str): Azure data center location. E.g. westus.\nstoname (str): Azure storage account name.\nmsname (str): Media service name.\n\nReturns:\nHTTP response. 
JSON body.", "source": "juraj-google-style"} {"code": "def select_by_key(self, key):\n self._selected_key = None\n self._selected_item = None\n for item in self.children.values():\n item.attributes['selected'] = False\n if (key in self.children):\n self.children[key].attributes['selected'] = True\n self._selected_key = key\n self._selected_item = self.children[key]", "docstring": "Selects an item by its key.\n\nArgs:\nkey (str): The unique string identifier of the item that have to be selected.", "source": "codesearchnet"} {"code": "def build_kw_dict(kw_list):\n \n kw_dict = OrderedDict()\n sorted_list = sorted(\n kw_list,\n key=lambda x: x.get(\"zahlavi\").encode(\"utf-8\")\n )\n\n for keyword_data in sorted_list:\n if \"zahlavi\" not in keyword_data:\n continue\n\n zahlavi = keyword_data[\"zahlavi\"].encode(\"utf-8\")\n old_record = kw_dict.get(zahlavi)\n\n if not old_record:\n kw_dict[zahlavi] = keyword_data\n continue\n\n key = \"angl_ekvivalent\"\n if not old_record.get(key) and keyword_data.get(key):\n kw_dict[zahlavi] = keyword_data\n continue\n\n key = \"zdroj_angl_ekvivalentu\"\n if not old_record.get(key) and keyword_data.get(key):\n kw_dict[zahlavi] = keyword_data\n continue\n\n if len(str(keyword_data)) > len(str(old_record)):\n kw_dict[zahlavi] = keyword_data\n continue\n\n return kw_dict", "docstring": "Build keyword dictionary from raw keyword data. Ignore invalid or\ninvalidated records.\n\nArgs:\nkw_list (list): List of dicts from :func:`read_kw_file`.\n\nReturns:\nOrderedDict: dictionary with keyword data.", "source": "juraj-google-style"} {"code": "def insert_or_assign(self, keys, values, name=None):\n with ops.name_scope(name, '%s_lookup_table_insert' % self.name, [self.resource_handle, keys, values]):\n keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name='keys')\n values = ops.convert_to_tensor(values, dtype=self._value_dtype, name='values')\n with ops.colocate_with(self.resource_handle):\n op = gen_lookup_ops.lookup_table_insert_v2(self.resource_handle, keys, values)\n return op", "docstring": "Associates `keys` with `values`.\n\nArgs:\nkeys: Keys to insert. Can be a tensor of any shape. Must match the table's\nkey type.\nvalues: Values to be associated with keys. 
Must be a tensor of the same\nshape as `keys` and match the table's value type.\nname: A name for the operation (optional).\n\nReturns:\nThe created Operation.\n\nRaises:\nTypeError: when `keys` or `values` doesn't match the table data\ntypes.", "source": "github-repos"} {"code": "def FileEntryExistsByPathSpec(self, path_spec):\n \n location = getattr(path_spec, 'location', None)\n\n if location is None:\n return False\n\n is_device = False\n if platform.system() == 'Windows':\n \n \n try:\n is_device = pysmdev.check_device(location)\n except IOError as exception:\n \n \n \n\n \n exception_string = str(exception)\n if not isinstance(exception_string, py2to3.UNICODE_TYPE):\n exception_string = py2to3.UNICODE_TYPE(\n exception_string, errors='replace')\n\n if ' access denied ' in exception_string:\n is_device = True\n\n \n \n return is_device or os.path.exists(location) or os.path.islink(location)", "docstring": "Determines if a file entry for a path specification exists.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nbool: True if the file entry exists, false otherwise.", "source": "juraj-google-style"} {"code": "def get_per_replica_batch_size(self, global_batch_size):\n if global_batch_size % self._num_replicas_in_sync != 0:\n raise ValueError('The `global_batch_size` %r is not divisible by `num_replicas_in_sync` %r ' % (global_batch_size, self._num_replicas_in_sync))\n return global_batch_size", "docstring": "Returns the per-replica batch size.\n\nArgs:\nglobal_batch_size: the global batch size which should be divisible by\n`num_replicas_in_sync`.\n\nReturns:\nthe per-replica batch size.\n\nRaises:\nValueError: if `global_batch_size` not divisible by\n`num_replicas_in_sync`.", "source": "github-repos"} {"code": "def from_dict_(cls, conf_dict):\n \n return cls(**{name: Section(**opts)\n for name, opts in conf_dict.items()})", "docstring": "Use a dictionary to create a :class:`ConfigurationManager`.\n\nArgs:\nconf_dict (dict of dict of :class:`ConfOpt`): the first level of\nkeys should be the section names. The second level should be\nthe option names. 
The values are the options metadata.\n\nReturns:\n:class:`ConfigurationManager`: a configuration manager with the\nrequested sections and options.", "source": "juraj-google-style"} {"code": "def __init__(self, file_path):\n \n self._file_path = os.path.normpath(file_path) if file_path is not None else None\n self._is_header_loaded = False\n self._is_data_loaded = False\n self._is_ip = False \n\n \n self._data = []\n self._metadata = {}\n self._heating_dict = {}\n self._cooling_dict = {}\n self._extremes_dict = {}\n self._extreme_hot_weeks = {}\n self._extreme_cold_weeks = {}\n self._typical_weeks = {}\n self._monthly_ground_temps = {}\n self._is_leap_year = False\n self.daylight_savings_start = '0'\n self.daylight_savings_end = '0'\n self.comments_1 = ''\n self.comments_2 = ''\n\n self._num_of_fields = 35", "docstring": "Initialize an EPW object from a local .epw file.\n\nArgs:\nfile_path: Local file address to an .epw file.", "source": "juraj-google-style"} {"code": "def apply_fixup_array(bin_view, fx_offset, fx_count, entry_size):\n fx_array = bin_view[fx_offset:(fx_offset + (2 * fx_count))]\n fx_len = (fx_count - 1)\n sector_size = int((entry_size / fx_len))\n index = 1\n position = ((sector_size * index) - 2)\n while (position <= entry_size):\n if (bin_view[position:(position + 2)].tobytes() == fx_array[:2].tobytes()):\n bin_view[position:(position + 2)] = fx_array[(index * 2):((index * 2) + 2)]\n else:\n _MOD_LOGGER.error('Error applying the fixup array')\n raise FixUpError(f'Signature {fx_array[:2].tobytes()} does not match {bin_view[position:(position + 2)].tobytes()} at offset {position}.')\n index += 1\n position = ((sector_size * index) - 2)\n _MOD_LOGGER.info('Fix up array applied successfully.')", "docstring": "This function reads the fixup array and applies the correct values\nto the underlying binary stream. 
This function changes the bin_view\nin memory.\n\nArgs:\nbin_view (memoryview of bytearray) - The binary stream\nfx_offset (int) - Offset to the fixup array\nfx_count (int) - Number of elements in the fixup array\nentry_size (int) - Size of the MFT entry", "source": "codesearchnet"} {"code": "def get_player(first_name, last_name=None, season=constants.CURRENT_SEASON, only_current=0, just_id=True):\n if (last_name is None):\n name = first_name.lower()\n else:\n name = '{}, {}'.format(last_name, first_name).lower()\n pl = PlayerList(season=season, only_current=only_current).info()\n hdr = 'DISPLAY_LAST_COMMA_FIRST'\n if HAS_PANDAS:\n item = pl[(pl.DISPLAY_LAST_COMMA_FIRST.str.lower() == name)]\n else:\n item = next((plyr for plyr in pl if (str(plyr[hdr]).lower() == name)))\n if (len(item) == 0):\n raise PlayerNotFoundException\n elif just_id:\n return item['PERSON_ID']\n else:\n return item", "docstring": "Calls our PlayerList class to get a full list of players and then returns\njust an id if specified or the full row of player information\n\nArgs:\n:first_name: First name of the player\n:last_name: Last name of the player\n(this is None if the player only has first name [Nene])\n:only_current: Only wants the current list of players\n:just_id: Only wants the id of the player\n\nReturns:\nEither the ID or full row of information of the player inputted\n\nRaises:\n:PlayerNotFoundException::", "source": "codesearchnet"} {"code": "def retrieve_collected_errors():\n serialized_message_list = wrap_converter.wrapped_retrieve_collected_errors()\n return list(map(converter_error_data_pb2.ConverterErrorData.FromString, serialized_message_list))", "docstring": "Returns and clears the list of collected errors in ErrorCollector.\n\nThe RetrieveCollectedErrors function in C++ returns a list of serialized proto\nmessages. This function will convert them to ConverterErrorData instances.\n\nReturns:\nA list of ConverterErrorData.", "source": "github-repos"} {"code": "def stream(self, report, callback=None):\n if (self._push_channel is None):\n return\n self._push_channel.stream(report, callback=callback)", "docstring": "Stream a report asynchronously.\n\nIf no one is listening for the report, the report may be dropped,\notherwise it will be queued for sending\n\nArgs:\nreport (IOTileReport): The report that should be streamed\ncallback (callable): Optional callback to get notified when\nthis report is actually sent.", "source": "codesearchnet"} {"code": "def check_config(config):\n for (section, expected_section_keys) in SECTION_KEYS.items():\n section_content = config.get(section)\n if (not section_content):\n raise ConfigurationError('Config file badly formed! 
Section {} is missing.'.format(section))\n elif (not _section_is_healthy(section_content, expected_section_keys)):\n raise ConfigurationError('The {} section of the configuration file is badly formed!'.format(section))", "docstring": "Check that all sections of the config contain the keys that they should.\n\nArgs:\nconfig (defaultdict): A defaultdict.\nRaises:\nConfigurationError", "source": "codesearchnet"} {"code": "def sensor(self, name, config=None, inactive_sensor_expiration_time_seconds=sys.maxsize, parents=None):\n sensor = self.get_sensor(name)\n if sensor:\n return sensor\n with self._lock:\n sensor = self.get_sensor(name)\n if (not sensor):\n sensor = Sensor(self, name, parents, (config or self.config), inactive_sensor_expiration_time_seconds)\n self._sensors[name] = sensor\n if parents:\n for parent in parents:\n children = self._children_sensors.get(parent)\n if (not children):\n children = []\n self._children_sensors[parent] = children\n children.append(sensor)\n logger.debug('Added sensor with name %s', name)\n return sensor", "docstring": "Get or create a sensor with the given unique name and zero or\nmore parent sensors. All parent sensors will receive every value\nrecorded with this sensor.\n\nArguments:\nname (str): The name of the sensor\nconfig (MetricConfig, optional): A default configuration to use\nfor this sensor for metrics that don't have their own config\ninactive_sensor_expiration_time_seconds (int, optional):\nIf no value is recorded on the Sensor for this duration of\ntime, it is eligible for removal\nparents (list of Sensor): The parent sensors\n\nReturns:\nSensor: The sensor that is created", "source": "codesearchnet"} {"code": "def get_oauth_access_token(url, client_id, client_secret, token_type='jwt', grant_type='client_credentials', refresh_token=None):\n now = datetime.datetime.utcnow()\n data = {'grant_type': grant_type, 'client_id': client_id, 'client_secret': client_secret, 'token_type': token_type}\n if refresh_token:\n data['refresh_token'] = refresh_token\n else:\n assert (grant_type != 'refresh_token'), 'refresh_token parameter required'\n response = requests.post(url, data=data, headers={'User-Agent': USER_AGENT})\n data = response.json()\n try:\n access_token = data['access_token']\n expires_in = data['expires_in']\n except KeyError:\n raise requests.RequestException(response=response)\n expires_at = (now + datetime.timedelta(seconds=expires_in))\n return (access_token, expires_at)", "docstring": "Retrieves OAuth 2.0 access token using the given grant type.\n\nArgs:\nurl (str): Oauth2 access token endpoint\nclient_id (str): client ID\nclient_secret (str): client secret\nKwargs:\ntoken_type (str): Type of token to return. 
Options include bearer and jwt.\ngrant_type (str): One of 'client_credentials' or 'refresh_token'\nrefresh_token (str): The previous access token (for grant_type=refresh_token)\n\nReturns:\ntuple: Tuple containing access token string and expiration datetime.", "source": "codesearchnet"} {"code": "def from_string(string_data, file_format='xyz'):\n mols = pb.readstring(str(file_format), str(string_data))\n return BabelMolAdaptor(mols.OBMol)", "docstring": "Uses OpenBabel to read a molecule from a string in all supported\nformats.\n\nArgs:\nstring_data: String containing molecule data.\nfile_format: String specifying any OpenBabel supported formats.\n\nReturns:\nBabelMolAdaptor object", "source": "codesearchnet"} {"code": "def stat(self, entry_path, follow_symlinks=True):\n \n \n try:\n file_object = self.resolve(\n entry_path, follow_symlinks, allow_fd=True)\n self.raise_for_filepath_ending_with_separator(\n entry_path, file_object, follow_symlinks)\n\n return file_object.stat_result.copy()\n except IOError as io_error:\n winerror = (io_error.winerror if hasattr(io_error, 'winerror')\n else io_error.errno)\n self.raise_os_error(io_error.errno, entry_path, winerror=winerror)", "docstring": "Return the os.stat-like tuple for the FakeFile object of entry_path.\n\nArgs:\nentry_path: Path to filesystem object to retrieve.\nfollow_symlinks: If False and entry_path points to a symlink,\nthe link itself is inspected instead of the linked object.\n\nReturns:\nThe FakeStatResult object corresponding to entry_path.\n\nRaises:\nOSError: if the filesystem object doesn't exist.", "source": "juraj-google-style"} {"code": "def read_stream(self, file: IO, data_stream: DataStream) -> Reply:\n \n\n yield from data_stream.read_file(file=file)\n\n reply = yield from self._control_stream.read_reply()\n\n self.raise_if_not_match(\n 'End stream',\n ReplyCodes.closing_data_connection,\n reply\n )\n\n data_stream.close()\n\n return reply", "docstring": "Read from the data stream.\n\nArgs:\nfile: A destination file object or a stream writer.\ndata_stream: The stream of which to read from.\n\nCoroutine.\n\nReturns:\nReply: The final reply.", "source": "juraj-google-style"} {"code": "def scatter_max(self, sparse_delta, use_locking=False, name=None):\n if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. 
Received arg: {sparse_delta}')\n return self._lazy_read(gen_resource_variable_ops.resource_scatter_max(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))", "docstring": "Updates this variable with the max of `tf.IndexedSlices` and itself.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to use as an argument of max with this\nvariable.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nThe updated variable.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"} {"code": "def send_message(self, message):\n try:\n if (_message_test_port is not None):\n _message_test_port.sent.append(message)\n (yield message.send(self))\n except (WebSocketClosedError, StreamClosedError):\n log.warning('Failed sending message as connection was closed')\n raise gen.Return(None)", "docstring": "Send a Bokeh Server protocol message to the connected client.\n\nArgs:\nmessage (Message) : a message to send", "source": "codesearchnet"} {"code": "def get_own_new(self, node: cfg.CFGNode, value: cfg.Binding) -> tuple[cfg.CFGNode, cfg.Variable | None]:\n node, new = self.ctx.attribute_handler.get_attribute(node, value.data, '__new__')\n if new is None:\n return (node, None)\n if len(new.bindings) == 1:\n f = new.bindings[0].data\n if isinstance(f, _abstract.AMBIGUOUS_OR_EMPTY) or self.ctx.convert.object_type.is_object_new(f):\n return (node, None)\n return (node, new)", "docstring": "Get this value's __new__ method, if it isn't object.__new__.\n\nArgs:\nnode: The current node.\nvalue: A cfg.Binding containing this value.\n\nReturns:\nA tuple of (1) a node and (2) either a cfg.Variable of the special\n__new__ method, or None.", "source": "github-repos"} {"code": "def __normalized_name(self, message_type):\n \n \n \n name = message_type.definition_name()\n\n split_name = re.split(r'[^0-9a-zA-Z]', name)\n normalized = ''.join(\n part[0].upper() + part[1:] for part in split_name if part)\n\n previous = self.__normalized_names.get(normalized)\n if previous:\n if previous != name:\n raise KeyError('Both %s and %s normalize to the same schema name: %s' %\n (name, previous, normalized))\n else:\n self.__normalized_names[normalized] = name\n\n return normalized", "docstring": "Normalized schema name.\n\nGenerate a normalized schema name, taking the class name and stripping out\neverything but alphanumerics, and camel casing the remaining words.\nA normalized schema name is a name that matches [a-zA-Z][a-zA-Z0-9]*\n\nArgs:\nmessage_type: protorpc.message.Message class being parsed.\n\nReturns:\nA string, the normalized schema name.\n\nRaises:\nKeyError: A collision was found between normalized names.", "source": "juraj-google-style"} {"code": "def ContainsAddressStr(self, address):\n for (key, contract) in self._contracts.items():\n if (contract.Address == address):\n return True\n return False", "docstring": "Determine if the wallet contains the address.\n\nArgs:\naddress (str): a string representing the public key.\n\nReturns:\nbool: True, if the address is present in the wallet. 
False otherwise.", "source": "codesearchnet"} {"code": "def bbox_scaling(bboxes, scale, clip_shape=None):\n if (float(scale) == 1.0):\n scaled_bboxes = bboxes.copy()\n else:\n w = ((bboxes[(..., 2)] - bboxes[(..., 0)]) + 1)\n h = ((bboxes[(..., 3)] - bboxes[(..., 1)]) + 1)\n dw = ((w * (scale - 1)) * 0.5)\n dh = ((h * (scale - 1)) * 0.5)\n scaled_bboxes = (bboxes + np.stack(((- dw), (- dh), dw, dh), axis=(- 1)))\n if (clip_shape is not None):\n return bbox_clip(scaled_bboxes, clip_shape)\n else:\n return scaled_bboxes", "docstring": "Scaling bboxes w.r.t the box center.\n\nArgs:\nbboxes (ndarray): Shape(..., 4).\nscale (float): Scaling factor.\nclip_shape (tuple, optional): If specified, bboxes that exceed the\nboundary will be clipped according to the given shape (h, w).\n\nReturns:\nndarray: Scaled bboxes.", "source": "codesearchnet"} {"code": "def add_object(self, object_path, weights):\n if not isinstance(weights, dict):\n raise ValueError(f\"Argument `weights` should be a dict where keys are weight names (usually '0', '1', etc.) and values are NumPy arrays. Received: type(weights)={type(weights)}\")\n if '/' in object_path:\n elements = object_path.split('/')\n partial_path = '/'.join(elements[:-1])\n weights_dict = self.weights_dict\n for e in elements[:-1]:\n if e not in weights_dict:\n raise ValueError(f\"Path '{partial_path}' not found in model.\")\n weights_dict = weights_dict[e]\n weights_dict[elements[-1]] = weights\n else:\n self.weights_dict[object_path] = weights", "docstring": "Add a new object to the file (e.g. a layer).\n\nArgs:\nobject_path: String, full path of the\nobject to add (e.g. `\"layers/dense_2\"`).\nweights: Dict mapping weight names to weight\nvalues (arrays),\ne.g. `{\"0\": kernel_value, \"1\": bias_value}`.", "source": "github-repos"} {"code": "def _get_authorization_headers(self) -> dict:\n auth = base64.encodestring(((self.client_id + ':') + self.client_secret).encode('latin-1')).decode('latin-1')\n auth = auth.replace('\\n', '').replace(' ', '')\n auth = 'Basic {}'.format(auth)\n headers = {'Authorization': auth}\n return headers", "docstring": "Constructs and returns the Authorization header for the client app.\n\nArgs:\nNone\n\nReturns:\nheader dict for communicating with the authorization endpoints", "source": "codesearchnet"} {"code": "class TFTopKLogitsWarper(TFLogitsWarper):\n\n def __init__(self, top_k: int, filter_value: float=-float('Inf'), min_tokens_to_keep: int=1):\n if not isinstance(top_k, int) or top_k <= 0:\n raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}')\n self.top_k = max(top_k, min_tokens_to_keep)\n self.filter_value = filter_value\n\n def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:\n top_k = min(self.top_k, scores.shape[-1])\n indices_to_remove = scores < tf.math.top_k(scores, k=top_k)[0][..., -1:]\n next_scores = tf.where(indices_to_remove, self.filter_value, scores)\n return next_scores", "docstring": "[`TFLogitsWarper`] that performs top-k, i.e. 
restricting to the k highest probability elements.\n\nArgs:\ntop_k (`int`):\nThe number of highest probability vocabulary tokens to keep for top-k-filtering.\nfilter_value (`float`, *optional*, defaults to -inf):\nAll filtered values will be set to this float value.\nmin_tokens_to_keep (`int`, *optional*, defaults to 1):\nMinimum number of tokens that cannot be filtered.", "source": "github-repos"} {"code": "def add(self, op1, op2, operator_name, hints=None):\n updated_hints = _infer_hints_allowing_override(op1, op2, hints)\n if operator_name is None:\n operator_name = 'Add/' + op1.name + '__' + op2.name + '/'\n scope_name = self.name\n if scope_name.startswith('_'):\n scope_name = scope_name[1:]\n with ops.name_scope(scope_name):\n return self._add(op1, op2, operator_name, updated_hints)", "docstring": "Return new `LinearOperator` acting like `op1 + op2`.\n\nArgs:\nop1: `LinearOperator`\nop2: `LinearOperator`, with `shape` and `dtype` such that adding to\n`op1` is allowed.\noperator_name: `String` name to give to returned `LinearOperator`\nhints: `_Hints` object. Returned `LinearOperator` will be created with\nthese hints.\n\nReturns:\n`LinearOperator`", "source": "github-repos"} {"code": "def tokenize(self, text):\n text = _replace_html_entities(text)\n if self.strip_handles:\n text = remove_handles(text)\n if self.reduce_len:\n text = reduce_lengthening(text)\n safe_text = HANG_RE.sub('\\\\1\\\\1\\\\1', text)\n words = WORD_RE.findall(safe_text)\n if not self.preserve_case:\n words = [x if EMOTICON_RE.search(x) else x.lower() for x in words]\n return words", "docstring": "Args:\ntext: str\n\nReturns: list(str) A tokenized list of strings; concatenating this list returns the original string if\n`preserve_case=False`", "source": "github-repos"} {"code": "def __add__(self, other: Self | PartProcessor) -> _ChainProcessor:\n if isinstance(other, PartProcessor):\n return _ChainProcessor([self.call, other.to_processor().call])\n elif isinstance(other, _ChainProcessor):\n return _ChainProcessor([self.call] + other._processor_list)\n else:\n return _ChainProcessor([self.call, other.call])", "docstring": "Adds `other` to this processor: self + other.\n\nArgs:\nother: a processor to add to `self`.\n\nReturns:\nThe chain of this process with `other`.", "source": "github-repos"} {"code": "def from_text_vision_configs(cls, text_config: GroupViTTextConfig, vision_config: GroupViTVisionConfig, **kwargs):\n return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`GroupViTConfig`] (or a derived class) from groupvit text model configuration and groupvit\nvision model configuration.\n\nReturns:\n[`GroupViTConfig`]: An instance of a configuration object", "source": "github-repos"} {"code": "def approve(self, access_level=gitlab.DEVELOPER_ACCESS, **kwargs):\n path = ('%s/%s/approve' % (self.manager.path, self.id))\n data = {'access_level': access_level}\n server_data = self.manager.gitlab.http_put(path, post_data=data, **kwargs)\n self._update_attrs(server_data)", "docstring": "Approve an access request.\n\nArgs:\naccess_level (int): The access level for the user\n**kwargs: Extra options to send to the server (e.g. 
sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabUpdateError: If the server fails to perform the request", "source": "codesearchnet"} {"code": "def _SigSegvHandler(self, signal_number, stack_frame):\n self._OnCriticalError()\n if (self._original_sigsegv_handler is not None):\n signal.signal(signal.SIGSEGV, self._original_sigsegv_handler)\n os.kill(self._pid, signal.SIGSEGV)", "docstring": "Signal handler for the SIGSEGV signal.\n\nArgs:\nsignal_number (int): numeric representation of the signal.\nstack_frame (frame): current stack frame or None.", "source": "codesearchnet"} {"code": "def dequantize_higgs(model, current_key_name=None):\n with torch.no_grad():\n for name, module in model.named_children():\n if current_key_name is None:\n current_key_name = []\n current_key_name.append(name)\n if isinstance(module, HiggsLinear):\n in_features = module.in_features\n out_features = module.out_features\n model._modules[name] = torch.nn.Linear(in_features, out_features, bias=module.bias is not None, device=module.scales.device, dtype=module.scales.dtype)\n model._modules[name].weight.data = module(torch.eye(in_features, device=module.scales.device, dtype=module.scales.dtype)).T.contiguous()\n if len(list(module.children())) > 0:\n _ = dequantize_higgs(module, current_key_name=current_key_name)\n current_key_name.pop(-1)\n return model", "docstring": "Dequantizes the HiggsLinear layers in the given model by replacing them with standard torch.nn.Linear layers.\nArgs:\nmodel (torch.nn.Module): The model containing HiggsLinear layers to be dequantized.\ncurrent_key_name (list, optional): A list to keep track of the current module names during recursion. Defaults to None.\nReturns:\ntorch.nn.Module: The model with HiggsLinear layers replaced by torch.nn.Linear layers.", "source": "github-repos"} {"code": "def md5sum( string ):\n \n h = hashlib.new( 'md5' )\n h.update( string.encode( 'utf-8' ) )\n return h.hexdigest()", "docstring": "Generate the md5 checksum for a string\n\nArgs:\nstring (Str): The string to be checksummed.\n\nReturns:\n(Str): The hex checksum.", "source": "juraj-google-style"} {"code": "def verify_sc_url(url: str) -> bool:\n parsed = urlsplit(url)\n scheme: str = parsed.scheme\n netloc: str = parsed.netloc\n path: str = parsed.path\n try:\n port = parsed.port\n except ValueError:\n port = None\n result = ((scheme.lower() == 'https') and (netloc.lower().split(':')[0] == 's3.amazonaws.com') and path.startswith('/echo.api/') and ((port == 443) or (port is None)))\n return result", "docstring": "Verify signature certificate URL against Amazon Alexa requirements.\n\nEach call of Agent passes incoming utterances batch through skills filter,\nagent skills, skills processor. 
Batch of dialog IDs can be provided, in\nother case utterances indexes in incoming batch are used as dialog IDs.\n\nArgs:\nurl: Signature certificate URL from SignatureCertChainUrl HTTP header.\n\nReturns:\nresult: True if verification was successful, False if not.", "source": "codesearchnet"} {"code": "def __init__(self, pub_key=None):\n \n if pub_key is not None and type(pub_key) is not EllipticCurve.ECPoint:\n raise Exception(\"Pubkey must be ECPoint Instance\")\n\n self.PublicKey = pub_key", "docstring": "Create an instance.\n\nArgs:\npub_key (EllipticCurve.ECPoint):\n\nRaises:\nException: if `pub_key` is not a valid ECPoint.", "source": "juraj-google-style"} {"code": "def experimental_set_strategy(strategy):\n old_scope = ops.get_default_graph()._global_distribute_strategy_scope\n if old_scope is not None:\n old_scope.__exit__(None, None, None)\n ops.get_default_graph()._global_distribute_strategy_scope = None\n if has_strategy():\n raise RuntimeError('Must not be called inside a `tf.distribute.Strategy` scope.')\n if strategy is not None:\n new_scope = strategy.scope()\n new_scope.__enter__()\n ops.get_default_graph()._global_distribute_strategy_scope = new_scope", "docstring": "Set a `tf.distribute.Strategy` as current without `with strategy.scope()`.\n\n```\ntf.distribute.experimental_set_strategy(strategy1)\nf()\ntf.distribute.experimental_set_strategy(strategy2)\ng()\ntf.distribute.experimental_set_strategy(None)\nh()\n```\n\nis equivalent to:\n\n```\nwith strategy1.scope():\nf()\nwith strategy2.scope():\ng()\nh()\n```\n\nIn general, you should use the `with strategy.scope():` API, but this\nalternative may be convenient in notebooks where you would have to put\neach cell in a `with strategy.scope():` block.\n\nNote: This should only be called outside of any TensorFlow scope to\navoid improper nesting.\n\nArgs:\nstrategy: A `tf.distribute.Strategy` object or None.\n\nRaises:\nRuntimeError: If called inside a `with strategy.scope():`.", "source": "github-repos"} {"code": "def as_dataframe(self, pattern='*', max_rows=None):\n \n data = []\n for i, metric in enumerate(self.list(pattern)):\n if max_rows is not None and i >= max_rows:\n break\n labels = ', '. join([l.key for l in metric.labels])\n data.append([\n metric.type, metric.display_name, metric.metric_kind,\n metric.value_type, metric.unit, labels])\n\n return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS)", "docstring": "Creates a pandas dataframe from the descriptors that match the filters.\n\nArgs:\npattern: An optional pattern to further filter the descriptors. This can\ninclude Unix shell-style wildcards. E.g. ``\"compute*\"``,\n``\"*/cpu/load_??m\"``.\nmax_rows: The maximum number of descriptors to return. 
If None, return\nall.\n\nReturns:\nA pandas dataframe containing matching metric descriptors.", "source": "juraj-google-style"} {"code": "def bulk_restore(self, filename_tensor, saveables, preferred_shard, restore_sequentially):\n del restore_sequentially\n all_tensors = []\n for saveable in saveables:\n if saveable.device:\n device = saveable_object_util.set_cpu0(saveable.device)\n else:\n device = None\n with ops.device(device):\n all_tensors.extend(self.restore_op(filename_tensor, saveable, preferred_shard))\n return all_tensors", "docstring": "Restore all tensors contained in saveables.\n\nBy default, this issues separate calls to `restore_op` for each saveable.\nSubclasses may override to load multiple saveables in a single call.\n\nArgs:\nfilename_tensor: String Tensor.\nsaveables: List of BaseSaverBuilder.SaveableObject objects.\npreferred_shard: Int. Shard to open first when loading a sharded file.\nrestore_sequentially: Unused. Bool. If true, each restore is sequential.\n\nReturns:\nA list of Tensors resulting from reading 'saveable' from\n'filename'.", "source": "github-repos"} {"code": "def _remove_tree(self, tree, parent=None):\n \n \n for sub_tree in tree.sub_trees:\n self._remove_tree(sub_tree, parent=tree)\n\n \n for index in tree.indexes:\n if not getattr(tree, index):\n continue\n\n self._remove_from(\n getattr(self, index + \"_db\"),\n getattr(tree, index),\n tree,\n )\n\n if parent:\n self._remove_from(self.parent_db, tree.path, parent)\n\n self.zeo.pack()", "docstring": "Really remove the tree identified by `tree` instance from all indexes\nfrom database.\n\nArgs:\ntree (obj): :class:`.Tree` instance.\nparent (obj, default None): Reference to parent.", "source": "juraj-google-style"} {"code": "def extract_random_video_patch(videos, num_frames=(- 1)):\n if (num_frames == (- 1)):\n return videos\n (batch_size, num_total_frames, h, w, c) = common_layers.shape_list(videos)\n if (num_total_frames < num_frames):\n raise ValueError(('Expected num_frames <= %d, got %d' % (num_total_frames, num_frames)))\n frame_start = tf.random_uniform(shape=(batch_size,), minval=0, maxval=((num_total_frames - num_frames) + 1), dtype=tf.int32)\n range_inds = tf.expand_dims(tf.range(num_frames), axis=0)\n frame_inds = (range_inds + tf.expand_dims(frame_start, axis=1))\n frame_inds = tf.reshape(frame_inds, [(- 1)])\n batch_inds = tf.expand_dims(tf.range(batch_size), axis=1)\n batch_inds = tf.tile(batch_inds, [1, num_frames])\n batch_inds = tf.reshape(batch_inds, [(- 1)])\n gather_inds = tf.stack((batch_inds, frame_inds), axis=1)\n video_patches = tf.gather_nd(videos, gather_inds)\n return tf.reshape(video_patches, (batch_size, num_frames, h, w, c))", "docstring": "For every video, extract a random consecutive patch of num_frames.\n\nArgs:\nvideos: 5-D Tensor, (NTHWC)\nnum_frames: Integer, if -1 then the entire video is returned.\nReturns:\nvideo_patch: 5-D Tensor, (NTHWC) with T = num_frames.\nRaises:\nValueError: If num_frames is greater than the number of total frames in\nthe video.", "source": "codesearchnet"} {"code": "def _expression_to_sql(expression, node, context):\n \n _expression_transformers = {\n expressions.LocalField: _transform_local_field_to_expression,\n expressions.Variable: _transform_variable_to_expression,\n expressions.Literal: _transform_literal_to_expression,\n expressions.BinaryComposition: _transform_binary_composition_to_expression,\n }\n expression_type = type(expression)\n if expression_type not in _expression_transformers:\n raise NotImplementedError(\n 
u'Unsupported compiler expression \"{}\" of type \"{}\" cannot be converted to SQL '\n u'expression.'.format(expression, type(expression)))\n return _expression_transformers[expression_type](expression, node, context)", "docstring": "Recursively transform a Filter block predicate to its SQLAlchemy expression representation.\n\nArgs:\nexpression: expression, the compiler expression to transform.\nnode: SqlNode, the SqlNode the expression applies to.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nExpression, SQLAlchemy Expression equivalent to the passed compiler expression.", "source": "juraj-google-style"} {"code": "def lookup_prefix(self, prefix, n):\n commands = [cmd for cmd in self._commands if cmd.startswith(prefix)]\n return commands[-n:]", "docstring": "Look up the n most recent commands that starts with prefix.\n\nArgs:\nprefix: The prefix to lookup.\nn: Number of most recent commands to look up.\n\nReturns:\nA list of n most recent commands that have the specified prefix, or all\navailable most recent commands that have the prefix, if n exceeds the\nnumber of history commands with the prefix.", "source": "github-repos"} {"code": "def ping(self, endpoint=''):\n \n r = requests.get(self.url() + \"/\" + endpoint)\n return r.status_code", "docstring": "Ping the server to make sure that you can access the base URL.\n\nArguments:\nNone\nReturns:\n`boolean` Successful access of server (or status code)", "source": "juraj-google-style"} {"code": "def Gamma(cls,\n shape: 'TensorFluent',\n scale: 'TensorFluent',\n batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:\n \n if shape.scope != scale.scope:\n raise ValueError('Gamma distribution: parameters must have same scope!')\n concentration = shape.tensor\n rate = 1 / scale.tensor\n dist = tf.distributions.Gamma(concentration, rate)\n batch = shape.batch or scale.batch\n if not batch and batch_size is not None:\n t = dist.sample(batch_size)\n batch = True\n else:\n t = dist.sample()\n scope = shape.scope.as_list()\n return (dist, TensorFluent(t, scope, batch=batch))", "docstring": "Returns a TensorFluent for the Gamma sampling op with given shape and scale parameters.\n\nArgs:\nshape: The shape parameter of the Gamma distribution.\nscale: The scale parameter of the Gamma distribution.\nbatch_size: The size of the batch (optional).\n\nReturns:\nThe Gamma distribution and a TensorFluent sample drawn from the distribution.\n\nRaises:\nValueError: If parameters do not have the same scope.", "source": "juraj-google-style"} {"code": "def _fluent_params(self, fluents, ordering) -> FluentParamsList:\n variables = []\n for fluent_id in ordering:\n fluent = fluents[fluent_id]\n param_types = fluent.param_types\n objects = ()\n names = []\n if (param_types is None):\n names = [fluent.name]\n else:\n objects = tuple((self.object_table[ptype]['objects'] for ptype in param_types))\n for values in itertools.product(*objects):\n values = ','.join(values)\n var_name = '{}({})'.format(fluent.name, values)\n names.append(var_name)\n variables.append((fluent_id, names))\n return tuple(variables)", "docstring": "Returns the instantiated `fluents` for the given `ordering`.\n\nFor each fluent in `fluents`, it instantiates each parameter\ntype w.r.t. 
the contents of the object table.\n\nReturns:\nSequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name\nand a list of instantiated fluents represented as strings.", "source": "codesearchnet"} {"code": "def rm(path):\n \n if path and os.path.exists(path):\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)", "docstring": "Equivalent to rm -rf.\n\nMake sure ``path`` doesn't exist after this call. If it's a dir,\nshutil.rmtree(); if it's a file, os.remove(); if it doesn't exist,\nignore.\n\nArgs:\npath (str): the path to nuke.", "source": "juraj-google-style"} {"code": "def check_local_install(ctx, version, ext, server='local'):\n here = Path(ctx.releaser.here).resolve()\n dist_dir = (here / 'dist')\n all_files = list(dist_dir.glob('*.{}'.format(ext)))\n the_file = all_files[0]\n for f in all_files[1:]:\n if (f.stat().st_mtime > the_file.stat().st_mtime):\n the_file = f\n environment = 'env-{}-{}-{}'.format(version, ext, server)\n if (server == 'local'):\n pass\n else:\n print('** Uploading to server **')\n cmd = 'twine upload {}'.format(the_file)\n if (server != 'pypi'):\n cmd = (cmd + ' -r {}'.format(server))\n result = invoke.run(cmd, warn=True)\n if result.failed:\n print(textwrap.fill(\"[{}ERROR{}] Something broke trying to upload your package. This will be the case if you have already uploaded it before. To upload again, use a different version number (or a different build by including a '+' suffix to your version number).\".format(ERROR_COLOR, RESET_COLOR), width=(text.get_terminal_size().columns - 1), subsequent_indent=(' ' * 8)))\n if ((here / 'env') / environment).exists():\n shutil.rmtree((('env' + os.sep) + environment))\n invoke.run('python -m venv env{}{}'.format(os.sep, environment))\n other_dependencies(ctx, server, environment)\n if (server == 'local'):\n result = invoke.run('env{0}{1}{0}Scripts{0}pip{2} install {3} --no-cache'.format(os.sep, environment, '.exe', the_file), hide=True)\n else:\n result = invoke.run('env{0}{1}{0}Scripts{0}pip{2} install -i {3} {4}=={5} --no-cache'.format(os.sep, environment, '.exe', server_url(server), ctx.releaser.module_name, version), hide=True)\n if result.failed:\n print('[{}ERROR{}] Something broke trying to install your package.'.format(ERROR_COLOR, RESET_COLOR))\n print(result.stderr)\n sys.exit(1)\n print('** Test version of installed package **')\n result = invoke.run('env{0}{1}{0}Scripts{0}python{2} -c exec()'.format(os.sep, environment, '.exe', ctx.releaser.module_name.strip()))\n test_version = result.stdout.strip()\n if (Version(test_version) == version):\n results = '{}{} install {} works!{}'.format(GOOD_COLOR, server, ext, RESET_COLOR)\n else:\n results = '{}{} install {} broken{}'.format(ERROR_COLOR, server, ext, RESET_COLOR)\n print(results)\n return results", "docstring": "Upload and install works?\n\nUploads a distribution to PyPI, and then tests to see if I can download and\ninstall it.\n\nReturns:\nstr: string summazing operation", "source": "codesearchnet"} {"code": "def run(self, data, max_epochs=1):\n self.state = State(dataloader=data, epoch=0, max_epochs=max_epochs, metrics={})\n try:\n self._logger.info('Engine run starting with max_epochs={}.'.format(max_epochs))\n start_time = time.time()\n self._fire_event(Events.STARTED)\n while ((self.state.epoch < max_epochs) and (not self.should_terminate)):\n self.state.epoch += 1\n self._fire_event(Events.EPOCH_STARTED)\n (hours, mins, secs) = self._run_once_on_dataset()\n self._logger.info('Epoch[%s] Complete. 
Time taken: %02d:%02d:%02d', self.state.epoch, hours, mins, secs)\n if self.should_terminate:\n break\n self._fire_event(Events.EPOCH_COMPLETED)\n self._fire_event(Events.COMPLETED)\n time_taken = (time.time() - start_time)\n (hours, mins, secs) = _to_hours_mins_secs(time_taken)\n self._logger.info(('Engine run complete. Time taken %02d:%02d:%02d' % (hours, mins, secs)))\n except BaseException as e:\n self._logger.error('Engine run is terminating due to exception: %s.', str(e))\n self._handle_exception(e)\n return self.state", "docstring": "Runs the process_function over the passed data.\n\nArgs:\ndata (Iterable): Collection of batches allowing repeated iteration (e.g., list or `DataLoader`).\nmax_epochs (int, optional): max epochs to run for (default: 1).\n\nReturns:\nState: output state.", "source": "codesearchnet"} {"code": "def __init__(self, keras_model, trackable_obj=None):\n super(TFLiteKerasModelConverterV2, self).__init__()\n self._keras_model = keras_model\n self._trackable_obj = trackable_obj\n self.experimental_lower_to_saved_model = True", "docstring": "Constructor for TFLiteConverter.\n\nArgs:\nkeras_model: tf.Keras.Model.\ntrackable_obj: tf.AutoTrackable object associated with `funcs`. A\nreference to this object needs to be maintained so that Variables do not\nget garbage collected since functions have a weak reference to\nVariables. This is only required when the tf.AutoTrackable object is not\nmaintained by the user (e.g. `from_saved_model`).", "source": "github-repos"} {"code": "def git_branch_delete(branch_name):\n if (branch_name not in git.protected_branches()):\n log.info('Deleting branch <33>{}', branch_name)\n shell.run('git branch -d {}'.format(branch_name))", "docstring": "Delete the given branch.\n\nArgs:\nbranch_name (str):\nName of the branch to delete.", "source": "codesearchnet"} {"code": "def __add__(self, other: 'TensorFluent') -> 'TensorFluent':\n \n return self._binary_op(self, other, tf.add, tf.float32)", "docstring": "Returns a TensorFluent for the addition arithmetic operator.\n\nArgs:\nself: The first operand.\nother: The second operand.\n\nReturns:\nA TensorFluent wrapping the operator's output.", "source": "juraj-google-style"} {"code": "def _UpdateUsers(self, update_users):\n \n for user, ssh_keys in update_users.items():\n if not user or user in self.invalid_users:\n continue\n configured_keys = self.user_ssh_keys.get(user, [])\n if set(ssh_keys) != set(configured_keys):\n if not self.utils.UpdateUser(user, ssh_keys):\n self.invalid_users.add(user)\n else:\n self.user_ssh_keys[user] = ssh_keys[:]", "docstring": "Provision and update Linux user accounts based on account metadata.\n\nArgs:\nupdate_users: dict, authorized users mapped to their public SSH keys.", "source": "juraj-google-style"} {"code": "def is_on_curve(self, point):\n \n X, Y = point.X, point.Y\n return (\n pow(Y, 2, self.P) - pow(X, 3, self.P) - self.a * X - self.b\n ) % self.P == 0", "docstring": "Checks whether a point is on the curve.\n\nArgs:\npoint (AffinePoint): Point to be checked.\n\nReturns:\nbool: True if point is on the curve, False otherwise.", "source": "juraj-google-style"} {"code": "async def tag(self, name: str, repo: str, *, tag: str=None) -> bool:\n params = {'repo': repo}\n if tag:\n params['tag'] = tag\n (await self.docker._query('images/{name}/tag'.format(name=name), 'POST', params=params, headers={'content-type': 'application/json'}))\n return True", "docstring": "Tag the given image so that it becomes part of a repository.\n\nArgs:\nrepo: the repository to 
tag in\ntag: the name for the new tag", "source": "codesearchnet"} {"code": "def GetDefaultToken(token):\n \n if token is None:\n token = default_token\n\n if not isinstance(token, access_control.ACLToken):\n raise access_control.UnauthorizedAccess(\n \"Token is not properly specified. It should be an \"\n \"instance of grr.lib.access_control.ACLToken()\")\n\n return token", "docstring": "Returns the provided token or the default token.\n\nArgs:\ntoken: A token or None.\n\nRaises:\naccess_control.UnauthorizedAccess: no token was provided.", "source": "juraj-google-style"} {"code": "def is30(msg):\n \n\n if allzeros(msg):\n return False\n\n d = hex2bin(data(msg))\n\n if d[0:8] != '00110000':\n return False\n\n \n if d[28:30] == '11':\n return False\n\n \n if bin2int(d[15:22]) >= 48:\n return False\n\n return True", "docstring": "Check if a message is likely to be BDS code 2,0\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "juraj-google-style"} {"code": "def get_value_from_environment(\n section_name,\n key_name,\n envname_pad=ENVNAME_PAD,\n logger=logging.getLogger('ProsperCommon'),\n):\n \n var_name = '{pad}_{section}__{key}'.format(\n pad=envname_pad,\n section=section_name,\n key=key_name\n )\n\n logger.debug('var_name=%s', var_name)\n value = getenv(var_name)\n logger.debug('env value=%s', value)\n\n return value", "docstring": "check environment for key/value pair\n\nArgs:\nsection_name (str): section name\nkey_name (str): key to look up\nenvname_pad (str): namespace padding\nlogger (:obj:`logging.logger`): logging handle\n\nReturns:\nstr: value in environment", "source": "juraj-google-style"} {"code": "def filter_publication(publication, cache=_CACHE):\n if (cache is None):\n cache = load_cache()\n if (publication._get_hash() in cache):\n return None\n cache.update([publication._get_hash()])\n save_cache(cache)\n return publication", "docstring": "Deduplication function, which compares `publication` with samples stored in\n`cache`. If the match NOT is found, `publication` is returned, else None.\n\nArgs:\npublication (obj): :class:`.Publication` instance.\ncache (obj): Cache which is used for lookups.\n\nReturns:\nobj/None: Depends whether the object is found in cache or not.", "source": "codesearchnet"} {"code": "def get_forced_variation(self, experiment_key, user_id):\n \n\n if user_id not in self.forced_variation_map:\n self.logger.debug('User \"%s\" is not in the forced variation map.' % user_id)\n return None\n\n experiment = self.get_experiment_from_key(experiment_key)\n if not experiment:\n \n return None\n\n experiment_to_variation_map = self.forced_variation_map.get(user_id)\n\n if not experiment_to_variation_map:\n self.logger.debug('No experiment \"%s\" mapped to user \"%s\" in the forced variation map.' % (\n experiment_key,\n user_id\n ))\n return None\n\n variation_id = experiment_to_variation_map.get(experiment.id)\n if variation_id is None:\n self.logger.debug(\n 'No variation mapped to experiment \"%s\" in the forced variation map.' 
% experiment_key\n )\n return None\n\n variation = self.get_variation_from_id(experiment_key, variation_id)\n\n self.logger.debug('Variation \"%s\" is mapped to experiment \"%s\" and user \"%s\" in the forced variation map' % (\n variation.key,\n experiment_key,\n user_id\n ))\n return variation", "docstring": "Gets the forced variation key for the given user and experiment.\n\nArgs:\nexperiment_key: Key for experiment.\nuser_id: The user ID.\n\nReturns:\nThe variation which the given user and experiment should be forced into.", "source": "juraj-google-style"} {"code": "def _verify_required_claims_exist(jwt_claims):\n for claim_name in [u'aud', u'exp', u'iss', u'sub']:\n if (claim_name not in jwt_claims):\n raise suppliers.UnauthenticatedException((u'Missing \"%s\" claim' % claim_name))", "docstring": "Verifies that the required claims exist.\n\nArgs:\njwt_claims: the JWT claims to be verified.\n\nRaises:\nUnauthenticatedException: if some claim doesn't exist.", "source": "codesearchnet"} {"code": "def get(cls, option, default_value=None):\n \n config = cls.__get_instance()\n for name in option.split(':'):\n if not name:\n raise Exception('Incorrect value in path (maybe double `:` or empty path)')\n if name not in config:\n return default_value\n config = config[name]\n return deepcopy(config)", "docstring": "Return value of given option.\n\nIf option isn't found - return default_value (None by default).\n\nArgs:\n- option: string with path to option with `:` separator", "source": "juraj-google-style"} {"code": "def create_iam_resources(env='dev', app='', **_):\n \n session = boto3.session.Session(profile_name=env)\n client = session.client('iam')\n\n app_properties = get_properties(env='pipeline')\n\n generated = get_details(env=env, app=app)\n generated_iam = generated.iam()\n app_details = collections.namedtuple('AppDetails', generated_iam.keys())\n details = app_details(**generated_iam)\n\n LOG.debug('Application details: %s', details)\n\n deployment_type = app_properties['type']\n role_trust_template = get_template(\n 'infrastructure/iam/trust/{0}_role.json.j2'.format(deployment_type), formats=generated)\n\n resource_action(\n client,\n action='create_role',\n log_format='Created Role: %(RoleName)s',\n RoleName=details.role,\n AssumeRolePolicyDocument=role_trust_template)\n resource_action(\n client,\n action='create_instance_profile',\n log_format='Created Instance Profile: %(InstanceProfileName)s',\n InstanceProfileName=details.profile)\n attach_profile_to_role(client, role_name=details.role, profile_name=details.profile)\n\n iam_policy = construct_policy(app=app, group=details.group, env=env, pipeline_settings=app_properties)\n if iam_policy:\n resource_action(\n client,\n action='put_role_policy',\n log_format='Added IAM Policy: %(PolicyName)s',\n RoleName=details.role,\n PolicyName=details.policy,\n PolicyDocument=iam_policy)\n\n resource_action(client, action='create_user', log_format='Created User: %(UserName)s', UserName=details.user)\n resource_action(client, action='create_group', log_format='Created Group: %(GroupName)s', GroupName=details.group)\n resource_action(\n client,\n action='add_user_to_group',\n log_format='Added User to Group: %(UserName)s -> %(GroupName)s',\n GroupName=details.group,\n UserName=details.user)\n\n return True", "docstring": "Create the IAM Resources for the application.\n\nArgs:\nenv (str): Deployment environment/account, i.e. 
dev, stage, prod.\napp (str): Spinnaker Application name.\n\nReturns:\nTrue upon successful completion.", "source": "juraj-google-style"} {"code": "def paginate(db_query, items_per_page, offset=0, start_page=1):\n \n return Paginator(db_query, items_per_page, offset=offset, start_page=start_page)", "docstring": "Instantiates a Paginator instance for database queries.\n\nArgs:\ndb_query: The SQLAlchemy database query to paginate.\nitems_per_page: The desired number of items per page.\noffset: The number of items to skip when paginating.\nstart_page: The number of the first page when reporting on page numbers.", "source": "juraj-google-style"} {"code": "def __init__(self, conf):\n super(S3FilesSource, self).__init__(conf)\n self._SetDefaults(conf)\n self.s3_client = None", "docstring": "Initialise the S3FilesSource object.\n\nArgs:\nconf: A dictionary of key/value pairs.\n\nRaises:\nRuntimeError: object wasn't initialised with a dict", "source": "github-repos"} {"code": "def assert_keys_exist(self, caller, *keys):\n \n assert keys, (\"*keys parameter must be specified.\")\n for key in keys:\n self.assert_key_exists(key, caller)", "docstring": "Assert that context contains keys.\n\nArgs:\nkeys: validates that these keys exists in context\ncaller: string. calling function or module name - this used to\nconstruct error messages\n\nRaises:\nKeyNotInContextError: When key doesn't exist in context.", "source": "juraj-google-style"} {"code": "def message(self, tree, spins, subtheta, auxvars):\n energy_sources = set()\n for (v, children) in tree.items():\n aux = auxvars[v]\n assert all(((u in spins) for u in self._ancestors[v]))\n\n def energy_contributions():\n (yield subtheta.linear[v])\n for (u, bias) in subtheta.adj[v].items():\n if (u in spins):\n (yield SpinTimes(spins[u], bias))\n plus_energy = Plus(energy_contributions())\n minus_energy = SpinTimes((- 1), plus_energy)\n if children:\n spins[v] = 1\n plus_energy = Plus(plus_energy, self.message(children, spins, subtheta, auxvars))\n spins[v] = (- 1)\n minus_energy = Plus(minus_energy, self.message(children, spins, subtheta, auxvars))\n del spins[v]\n m = FreshSymbol(REAL)\n ancestor_aux = {(auxvars[u] if (spins[u] > 0) else Not(auxvars[u])) for u in self._ancestors[v]}\n plus_aux = And({aux}.union(ancestor_aux))\n minus_aux = And({Not(aux)}.union(ancestor_aux))\n self.assertions.update({LE(m, plus_energy), LE(m, minus_energy), Implies(plus_aux, GE(m, plus_energy)), Implies(minus_aux, GE(m, minus_energy))})\n energy_sources.add(m)\n return Plus(energy_sources)", "docstring": "Determine the energy of the elimination tree.\n\nArgs:\ntree (dict): The current elimination tree\nspins (dict): The current fixed spins\nsubtheta (dict): Theta with spins fixed.\nauxvars (dict): The auxiliary variables for the given spins.\n\nReturns:\nThe formula for the energy of the tree.", "source": "codesearchnet"} {"code": "def __init__(self, dhclient_script=None, dhcp_command=None, debug=False):\n \n self.dhclient_script = dhclient_script or '/sbin/google-dhclient-script'\n self.dhcp_command = dhcp_command\n facility = logging.handlers.SysLogHandler.LOG_DAEMON\n self.logger = logger.Logger(\n name='network-setup', debug=debug, facility=facility)\n self.distro_utils = distro_utils.Utils(debug=debug)", "docstring": "Constructor.\n\nArgs:\ndhclient_script: string, the path to a dhclient script used by dhclient.\ndhcp_command: string, a command to enable Ethernet interfaces.\ndebug: bool, True if debug output should write to the console.", "source": 
"juraj-google-style"} {"code": "def get_open_func(fn, return_fmt=False):\n bgzip = None\n with open(fn, 'rb') as i_file:\n bgzip = (i_file.read(3) == b'\\x1f\\x8b\\x08')\n if (bgzip and (not HAS_BIOPYTHON)):\n raise ValueError('needs BioPython to index a bgzip file')\n open_func = open\n if bgzip:\n open_func = BgzfReader\n try:\n with open_func(fn, 'r') as i_file:\n if bgzip:\n if (not i_file.seekable()):\n raise ValueError\n pass\n except ValueError:\n raise ValueError('{}: use bgzip for compression...'.format(fn))\n if return_fmt:\n return (bgzip, open_func)\n return open_func", "docstring": "Get the opening function.\n\nArgs:\nfn (str): the name of the file.\nreturn_fmt (bool): if the file format needs to be returned.\n\nReturns:\ntuple: either a tuple containing two elements: a boolean telling if the\nformat is bgzip, and the opening function.", "source": "codesearchnet"} {"code": "def make(world_name, gl_version=GL_VERSION.OPENGL4, window_res=None, cam_res=None, verbose=False):\n holodeck_worlds = _get_worlds_map()\n if (world_name not in holodeck_worlds):\n raise HolodeckException('Invalid World Name')\n param_dict = copy(holodeck_worlds[world_name])\n param_dict['start_world'] = True\n param_dict['uuid'] = str(uuid.uuid4())\n param_dict['gl_version'] = gl_version\n param_dict['verbose'] = verbose\n if (window_res is not None):\n param_dict['window_width'] = window_res[0]\n param_dict['window_height'] = window_res[1]\n if (cam_res is not None):\n param_dict['camera_width'] = cam_res[0]\n param_dict['camera_height'] = cam_res[1]\n return HolodeckEnvironment(**param_dict)", "docstring": "Creates a holodeck environment using the supplied world name.\n\nArgs:\nworld_name (str): The name of the world to load as an environment. Must match the name of a world in an\ninstalled package.\ngl_version (int, optional): The OpenGL version to use (Linux only). Defaults to GL_VERSION.OPENGL4.\nwindow_res ((int, int), optional): The resolution to load the game window at. Defaults to (512, 512).\ncam_res ((int, int), optional): The resolution to load the pixel camera sensors at. Defaults to (256, 256).\nverbose (bool): Whether to run in verbose mode. 
Defaults to False.\n\nReturns:\nHolodeckEnvironment: A holodeck environment instantiated with all the settings necessary for the specified\nworld, and other supplied arguments.", "source": "codesearchnet"} {"code": "def terminate(self, nowait=False):\n logger.debug('Acquiring lock for service termination')\n with self.lock:\n logger.debug('Terminating service')\n if (not self.listener):\n logger.warning('Service already stopped.')\n return\n self.listener.stop(nowait)\n try:\n if (not nowait):\n self._post_log_batch()\n except Exception:\n if self.error_handler:\n self.error_handler(sys.exc_info())\n else:\n raise\n finally:\n self.queue = None\n self.listener = None", "docstring": "Finalize and stop service\n\nArgs:\nnowait: set to True to terminate immediately and skip processing\nmessages still in the queue", "source": "codesearchnet"} {"code": "def safejoin(base, *elements):\n base = os.path.abspath(base)\n path = os.path.join(base, *elements)\n path = os.path.normpath(path)\n if (not path_is_inside(path, base)):\n raise ValueError('target path is outside of the base path')\n return path", "docstring": "Safely joins paths together.\n\nThe result will always be a subdirectory under `base`, otherwise ValueError\nis raised.\n\nArgs:\nbase (str): base path\nelements (list of strings): path elements to join to base\n\nReturns:\nelements joined to base", "source": "codesearchnet"} {"code": "def remove_perm(self, subj_str, perm_str):\n \n self._assert_valid_permission(perm_str)\n for perm_str in self._equal_or_higher_perm(perm_str):\n self._perm_dict.setdefault(perm_str, set()).discard(subj_str)", "docstring": "Remove permission from a subject.\n\nArgs:\nsubj_str : str\nSubject for which to remove permission(s)\n\nperm_str : str\nPermission to remove. Implicitly removes all higher permissions. E.g., ``write``\nwill also remove ``changePermission`` if previously granted.", "source": "juraj-google-style"} {"code": "def _compute_default_rope_parameters(config: Optional[PretrainedConfig]=None, device: Optional['torch.device']=None, seq_len: Optional[int]=None, **rope_kwargs) -> tuple['torch.Tensor', float]:\n if config is not None and len(rope_kwargs) > 0:\n raise ValueError(f'Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in `_compute_default_rope_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}')\n if len(rope_kwargs) > 0:\n base = rope_kwargs['base']\n dim = rope_kwargs['dim']\n elif config is not None:\n base = config.rope_theta\n partial_rotary_factor = config.partial_rotary_factor if hasattr(config, 'partial_rotary_factor') else 1.0\n head_dim = getattr(config, 'head_dim', None) or config.hidden_size \n dim = int(head_dim * partial_rotary_factor)\n attention_factor = 1.0\n inv_freq = 1.0 / base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)\n return (inv_freq, attention_factor)", "docstring": "Computes the inverse frequencies according to the original RoPE implementation\nArgs:\nconfig ([`~transformers.PretrainedConfig`]):\nThe model configuration.\ndevice (`torch.device`):\nThe device to use for initialization of the inverse frequencies.\nseq_len (`int`, *optional*):\nThe current sequence length. 
Unused for this type of RoPE.\nrope_kwargs (`Dict`, *optional*):\nBC compatibility with the previous RoPE class instantiation, will be removed in v4.45.\nReturns:\nTuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the\npost-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).", "source": "github-repos"} {"code": "def from_config(cls, config):\n return cls(**config)", "docstring": "Instantiates an initializer from a configuration dictionary.\n\nExample:\n\n```python\ninitializer = RandomUniform(-1, 1)\nconfig = initializer.get_config()\ninitializer = RandomUniform.from_config(config)\n```\n\nArgs:\nconfig: A Python dictionary. It will typically be the output of\n`get_config`.\n\nReturns:\nAn Initializer instance.", "source": "github-repos"} {"code": "def create_handler(Model, name=None, **kwds):\n \n async def action_handler(service, action_type, payload, props, notify=True, **kwds):\n \n if action_type == get_crud_action('create', name or Model):\n \n try:\n \n message_props = {}\n \n if 'correlation_id' in props:\n \n message_props['correlation_id'] = props['correlation_id']\n\n \n for requirement in Model.required_fields():\n\n \n field_name = requirement.name\n \n \n if not field_name in payload and field_name != 'id':\n \n raise ValueError(\n \"Required field not found in payload: %s\" %field_name\n )\n\n \n new_model = Model(**payload)\n\n \n new_model.save()\n\n \n if notify:\n \n await service.event_broker.send(\n payload=ModelSerializer().serialize(new_model),\n action_type=change_action_status(action_type, success_status()),\n **message_props\n )\n\n \n except Exception as err:\n \n if notify:\n \n await service.event_broker.send(\n payload=str(err),\n action_type=change_action_status(action_type, error_status()),\n **message_props\n )\n \n else:\n \n raise err\n\n\n\n \n return action_handler", "docstring": "This factory returns an action handler that creates a new instance of\nthe specified model when a create action is recieved, assuming the\naction follows nautilus convetions.\n\nArgs:\nModel (nautilus.BaseModel): The model to create when the action\nreceived.\n\nReturns:\nfunction(action_type, payload): The action handler for this model", "source": "juraj-google-style"} {"code": "def compute_sub_structure(self, sub_structure, tol=1e-3):\n \n total_energy_matrix = self.total_energy_matrix.copy()\n\n def find_match(site):\n for test_site in sub_structure:\n frac_diff = abs(np.array(site.frac_coords)\n - np.array(test_site.frac_coords)) % 1\n frac_diff = [abs(a) < tol or abs(a) > 1 - tol\n for a in frac_diff]\n if all(frac_diff):\n return test_site\n return None\n\n matches = []\n for i, site in enumerate(self._s):\n matching_site = find_match(site)\n if matching_site:\n new_charge = compute_average_oxidation_state(matching_site)\n old_charge = self._oxi_states[i]\n scaling_factor = new_charge / old_charge\n matches.append(matching_site)\n else:\n scaling_factor = 0\n total_energy_matrix[i, :] *= scaling_factor\n total_energy_matrix[:, i] *= scaling_factor\n\n if len(matches) != len(sub_structure):\n output = [\"Missing sites.\"]\n for site in sub_structure:\n if site not in matches:\n output.append(\"unmatched = {}\".format(site))\n raise ValueError(\"\\n\".join(output))\n\n return sum(sum(total_energy_matrix))", "docstring": "Gives total ewald energy for an sub structure in the same\nlattice. 
The sub_structure must be a subset of the original\nstructure, with possible different charges.\n\nArgs:\nsubstructure (Structure): Substructure to compute Ewald sum for.\ntol (float): Tolerance for site matching in fractional coordinates.\n\nReturns:\nEwald sum of substructure.", "source": "juraj-google-style"} {"code": "def get_backup(self, id_or_uri):\n uri = ((self.BACKUPS_PATH + '/') + extract_id_from_uri(id_or_uri))\n return self._client.get(id_or_uri=uri)", "docstring": "Get the details for the backup from an Artifact Bundle.\n\nArgs:\nid_or_uri: ID or URI of the Artifact Bundle.\n\nReturns:\nDict: Backup for an Artifacts Bundle.", "source": "codesearchnet"} {"code": "def _create_min_max_boundaries(\n max_length, min_boundary=_MIN_BOUNDARY, boundary_scale=_BOUNDARY_SCALE):\n \n \n \n bucket_boundaries = []\n x = min_boundary\n while x < max_length:\n bucket_boundaries.append(x)\n x = max(x + 1, int(x * boundary_scale))\n\n \n buckets_min = [0] + bucket_boundaries\n buckets_max = bucket_boundaries + [max_length + 1]\n return buckets_min, buckets_max", "docstring": "Create min and max boundary lists up to max_length.\n\nFor example, when max_length=24, min_boundary=4 and boundary_scale=2, the\nreturned values will be:\nbuckets_min = [0, 4, 8, 16, 24]\nbuckets_max = [4, 8, 16, 24, 25]\n\nArgs:\nmax_length: The maximum length of example in dataset.\nmin_boundary: Minimum length in boundary.\nboundary_scale: Amount to scale consecutive boundaries in the list.\n\nReturns:\nmin and max boundary lists", "source": "juraj-google-style"} {"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n known_args, pipeline_args = parse_known_args(argv)\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n pipeline = test_pipeline\n if not test_pipeline:\n pipeline = beam.Pipeline(options=pipeline_options)\n model_handler = HuggingFacePipelineModelHandler(task=PipelineTask.QuestionAnswering, model=known_args.model_name, load_model_args={'framework': 'pt', 'revision': known_args.revision})\n if not known_args.input:\n text = pipeline | 'CreateSentences' >> beam.Create(['What does Apache Beam do?;Apache Beam enables batch and streaming data processing.', 'What is the capital of France?;The capital of France is Paris .', 'Where was beam summit?;Apache Beam Summit 2023 was in NYC.'])\n else:\n text = pipeline | 'ReadSentences' >> beam.io.ReadFromText(known_args.input)\n processed_text = text | 'PreProcess' >> beam.ParDo(preprocess) | 'SquadExample' >> beam.ParDo(create_squad_example)\n output = processed_text | 'RunInference' >> RunInference(KeyedModelHandler(model_handler)) | 'ProcessOutput' >> beam.ParDo(PostProcessor())\n _ = output | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n result = pipeline.run()\n result.wait_until_finish()\n return result", "docstring": "Args:\nargv: Command line arguments defined for this example.\nsave_main_session: Used for internal testing.\ntest_pipeline: Used for internal testing.", "source": "github-repos"} {"code": "def cloud_predict(model_name, model_version, data):\n import google.datalab.ml as ml\n if isinstance(data, pd.DataFrame):\n string_buffer = io.StringIO()\n data.to_csv(string_buffer, header=None, index=False)\n input_data = string_buffer.getvalue().split('\\n')\n input_data = [line for line in input_data if line]\n else:\n input_data = data\n predictions = 
ml.ModelVersions(model_name).predict(model_version, input_data)\n df = pd.DataFrame(columns=sorted(predictions[0].keys()))\n for i in range(len(predictions)):\n for (k, v) in predictions[i].iteritems():\n df.loc[(i, k)] = v\n return df", "docstring": "Use Online prediction.\n\nRuns online prediction in the cloud and prints the results to the screen. For\nrunning prediction on a large dataset or saving the results, run\nlocal_batch_prediction or batch_prediction.\n\nArgs:\nmodel_name: deployed model name\nmodel_version: depoyed model version\ndata: List of csv strings or a Pandas DataFrame that match the model schema.\n\nBefore using this, the model must be created. This can be done by running\ntwo gcloud commands:\n1) gcloud beta ml models create NAME\n2) gcloud beta ml versions create VERSION --model NAME \\\n--origin gs://BUCKET/training_dir/model\nor these datalab commands:\n1) import google.datalab as datalab\nmodel = datalab.ml.ModelVersions(MODEL_NAME)\nmodel.deploy(version_name=VERSION,\npath='gs://BUCKET/training_dir/model')\nNote that the model must be on GCS.", "source": "codesearchnet"} {"code": "def __init__(self,\n skewness=None,\n tailweight=None,\n validate_args=False,\n name=\"SinhArcsinh\"):\n \n self._graph_parents = []\n self._name = name\n self._validate_args = validate_args\n with self._name_scope(\"init\"):\n tailweight = 1. if tailweight is None else tailweight\n skewness = 0. if skewness is None else skewness\n self._skewness = tf.convert_to_tensor(value=skewness, name=\"skewness\")\n self._tailweight = tf.convert_to_tensor(\n value=tailweight, name=\"tailweight\", dtype=self._skewness.dtype)\n dtype_util.assert_same_float_dtype([self._skewness, self._tailweight])\n if validate_args:\n self._tailweight = distribution_util.with_dependencies([\n assert_util.assert_positive(\n self._tailweight,\n message=\"Argument tailweight was not positive\")\n ], self._tailweight)\n super(SinhArcsinh, self).__init__(\n forward_min_event_ndims=0,\n validate_args=validate_args,\n name=name)", "docstring": "Instantiates the `SinhArcsinh` bijector.\n\nArgs:\nskewness: Skewness parameter. Float-type `Tensor`. Default is `0`\nof type `float32`.\ntailweight: Tailweight parameter. Positive `Tensor` of same `dtype` as\n`skewness` and broadcastable `shape`. 
Default is `1` of type `float32`.\nvalidate_args: Python `bool` indicating whether arguments should be\nchecked for correctness.\nname: Python `str` name given to ops managed by this object.", "source": "juraj-google-style"} {"code": "def upload_benchmark_run(self, dataset_name, table_name, run_id):\n \n expected_file = os.path.join(\n self._logging_dir, logger.BENCHMARK_RUN_LOG_FILE_NAME)\n with tf.gfile.GFile(expected_file) as f:\n benchmark_json = json.load(f)\n benchmark_json[\"model_id\"] = run_id\n table_ref = self._bq_client.dataset(dataset_name).table(table_name)\n errors = self._bq_client.insert_rows_json(table_ref, [benchmark_json])\n if errors:\n tf.logging.error(\n \"Failed to upload benchmark info to bigquery: {}\".format(errors))", "docstring": "Upload benchmark run information to Bigquery.\n\nArgs:\ndataset_name: string, the name of bigquery dataset where the data will be\nuploaded.\ntable_name: string, the name of bigquery table under the dataset where\nthe data will be uploaded.\nrun_id: string, a unique ID that will be attached to the data, usually\nthis is a UUID4 format.", "source": "juraj-google-style"} {"code": "def remove_device(self, device, id_override=None, type_override=None):\n object_id = (id_override or device.object_id())\n object_type = (type_override or device.object_type())\n url_string = '{}/{}s/{}'.format(self.BASE_URL, object_type, object_id)\n try:\n arequest = requests.delete(url_string, headers=API_HEADERS)\n if (arequest.status_code == 204):\n return True\n _LOGGER.error('Failed to remove device. Status code: %s', arequest.status_code)\n return False\n except requests.exceptions.RequestException:\n _LOGGER.error('Failed to remove device.')\n return False", "docstring": "Remove a device.\n\nArgs:\ndevice (WinkDevice): The device the change is being requested for.\nid_override (String, optional): A device ID used to override the\npassed in device's ID. Used to make changes on sub-devices.\ni.e. Outlet in a Powerstrip. 
The Parent device's ID.\ntype_override (String, optional): Used to override the device type\nwhen a device inherits from a device other than WinkDevice.\nReturns:\n(boolean): True if the device was removed.", "source": "codesearchnet"} {"code": "def _CheckGitkitError(self, raw_response):\n try:\n response = simplejson.loads(raw_response)\n if ('error' not in response):\n return response\n else:\n error = response['error']\n if ('code' in error):\n code = error['code']\n if str(code).startswith('4'):\n raise errors.GitkitClientError(error['message'])\n else:\n raise errors.GitkitServerError(error['message'])\n except simplejson.JSONDecodeError:\n pass\n raise errors.GitkitServerError('null error code from Gitkit server')", "docstring": "Raises error if API invocation failed.\n\nArgs:\nraw_response: string, the http response.\n\nRaises:\nGitkitClientError: if the error code is 4xx.\nGitkitServerError: if the response if malformed.\n\nReturns:\nSuccessful response as dict.", "source": "codesearchnet"} {"code": "def prepare_http_request(self, method_type, params, **kwargs):\n prepared_request = self.session.prepare_request(requests.Request(method=method_type, **params))\n return prepared_request", "docstring": "Prepares the HTTP REQUEST and returns it.\n\nArgs:\nmethod_type: The HTTP method type\nparams: Additional parameters for the HTTP request.\nkwargs: Any extra keyword arguements passed into a client method.\n\nreturns:\nprepared_request: An HTTP request object.", "source": "codesearchnet"} {"code": "def render_html_report(summary, report_template=None, report_dir=None):\n \n if not report_template:\n report_template = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n \"templates\",\n \"report_template.html\"\n )\n logger.log_debug(\"No html report template specified, use default.\")\n else:\n logger.log_info(\"render with html report template: {}\".format(report_template))\n\n logger.log_info(\"Start to render Html report ...\")\n\n report_dir = report_dir or os.path.join(os.getcwd(), \"reports\")\n if not os.path.isdir(report_dir):\n os.makedirs(report_dir)\n\n start_at_timestamp = int(summary[\"time\"][\"start_at\"])\n summary[\"time\"][\"start_datetime\"] = datetime.fromtimestamp(start_at_timestamp).strftime('%Y-%m-%d %H:%M:%S')\n\n report_path = os.path.join(report_dir, \"{}.html\".format(start_at_timestamp))\n\n with io.open(report_template, \"r\", encoding='utf-8') as fp_r:\n template_content = fp_r.read()\n with io.open(report_path, 'w', encoding='utf-8') as fp_w:\n rendered_content = Template(\n template_content,\n extensions=[\"jinja2.ext.loopcontrols\"]\n ).render(summary)\n fp_w.write(rendered_content)\n\n logger.log_info(\"Generated Html report: {}\".format(report_path))\n\n return report_path", "docstring": "render html report with specified report name and template\n\nArgs:\nreport_template (str): specify html report template path\nreport_dir (str): specify html report save directory", "source": "juraj-google-style"} {"code": "def tf_loss(self, states, internals, reward, update, reference=None):\n \n prediction = self.predict(states=states, internals=internals, update=update)\n return tf.nn.l2_loss(t=(prediction - reward))", "docstring": "Creates the TensorFlow operations for calculating the L2 loss between predicted\nstate values and actual rewards.\n\nArgs:\nstates: Dict of state tensors.\ninternals: List of prior internal state tensors.\nreward: Reward tensor.\nupdate: Boolean tensor indicating whether this call happens during an update.\nreference: Optional 
reference tensor(s), in case of a comparative loss.\n\nReturns:\nLoss tensor", "source": "juraj-google-style"} {"code": "def take_quality_screenshot(self, screenshot_name):\n \n self.info_log(\"Taking a quality screenshot...\")\n\n if self.test_instance._runner_dir:\n _screenshot_name = '%s.png' % string_to_filename(screenshot_name)\n relative_path = os.path.join(\n self.test_instance._quality_screenshot_relative_dir,\n _screenshot_name\n )\n\n full_path = os.path.join(\n self.test_instance._quality_screenshot_dir,\n _screenshot_name\n )\n self._driver.save_screenshot(\n full_path\n )\n\n with DbSessionContext(BROME_CONFIG['database']['mongo_database_name']) as session: \n capabilities = {\n 'browserName': self.capabilities['browserName'],\n 'platform': self.capabilities['platform'],\n 'version': self.capabilities['version']\n }\n quality_screenshot = Testqualityscreenshot()\n quality_screenshot.timestamp = utcnow()\n quality_screenshot.browser_capabilities = capabilities\n quality_screenshot.browser_id = self.get_id()\n quality_screenshot.file_path = relative_path\n \n quality_screenshot.location = 'local_file_system'\n quality_screenshot.root_path = self.test_instance._runner.root_test_result_dir \n quality_screenshot.extra_data = {}\n quality_screenshot.title = screenshot_name\n quality_screenshot.test_instance_id = self.test_instance._test_instance_id \n quality_screenshot.test_batch_id = self.test_instance._test_batch_id \n\n session.save(quality_screenshot, safe=True)\n\n self.debug_log(\"Quality screenshot taken (%s)\" % full_path)", "docstring": "Take a quality screenshot\n\nUse the screenshot_name args when you want to take a\nscreenshot for reference\n\nArgs:\nscreenshot_name (str) the name of the screenshot", "source": "juraj-google-style"} {"code": "def joinCommissioned(self, strPSKd='threadjpaketest', waitTime=20):\n \n print '%s call joinCommissioned' % self.port\n cmd = WPANCTL_CMD + 'joiner --start %s %s' %(strPSKd, self.provisioningUrl)\n print cmd\n if self.__sendCommand(cmd)[0] != \"Fail\":\n if self.__getJoinerState():\n self.__sendCommand(WPANCTL_CMD + 'joiner --attach')\n time.sleep(30)\n return True\n else:\n return False\n else:\n return False", "docstring": "start joiner\n\nArgs:\nstrPSKd: Joiner's PSKd\n\nReturns:\nTrue: successful to start joiner\nFalse: fail to start joiner", "source": "juraj-google-style"} {"code": "def _validate_netconfig(self, conf):\n \n\n nets = conf.get('nets', {})\n if len(nets) == 0:\n \n raise LagoInitException('No networks configured.')\n\n no_mgmt_dns = [\n name for name, net in nets.iteritems()\n if net.get('management', None) is None and\n (net.get('main_dns') or net.get('dns_domain_name'))\n ]\n if len(no_mgmt_dns) > 0 and len(nets.keys()) > 1:\n raise LagoInitException(\n (\n 'Networks: {0}, misconfigured, they '\n 'are not marked as management, but have '\n 'DNS attributes. DNS is supported '\n 'only in management networks.'\n ).format(','.join(no_mgmt_dns))\n )\n\n for dom_name, dom_spec in conf['domains'].items():\n mgmts = []\n for nic in dom_spec['nics']:\n net = self._get_net(conf, dom_name, nic)\n if net.get('management', False) is True:\n mgmts.append(nic['net'])\n if len(mgmts) == 0:\n raise LagoInitException(\n (\n 'VM {0} has no management network, '\n 'please connect it to '\n 'one.'\n ).format(dom_name)\n )\n\n if len(mgmts) > 1:\n raise LagoInitException(\n (\n 'VM {0} has more than one management '\n 'network: {1}. 
It should have exactly '\n 'one.'\n ).format(dom_name, ','.join(mgmts))\n )", "docstring": "Validate network configuration\n\nArgs:\nconf(dict): spec\n\nReturns:\nNone\n\n\nRaises:\n:exc:`~lago.utils.LagoInitException`: If a VM has more than\none management network configured, or a network which is not\nmanagement has DNS attributes, or a VM is configured with a\nnone-existence NIC, or a VM has no management network.", "source": "juraj-google-style"} {"code": "def get_calibration_min_max_value(self, calibration_statistics_serialized: bytes, calibration_options_serialized: bytes) -> Optional[tuple[float, float]]:\n statistics = calibration_statistics_pb2.CalibrationStatistics.FromString(calibration_statistics_serialized)\n options = stablehlo_quant_config_pb2.CalibrationOptions.FromString(calibration_options_serialized)\n return _call_and_return_none_on_error(functools.partial(calibration_algorithm.get_min_max_value, statistics, options), error_msg=f'Retrieving calibrated min / max failed. Options: {options}.')", "docstring": "Calculates min and max values from statistics.\n\nArgs:\ncalibration_statistics_serialized: Serialized `CalibrationStatistics`.\nThis will be the source to calculate min and max values from.\ncalibration_options_serialized: Serialized `CalibrationOptions`. Specifies\nhow the min / max should be calculated.\n\nReturns:\n(min_value, max_value): Min and max calculated using calib_opts. `None`\nupon error.", "source": "github-repos"} {"code": "def final_bearing(self, format='numeric'):\n \n bearings = []\n for segment in self:\n if len(segment) < 2:\n bearings.append([])\n else:\n bearings.append(segment.final_bearing(format))\n return bearings", "docstring": "Calculate final bearing between locations in segments.\n\nArgs:\nformat (str): Format of the bearing string to return\n\nReturns:\nlist of list of float: Groups of bearings between points in\nsegments", "source": "juraj-google-style"} {"code": "def ReleaseObject(self, identifier):\n if (identifier not in self._values):\n raise KeyError('Missing cached object for identifier: {0:s}'.format(identifier))\n cache_value = self._values[identifier]\n if (not cache_value):\n raise RuntimeError('Missing cache value for identifier: {0:s}'.format(identifier))\n cache_value.DecrementReferenceCount()", "docstring": "Releases a cached object based on the identifier.\n\nThis method decrements the cache value reference count.\n\nArgs:\nidentifier (str): VFS object identifier.\n\nRaises:\nKeyError: if the VFS object is not found in the cache.\nRuntimeError: if the cache value is missing.", "source": "codesearchnet"} {"code": "def new(cls, access_token, environment='prod'):\n \n request = RequestBuilder \\\n .request(environment) \\\n .to_service(cls.SERVICE_NAME, cls.SERVICE_VERSION) \\\n .throw(\n StorageForbiddenException,\n lambda resp: 'You are forbidden to do this.'\n if resp.status_code == 403 else None\n ) \\\n .throw(\n StorageNotFoundException,\n lambda resp: 'The entity is not found'\n if resp.status_code == 404 else None\n ) \\\n .throw(\n StorageException,\n lambda resp: 'Server response: {0} - {1}'.format(resp.status_code, resp.text)\n if not resp.ok else None\n )\n\n authenticated_request = request.with_token(access_token)\n\n return cls(request, authenticated_request)", "docstring": "Create a new storage service REST client.\n\nArguments:\nenvironment: The service environment to be used for the client\naccess_token: The access token used to authenticate with the\nservice\n\nReturns:\nA storage_service.api.ApiClient 
instance\n\nExample:\n>>> storage_client = ApiClient.new(my_access_token)", "source": "juraj-google-style"} {"code": "def serialize_to_transport(self, doc_format='xml', *args, **kwargs):\n return super(ResourceMap, self).serialize(*args, format=doc_format, encoding='utf-8', **kwargs)", "docstring": "Serialize ResourceMap to UTF-8 encoded XML document.\n\nArgs:\ndoc_format: str\nOne of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,\n``trig`` and ``nquads``.\n\nargs and kwargs:\nOptional arguments forwarded to rdflib.ConjunctiveGraph.serialize().\n\nReturns:\nbytes: UTF-8 encoded XML doc.\n\nNote:\nOnly the default, \"xml\", is automatically indexed by DataONE.", "source": "codesearchnet"} {"code": "def merge_accumulators(self, accumulators, *args, **kwargs):\n raise NotImplementedError(str(self))", "docstring": "Returns the result of merging several accumulators\nto a single accumulator value.\n\nArgs:\naccumulators: the accumulators to merge.\nOnly the first accumulator may be modified and returned for efficiency;\nthe other accumulators should not be mutated, because they may be\nshared with other code and mutating them could lead to incorrect\nresults or data corruption.\n*args: Additional arguments and side inputs.\n**kwargs: Additional arguments and side inputs.", "source": "github-repos"} {"code": "def __init__(self, jsonstr=None, key=None, warnings=None):\n \n super(Resource, self).__init__()\n self.warnings = warnings\n if jsonstr is not None:\n data = json.loads(json.dumps(jsonstr))\n if key is not None:\n object.__setattr__(self, 'json_data', data[key])\n else:\n object.__setattr__(self, 'json_data', data)", "docstring": "Initialization of the object\n\nArgs:\njsonstr (str): a raw JSON string that is returned by a request.\nWe store all the data in `self.json_data` and use `__getattr__`\nand `__setattr__` to make the data accessible like attributes\nof the object\nkey (str): Optional key to use with jsonstr. If `key` exists, we'll\nload the data of `jsonstr[key]` instead of the whole `jsonstr`", "source": "juraj-google-style"} {"code": "def get_direct_band_gap(self):\n if self.is_metal():\n return 0.0\n dg = self.get_direct_band_gap_dict()\n return min((v['value'] for v in dg.values()))", "docstring": "Returns the direct band gap.\n\nReturns:\nthe value of the direct band gap", "source": "codesearchnet"} {"code": "def resource(self, resource_type):\n \n try:\n resource = getattr(self.resources, self.safe_rt(resource_type))(self)\n except AttributeError:\n self._resources(True)\n resource = getattr(self.resources, self.safe_rt(resource_type))(self)\n return resource", "docstring": "Get instance of Resource Class with dynamic type.\n\nArgs:\nresource_type: The resource type name (e.g Adversary, User Agent, etc).\n\nReturns:\n(object): Instance of Resource Object child class.", "source": "juraj-google-style"} {"code": "def GetUserInfo(knowledge_base, user):\n \n \n \n \n \n \n if \"\\\\\" in user:\n domain, user = user.split(\"\\\\\", 1)\n users = [\n u for u in knowledge_base.users\n if u.username == user and u.userdomain == domain\n ]\n else:\n users = [u for u in knowledge_base.users if u.username == user]\n\n if not users:\n return\n else:\n return users[0]", "docstring": "Get a User protobuf for a specific user.\n\nArgs:\nknowledge_base: An rdf_client.KnowledgeBase object.\nuser: Username as string. 
May contain domain like DOMAIN\\\\user.\n\nReturns:\nA User rdfvalue or None", "source": "juraj-google-style"} {"code": "def group_associations_types(\n self,\n main_type,\n sub_type,\n unique_id,\n target,\n api_branch=None,\n api_entity=None,\n owner=None,\n params=None,\n ):\n \n params = params or {}\n if owner:\n params['owner'] = owner\n\n api_branch = api_branch or target.api_sub_type\n api_entity = api_entity or target.api_entity\n\n if not sub_type:\n url = '/v2/{}/{}/groups/{}'.format(main_type, unique_id, api_branch)\n else:\n url = '/v2/{}/{}/{}/groups/{}'.format(main_type, sub_type, unique_id, api_branch)\n\n for gat in self._iterate(url, params, api_entity):\n yield gat", "docstring": "Args:\nowner:\nmain_type:\nsub_type:\nunique_id:\ntarget:\napi_branch:\napi_entity:\nparams:\n\nReturn:", "source": "juraj-google-style"} {"code": "def ragged_binary_elementwise_assert_op_impl(op, x, y):\n x_is_ragged = ragged_tensor.is_ragged(x)\n y_is_ragged = ragged_tensor.is_ragged(y)\n x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, preferred_dtype=y.dtype if y_is_ragged else None)\n y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, preferred_dtype=x.dtype)\n if x_is_ragged and y_is_ragged:\n x, y = ragged_tensor.match_row_splits_dtypes(x, y)\n if x_is_ragged and y_is_ragged or (x_is_ragged and x.flat_values.shape.ndims <= y.shape.ndims) or (y_is_ragged and y.flat_values.shape.ndims <= x.shape.ndims):\n shape_x = DynamicRaggedShape.from_tensor(x)\n shape_y = DynamicRaggedShape.from_tensor(y)\n if shape_x.dtype != shape_y.dtype:\n if not x_is_ragged:\n shape_x = shape_x.with_dtype(shape_y.dtype)\n elif not y_is_ragged:\n shape_y = shape_y.with_dtype(shape_x.dtype)\n if _row_partitions_identical(shape_x, shape_y):\n return op(x.flat_values, y.flat_values)\n _, bcast_xz, bcast_yz = broadcast_dynamic_shape_extended(shape_x, shape_y)\n x_new_flat = bcast_xz.broadcast_flat_values(x, inner_dimensions=False)\n y_new_flat = bcast_yz.broadcast_flat_values(y, inner_dimensions=False)\n return op(x_new_flat, y_new_flat)\n x_values = x.flat_values if ragged_tensor.is_ragged(x) else x\n y_values = y.flat_values if ragged_tensor.is_ragged(y) else y\n return op(x_values, y_values)", "docstring": "Binary elementwise assert api handler for RaggedTensors.\n\nThis handles binary assert operations for ragged tensors. Compared with\n`ragged_binary_elementwise_op_impl`, this handler does not compute a ragged\ntensor as output. 
Instead, it applies the assert operation `op` to input\ntensors based on their ragged shapes and flat_values, and returns the result\nof the assertion operation.\n\nArgs:\nop: a binary assert operation on Tensors.\nx: something that can be coerced to a Tensor or RaggedTensor.\ny: something that can be coerced to a Tensor or RaggedTensor.\n\nReturns:\nthe result of the assertion operation.", "source": "github-repos"} {"code": "def init_cache(self, batch_size, max_length, encoder_outputs):\n decoder_input_ids = jnp.ones((batch_size, max_length), dtype='i4')\n decoder_attention_mask = jnp.ones_like(decoder_input_ids)\n decoder_position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape)\n\n def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n decoder_module = module._get_decoder_module()\n return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)\n init_variables = self.module.init(jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward)\n return unfreeze(init_variables['cache'])", "docstring": "Args:\nbatch_size (`int`):\nbatch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.\nmax_length (`int`):\nmaximum possible length for auto-regressive decoding. Defines the sequence length of the initialized\ncache.\nencoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):\n`encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:\n`attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*)\nis a sequence of hidden-states at the output of the last layer of the encoder. Used in the\ncross-attention of the decoder.", "source": "github-repos"} {"code": "def _calc_min_size(self, conv_layers):\n input_size = 1\n for (_, conv_params, max_pooling) in reversed(conv_layers):\n if (max_pooling is not None):\n (kernel_size, stride) = max_pooling\n input_size = ((input_size * stride) + (kernel_size - stride))\n if (conv_params is not None):\n (kernel_size, stride) = conv_params\n input_size = ((input_size * stride) + (kernel_size - stride))\n return input_size", "docstring": "Calculates the minimum size of the input layer.\n\nGiven a set of convolutional layers, calculate the minimum value of\nthe `input_height` and `input_width`, i.e. such that the output has\nsize 1x1. 
Assumes snt.VALID padding.\n\nArgs:\nconv_layers: List of tuples `(output_channels, (kernel_size, stride),\n(pooling_size, pooling_stride))`\n\nReturns:\nMinimum value of input height and width.", "source": "codesearchnet"} {"code": "def save(self, save_path, options=None):\n save_start_time = time.time()\n if not self._initialized:\n self._ensure_initialized()\n else:\n self._queue.join()\n self._copy_to_cpu()\n self._check_async_thread_error()\n save_counter = self.checkpointer().save_counter.numpy() + 1\n full_path = '{}-{}'.format(save_path, save_counter)\n context.async_wait()\n self._save_file_prefix = save_path\n self._use_checkpoint_save = True\n self._checkpoint_options = copy.copy(options) if options else None\n if self._checkpoint_options:\n self._checkpoint_options.experimental_enable_async_checkpoint = False\n self._queue.put(True)\n save_end_time = time.time()\n metrics.AddCheckpointWriteDuration(api_label=_ASYNC_CHECKPOINT, microseconds=_get_duration_microseconds(save_start_time, save_end_time))\n return full_path", "docstring": "Save the checkpointed variables.\n\nArgs:\nsave_path: The file prefix of the checkpoint file.\noptions: Optional CheckpointOption instance.\n\nReturns:\nThe full path of the checkpoint file.", "source": "github-repos"} {"code": "def _AlignDecodedDataOffset(self, decoded_data_offset):\n self._file_object.seek(0, os.SEEK_SET)\n self._decoder = self._GetDecoder()\n self._decoded_data = b''\n encoded_data_offset = 0\n encoded_data_size = self._file_object.get_size()\n while (encoded_data_offset < encoded_data_size):\n read_count = self._ReadEncodedData(self._ENCODED_DATA_BUFFER_SIZE)\n if (read_count == 0):\n break\n encoded_data_offset += read_count\n if (decoded_data_offset < self._decoded_data_size):\n self._decoded_data_offset = decoded_data_offset\n break\n decoded_data_offset -= self._decoded_data_size", "docstring": "Aligns the encoded file with the decoded data offset.\n\nArgs:\ndecoded_data_offset (int): decoded data offset.", "source": "codesearchnet"} {"code": "def generate_message_doc(message_descriptor, locations, path, name_prefix=''):\n prefixed_name = (name_prefix + message_descriptor.name)\n print(make_subsection(prefixed_name))\n location = locations[path]\n if location.HasField('leading_comments'):\n print(textwrap.dedent(location.leading_comments))\n row_tuples = []\n for (field_index, field) in enumerate(message_descriptor.field):\n field_location = locations[(path + (2, field_index))]\n if (field.type not in [11, 14]):\n type_str = TYPE_TO_STR[field.type]\n else:\n type_str = make_link(field.type_name.lstrip('.'))\n row_tuples.append((make_code(field.name), field.number, type_str, LABEL_TO_STR[field.label], textwrap.fill(get_comment_from_location(field_location), INFINITY)))\n print_table(('Field', 'Number', 'Type', 'Label', 'Description'), row_tuples)\n nested_types = enumerate(message_descriptor.nested_type)\n for (index, nested_message_desc) in nested_types:\n generate_message_doc(nested_message_desc, locations, (path + (3, index)), name_prefix=(prefixed_name + '.'))\n for (index, nested_enum_desc) in enumerate(message_descriptor.enum_type):\n generate_enum_doc(nested_enum_desc, locations, (path + (4, index)), name_prefix=(prefixed_name + '.'))", "docstring": "Generate docs for message and nested messages and enums.\n\nArgs:\nmessage_descriptor: descriptor_pb2.DescriptorProto instance for message\nto generate docs for.\nlocations: Dictionary of location paths tuples to\ndescriptor_pb2.SourceCodeInfo.Location instances.\npath: 
Path tuple to the message definition.\nname_prefix: Optional prefix for this message's name.", "source": "codesearchnet"} {"code": "def concrete(self, other=None):\n new_system = self.clone()\n if other:\n new_system.applyFeatures(other, missing='other')\n soft_features = self.getValue(SoftFeatures.SOFT, [])\n score = 0\n for f in sorted(soft_features, key=(lambda f: f.soft), reverse=True):\n try:\n new_system.applyFeatures(f, missing='other')\n score += f.soft\n except:\n pass\n new_system.delValue(SoftFeatures.SOFT)\n return (new_system, score)", "docstring": "Return copy and score after being applied other system and soft features.\n\nArgs:\n\n- other(system, optional): system to apply just before soft features.\n\nReturn(tuple): tuple of the resulting system and its score.", "source": "codesearchnet"} {"code": "def readlink(path):\n \n if sys.getwindowsversion().major < 6:\n raise SaltInvocationError('Symlinks are only supported on Windows Vista or later.')\n\n try:\n return salt.utils.path.readlink(path)\n except OSError as exc:\n if exc.errno == errno.EINVAL:\n raise CommandExecutionError('{0} is not a symbolic link'.format(path))\n raise CommandExecutionError(exc.__str__())\n except Exception as exc:\n raise CommandExecutionError(exc)", "docstring": "Return the path that a symlink points to\n\nThis is only supported on Windows Vista or later.\n\nInline with Unix behavior, this function will raise an error if the path is\nnot a symlink, however, the error raised will be a SaltInvocationError, not\nan OSError.\n\nArgs:\npath (str): The path to the symlink\n\nReturns:\nstr: The path that the symlink points to\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' file.readlink /path/to/link", "source": "juraj-google-style"} {"code": "def write_temp_file(self, content, filename=None, mode='w'):\n \n if filename is None:\n filename = str(uuid.uuid4())\n fqpn = os.path.join(self.tcex.default_args.tc_temp_path, filename)\n with open(fqpn, mode) as fh:\n fh.write(content)\n return fqpn", "docstring": "Write content to a temporary file.\n\nArgs:\ncontent (bytes|str): The file content. If passing binary data the mode needs to be set\nto 'wb'.\nfilename (str, optional): The filename to use when writing the file.\nmode (str, optional): The file write mode which could be either 'w' or 'wb'.\n\nReturns:\nstr: Fully qualified path name for the file.", "source": "juraj-google-style"} {"code": "def _parse_authors(html_chunk):\n \n authors = html_chunk.match(\n [\"div\", {\"class\": \"comment\"}],\n \"h3\",\n \"a\",\n )\n\n if not authors:\n return []\n\n authors = map(\n lambda x: Author( \n x.getContent().strip(),\n normalize_url(BASE_URL, x.params.get(\"href\", None))\n ),\n authors\n )\n\n return filter(lambda x: x.name.strip(), authors)", "docstring": "Parse authors of the book.\n\nArgs:\nhtml_chunk (obj): HTMLElement containing slice of the page with details.\n\nReturns:\nlist: List of :class:`structures.Author` objects. 
Blank if no author \\\nfound.", "source": "juraj-google-style"} {"code": "def from_schema(cls, schema, id_of=_id_of, *args, **kwargs):\n \n\n return cls(base_uri=id_of(schema), referrer=schema, *args, **kwargs)", "docstring": "Construct a resolver from a JSON schema object.\n\nArguments:\n\nschema:\n\nthe referring schema\n\nReturns:\n\n`RefResolver`", "source": "juraj-google-style"} {"code": "def yield_batch(iterable, batch_size, num_tensors=1):\n tensors = [[] for i in range(num_tensors)]\n for item in iterable:\n if (item is None):\n break\n for i in range(num_tensors):\n tmp = (str(item[i]) if (type(item[i]) is bytearray) else item[i])\n tensors[i].append(tmp)\n if (len(tensors[0]) >= batch_size):\n (yield tensors)\n tensors = [[] for i in range(num_tensors)]\n if (len(tensors[0]) > 0):\n (yield tensors)", "docstring": "Generator that yields batches of a DataFrame iterator.\n\nArgs:\n:iterable: Spark partition iterator.\n:batch_size: number of items to retrieve per invocation.\n:num_tensors: number of tensors (columns) expected in each item.\n\nReturns:\nAn array of ``num_tensors`` arrays, each of length `batch_size`", "source": "codesearchnet"} {"code": "def get_cohp(self, spin=None, integrated=False):\n \n if not integrated:\n populations = self.cohp\n else:\n populations = self.icohp\n\n if populations is None:\n return None\n elif spin is None:\n return populations\n else:\n if isinstance(spin, int):\n spin = Spin(spin)\n elif isinstance(spin, str):\n s = {\"up\": 1, \"down\": -1}[spin.lower()]\n spin = Spin(s)\n return {spin: populations[spin]}", "docstring": "Returns the COHP or ICOHP for a particular spin.\n\nArgs:\nspin: Spin. Can be parsed as spin object, integer (-1/1)\nor str (\"up\"/\"down\")\nintegrated: Return COHP (False) or ICOHP (True)\n\nReturns:\nReturns the CHOP or ICOHP for the input spin. If Spin is\nNone and both spins are present, both spins will be returned\nas a dictionary.", "source": "juraj-google-style"} {"code": "def get_leaf_node_path_list(self, sep=os.path.sep, type_str=None):\n return [v.get_path_str(sep, type_str) for v in self.leaf_node_gen]", "docstring": "Get paths for all leaf nodes for the tree rooted at this node.\n\nArgs:\nsep: str\nOne or more characters to insert between each element in the path.\nDefaults to \"/\" on Unix and \"\\\" on Windows.\n\ntype_str:\nSUBJECT_NODE_TAG, TYPE_NODE_TAG or None. 
If set, only include\ninformation from nodes of that type.\n\nReturns:\nlist of str: The paths to the leaf nodes for the tree rooted at this node.", "source": "codesearchnet"} {"code": "def _clean(self, item):\n null_keys = []\n for key in item:\n if item[key] == None:\n null_keys.append(key)\n for key in null_keys:\n del item[key]\n if 'adBlockingOptOut' in item and (not item['adBlockingOptOut']):\n item['adBlockingOptOut'] = False\n return item", "docstring": "Removes null keys from the item.\n\nAn absent key and a null key mean different things in certain contexts for\nCM, this method cleans up objects before sending to the CM API by removing\nany null keys.\n\nArgs:\nitem: The CM object to clean.", "source": "github-repos"} {"code": "def collect_constant_renames():\n renames = set()\n for module in sys.modules.copy().values():\n try:\n constants_v1_list = tf_export.get_v1_constants(module)\n constants_v2_list = tf_export.get_v2_constants(module)\n except:\n pass\n constants_v1 = {constant_name: api_names for api_names, constant_name in constants_v1_list}\n constants_v2 = {constant_name: api_names for api_names, constant_name in constants_v2_list}\n for constant_name, api_names_v1 in constants_v1.items():\n api_names_v2 = constants_v2[constant_name]\n for name in api_names_v1:\n if name not in api_names_v2:\n renames.add((name, get_canonical_name(api_names_v2, name)))\n return renames", "docstring": "Looks for constants that need to be renamed in TF 2.0.\n\nReturns:\nSet of tuples of the form (current name, new name).", "source": "github-repos"} {"code": "def _kl_blockwise_blockwise(b0, b1, name=None):\n \n if len(b0.distributions) != len(b1.distributions):\n raise ValueError(\n 'Can only compute KL divergence between Blockwise distributions with '\n 'the same number of component distributions.')\n\n \n b0_event_sizes = [_event_size(d) for d in b0.distributions]\n b1_event_sizes = [_event_size(d) for d in b1.distributions]\n\n assertions = []\n message = ('Can only compute KL divergence between Blockwise distributions '\n 'with the same pairwise event shapes.')\n\n if (all(isinstance(event_size, int) for event_size in b0_event_sizes) and\n all(isinstance(event_size, int) for event_size in b1_event_sizes)):\n if b0_event_sizes != b1_event_sizes:\n raise ValueError(message)\n else:\n if b0.validate_args or b1.validate_args:\n assertions.extend(\n assert_util.assert_equal( \n e1, e2, message=message)\n for e1, e2 in zip(b0_event_sizes, b1_event_sizes))\n\n with tf.name_scope(name or 'kl_blockwise_blockwise'):\n with tf.control_dependencies(assertions):\n return sum([\n kullback_leibler.kl_divergence(d1, d2) for d1, d2 in zip(\n b0.distributions, b1.distributions)])", "docstring": "Calculate the batched KL divergence KL(b0 || b1) with b0 and b1 Blockwise distributions.\n\nArgs:\nb0: instance of a Blockwise distribution object.\nb1: instance of a Blockwise distribution object.\nname: (optional) Name to use for created operations. Default is\n\"kl_blockwise_blockwise\".\n\nReturns:\nkl_blockwise_blockwise: `Tensor`. The batchwise KL(b0 || b1).", "source": "juraj-google-style"} {"code": "def copy_function_def_to_graph_def_recursively(func_name, graph_def, copied_functions, default_graph=None):\n if func_name and (not is_function(func_name, default_graph)):\n raise ValueError(f'Function {func_name} was not found. 
Please make sure the FunctionDef `fdef` is correct.')\n if func_name in copied_functions:\n return\n copied_functions.add(func_name)\n func_def = get_function_def(func_name, default_graph)\n graph_def.library.function.add().CopyFrom(func_def)\n for node_def in func_def.node_def:\n op_def = default_graph.op_def_for_type(node_def.op)\n for attr in op_def.attr:\n if attr.type == 'func':\n func_name = node_def.attr[attr.name].func.name\n copy_function_def_to_graph_def_recursively(func_name, graph_def, copied_functions, default_graph)\n elif attr.type == 'list(func)':\n for fn in node_def.attr[attr.name].list.func:\n func_name = fn.name\n copy_function_def_to_graph_def_recursively(func_name, graph_def, copied_functions, default_graph)", "docstring": "Recursively copies `FunctionDef`s to `GraphDef`.\n\nIt copies the outermost `FunctionDef` and all nested `FunctionDef`s to\n`graph_def`. The `copied_function` enforces that every `FunctionDef` will be\ncopied at most once. The `FunctionDef`s will be found from `default_graph` if\nthis function was called in graph mode or from eager context if this function\nwas called in eager mode.\n\nArgs:\nfunc_name: The signature name of FunctionDef to be copied to `graph_def`.\ngraph_def: The GraphDef that will contain all `FunctionDef`s in its library.\ncopied_functions: A set contains all copied function names.\ndefault_graph: The `tf.Graph` where all `FunctionDef`s will be found\nin graph mode. Not used in eager mode.", "source": "github-repos"} {"code": "def find_distinct(self, collection, key):\n obj = getattr(self.db, collection)\n result = obj.distinct(key)\n return result", "docstring": "Search a collection for the distinct key values provided.\n\nArgs:\ncollection: The db collection. See main class documentation.\nkey: The name of the key to find distinct values. 
For example with\nthe indicators collection, the key could be \"type\".\nReturns:\nList of distinct values.", "source": "codesearchnet"} {"code": "def __init__(self, function_type, default_values, is_pure=False, name=None, jit_compile=None):\n self._function_type = function_type\n self._default_values = default_values\n self._fullargspec = to_fullargspec(function_type, default_values)\n self._is_pure = is_pure\n self._jit_compile = jit_compile\n self._name = name or 'f'\n self._input_signature = to_input_signature(function_type)", "docstring": "Constructs a FunctionSpec describing a python function.\n\nArgs:\nfunction_type: A FunctionType describing the python function signature.\ndefault_values: Dictionary mapping parameter names to default values.\nis_pure: if True all input arguments (including variables and constants)\nwill be converted to tensors and no variable changes allowed.\nname: Name of the function\njit_compile: see `tf.function`.", "source": "github-repos"} {"code": "def save(thing, url_or_handle, **kwargs):\n \n is_handle = hasattr(url_or_handle, \"write\") and hasattr(url_or_handle, \"name\")\n if is_handle:\n _, ext = os.path.splitext(url_or_handle.name)\n else:\n _, ext = os.path.splitext(url_or_handle)\n if not ext:\n raise RuntimeError(\"No extension in URL: \" + url_or_handle)\n\n if ext in savers:\n saver = savers[ext]\n if is_handle:\n saver(thing, url_or_handle, **kwargs)\n else:\n with write_handle(url_or_handle) as handle:\n saver(thing, handle, **kwargs)\n else:\n saver_names = [(key, fn.__name__) for (key, fn) in savers.items()]\n message = \"Unknown extension '{}', supports {}.\"\n raise ValueError(message.format(ext, saver_names))", "docstring": "Save object to file on CNS.\n\nFile format is inferred from path. Use save_img(), save_npy(), or save_json()\nif you need to force a particular format.\n\nArgs:\nobj: object to save.\npath: CNS path.\n\nRaises:\nRuntimeError: If file extension not supported.", "source": "juraj-google-style"} {"code": "def _find_control_structure(self, start_address, search_length):\n words = self._read_memory(start_address, search_length, chunk_size=4, join=False)\n found_offset = None\n for (i, word) in enumerate(words):\n if (word == ControlStructure.CONTROL_MAGIC_1):\n if ((len(words) - i) < 4):\n continue\n if ((words[(i + 1)] == ControlStructure.CONTROL_MAGIC_2) and (words[(i + 2)] == ControlStructure.CONTROL_MAGIC_3) and (words[(i + 3)] == ControlStructure.CONTROL_MAGIC_4)):\n found_offset = i\n break\n if (found_offset is None):\n raise HardwareError('Could not find control structure magic value in search area')\n struct_info = words[(found_offset + 4)]\n (_version, _flags, length) = struct.unpack(' tf.Tensor:\n if input_ids is None and inputs_embeds is None:\n raise ValueError('Need to provide either `input_ids` or `input_embeds`.')\n if input_ids is not None:\n check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n input_shape = shape_list(inputs_embeds)[:-1]\n if token_type_ids is None:\n token_type_ids = tf.fill(dims=input_shape, value=0)\n if position_ids is None:\n position_ids = tf.expand_dims(tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0)\n position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n final_embeddings = inputs_embeds + position_embeds + token_type_embeds\n 
final_embeddings = self.LayerNorm(inputs=final_embeddings)\n final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\nfinal_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github-repos"} {"code": "def _check_property(self, rest=None, require_indexed=True):\n if (require_indexed and (not self._indexed)):\n raise InvalidPropertyError(('Property is unindexed %s' % self._name))\n if rest:\n raise InvalidPropertyError(('Referencing subproperty %s.%s but %s is not a structured property' % (self._name, rest, self._name)))", "docstring": "Internal helper to check this property for specific requirements.\n\nCalled by Model._check_properties().\n\nArgs:\nrest: Optional subproperty to check, of the form 'name1.name2...nameN'.\n\nRaises:\nInvalidPropertyError if this property does not meet the given\nrequirements or if a subproperty is specified. (StructuredProperty\noverrides this method to handle subproperties.)", "source": "codesearchnet"} {"code": "def _check_obj_properties(self, pub, name=\"pub\"):\n \n if not hasattr(pub, \"indexes\"):\n raise InvalidType(\"`%s` doesn't have .indexes property!\" % name)\n\n if not pub.indexes:\n raise InvalidType(\"`%s.indexes` is not set!\" % name)\n\n if not hasattr(pub, \"project_key\"):\n raise InvalidType(\n \"`%s` doesn't have .project_key property!\" % name\n )\n\n if not pub.project_key:\n raise InvalidType(\"`%s.project_key` is not set!\" % name)", "docstring": "Make sure, that `pub` has the right interface.\n\nArgs:\npub (obj): Instance which will be checked.\nname (str): Name of the instance. Used in exception. Default `pub`.\n\nRaises:\nInvalidType: When the `pub` is not instance of `obj_type`.", "source": "juraj-google-style"} {"code": "def get_user(path, follow_symlinks=True):\n if (not os.path.exists(path)):\n raise CommandExecutionError('Path not found: {0}'.format(path))\n if (follow_symlinks and (sys.getwindowsversion().major >= 6)):\n path = _resolve_symlink(path)\n return salt.utils.win_dacl.get_owner(path)", "docstring": "Return the user that owns a given file\n\nSymlinks are followed by default to mimic Unix behavior. Specify\n`follow_symlinks=False` to turn off this behavior.\n\nArgs:\npath (str): The path to the file or directory\n\nfollow_symlinks (bool):\nIf the object specified by ``path`` is a symlink, get attributes of\nthe linked file instead of the symlink itself. Default is True\n\nReturns:\nstr: The name of the owner\n\n\nCLI Example:\n\n.. 
code-block:: bash\n\nsalt '*' file.get_user c:\\\\temp\\\\test.txt\nsalt '*' file.get_user c:\\\\temp\\\\test.txt follow_symlinks=False", "source": "codesearchnet"} {"code": "def set_user_info(self, nick, user='*', real='*'):\n \n if self.connected:\n raise Exception(\"Can't set user info now, we're already connected!\")\n\n \n if not self.connected:\n self.nick = nick\n\n self.connect_info['user'] = {\n 'nick': nick,\n 'user': user,\n 'real': real,\n }", "docstring": "Sets user info for this server, to be used before connection.\n\nArgs:\nnick (str): Nickname to use.\nuser (str): Username to use.\nreal (str): Realname to use.", "source": "juraj-google-style"} {"code": "def is_module(obj):\n \n return True if obj and isinstance(obj, ModuleType) or inspect.isclass(obj) else False", "docstring": "Checking and setting type to MODULE\nArgs:\nobj: ModuleType / class\nNote: An instance will be treated as a Class\nReturn:\nBoolean", "source": "juraj-google-style"} {"code": "def start(self, channel=None):\n \n\n super(ReferenceDevice, self).start(channel)\n\n try:\n self.controller.start(channel)\n\n \n for address, tile in sorted(self._tiles.items()):\n if address == 8:\n continue\n\n if not isinstance(tile, EmulatedPeripheralTile):\n raise DataError(\"An emulated ReferenceDevice can only have a single controller and all other tiles must inherit from EmulatedPeripheralTile\",\n address=address)\n\n tile.start(channel)\n\n async def _launch_tiles():\n await self.controller.reset()\n await asyncio.wait_for(self.controller.initialized.wait(), 2.0)\n\n \n \n for address, tile in sorted(self._tiles.items()):\n if address == 8:\n continue\n\n await asyncio.wait_for(tile.initialized.wait(), 2.0)\n\n self.emulator.run_task_external(_launch_tiles())\n\n if self._simulating_time:\n self.emulator.add_task(None, self._time_ticker())\n except:\n self.stop()\n raise", "docstring": "Start this emulated device.\n\nThis triggers the controller to call start on all peripheral tiles in\nthe device to make sure they start after the controller does and then\nit waits on each one to make sure they have finished initializing\nbefore returning.\n\nArgs:\nchannel (IOTilePushChannel): the channel with a stream and trace\nroutine for streaming and tracing data through a VirtualInterface", "source": "juraj-google-style"} {"code": "def get_most_recent_images(self, results, types=[], sensors=[], N=1):\n if (not len(results)):\n return None\n if types:\n results = [r for r in results if (r['type'] in types)]\n if sensors:\n results = [r for r in results if (r['properties'].get('sensorPlatformName') in sensors)]\n newlist = sorted(results, key=(lambda k: k['properties'].get('timestamp')), reverse=True)\n return newlist[:N]", "docstring": "Return the most recent image\n\nArgs:\nresults: a catalog resultset, as returned from a search\ntypes: array of types you want. optional.\nsensors: array of sensornames. optional.\nN: number of recent images to return. 
defaults to 1.\n\nReturns:\nsingle catalog item, or none if not found", "source": "codesearchnet"} {"code": "def create_script(target_test):\n script = f'\\nimport os\\nimport subprocess\\n\\nresult = subprocess.run(\\n [\"python3\", \"-m\", \"pytest\", \"-v\", \"-rfEp\", f\"{target_test}\"],\\n capture_output = True,\\n text=True,\\n)\\nprint(result.stdout)\\n\\nif f\"PASSED {target_test}\" in result.stdout:\\n print(\"test passed\")\\n exit(0)\\nelif len(result.stderr) > 0:\\n if \"ERROR: file or directory not found: \" in result.stderr:\\n print(\"test file or directory not found in this commit\")\\n exit(0)\\n elif \"ERROR: not found: \" in result.stderr:\\n print(\"test not found in this commit\")\\n exit(0)\\n else:\\n print(f\"pytest failed to run: {{result.stderr}}\")\\n exit(-1)\\nelif f\"FAILED {target_test}\" in result.stdout:\\n print(\"test failed\")\\n exit(2)\\n\\nexit(0)\\n'\n with open('target_script.py', 'w') as fp:\n fp.write(script.strip())", "docstring": "Create a python script to be run by `git bisect run` to determine if `target_test` passes or fails.\nIf a test is not found in a commit, the script with exit code `0` (i.e. `Success`).\n\nArgs:\ntarget_test (`str`): The test to check.\n\nReturns:\n`str`: The script to be run by `git bisect run`.", "source": "github-repos"} {"code": "def take_reference_screenshot(webdriver, file_name):\n folder_location = os.path.join(ProjectUtils.get_project_root(), WebScreenShotUtil.REFERENCE_SCREEN_SHOT_LOCATION)\n WebScreenShotUtil.__capture_screenshot(webdriver, folder_location, (file_name + '.png'))", "docstring": "Captures a screenshot as a reference screenshot.\n\nArgs:\nwebdriver (WebDriver) - Selenium webdriver.\nfile_name (str) - File name to save screenshot as.", "source": "codesearchnet"} {"code": "def operates_on(self, qubits: Iterable[raw_types.Qid]) -> bool:\n return any(((q in qubits) for q in self.qubits))", "docstring": "Determines if the moment has operations touching the given qubits.\n\nArgs:\nqubits: The qubits that may or may not be touched by operations.\n\nReturns:\nWhether this moment has operations involving the qubits.", "source": "codesearchnet"} {"code": "def fill(self, text):\n\n def _fill(elem):\n elem.clear()\n elem.send_keys(text)\n self.map(_fill, u'fill({!r})'.format(text)).execute()", "docstring": "Set the text value of each matched element to `text`.\n\nExample usage:\n\n.. 
code:: python\n\n# Set the text of the first element matched by the query to \"Foo\"\nq.first.fill('Foo')\n\nArgs:\ntext (str): The text used to fill the element (usually a text field or text area).\n\nReturns:\nNone", "source": "codesearchnet"} {"code": "def replace_multiple_in_file(filename: str,\n replacements: List[Tuple[str, str]]) -> None:\n \n with open(filename) as infile:\n contents = infile.read()\n for text_from, text_to in replacements:\n log.info(\"Amending {}: {} -> {}\",\n filename, repr(text_from), repr(text_to))\n contents = contents.replace(text_from, text_to)\n with open(filename, 'w') as outfile:\n outfile.write(contents)", "docstring": "Replaces multiple from/to string pairs within a single file.\n\nArgs:\nfilename: filename to process (modifying it in place)\nreplacements: list of ``(from_text, to_text)`` tuples", "source": "juraj-google-style"} {"code": "def sanity_check_ir_blocks_from_frontend(ir_blocks, query_metadata_table):\n \n if not ir_blocks:\n raise AssertionError(u'Received no ir_blocks: {}'.format(ir_blocks))\n\n _sanity_check_fold_scope_locations_are_unique(ir_blocks)\n _sanity_check_no_nested_folds(ir_blocks)\n _sanity_check_query_root_block(ir_blocks)\n _sanity_check_output_source_follower_blocks(ir_blocks)\n _sanity_check_block_pairwise_constraints(ir_blocks)\n _sanity_check_mark_location_preceding_optional_traverse(ir_blocks)\n _sanity_check_every_location_is_marked(ir_blocks)\n _sanity_check_coerce_type_outside_of_fold(ir_blocks)\n _sanity_check_all_marked_locations_are_registered(ir_blocks, query_metadata_table)\n _sanity_check_registered_locations_parent_locations(query_metadata_table)", "docstring": "Assert that IR blocks originating from the frontend do not have nonsensical structure.\n\nArgs:\nir_blocks: list of BasicBlocks representing the IR to sanity-check\n\nRaises:\nAssertionError, if the IR has unexpected structure. If the IR produced by the front-end\ncannot be successfully and correctly used to generate MATCH or Gremlin due to a bug,\nthis is the method that should catch the problem.", "source": "juraj-google-style"} {"code": "def get_min_max_value(self) -> tuple[float, float]:\n average_min_max_statistics = self._statistics.average_min_max_statistics\n num_samples = average_min_max_statistics.num_samples\n if num_samples == 0:\n raise ValueError(f'num_samples must not be 0 when calibration method is AverageMinMax: {self._calib_opts}')\n min_value, max_value = (average_min_max_statistics.min_sum / num_samples, average_min_max_statistics.max_sum / num_samples)\n return (min_value, max_value)", "docstring": "Calculates the average of min and max values.\n\nReturns:\n(min_value, max_value): Min and max calculated using AverageMinMax\n\nRaises:\nValueError: num_samples is 0.", "source": "github-repos"} {"code": "def CheckGlobalStatic(filename, clean_lines, linenum, error):\n line = clean_lines.elided[linenum]\n if (((linenum + 1) < clean_lines.NumLines()) and (not Search('[;({]', line))):\n line += clean_lines.elided[(linenum + 1)].strip()\n match = Match('((?:|static +)(?:|const +))(?::*std::)?string( +const)? 
+([a-zA-Z0-9_:]+)\\\\b(.*)', line)\n if (match and (not Search('\\\\bstring\\\\b(\\\\s+const)?\\\\s*[\\\\*\\\\&]\\\\s*(const\\\\s+)?\\\\w', line)) and (not Search('\\\\boperator\\\\W', line)) and (not Match('\\\\s*(<.*>)?(::[a-zA-Z0-9_]+)*\\\\s*\\\\(([^\"]|$)', match.group(4)))):\n if Search('\\\\bconst\\\\b', line):\n error(filename, linenum, 'runtime/string', 4, ('For a static/global string constant, use a C style string instead: \"%schar%s %s[]\".' % (match.group(1), (match.group(2) or ''), match.group(3))))\n else:\n error(filename, linenum, 'runtime/string', 4, 'Static/global string variables are not permitted.')\n if (Search('\\\\b([A-Za-z0-9_]*_)\\\\(\\\\1\\\\)', line) or Search('\\\\b([A-Za-z0-9_]*_)\\\\(CHECK_NOTNULL\\\\(\\\\1\\\\)\\\\)', line)):\n error(filename, linenum, 'runtime/init', 4, 'You seem to be initializing a member variable with itself.')", "docstring": "Check for unsafe global or static objects.\n\nArgs:\nfilename: The name of the current file.\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nerror: The function to call with any errors found.", "source": "codesearchnet"} {"code": "def infer_types(src, options, loader, init_maximum_depth=INIT_MAXIMUM_DEPTH, maximum_depth=None):\n ctx = context.Context(options, loader, src=src)\n loc, defs = ctx.vm.run_program(src, options.input, init_maximum_depth)\n log.info('===Done running definitions and module-level code===')\n snapshotter = metrics.get_metric('memory', metrics.Snapshot)\n snapshotter.take_snapshot('analyze:infer_types:tracer')\n if maximum_depth is None:\n if not options.quick:\n maximum_depth = MAXIMUM_DEPTH\n elif options.analyze_annotated:\n maximum_depth = QUICK_CHECK_MAXIMUM_DEPTH\n else:\n maximum_depth = QUICK_INFER_MAXIMUM_DEPTH\n ctx.exitpoint = ctx.vm.analyze(loc, defs, maximum_depth)\n snapshotter.take_snapshot('analyze:infer_types:post')\n ast = ctx.vm.compute_types(defs)\n ast = ctx.loader.resolve_ast(ast)\n if ctx.vm.has_unknown_wildcard_imports or any((a in defs for a in abstract_utils.DYNAMIC_ATTRIBUTE_MARKERS)):\n if '__getattr__' not in ast:\n ast = pytd_utils.Concat(ast, ctx.loader.get_default_ast())\n if options.protocols:\n protocols_pytd = ctx.loader.import_name('protocols')\n else:\n protocols_pytd = None\n deps_pytd = ctx.loader.concat_all()\n ast = ast.Visit(visitors.CreateTypeParametersForSignatures())\n if options.protocols:\n log.info('=========== PyTD to solve =============\\n%s', pytd_utils.Print(ast))\n ast = convert_structural.convert_pytd(ast, deps_pytd, protocols_pytd)\n else:\n log.info('Solving is turned off. Discarding call traces.')\n ast = ast.Visit(visitors.RemoveUnknownClasses())\n ast = convert_structural.extract_local(ast)\n _maybe_output_debug(options, ctx.program)\n return Analysis(ctx, ast, deps_pytd)", "docstring": "Given Python source return its types.\n\nArgs:\nsrc: A string containing Python source code.\noptions: config.Options object\nloader: A load_pytd.Loader instance to load PYI information.\ninit_maximum_depth: Depth of analysis during module loading.\nmaximum_depth: Depth of the analysis. 
Default: unlimited.\n\nReturns:\nA tuple of (ast: TypeDeclUnit, builtins: TypeDeclUnit)\nRaises:\nAssertionError: In case of a bad parameter combination.", "source": "github-repos"} {"code": "def reload_class_methods(self, class_, verbose=True):\n if verbose:\n print(('[util_class] Reloading self=%r as class_=%r' % (self, class_)))\n self.__class__ = class_\n for key in dir(class_):\n func = getattr(class_, key)\n if isinstance(func, types.MethodType):\n inject_func_as_method(self, func, class_=class_, allow_override=True, verbose=verbose)", "docstring": "rebinds all class methods\n\nArgs:\nself (object): class instance to reload\nclass_ (type): type to reload as\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_class import * # NOQA\n>>> self = '?'\n>>> class_ = '?'\n>>> result = reload_class_methods(self, class_)\n>>> print(result)", "source": "codesearchnet"} {"code": "def AddAdGroup(self, client_customer_id, campaign_id, name, status):\n \n self.client.SetClientCustomerId(client_customer_id)\n\n ad_group_service = self.client.GetService('AdGroupService')\n operations = [{\n 'operator': 'ADD',\n 'operand': {\n 'campaignId': campaign_id,\n 'name': name,\n 'status': status\n }\n }]\n ad_group_service.mutate(operations)", "docstring": "Create a new ad group.\n\nArgs:\nclient_customer_id: str Client Customer Id used to create the AdGroup.\ncampaign_id: str Id of the campaign to use.\nname: str Name to assign to the AdGroup.\nstatus: str Status to assign to the AdGroup when it is created.", "source": "juraj-google-style"} {"code": "def mean_minimum_centroid_distance(item_a, item_b, max_value):\n \n centroids_a = np.array([item_a.center_of_mass(t) for t in item_a.times])\n centroids_b = np.array([item_b.center_of_mass(t) for t in item_b.times])\n distance_matrix = (centroids_a[:, 0:1] - centroids_b.T[0:1]) ** 2 + (centroids_a[:, 1:] - centroids_b.T[1:]) ** 2\n mean_min_distances = np.sqrt(distance_matrix.min(axis=0).mean() + distance_matrix.min(axis=1).mean())\n return np.minimum(mean_min_distances, max_value) / float(max_value)", "docstring": "RMS difference in the minimum distances from the centroids of one track to the centroids of another track\n\nArgs:\nitem_a: STObject from the first set in TrackMatcher\nitem_b: STObject from the second set in TrackMatcher\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "juraj-google-style"} {"code": "def build_action(name=None, image_uri=None, commands=None, entrypoint=None, environment=None, pid_namespace=None, flags=None, port_mappings=None, mounts=None, labels=None):\n return {'name': name, 'imageUri': image_uri, 'commands': commands, 'entrypoint': entrypoint, 'environment': environment, 'pidNamespace': pid_namespace, 'flags': flags, 'portMappings': port_mappings, 'mounts': mounts, 'labels': labels}", "docstring": "Build an Action object for a Pipeline request.\n\nArgs:\nname (str): An optional name for the container.\nimage_uri (str): The URI to pull the container image from.\ncommands (List[str]): commands and arguments to run inside the container.\nentrypoint (str): overrides the ENTRYPOINT specified in the container.\nenvironment (dict[str,str]): The environment to pass into the container.\npid_namespace (str): The PID namespace to run the action inside.\nflags (str): Flags that control the execution of this action.\nport_mappings (dict[int, int]): A map of container to host port mappings for\nthis container.\nmounts (List): A list of mounts to make 
available to the action.\nlabels (dict[str]): Labels to associate with the action.\n\nReturns:\nAn object representing an Action resource.", "source": "codesearchnet"} {"code": "def create(path, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO):\n filesystem = FileSystems.get_filesystem(path)\n return filesystem.create(path, mime_type, compression_type)", "docstring": "Returns a write channel for the given file path.\n\nArgs:\npath: string path of the file object to be written to the system\nmime_type: MIME type to specify the type of content in the file object\ncompression_type: Type of compression to be used for this object. See\n``CompressionTypes`` for possible values.\n\nReturns: file handle with a ``close`` function for the user to use.", "source": "github-repos"} {"code": "def get_capture_file(self):\n raise NotImplementedError('Base class should not be called directly!')", "docstring": "The sniffer places a capture in the logger directory. This function\nenables the caller to obtain the path of that capture.\n\nReturns:\nThe full path of the current or last capture.", "source": "github-repos"} {"code": "def config_stdio(self, log_configurations: Optional[List[LogConfiguration]]=None, default_level=logging.INFO) -> None:\n if (not log_configurations):\n for logger in self.loggers.values():\n self._restrict_output(logger, default_level)\n else:\n for (component, level) in log_configurations:\n try:\n logger = self.loggers[(self.root + component)]\n except KeyError:\n raise ValueError('Failed to configure component. Invalid name: {}'.format(component))\n self._restrict_output(logger, level)", "docstring": "Configure the stdio `StreamHandler` levels on the specified loggers.\nIf no log configurations are specified then the `default_level` will be applied to all handlers.\n\nArgs:\nlog_configurations: a list of (component name, log level) tuples\ndefault_level: logging level to apply when no log_configurations are specified", "source": "codesearchnet"} {"code": "def closing(input_rasterfilename, times):\n \n input_raster = RasterUtilClass.read_raster(input_rasterfilename)\n closing_raster = input_raster\n for i in range(times):\n closing_raster = RasterUtilClass.raster_dilation(closing_raster)\n for i in range(times):\n closing_raster = RasterUtilClass.raster_erosion(closing_raster)\n return closing_raster", "docstring": "Do closing.\n\nClosing: Dilate firstly, then Erode.\n\nArgs:\ninput_rasterfilename: input original raster image filename.\ntimes: Erode and Dilate times.\n\nReturns:\nclosing_raster: raster image after close.", "source": "juraj-google-style"} {"code": "def get_backup(self, id_or_uri):\n \n uri = self.BACKUPS_PATH + '/' + extract_id_from_uri(id_or_uri)\n return self._client.get(id_or_uri=uri)", "docstring": "Get the details for the backup from an Artifact Bundle.\n\nArgs:\nid_or_uri: ID or URI of the Artifact Bundle.\n\nReturns:\nDict: Backup for an Artifacts Bundle.", "source": "juraj-google-style"} {"code": "def get_paths(self, id_or_uri, path_id_or_uri=''):\n \n if path_id_or_uri:\n uri = self._client.build_uri(path_id_or_uri)\n if \"/paths\" not in uri:\n uri = self._client.build_uri(\n id_or_uri) + \"/paths\" + \"/\" + path_id_or_uri\n\n else:\n uri = self._client.build_uri(id_or_uri) + \"/paths\"\n\n return self._client.get(uri)", "docstring": "Gets all paths or a specific attachment path for the specified volume attachment.\n\nArgs:\nid_or_uri: Can be either the volume attachment id or the volume attachment uri.\npath_id_or_uri: Can be 
either the path id or the path uri.\n\nReturns:\ndict: Paths.", "source": "juraj-google-style"} {"code": "def setup(self, keywords=None): \n \n self._keywords = keywords\n self._output_path = tempfile.mkdtemp()", "docstring": "Sets up the _keywords attribute.\n\nArgs:\nkeywords: pipe separated list of keyword to search", "source": "juraj-google-style"} {"code": "def easeInOutQuad(n):\n _checkRange(n)\n if (n < 0.5):\n return (2 * (n ** 2))\n else:\n n = ((n * 2) - 1)\n return ((- 0.5) * ((n * (n - 2)) - 1))", "docstring": "A quadratic tween function that accelerates, reaches the midpoint, and then decelerates.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "codesearchnet"} {"code": "def operator_shards(self, qubits: List[cirq.GridQubit]):\n raise NotImplementedError()", "docstring": "Parameter independent Pauli Z strings to measure.\n\nArgs:\nqubits: List of cirq.GridQubits. objects to measure.\n\nReturns:\nList of PauliSum objects whose expectation values are fed to\n`operator_expectation` to compute average energy.", "source": "github-repos"} {"code": "def from_json(json_str, allow_pickle=False):\n if six.PY3:\n if isinstance(json_str, bytes):\n json_str = json_str.decode('utf-8')\n UtoolJSONEncoder = make_utool_json_encoder(allow_pickle)\n object_hook = UtoolJSONEncoder._json_object_hook\n val = json.loads(json_str, object_hook=object_hook)\n return val", "docstring": "Decodes a JSON object specified in the utool convention\n\nArgs:\njson_str (str):\nallow_pickle (bool): (default = False)\n\nReturns:\nobject: val\n\nCommandLine:\npython -m utool.util_cache from_json --show\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_cache import * # NOQA\n>>> import utool as ut\n>>> json_str = 'just a normal string'\n>>> json_str = '[\"just a normal string\"]'\n>>> allow_pickle = False\n>>> val = from_json(json_str, allow_pickle)\n>>> result = ('val = %s' % (ut.repr2(val),))\n>>> print(result)", "source": "codesearchnet"} {"code": "def _ParseSubKey(\n self, parser_mediator, registry_key, parent_path_segments,\n codepage='cp1252'):\n \n try:\n mrulistex = self._ParseMRUListExValue(registry_key)\n except (ValueError, errors.ParseError) as exception:\n parser_mediator.ProduceExtractionWarning(\n 'unable to parse MRUListEx value with error: {0!s}'.format(exception))\n return\n\n if not mrulistex:\n return\n\n entry_numbers = {}\n values_dict = {}\n value_strings = {}\n\n found_terminator = False\n for index, entry_number in enumerate(mrulistex):\n \n if entry_number == -1:\n continue\n\n if found_terminator:\n parser_mediator.ProduceExtractionWarning((\n 'found additional MRUListEx entries after terminator in key: '\n '{0:s}.').format(registry_key.path))\n\n \n found_terminator = False\n\n path_segment = self._ParseMRUListExEntryValue(\n parser_mediator, registry_key, index, entry_number, values_dict,\n value_strings, parent_path_segments, codepage=codepage)\n\n entry_numbers[entry_number] = path_segment\n\n event_data = windows_events.WindowsRegistryEventData()\n event_data.key_path = registry_key.path\n event_data.offset = registry_key.offset\n event_data.regvalue = values_dict\n event_data.source_append = self._SOURCE_APPEND\n event_data.urls = self.URLS\n\n event = time_events.DateTimeValuesEvent(\n registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n for 
entry_number, path_segment in iter(entry_numbers.items()):\n sub_key_name = '{0:d}'.format(entry_number)\n sub_key = registry_key.GetSubkeyByName(sub_key_name)\n if not sub_key:\n parser_mediator.ProduceExtractionWarning(\n 'Missing BagMRU sub key: {0:d} in key: {1:s}.'.format(\n entry_number, registry_key.path))\n continue\n\n parent_path_segments.append(path_segment)\n self._ParseSubKey(\n parser_mediator, sub_key, parent_path_segments, codepage=codepage)\n parent_path_segments.pop()", "docstring": "Extract event objects from a MRUListEx Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\nparent_path_segments (list[str]): parent shell item path segments.\ncodepage (Optional[str]): extended ASCII string codepage.", "source": "juraj-google-style"} {"code": "def top(self, container, ps_args=None):\n \n u = self._url(\"/containers/{0}/top\", container)\n params = {}\n if ps_args is not None:\n params['ps_args'] = ps_args\n return self._result(self._get(u, params=params), True)", "docstring": "Display the running processes of a container.\n\nArgs:\ncontainer (str): The container to inspect\nps_args (str): An optional arguments passed to ps (e.g. ``aux``)\n\nReturns:\n(str): The output of the top\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"} {"code": "def is_first(self, value):\n \n if value == self._defaults['ai.session.isFirst'] and 'ai.session.isFirst' in self._values:\n del self._values['ai.session.isFirst']\n else:\n self._values['ai.session.isFirst'] = value", "docstring": "The is_first property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"} {"code": "def generic_visit(self, node):\n \n if (isinstance(node, ast.stmt) and\n not isinstance(node, ast.FunctionDef)):\n\n new_node = self.wrap_with_try(node)\n\n \n if isinstance(node, self.ast_try_except):\n self.try_except_handler(node)\n return new_node\n\n \n super(ErrorsCatchTransformer, self).generic_visit(node)\n\n return new_node\n\n \n return super(ErrorsCatchTransformer, self).generic_visit(node)", "docstring": "Surround node statement with a try/except block to catch errors.\n\nThis method is called for every node of the parsed code, and only\nchanges statement lines.\n\nArgs:\nnode (ast.AST): node statement to surround.", "source": "juraj-google-style"} {"code": "def write_raw_pb(tensor, step=None, name=None):\n with ops.name_scope(name, 'write_raw_pb') as scope:\n if _summary_state.writer is None:\n return constant_op.constant(False)\n if step is None:\n step = get_step()\n if step is None:\n raise ValueError('No step set. 
Please specify one either through the `step` argument or through tf.summary.experimental.set_step()')\n\n def record():\n \n with ops.device('cpu:0'):\n raw_summary_op = gen_summary_ops.write_raw_proto_summary(_summary_state.writer._resource, step, array_ops.identity(tensor), name=scope)\n with ops.control_dependencies([raw_summary_op]):\n return constant_op.constant(True)\n with ops.device('cpu:0'):\n op = smart_cond.smart_cond(should_record_summaries(), record, _nothing, name='summary_cond')\n if not context.executing_eagerly():\n ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)\n return op", "docstring": "Writes a summary using raw `tf.compat.v1.Summary` protocol buffers.\n\nExperimental: this exists to support the usage of V1-style manual summary\nwriting (via the construction of a `tf.compat.v1.Summary` protocol buffer)\nwith the V2 summary writing API.\n\nArgs:\ntensor: the string Tensor holding one or more serialized `Summary` protobufs\nstep: Explicit `int64`-castable monotonic step value for this summary. If\nomitted, this defaults to `tf.summary.experimental.get_step()`, which must\nnot be None.\nname: Optional string name for this op.\n\nReturns:\nTrue on success, or false if no summary was written because no default\nsummary writer was available.\n\nRaises:\nValueError: if a default writer exists, but no step was provided and\n`tf.summary.experimental.get_step()` is None.", "source": "github-repos"} {"code": "def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, past_key_values_length=0, training=False):\n assert not (input_ids is None and inputs_embeds is None)\n if input_ids is not None:\n check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n input_shape = shape_list(inputs_embeds)[:-1]\n if token_type_ids is None:\n token_type_ids = tf.fill(dims=input_shape, value=0)\n if position_ids is None:\n if input_ids is not None:\n position_ids = self.create_position_ids_from_input_ids(input_ids=input_ids, past_key_values_length=past_key_values_length)\n else:\n position_ids = tf.expand_dims(tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0)\n position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n final_embeddings = inputs_embeds + position_embeds + token_type_embeds\n final_embeddings = self.LayerNorm(inputs=final_embeddings)\n final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\nfinal_embeddings (`tf.Tensor`): output embedding tensor.", "source": "github-repos"} {"code": "def check(cls, status):\n \n assert cls.trigger is not None, 'Invalid ErrorTrap, trigger not set'\n assert cls.error is not None, 'Invalid ErrorTrap, error not set'\n\n if status == cls.trigger:\n \n \n raise cls.error()", "docstring": "Checks if a status enum matches the trigger originally set, and\nif so, raises the appropriate error.\n\nArgs:\nstatus (int, enum): A protobuf enum response status to check.\n\nRaises:\nAssertionError: If trigger or error were not set.\n_ApiError: If the statuses don't match. Do not catch. 
Will be\ncaught automatically and sent back to the client.", "source": "juraj-google-style"} {"code": "def generate_pb_config(pb_id: str,\n pb_config: dict = None,\n workflow_config: dict = None) -> dict:\n \n if workflow_config is None:\n workflow_config = dict()\n if pb_config is None:\n pb_config = dict()\n pb_type = pb_config.get('type', choice(PB_TYPES))\n workflow_id = workflow_config.get('id')\n if workflow_id is None:\n if pb_type == 'offline':\n workflow_id = choice(OFFLINE_WORKFLOWS)\n else:\n workflow_id = choice(REALTIME_WORKFLOWS)\n workflow_version = workflow_config.get('version', generate_version())\n workflow_parameters = workflow_config.get('parameters', dict())\n pb_data = dict(\n id=pb_id,\n version=__pb_version__,\n type=pb_type,\n priority=pb_config.get('priority', randint(0, 10)),\n dependencies=pb_config.get('dependencies', []),\n resources_required=pb_config.get('resources_required', []),\n workflow=dict(\n id=workflow_id,\n version=workflow_version,\n parameters=workflow_parameters\n )\n )\n return pb_data", "docstring": "Generate a PB configuration dictionary.\n\nArgs:\npb_id (str): Processing Block Id\npb_config (dict, optional) PB configuration.\nworkflow_config (dict, optional): Workflow configuration\n\nReturns:\ndict, PB configuration dictionary.", "source": "juraj-google-style"} {"code": "def pull(self, arm_id, success, failure):\n self.__beta_dist_dict[arm_id].observe(success, failure)", "docstring": "Pull arms.\n\nArgs:\narm_id: Arms master id.\nsuccess: The number of success.\nfailure: The number of failure.", "source": "codesearchnet"} {"code": "def get_cod_ids(self, formula):\n \n \n\n \n\n sql = 'select file from data where formula=\"- %s -\"' % \\\n Composition(formula).hill_formula\n text = self.query(sql).split(\"\\n\")\n cod_ids = []\n for l in text:\n m = re.search(r\"(\\d+)\", l)\n if m:\n cod_ids.append(int(m.group(1)))\n return cod_ids", "docstring": "Queries the COD for all cod ids associated with a formula. 
Requires\nmysql executable to be in the path.\n\nArgs:\nformula (str): Formula.\n\nReturns:\nList of cod ids.", "source": "juraj-google-style"} {"code": "def create_user(self, claims):\n username_claim = settings.USERNAME_CLAIM\n usermodel = get_user_model()\n (user, created) = usermodel.objects.get_or_create(**{usermodel.USERNAME_FIELD: claims[username_claim]})\n if (created or (not user.password)):\n user.set_unusable_password()\n logger.debug(\"User '{}' has been created.\".format(claims[username_claim]))\n return user", "docstring": "Create the user if it doesn't exist yet\n\nArgs:\nclaims (dict): claims from the access token\n\nReturns:\ndjango.contrib.auth.models.User: A Django user", "source": "codesearchnet"} {"code": "def observe_reward_value(self, state_arr, action_arr):\n \n if self.__check_goal_flag(action_arr) is True:\n return 1.0\n else:\n self.__move_enemy(action_arr)\n\n x, y = np.where(action_arr[-1] == 1)\n x, y = x[0], y[0]\n\n e_dist_sum = 0.0\n for e in range(self.__enemy_num):\n e_dist = np.sqrt(\n ((x - self.__enemy_pos_list[e][0]) ** 2) + ((y - self.__enemy_pos_list[e][1]) ** 2)\n )\n e_dist_sum += e_dist\n\n e_dist_penalty = e_dist_sum / self.__enemy_num\n goal_x, goal_y = self.__goal_pos\n \n if x == goal_x and y == goal_y:\n distance = 0.0\n else:\n distance = np.sqrt(((x - goal_x) ** 2) + (y - goal_y) ** 2)\n\n if (x, y) in self.__route_long_memory_list:\n repeating_penalty = self.__repeating_penalty\n else:\n repeating_penalty = 0.0\n\n return 1.0 - distance - repeating_penalty + e_dist_penalty", "docstring": "Compute the reward value.\n\nArgs:\nstate_arr: `np.ndarray` of state.\naction_arr: `np.ndarray` of action.\n\nReturns:\nReward value.", "source": "juraj-google-style"} {"code": "def DeleteOldFeedItems(client, feed_item_ids, feed):\n \n if not feed_item_ids:\n return\n\n feed_item_service = client.GetService('FeedItemService', 'v201809')\n\n operations = [{\n 'operator': 'REMOVE',\n 'operand': {\n 'feedId': feed['id'],\n 'feedItemId': feed_item_id\n }\n } for feed_item_id in feed_item_ids]\n\n feed_item_service.mutate(operations)", "docstring": "Deletes the old feed items for which extension settings have been created.\n\nArgs:\nclient: an AdWordsClient instance.\nfeed_item_ids: a list of Feed Item Ids.\nfeed: the Feed containing the given Feed Item Ids.", "source": "juraj-google-style"} {"code": "def get_visible_devices(device_type=None):\n return context.context().get_visible_devices(device_type)", "docstring": "Get the list of visible physical devices.\n\nReturns the list of `PhysicalDevice`s currently marked as visible to the\nruntime. A visible device will have at least one `LogicalDevice` associated\nwith it once the runtime is initialized.\n\nThe following example verifies all visible GPUs have been disabled:\n\n>>> physical_devices = tf.config.list_physical_devices('GPU')\n>>> try:\n... # Disable all GPUS\n... tf.config.set_visible_devices([], 'GPU')\n... visible_devices = tf.config.get_visible_devices()\n... for device in visible_devices:\n... assert device.device_type != 'GPU'\n... except:\n... # Invalid device or cannot modify virtual devices once initialized.\n... pass\n\nArgs:\ndevice_type: (optional string) Only include devices matching this device\ntype. 
For example \"CPU\" or \"GPU\".\n\nReturns:\nList of visible `PhysicalDevice`s", "source": "github-repos"} {"code": "def dotcase(text, acronyms=None):\n (words, _case, _sep) = case_parse.parse_case(text, acronyms)\n return '.'.join([w.lower() for w in words])", "docstring": "Return text in dot.case style.\n\nArgs:\ntext: input string to convert case\ndetect_acronyms: should attempt to detect acronyms\nacronyms: a list of acronyms to detect\n\n>>> dotcase(\"hello world\")\n'hello.world'\n>>> dotcase(\"helloHTMLWorld\", True, [\"HTML\"])\n'hello.html.world'", "source": "codesearchnet"} {"code": "def importFile(self, path, mode, outp=None):\n \n if not os.path.isfile(path):\n raise s_exc.NoSuchFile('File does not exist')\n\n fname = os.path.split(path)[1]\n parts = fname.rsplit('.', 1)\n ext = parts[1] if len(parts) is 2 else None\n\n if not ext or ext not in ('crt', 'key', 'p12'):\n mesg = 'importFile only supports .crt, .key, .p12 extensions'\n raise s_exc.BadFileExt(mesg=mesg, ext=ext)\n\n newpath = s_common.genpath(self.certdir, mode, fname)\n if os.path.isfile(newpath):\n raise s_exc.FileExists('File already exists')\n\n shutil.copy(path, newpath)\n if outp is not None:\n outp.printf('copied %s to %s' % (path, newpath))", "docstring": "Imports certs and keys into the Synapse cert directory\n\nArgs:\npath (str): The path of the file to be imported.\nmode (str): The certdir subdirectory to import the file into.\n\nExamples:\nImport CA certifciate 'mycoolca.crt' to the 'cas' directory.\n\ncertdir.importFile('mycoolca.crt', 'cas')\n\nNotes:\nimportFile does not perform any validation on the files it imports.\n\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def refresh(self):\n res = gpsoauth.perform_oauth(self._email, self._master_token, self._android_id, service=self._scopes, app='com.google.android.keep', client_sig='38918a453d07199354f8b19af05ec6562ced5788')\n if ('Auth' not in res):\n if ('Token' not in res):\n raise exception.LoginException(res.get('Error'))\n self._auth_token = res['Auth']\n return self._auth_token", "docstring": "Refresh the OAuth token.\n\nReturns:\nstring: The auth token.\n\nRaises:\nLoginException: If there was a problem refreshing the OAuth token.", "source": "codesearchnet"} {"code": "def post_info(self, name, message):\n \n self.post_command(OPERATIONS.CMD_POST_MESSAGE,\n _create_message(name, states.INFO_LEVEL, message))", "docstring": "Asynchronously post a user facing info message about a service.\n\nArgs:\nname (string): The name of the service\nmessage (string): The user facing info message that will be stored\nfor the service and can be queried later.", "source": "juraj-google-style"} {"code": "def __init__(self, host='localhost', port=12223):\n \n\n channel = grpc.insecure_channel('{}:{}'.format(host, port))\n self._stub = clearly_pb2_grpc.ClearlyServerStub(channel)", "docstring": "Constructs a client instance.\n\nArgs:\nhost (str): the hostname of the server\nport (int): the port of the server", "source": "juraj-google-style"} {"code": "def prepare_read(data, method='readlines', mode='r'):\n \n if hasattr(data, 'readlines'):\n data = getattr(data, method)()\n elif isinstance(data, list):\n if method == 'read':\n return ''.join(data)\n elif isinstance(data, basestring):\n data = getattr(open(data, mode), method)()\n else:\n raise TypeError('Unable to handle data of type %r' % type(data))\n return data", "docstring": "Prepare various input types for parsing.\n\nArgs:\ndata (iter): Data to read\nmethod (str): Method to process data with\nmode 
(str): Custom mode to process with, if data is a file\n\nReturns:\nlist: List suitable for parsing\n\nRaises:\nTypeError: Invalid value for data", "source": "juraj-google-style"} {"code": "def ip_address(address):\n \n try:\n return IPv4Address(address)\n except (AddressValueError, NetmaskValueError):\n pass\n\n try:\n return IPv6Address(address)\n except (AddressValueError, NetmaskValueError):\n pass\n\n if isinstance(address, bytes):\n raise AddressValueError(\n '%r does not appear to be an IPv4 or IPv6 address. '\n 'Did you pass in a bytes (str in Python 2) instead of'\n ' a unicode object?' % address)\n\n raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %\n address)", "docstring": "Take an IP string/int and return an object of the correct type.\n\nArgs:\naddress: A string or integer, the IP address. Either IPv4 or\nIPv6 addresses may be supplied; integers less than 2**32 will\nbe considered to be IPv4 by default.\n\nReturns:\nAn IPv4Address or IPv6Address object.\n\nRaises:\nValueError: if the *address* passed isn't either a v4 or a v6\naddress", "source": "juraj-google-style"} {"code": "def _load_file_from_gcs(gcs_file_path, credentials=None):\n \n gcs_service = _get_storage_service(credentials)\n\n bucket_name, object_name = gcs_file_path[len('gs://'):].split('/', 1)\n request = gcs_service.objects().get_media(\n bucket=bucket_name, object=object_name)\n\n file_handle = io.BytesIO()\n downloader = MediaIoBaseDownload(file_handle, request, chunksize=1024 * 1024)\n done = False\n while not done:\n _, done = _downloader_next_chunk(downloader)\n filevalue = file_handle.getvalue()\n if not isinstance(filevalue, six.string_types):\n filevalue = filevalue.decode()\n return six.StringIO(filevalue)", "docstring": "Load context from a text file in gcs.\n\nArgs:\ngcs_file_path: The target file path; should have the 'gs://' prefix.\ncredentials: Optional credential to be used to load the file from gcs.\n\nReturns:\nThe content of the text file as a string.", "source": "juraj-google-style"} {"code": "def __init__(self, data_type_definition):\n \n super(PaddingMap, self).__init__(data_type_definition)\n self.byte_size = None", "docstring": "Initializes a padding data type map.\n\nArgs:\ndata_type_definition (DataTypeDefinition): data type definition.\n\nRaises:\nFormatError: if the data type map cannot be determined from the data\ntype definition.", "source": "juraj-google-style"} {"code": "def check_symmetry(A):\n \n A = asanyarray(A)\n if A.ndim != 2:\n raise ValueError(\"Checks symmetry only for bi-dimensional arrays.\")\n\n if A.shape[0] != A.shape[1]:\n return False\n\n return abs(A - A.T).max() < sqrt(finfo(float).eps)", "docstring": "Check if ``A`` is a symmetric matrix.\n\nArgs:\nA (array_like): Matrix.\n\nReturns:\nbool: ``True`` if ``A`` is symmetric; ``False`` otherwise.", "source": "juraj-google-style"} {"code": "def plot_spectrum_min_max(self, t=0, f_start=None, f_stop=None, logged=False, if_id=0, c=None, **kwargs):\n \n ax = plt.gca()\n\n plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)\n\n \n if self.header[b'foff'] < 0:\n plot_data = plot_data[..., ::-1] \n plot_f = plot_f[::-1]\n\n fig_max = plot_data[0].max()\n fig_min = plot_data[0].min()\n\n print("averaging along time axis...")\n\n \n if len(plot_data.shape) > 1:\n plot_max = plot_data.max(axis=0)\n plot_min = plot_data.min(axis=0)\n plot_data = plot_data.mean(axis=0)\n else:\n plot_max = plot_data.max()\n plot_min = plot_data.min()\n plot_data = plot_data.mean()\n\n \n dec_fac_x = 1\n MAX_PLT_POINTS = 8*64 \n if 
plot_data.shape[0] > MAX_PLT_POINTS:\n dec_fac_x = int(plot_data.shape[0] / MAX_PLT_POINTS)\n\n plot_data = rebin(plot_data, dec_fac_x, 1)\n plot_min = rebin(plot_min, dec_fac_x, 1)\n plot_max = rebin(plot_max, dec_fac_x, 1)\n plot_f = rebin(plot_f, dec_fac_x, 1)\n\n if logged:\n plt.plot(plot_f, db(plot_data), label=\"mean\")\n plt.plot(plot_f, db(plot_max), label=\"max\")\n plt.plot(plot_f, db(plot_min), label='min')\n plt.ylabel(\"Power [dB]\")\n else:\n plt.plot(plot_f, plot_data, label=\"mean\")\n plt.plot(plot_f, plot_max, label=\"max\")\n plt.plot(plot_f, plot_min, label='min')\n plt.ylabel(\"Power [counts]\")\n plt.xlabel(\"Frequency [MHz]\")\n plt.legend()\n\n try:\n plt.title(self.header[b'source_name'])\n except KeyError:\n plt.title(self.filename)\n\n plt.xlim(plot_f[0], plot_f[-1])\n if logged:\n plt.ylim(db(fig_min),db(fig_max))", "docstring": "Plot frequency spectrum of a given file\n\nArgs:\nlogged (bool): Plot in linear (False) or dB units (True)\nif_id (int): IF identification (if multiple IF signals in file)\nc: color for line\nkwargs: keyword args to be passed to matplotlib plot()", "source": "juraj-google-style"} {"code": "def create_model_package_from_algorithm(self, name, description, algorithm_arn, model_data):\n request = {'ModelPackageName': name, 'ModelPackageDescription': description, 'SourceAlgorithmSpecification': {'SourceAlgorithms': [{'AlgorithmName': algorithm_arn, 'ModelDataUrl': model_data}]}}\n try:\n LOGGER.info('Creating model package with name: {}'.format(name))\n self.sagemaker_client.create_model_package(**request)\n except ClientError as e:\n error_code = e.response['Error']['Code']\n message = e.response['Error']['Message']\n if ((error_code == 'ValidationException') and ('ModelPackage already exists' in message)):\n LOGGER.warning('Using already existing model package: {}'.format(name))\n else:\n raise", "docstring": "Create a SageMaker Model Package from the results of training with an Algorithm Package\n\nArgs:\nname (str): ModelPackage name\ndescription (str): Model Package description\nalgorithm_arn (str): arn or name of the algorithm used for training.\nmodel_data (str): s3 URI to the model artifacts produced by training", "source": "codesearchnet"} {"code": "def install(self, connection, partition, table_name=None, index_columns=None, materialize=False,\n logger=None):\n \n\n raise NotImplementedError", "docstring": "Installs partition's mpr to the database to allow to execute sql queries over mpr.\n\nArgs:\nconnection:\npartition (orm.Partition):\nmaterialize (boolean): if True, create generic table. 
If False create MED over mpr.\n\nReturns:\nstr: name of the created table.", "source": "juraj-google-style"} {"code": "def from_string(string):\n lines = string.split('\\n')\n toks = lines[0].split()\n lengths = [float(i) for i in toks]\n toks = lines[1].split()\n angles = [float(i) for i in toks[0:3]]\n latt = Lattice.from_lengths_and_angles(lengths, angles)\n sp = []\n coords = []\n for l in lines[4:]:\n m = re.match('\\\\d+\\\\s+(\\\\w+)\\\\s+([0-9\\\\-\\\\.]+)\\\\s+([0-9\\\\-\\\\.]+)\\\\s+([0-9\\\\-\\\\.]+)', l.strip())\n if m:\n sp.append(m.group(1))\n coords.append([float(m.group(i)) for i in range(2, 5)])\n return Cssr(Structure(latt, sp, coords))", "docstring": "Reads a string representation to a Cssr object.\n\nArgs:\nstring (str): A string representation of a CSSR.\n\nReturns:\nCssr object.", "source": "codesearchnet"} {"code": "def __init__(self, memory_estimator, scheduler_alg=\"LIST\"):\n \n self._estimator = memory_estimator\n self._scheduler_alg = scheduler_alg\n self._layout_validator = self._estimator.get_layout_validator()\n self._graph = self._estimator.get_graph_interface()\n self._memory_contents = None \n\n \n self._model = cp_model.CpModel()\n\n self._preprocess_input()\n self._initialize_variables()\n self._add_constraints()\n self._build_objective_function()", "docstring": "Uses a auto_mtf.memory_estimator to set up the integer program.\n\nArgs:\nmemory_estimator: a memory_estimator.MemoryEstimator.\nscheduler_alg: an optional string, see scheduler.MinimizePeakMemory.", "source": "juraj-google-style"} {"code": "def get_computed_entry(self, inc_structure=True, parameters=None, data=None):\n param_names = {'is_hubbard', 'hubbards', 'potcar_symbols', 'potcar_spec', 'run_type'}\n if parameters:\n param_names.update(parameters)\n params = {p: getattr(self, p) for p in param_names}\n data = ({p: getattr(self, p) for p in data} if (data is not None) else {})\n if inc_structure:\n return ComputedStructureEntry(self.final_structure, self.final_energy, parameters=params, data=data)\n else:\n return ComputedEntry(self.final_structure.composition, self.final_energy, parameters=params, data=data)", "docstring": "Returns a ComputedStructureEntry from the vasprun.\n\nArgs:\ninc_structure (bool): Set to True if you want\nComputedStructureEntries to be returned instead of\nComputedEntries.\nparameters (list): Input parameters to include. It has to be one of\nthe properties supported by the Vasprun object. If\nparameters is None, a default set of parameters that are\nnecessary for typical post-processing will be set.\ndata (list): Output data to include. 
Has to be one of the properties\nsupported by the Vasprun object.\n\nReturns:\nComputedStructureEntry/ComputedEntry", "source": "codesearchnet"} {"code": "def __init__(self, workdays=None, holidays=None):\n \n if workdays is None:\n self.workdays = [MO, TU, WE, TH, FR]\n else:\n self.workdays = sorted(list(set(workdays))) \n\n if holidays is None:\n holidays = []\n\n \n \n \n weekdaymap = []\n for wkday in range(0, 7):\n wmap = {}\n wmap['dayofweek'] = wkday\n if wkday in self.workdays:\n wmap['isworkday'] = True\n i = self.workdays.index(wkday)\n \n if i == len(self.workdays) - 1: \n wmap['nextworkday'] = self.workdays[0]\n wmap['offsetnext'] = wmap['nextworkday'] + 7 - wkday\n else:\n wmap['nextworkday'] = self.workdays[i+1]\n wmap['offsetnext'] = wmap['nextworkday'] - wkday\n \n if i == 0: \n wmap['prevworkday'] = self.workdays[-1]\n wmap['offsetprev'] = wmap['prevworkday'] - wkday - 7\n else:\n wmap['prevworkday'] = self.workdays[i-1]\n wmap['offsetprev'] = wmap['prevworkday'] - wkday\n else:\n wmap['isworkday'] = False\n \n after = [x for x in range(wkday+1, 7) if x in self.workdays]\n if after: \n wmap['nextworkday'] = after[0]\n wmap['offsetnext'] = wmap['nextworkday'] - wkday\n else:\n wmap['nextworkday'] = self.workdays[0]\n wmap['offsetnext'] = wmap['nextworkday'] + 7 - wkday\n \n before = [x for x in range(0, wkday) if x in self.workdays]\n if before: \n wmap['prevworkday'] = before[-1]\n wmap['offsetprev'] = wmap['prevworkday'] - wkday\n else:\n wmap['prevworkday'] = self.workdays[-1]\n wmap['offsetprev'] = wmap['prevworkday'] - wkday - 7\n weekdaymap.append(DayOfWeek(**wmap))\n self.weekdaymap = weekdaymap\n\n \n holidays = set([parsefun(hol) for hol in holidays])\n self.holidays = sorted(\n [hol for hol in holidays if weekdaymap[hol.weekday()].isworkday])", "docstring": "Initialize object and creates the week day map.\n\nArgs:\nworkdays: List or tuple of week days considered 'work days'.\nAnything not in this list is considered a rest day.\nDefaults to [MO, TU, WE, TH, FR].\nholidays: List or tuple of holidays (or strings).\nDefault is [].", "source": "juraj-google-style"} {"code": "def find_import(self, module_name: str) -> tuple[str, bool] | None:\n module_name_split = module_name.split('.')\n for searchdir in self.options.pythonpath:\n path = path_utils.join(searchdir, *module_name_split)\n init_path = path_utils.join(path, '__init__')\n full_path = self.get_pyi_path(init_path)\n if full_path is not None:\n log.debug('Found module %r with path %r', module_name, init_path)\n return (full_path, True)\n elif self.options.imports_map is None and path_utils.isdir(path):\n log.debug('Created empty module %r with path %r', module_name, init_path)\n full_path = path_utils.join(path, '__init__.pyi')\n return (full_path, False)\n else:\n full_path = self.get_pyi_path(path)\n if full_path is not None:\n log.debug('Found module %r in path %r', module_name, path)\n return (full_path, True)\n return None", "docstring": "Search through pythonpath for a module.\n\nLoops over self.options.pythonpath, taking care of the semantics for\n__init__.pyi, and pretending there's an empty __init__.pyi if the path\n(derived from module_name) is a directory.\n\nArgs:\nmodule_name: module name\n\nReturns:\n- (path, file_exists) if we find a path (file_exists will be false if we\nhave found a directory where we need to create an __init__.pyi)\n- None if we cannot find a full path", "source": "github-repos"} {"code": "def get(self, txn_id):\n \n if txn_id not in self._receipt_db:\n raise 
KeyError('Unknown transaction id {}'.format(txn_id))\n\n txn_receipt_bytes = self._receipt_db[txn_id]\n txn_receipt = TransactionReceipt()\n txn_receipt.ParseFromString(txn_receipt_bytes)\n return txn_receipt", "docstring": "Returns the TransactionReceipt\n\nArgs:\ntxn_id (str): the id of the transaction for which the receipt\nshould be retrieved.\n\nReturns:\nTransactionReceipt: The receipt for the given transaction id.\n\nRaises:\nKeyError: if the transaction id is unknown.", "source": "juraj-google-style"} {"code": "def _separate_words(string):\n \n words = []\n separator = \"\"\n\n \n \n i = 1\n \n s = 0\n \n p = string[0:1]\n\n \n \n was_upper = False\n if string.isupper():\n string = string.lower()\n was_upper = True\n\n \n \n while i <= len(string):\n c = string[i:i + 1]\n\n split = False\n if i < len(string):\n \n if UPPER.match(c):\n split = True\n \n elif NOTSEP.match(c) and SEP.match(p):\n split = True\n \n elif SEP.match(c) and NOTSEP.match(p):\n split = True\n else:\n \n \n split = True\n\n if split:\n if NOTSEP.match(p):\n words.append(string[s:i])\n else:\n \n \n if not separator:\n separator = string[s:s + 1]\n\n \n words.append(None)\n \n \n \n \n s = i\n\n i += 1\n p = c\n\n return words, separator, was_upper", "docstring": "Segment string on separator into list of words.\n\nArguments:\nstring -- the string we want to process\n\nReturns:\nwords -- list of words the string got minced to\nseparator -- the separator char intersecting words\nwas_upper -- whether string happened to be upper-case", "source": "juraj-google-style"} {"code": "def verifyAccount(self, subject, vendorSpecific=None):\n \n response = self.verifyAccountResponse(subject, vendorSpecific)\n return self._read_boolean_response(response)", "docstring": "See Also: verifyAccountResponse()\n\nArgs:\nsubject:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"} {"code": "def _create_single_feature_method(feature):\n fx_name = feature.name.lower()\n if ('detection' in fx_name):\n fx_doc = 'Perform {0}.'.format(fx_name.replace('_', ' '))\n else:\n fx_doc = 'Return {desc} information.'.format(desc=fx_name.replace('_', ' '))\n fx_doc += '\\n\\n Args:\\n image (:class:`~.{module}.types.Image`): The image to analyze.\\n max_results (int):\\n Number of results to return, does not apply for\\n TEXT_DETECTION, DOCUMENT_TEXT_DETECTION, or CROP_HINTS.\\n retry (int): Number of retries to do before giving up.\\n timeout (int): Number of seconds before timing out.\\n kwargs (dict): Additional properties to be set on the\\n :class:`~.{module}.types.AnnotateImageRequest`.\\n\\n Returns:\\n :class:`~.{module}.types.AnnotateImageResponse`: The API response.\\n '\n feature_value = {'type': feature}\n\n def inner(self, image, max_results=None, retry=None, timeout=None, **kwargs):\n 'Return a single feature annotation for the given image.\\n\\n Intended for use with functools.partial, to create the particular\\n single-feature methods.\\n '\n copied_features = feature_value.copy()\n if (max_results is not None):\n copied_features['max_results'] = max_results\n request = dict(image=image, features=[copied_features], **kwargs)\n response = self.annotate_image(request, retry=retry, timeout=timeout)\n return response\n inner.__name__ = fx_name\n inner.__doc__ = fx_doc\n return inner", "docstring": "Return a function that will detect a single feature.\n\nArgs:\nfeature (enum): A specific feature defined as a member of\n:class:`~enums.Feature.Type`.\n\nReturns:\nfunction: A helper function to detect just that feature.", 
"source": "codesearchnet"} {"code": "def get_hgnc_id(gene_info, adapter):\n \n hgnc_id = gene_info.get('hgnc_id')\n hgnc_symbol = gene_info.get('hgnc_symbol')\n\n true_id = None\n\n if hgnc_id:\n true_id = int(hgnc_id)\n else:\n gene_result = adapter.hgnc_genes(hgnc_symbol)\n if gene_result.count() == 0:\n raise Exception(\"No gene could be found for {}\".format(hgnc_symbol))\n for gene in gene_result:\n if hgnc_symbol.upper() == gene.hgnc_symbol.upper():\n true_id = gene.hgnc_id\n if not gene_info['hgnc_id']:\n true_id = gene.hgnc_id\n\n return true_id", "docstring": "Get the hgnc id for a gene\n\nThe proprity order will be\n1. if there is a hgnc id this one will be choosen\n2. if the hgnc symbol matches a genes proper hgnc symbol\n3. if the symbol ony matches aliases on several genes one will be\nchoosen at random\n\nArgs:\ngene_info(dict)\nadapter\n\nReturns:\ntrue_id(int)", "source": "juraj-google-style"} {"code": "def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n super(LongInteger, self).write(ostream, kmip_version=kmip_version)\n ostream.write(pack('!q', self.value))", "docstring": "Write the encoding of the LongInteger to the output stream.\n\nArgs:\nostream (stream): A buffer to contain the encoded bytes of a\nLongInteger. Usually a BytearrayStream object. Required.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"} {"code": "def create_epub(self, output_directory, epub_name=None):\n \n def createTOCs_and_ContentOPF():\n for epub_file, name in ((self.toc_html, 'toc.html'), (self.toc_ncx, 'toc.ncx'), (self.opf, 'content.opf'),):\n epub_file.add_chapters(self.chapters)\n epub_file.write(os.path.join(self.OEBPS_DIR, name))\n\n def create_zip_archive(epub_name):\n try:\n assert isinstance(epub_name, basestring) or epub_name is None\n except AssertionError:\n raise TypeError('epub_name must be string or None')\n if epub_name is None:\n epub_name = self.title\n epub_name = ''.join([c for c in epub_name if c.isalpha() or c.isdigit() or c == ' ']).rstrip()\n epub_name_with_path = os.path.join(output_directory, epub_name)\n try:\n os.remove(os.path.join(epub_name_with_path, '.zip'))\n except OSError:\n pass\n shutil.make_archive(epub_name_with_path, 'zip', self.EPUB_DIR)\n return epub_name_with_path + '.zip'\n\n def turn_zip_into_epub(zip_archive):\n epub_full_name = zip_archive.strip('.zip') + '.epub'\n try:\n os.remove(epub_full_name)\n except OSError:\n pass\n os.rename(zip_archive, epub_full_name)\n return epub_full_name\n createTOCs_and_ContentOPF()\n epub_path = turn_zip_into_epub(create_zip_archive(epub_name))\n return epub_path", "docstring": "Create an epub file from this object.\n\nArgs:\noutput_directory (str): Directory to output the epub file to\nepub_name (Option[str]): The file name of your epub. This should not contain\n.epub at the end. 
If this argument is not provided, defaults to the title of the epub.", "source": "juraj-google-style"} {"code": "def print_resolve_diff(self, other, heading=None):\n \n d = self.get_resolve_diff(other)\n if not d:\n return\n\n rows = []\n if heading is True and self.load_path and other.load_path:\n a = os.path.basename(self.load_path)\n b = os.path.basename(other.load_path)\n heading = (a, b)\n if isinstance(heading, tuple):\n rows.append(list(heading) + [\"\"])\n rows.append(('-' * len(heading[0]), '-' * len(heading[1]), \"\"))\n\n newer_packages = d.get(\"newer_packages\", {})\n older_packages = d.get(\"older_packages\", {})\n added_packages = d.get(\"added_packages\", set())\n removed_packages = d.get(\"removed_packages\", set())\n\n if newer_packages:\n for name, pkgs in newer_packages.iteritems():\n this_pkg = pkgs[0]\n other_pkg = pkgs[-1]\n diff_str = \"(+%d versions)\" % (len(pkgs) - 1)\n rows.append((this_pkg.qualified_name,\n other_pkg.qualified_name,\n diff_str))\n\n if older_packages:\n for name, pkgs in older_packages.iteritems():\n this_pkg = pkgs[0]\n other_pkg = pkgs[-1]\n diff_str = \"(-%d versions)\" % (len(pkgs) - 1)\n rows.append((this_pkg.qualified_name,\n other_pkg.qualified_name,\n diff_str))\n\n if added_packages:\n for pkg in sorted(added_packages, key=lambda x: x.name):\n rows.append((\"-\", pkg.qualified_name, \"\"))\n\n if removed_packages:\n for pkg in sorted(removed_packages, key=lambda x: x.name):\n rows.append((pkg.qualified_name, \"-\", \"\"))\n\n print '\\n'.join(columnise(rows))", "docstring": "Print the difference between the resolve of two contexts.\n\nArgs:\nother (`ResolvedContext`): Context to compare to.\nheading: One of:\n- None: Do not display a heading;\n- True: Display the filename of each context as a heading, if\nboth contexts have a filepath;\n- 2-tuple: Use the given two strings as headings - the first is\nthe heading for `self`, the second for `other`.", "source": "juraj-google-style"} {"code": "def create_query(self, fields=None):\n if (fields is None):\n return Query(self.fields)\n non_contained_fields = (set(fields) - set(self.fields))\n if non_contained_fields:\n raise BaseLunrException('Fields {} are not part of the index', non_contained_fields)\n return Query(fields)", "docstring": "Convenience method to create a Query with the Index's fields.\n\nArgs:\nfields (iterable, optional): The fields to include in the Query,\ndefaults to the Index's `all_fields`.\n\nReturns:\nQuery: With the specified fields or all the fields in the Index.", "source": "codesearchnet"} {"code": "def _list_records_internal(self, rtype=None, name=None, content=None, identifier=None):\n name = (self._full_name(name) if (name is not None) else name)\n if (self._records is None):\n records = []\n rows = self._get_dns_entry_trs()\n for (index, row) in enumerate(rows):\n self._log('DNS list entry', row)\n try:\n rec = {}\n if row.has_attr('ondblclick'):\n rec['id'] = int(row['ondblclick'].split('id=')[1].split(\"'\")[0])\n else:\n rec['id'] = (- index)\n columns = row.find_all('td')\n rec['name'] = (columns[0].string or '').strip()\n rec['type'] = (columns[1].contents[1] or '').strip()\n rec['content'] = (columns[2].string or '').strip()\n rec['priority'] = (columns[3].string or '').strip()\n rec['ttl'] = (columns[4].string or '').strip()\n if rec['priority']:\n rec['priority'] = int(rec['priority'])\n if rec['ttl']:\n rec['ttl'] = int(rec['ttl'])\n except Exception as error:\n errmsg = 'Cannot parse DNS entry ({}).'.format(error)\n LOGGER.warning(errmsg)\n raise 
AssertionError(errmsg)\n records.append(rec)\n self._records = records\n records = self._filter_records(self._records, rtype, name, content, identifier)\n LOGGER.debug('Final records (%d): %s', len(records), records)\n return records", "docstring": "Filter and list DNS entries of domain zone on Easyname.\nEasyname shows each entry in a HTML table row and each attribute on a\ntable column.\n\nArgs:\n[rtype] (str): Filter by DNS rtype (e.g. A, TXT, MX, etc)\n[name] (str): Filter by the name of the DNS entry, e.g the domain for\nwhich a MX entry shall be valid.\n[content] (str): Filter by the content of the DNS entry, e.g. the\nmail server hostname for a MX entry.\n[identifier] (str): Filter by the easyname id of the DNS entry.\n\nReturns:\nlist: A list of DNS entries. A DNS entry is an object with DNS\nattribute names as keys (e.g. name, content, priority, etc)\nand additionally an id.\n\nRaises:\nAssertionError: When a request returns unexpected or unknown data.", "source": "codesearchnet"} {"code": "def ApprovalRevokeRaw(aff4_path, token):\n try:\n urn = rdf_client.ClientURN(aff4_path)\n except type_info.TypeValueError:\n urn = rdfvalue.RDFURN(aff4_path)\n approval_urn = aff4.ROOT_URN.Add('ACL').Add(urn.Path()).Add(token.username).Add(utils.EncodeReasonString(token.reason))\n super_token = access_control.ACLToken(username='raw-approval-superuser')\n super_token.supervisor = True\n approval_request = aff4.FACTORY.Open(approval_urn, mode='rw', token=super_token)\n approval_request.DeleteAttribute(approval_request.Schema.APPROVER)\n approval_request.Close()", "docstring": "Revokes an approval for a given token.\n\nThis method requires raw datastore access to manipulate approvals directly.\n\nArgs:\naff4_path: The aff4_path or client id the approval should be created for.\ntoken: The token that should be revoked.", "source": "codesearchnet"} {"code": "def multiprocess_mapping(func, iterable):\n if (os.name == 'nt'):\n return list(map(func, iterable))\n try:\n p = multiprocessing.Pool()\n return_data = list(p.imap(func, iterable))\n p.close()\n p.join()\n return return_data\n except OSError:\n return list(map(func, iterable))", "docstring": "Multiprocess mapping the given function on the given iterable.\n\nThis only works in Linux and Mac systems since Windows has no forking capability. On Windows we fall back on\nsingle processing. Also, if we reach memory limits we fall back on single cpu processing.\n\nArgs:\nfunc (func): the function to apply\niterable (iterable): the iterable with the elements we want to apply the function on", "source": "codesearchnet"} {"code": "def run(image, name=None, command=None, environment=None, ports=None, volumes=None):\n if (ports and (not name)):\n abort('The ports flag currently only works if you specify a container name')\n if ports:\n ports = [parse_port_spec(p) for p in ports.split(',')]\n else:\n ports = None\n if environment:\n environment = dict([x.split('=') for x in environment.split(',')])\n else:\n environment = None\n if volumes:\n volumes = dict([x.split(':') for x in volumes.split(',')])\n else:\n volumes = None\n run_container(image=image, name=name, command=command, ports=ports, environment=environment, volumes=volumes)", "docstring": "Run a docker container.\n\nArgs:\n* image: Docker image to run, e.g. 
orchardup/redis, quay.io/hello/world\n* name=None: Container name\n* command=None: Command to execute\n* environment: Comma separated environment variables in the format NAME=VALUE\n* ports=None: Comma separated port specs in the format CONTAINER_PORT[:EXPOSED_PORT][/PROTOCOL]\n* volumes=None: Comma separated volumes in the format HOST_DIR:CONTAINER_DIR\n\nExamples:\n* fab docker.run:orchardup/redis,name=redis,ports=6379\n* fab docker.run:quay.io/hello/world,name=hello,ports=\"80:8080,1000/udp\",volumes=\"/docker/hello/log:/var/log\"\n* fab docker.run:andreasjansson/redis,environment=\"MAX_MEMORY=4G,FOO=bar\",ports=6379", "source": "codesearchnet"} {"code": "def symbolic_trace(model: 'PreTrainedModel', input_names: Optional[list[str]]=None, disable_check: bool=False, tracer_cls: type[HFTracer]=HFTracer) -> GraphModule:\n if input_names is None:\n input_names = model.dummy_inputs.keys()\n input_names = list(input_names)\n concrete_args = get_concrete_args(model, input_names)\n if not disable_check:\n check_if_model_is_supported(model)\n if 'past_key_values' in input_names and (not getattr(model.config, 'use_cache', False)):\n logger.warning('`past_key_values` were specified as input names, but model.config.use_cache = False, this might lead to unexpected behavior.')\n if 'past_key_values' not in input_names and getattr(model.config, 'use_cache', False):\n logger.warning('`past_key_values` were not specified as input names, but model.config.use_cache = True. Setting model.config.use_cache = False.')\n model.config.use_cache = False\n tracer = tracer_cls()\n traced_graph = tracer.trace(model, concrete_args=concrete_args)\n traced = torch.fx.GraphModule(model, traced_graph)\n traced.config = model.config\n traced.class_for_deserialization = model.__class__\n traced.device = model.device\n return traced", "docstring": "Performs symbolic tracing on the model.\n\nArgs:\nmodel ([`PretrainedModel`]):\nThe model to trace.\ninput_names (`List[str]`, *optional*):\nThe names of the inputs of the traced model. If unset, model.dummy_inputs.keys() are used instead.\ndisable_check (`bool`, *optional*, defaults to `False`):\nIf `True`, no check is done before trying to trace the model, this is mostly usesul for debugging purposes.\ntracer_cls (`Type[HFTracer]`, *optional*, defaults to `HFTracer`):\nThe tracer class to use for instantiating the tracer. 
If unset, `HFTracer` is used instead.\n\nReturns:\n`torch.fx.GraphModule`: A GraphModule constructed by recording operations seen while tracing the model.\n\nExample:\n\n```python\nfrom transformers.utils.fx import symbolic_trace\n\ntraced_model = symbolic_trace(model, input_names=[\"input_ids\", \"attention_mask\", \"token_type_ids\"])\n```", "source": "github-repos"} {"code": "def expand_dims(x, axis):\n if any_symbolic_tensors((x,)):\n return ExpandDims(axis=axis).symbolic_call(x)\n return backend.numpy.expand_dims(x, axis)", "docstring": "Expand the shape of a tensor.\n\nInsert a new axis at the `axis` position in the expanded tensor shape.\n\nArgs:\nx: Input tensor.\naxis: Position in the expanded axes where the new axis\n(or axes) is placed.\n\nReturns:\nOutput tensor with the number of dimensions increased.", "source": "github-repos"} {"code": "def IsInitializerList(clean_lines, linenum):\n for i in xrange(linenum, 1, (- 1)):\n line = clean_lines.elided[i]\n if (i == linenum):\n remove_function_body = Match('^(.*)\\\\{\\\\s*$', line)\n if remove_function_body:\n line = remove_function_body.group(1)\n if Search('\\\\s:\\\\s*\\\\w+[({]', line):\n return True\n if Search('\\\\}\\\\s*,\\\\s*$', line):\n return True\n if Search('[{};]\\\\s*$', line):\n return False\n return False", "docstring": "Check if current line is inside constructor initializer list.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nReturns:\nTrue if current line appears to be inside constructor initializer\nlist, False otherwise.", "source": "codesearchnet"} {"code": "def change_to_count_endpoint(endpoint):\n \n\n tokens = filter(lambda x: x != '', re.split(\"[/:]\", endpoint))\n filt_tokens = list(filter(lambda x: x != \"https\", tokens))\n last = filt_tokens[-1].split('.')[0] \n filt_tokens[-1] = last \n if last == 'counts':\n return endpoint\n else:\n return \"https:", "docstring": "Utility function to change a normal endpoint to a ``count`` api\nendpoint. Returns the same endpoint if it's already a valid count endpoint.\nArgs:\nendpoint (str): your api endpoint\n\nReturns:\nstr: the modified endpoint for a count endpoint.", "source": "juraj-google-style"} {"code": "def stats(self, **kwargs):\n return self.client.api.stats(self.id, **kwargs)", "docstring": "Stream statistics for this container. Similar to the\n``docker stats`` command.\n\nArgs:\ndecode (bool): If set to true, stream will be decoded into dicts\non the fly. Only applicable if ``stream`` is True.\nFalse by default.\nstream (bool): If set to false, only the current stats will be\nreturned instead of a stream. 
True by default.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"} {"code": "def deserialize_dtype(d):\n \n if isinstance(d['descr'], six.string_types):\n return np.dtype(d['descr'])\n descr = []\n for col in d['descr']:\n col_descr = []\n for c in col:\n if isinstance(c, six.string_types):\n col_descr.append(str(c))\n elif type(c) is list:\n col_descr.append(tuple(c))\n else:\n col_descr.append(c)\n descr.append(tuple(col_descr))\n return np.dtype(descr)", "docstring": "Deserializes a JSONified :obj:`numpy.dtype`.\n\nArgs:\nd (:obj:`dict`): A dictionary representation of a :obj:`dtype` object.\n\nReturns:\nA :obj:`dtype` object.", "source": "juraj-google-style"} {"code": "def Add(self, category, label, age):\n \n now = rdfvalue.RDFDatetime.Now()\n category = utils.SmartUnicode(category)\n\n for active_time in self.active_days:\n self.categories[active_time].setdefault(label, {})\n if (now - age).seconds < active_time * 24 * 60 * 60:\n self.categories[active_time][label][\n category] = self.categories[active_time][label].get(category, 0) + 1", "docstring": "Adds another instance of this category into the active_days counter.\n\nWe automatically count the event towards all relevant active_days. For\nexample, if the category \"Windows\" was seen 8 days ago it will be counted\ntowards the 30 day active, 14 day active but not against the 7 and 1 day\nactives.\n\nArgs:\ncategory: The category name to account this instance against.\nlabel: Client label to which this should be applied.\nage: When this instance occurred.", "source": "juraj-google-style"} {"code": "def center_crop(self, image: np.ndarray, crop_size: Dict[str, int], size: Optional[int]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n size = self.size if size is None else size\n size = get_size_dict(size)\n crop_size = get_size_dict(crop_size, param_name='crop_size')\n height, width = get_image_size(image, channel_dim=input_data_format)\n min_dim = min(height, width)\n cropped_height = size['height'] / crop_size['height'] * min_dim\n cropped_width = size['width'] / crop_size['width'] * min_dim\n return center_crop(image, size=(cropped_height, cropped_width), data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Center crop an image to `(size[\"height\"] / crop_size[\"height\"] * min_dim, size[\"width\"] / crop_size[\"width\"] *\nmin_dim)`. Where `min_dim = min(size[\"height\"], size[\"width\"])`.\n\nIf the input size is smaller than `crop_size` along any edge, the image will be padded with zeros and then\ncenter cropped.\n\nArgs:\nimage (`np.ndarray`):\nImage to center crop.\ncrop_size (`Dict[str, int]`):\nDesired output size after applying the center crop.\nsize (`Dict[str, int]`, *optional*):\nSize of the image after resizing. If not provided, the self.size attribute will be used.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the input image. 
If not provided, it will be inferred.", "source": "github-repos"} {"code": "def initAttrs(cls):\n \n def __init__(self, skype=None, raw=None, *args, **kwargs):\n super(cls, self).__init__(skype, raw)\n \n for i in range(len(args)):\n kwargs[cls.attrs[i]] = args[i]\n \n unknown = set(kwargs) - set(cls.attrs)\n if unknown:\n unknownDesc = \"an unexpected keyword argument\" if len(unknown) == 1 else \"unexpected keyword arguments\"\n unknownList = \", \".join(\"'{0}'\".format(k) for k in sorted(unknown))\n raise TypeError(\"__init__() got {0} {1}\".format(unknownDesc, unknownList))\n \n for k in cls.attrs:\n setattr(self, k, kwargs.get(k, cls.defaults.get(k)))\n\n \n setattr(cls, \"__init__\", __init__)\n return cls", "docstring": "Class decorator: automatically generate an ``__init__`` method that expects args from cls.attrs and stores them.\n\nArgs:\ncls (class): class to decorate\n\nReturns:\nclass: same, but modified, class", "source": "juraj-google-style"} {"code": "def _get_raw_data_feature_spec_per_column(self, typ: type, col_name: str) -> tf.io.VarLenFeature:\n typ = native_type_compatibility.convert_builtin_to_typing(typ)\n primitive_containers_type = (list, collections.abc.Sequence)\n is_primitive_container = typing.get_origin(typ) in primitive_containers_type\n if is_primitive_container:\n dtype = typing.get_args(typ)[0]\n if len(typing.get_args(typ)) > 1 or typing.get_origin(dtype) == Union:\n raise RuntimeError(f'Union type is not supported for column: {col_name}. Please pass a PCollection with valid schema for column {col_name} by passing a single type in container. For example, list[int].')\n elif issubclass(typ, np.generic) or typ in _default_type_to_tensor_type_map:\n dtype = typ\n else:\n raise TypeError(f'Unable to identify type: {typ} specified on column: {col_name}. 
Please provide a valid type from the following: {_default_type_to_tensor_type_map.keys()}')\n return tf.io.VarLenFeature(_default_type_to_tensor_type_map[dtype])", "docstring": "Return a FeatureSpec object to be used with\ntft_beam.AnalyzeAndTransformDataset\nArgs:\ntyp: A type of the column.\ncol_name: A name of the column.\nReturns:\nA FeatureSpec object.", "source": "github-repos"} {"code": "def boundary(self):\n return (int(self.WESTERNMOST_LONGITUDE), int(self.EASTERNMOST_LONGITUDE), int(self.MINIMUM_LATITUDE), int(self.MAXIMUM_LATITUDE))", "docstring": "Get the image boundary\n\nReturns:\nA tuple composed of the westernmost_longitude,\nthe easternmost_longitude, the minimum_latitude and\nthe maximum_latitude.", "source": "codesearchnet"} {"code": "def _assertOpOutputMatchesExpected(self, params, solution, high_level=True, rtol=0.001, atol=1e-05):\n input = params['input']\n diagonal = params['diagonal']\n with self.session() as session:\n for dtype in self.numeric_types - {np.int8, np.uint8}:\n expected = solution.astype(dtype)\n with self.test_scope():\n params['input'] = array_ops.placeholder(dtype, input.shape, name='input')\n params['diagonal'] = array_ops.placeholder(dtype, diagonal.shape, name='diagonal')\n if high_level:\n output = array_ops.matrix_set_diag(**params)\n else:\n output = gen_array_ops.matrix_set_diag(**params)\n result = session.run(output, {params['input']: input.astype(dtype), params['diagonal']: diagonal.astype(dtype)})\n self.assertEqual(output.dtype, expected.dtype)\n self.assertAllCloseAccordingToType(expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03)", "docstring": "Verifies that matrix_set_diag produces `solution` when fed `params`.\n\nArgs:\nparams: dictionary containing input parameters to matrix_set_diag.\nsolution: numpy array representing the expected output of matrix_set_diag.\nhigh_level: call high_level matrix_set_diag\nrtol: relative tolerance for equality test.\natol: absolute tolerance for equality test.", "source": "github-repos"} {"code": "def _RemoveIllegalXMLCharacters(self, xml_string):\n \n if not isinstance(xml_string, py2to3.STRING_TYPES):\n return xml_string\n\n return self._ILLEGAL_XML_RE.sub('\\ufffd', xml_string)", "docstring": "Removes illegal characters for XML.\n\nIf the input is not a string it will be returned unchanged.\n\nArgs:\nxml_string (str): XML with possible illegal characters.\n\nReturns:\nstr: XML where all illegal characters have been removed.", "source": "juraj-google-style"} {"code": "def cafferesnet101(num_classes=1000, pretrained='imagenet'):\n \n model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)\n if pretrained is not None:\n settings = pretrained_settings['cafferesnet101'][pretrained]\n assert num_classes == settings['num_classes'], \\\n \"num_classes should be {}, but is {}\".format(settings['num_classes'], num_classes)\n model.load_state_dict(model_zoo.load_url(settings['url']))\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n model.mean = settings['mean']\n model.std = settings['std']\n return model", "docstring": "Constructs a ResNet-101 model.\nArgs:\npretrained (bool): If True, returns a model pre-trained on ImageNet", "source": "juraj-google-style"} {"code": "def _insert_vars(self, path: str, data: dict) -> str:\n data = data.copy()\n while True:\n match = re.search(self.VAR_REPLACE_REGEX, path)\n if (not match):\n return path\n replace_from = match.group(0)\n replace_with = 
str(data.get(match.group(1)))\n path = path.replace(replace_from, replace_with)", "docstring": "Inserts variables into the ESI URL path.\n\nArgs:\npath: raw ESI URL path\ndata: data to insert into the URL\n\nReturns:\npath with variables filled", "source": "codesearchnet"} {"code": "def has_no_narrow_start(neuron, frac=0.9):\n bad_ids = [(neurite.root_node.id, [neurite.root_node.points[1]]) for neurite in neuron.neurites if (neurite.root_node.points[1][COLS.R] < (frac * neurite.root_node.points[2][COLS.R]))]\n return CheckResult((len(bad_ids) == 0), bad_ids)", "docstring": "Check if neurites have a narrow start\n\nArguments:\nneuron(Neuron): The neuron object to test\nfrac(float): Ratio that the second point must be smaller than the first\n\nReturns:\nCheckResult with a list of all first segments of neurites with a narrow start", "source": "codesearchnet"} {"code": "def crcMeterRead(self, raw_read, def_buf):\n try:\n if (len(raw_read) == 0):\n ekm_log((('(' + self.m_context) + ') Empty return read.'))\n return False\n sent_crc = self.calc_crc16(raw_read[1:(- 2)])\n logstr = ((('(' + self.m_context) + ')CRC sent = ') + str(def_buf['crc16'][MeterData.StringValue]))\n logstr += (' CRC calc = ' + sent_crc)\n ekm_log(logstr)\n if (int(def_buf['crc16'][MeterData.StringValue], 16) == int(sent_crc, 16)):\n return True\n except struct.error:\n ekm_log(str(sys.exc_info()))\n for frame in traceback.extract_tb(sys.exc_info()[2]):\n (fname, lineno, fn, text) = frame\n ekm_log(('Error in %s on line %d' % (fname, lineno)))\n return False\n except TypeError:\n ekm_log(str(sys.exc_info()))\n for frame in traceback.extract_tb(sys.exc_info()[2]):\n (fname, lineno, fn, text) = frame\n ekm_log(('Error in %s on line %d' % (fname, lineno)))\n return False\n except ValueError:\n ekm_log(str(sys.exc_info()))\n for frame in traceback.extract_tb(sys.exc_info()[2]):\n (fname, lineno, fn, text) = frame\n ekm_log(('Error in %s on line %d' % (fname, lineno)))\n return False\n return False", "docstring": "Internal read CRC wrapper.\n\nArgs:\nraw_read (str): Bytes with implicit string cast from serial read\ndef_buf (SerialBlock): Populated read buffer.\n\nReturns:\nbool: True if passed CRC equals calculated CRC.", "source": "codesearchnet"} {"code": "def start_router(router_class, router_name):\n \n handle = router_class.remote(router_name)\n ray.experimental.register_actor(router_name, handle)\n handle.start.remote()\n return handle", "docstring": "Wrapper for starting a router and register it.\n\nArgs:\nrouter_class: The router class to instantiate.\nrouter_name: The name to give to the router.\n\nReturns:\nA handle to newly started router actor.", "source": "juraj-google-style"} {"code": "def load_object(obj) -> object:\n \n if isinstance(obj, str):\n if ':' in obj:\n module_name, obj_name = obj.split(':')\n if not module_name:\n module_name = '.'\n else:\n module_name = obj\n obj = importlib.import_module(module_name)\n if obj_name:\n attrs = obj_name.split('.')\n for attr in attrs:\n obj = getattr(obj, attr)\n return obj", "docstring": "Load an object.\n\nArgs:\nobj (str|object): Load the indicated object if this is a string;\notherwise, return the object as is.\n\nTo load a module, pass a dotted path like 'package.module';\nto load an an object from a module pass a path like\n'package.module:name'.\n\nReturns:\nobject", "source": "juraj-google-style"} {"code": "def GetAllPluginInformation(cls, show_all=True):\n results = []\n for plugin_class in iter(cls._plugin_classes.values()):\n plugin_object = plugin_class()\n 
if ((not show_all) and (not plugin_class.ENABLE_IN_EXTRACTION)):\n continue\n (doc_string, _, _) = plugin_class.__doc__.partition('\\n')\n type_string = cls._PLUGIN_TYPE_STRINGS.get(plugin_object.plugin_type)\n information_tuple = (plugin_object.plugin_name, doc_string, type_string)\n results.append(information_tuple)\n return sorted(results)", "docstring": "Retrieves a list of the registered analysis plugins.\n\nArgs:\nshow_all (Optional[bool]): True if all analysis plugin names should\nbe listed.\n\nReturns:\nlist[tuple[str, str, str]]: the name, docstring and type string of each\nanalysis plugin in alphabetical order.", "source": "codesearchnet"} {"code": "def __init__(self, indices, values, dense_shape):\n with ops.name_scope(None, 'SparseTensor', [indices, values, dense_shape]):\n indices = ops.convert_to_tensor(indices, name='indices', dtype=dtypes.int64)\n values = ops.convert_to_tensor(values, name='values')\n dense_shape = ops.convert_to_tensor(dense_shape, name='dense_shape', dtype=dtypes.int64)\n dense_shape_default = tensor_util.constant_value_as_shape(dense_shape)\n self._indices = indices\n self._values = values\n self._dense_shape = dense_shape\n self._dense_shape_default = dense_shape_default\n indices_shape = indices.shape.with_rank(2)\n values_shape = values.shape.with_rank(1)\n dense_shape_shape = dense_shape.shape.with_rank(1)\n indices_shape.dims[0].assert_is_compatible_with(values_shape.dims[0])\n indices_shape.dims[1].assert_is_compatible_with(dense_shape_shape.dims[0])", "docstring": "Creates a `SparseTensor`.\n\nArgs:\nindices: A 2-D int64 tensor of shape `[N, ndims]`.\nvalues: A 1-D tensor of any type and shape `[N]`.\ndense_shape: A 1-D int64 tensor of shape `[ndims]`.\n\nRaises:\nValueError: When building an eager SparseTensor if `dense_shape` is\nunknown or contains unknown elements (None or -1).", "source": "github-repos"} {"code": "def _ParseLogline(self, parser_mediator, structure):\n (month, day_of_month, year, hours, minutes, seconds, milliseconds) = structure.date_time\n time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds, milliseconds)\n try:\n date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(time_elements_tuple=time_elements_tuple)\n except ValueError:\n parser_mediator.ProduceExtractionWarning('invalid date time value: {0!s}'.format(structure.date_time))\n return\n event_data = SkyDriveOldLogEventData()\n event_data.log_level = structure.log_level\n event_data.offset = self.offset\n event_data.source_code = structure.source_code\n event_data.text = structure.text\n event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ADDED)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n self._last_date_time = date_time\n self._last_event_data = event_data", "docstring": "Parse a logline and store appropriate attributes.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nstructure (pyparsing.ParseResults): structure of tokens derived from\na line of a text file.", "source": "codesearchnet"} {"code": "def render(self, value: Any, *, name: Optional[str]=None, root_path: Optional[utils.KeyPath]=None, **kwargs) -> Content:", "docstring": "Renders the input value.\n\nArgs:\nvalue: The value to render.\nname: (Optional) The referred name of the value from its container.\nroot_path: (Optional) The path of `value` under its object tree.\n**kwargs: Additional keyword arguments passed from `pg.view` or 
wrapper\nfunctions (e.g. `pg.to_html`).\n\nReturns:\nThe rendered content.", "source": "github-repos"} {"code": "def _full_axis_reduce(self, axis, func, alternate_index=None):\n result = self.data.map_across_full_axis(axis, func)\n if (axis == 0):\n columns = (alternate_index if (alternate_index is not None) else self.columns)\n return self.__constructor__(result, index=['__reduced__'], columns=columns)\n else:\n index = (alternate_index if (alternate_index is not None) else self.index)\n return self.__constructor__(result, index=index, columns=['__reduced__'])", "docstring": "Applies map that reduce Manager to series but require knowledge of full axis.\n\nArgs:\nfunc: Function to reduce the Manager by. This function takes in a Manager.\naxis: axis to apply the function to.\nalternate_index: If the resulting series should have an index\ndifferent from the current query_compiler's index or columns.\n\nReturn:\nPandas series containing the reduced data.", "source": "codesearchnet"} {"code": "def add_link(self, name: str, url: str) -> None:", "docstring": "Adds a related link to current trial.\n\nAdded links can be retrieved from the `Trial.related_links` property via\n`pg.poll_result`.\n\nArgs:\nname: Name for the related link.\nurl: URL for this link.", "source": "github-repos"} {"code": "def __init__(self, name, aliases=None, description=None, urls=None):\n \n super(FixedSizeDataTypeDefinition, self).__init__(\n name, aliases=aliases, description=description, urls=urls)\n self.size = definitions.SIZE_NATIVE\n self.units = 'bytes'", "docstring": "Initializes a fixed-size data type definition.\n\nArgs:\nname (str): name.\naliases (Optional[list[str]]): aliases.\ndescription (Optional[str]): description.\nurls (Optional[list[str]]): URLs.", "source": "juraj-google-style"} {"code": "def predict_dataset(self, df):\n \n if len(list(df.columns)) == 2:\n df.columns = [\"A\", \"B\"]\n if self.model is None:\n raise AssertionError(\"Model has not been trained before predictions\")\n df2 = DataFrame()\n\n for idx, row in df.iterrows():\n df2 = df2.append(row, ignore_index=True)\n df2 = df2.append({'A': row[\"B\"], 'B': row[\"A\"]}, ignore_index=True)\n return predict.predict(deepcopy(df2), deepcopy(self.model))[::2]", "docstring": "Runs Jarfo independently on all pairs.\n\nArgs:\nx (pandas.DataFrame): a CEPC format Dataframe.\nkwargs (dict): additional arguments for the algorithms\n\nReturns:\npandas.DataFrame: a Dataframe with the predictions.", "source": "juraj-google-style"} {"code": "def assignHolidayDate(self, holiday, month, day):\n holiday += 1\n if ((month > 12) or (month < 0) or (day > 31) or (day < 0) or (holiday < 1) or (holiday > Extents.Holidays)):\n ekm_log(((((('Out of bounds: month ' + str(month)) + ' day ') + str(day)) + ' holiday ') + str(holiday)))\n return False\n day_str = (('Holiday_' + str(holiday)) + '_Day')\n mon_str = (('Holiday_' + str(holiday)) + '_Month')\n if (day_str not in self.m_holiday_date_params):\n ekm_log(('Incorrect index: ' + day_str))\n return False\n if (mon_str not in self.m_holiday_date_params):\n ekm_log(('Incorrect index: ' + mon_str))\n return False\n self.m_holiday_date_params[day_str] = day\n self.m_holiday_date_params[mon_str] = month\n return True", "docstring": "Set a singe holiday day and month in object buffer.\n\nThere is no class style enum for holidays.\n\nArgs:\nholiday (int): 0-19 or range(Extents.Holidays).\nmonth (int): Month 1-12.\nday (int): Day 1-31\n\nReturns:\nbool: True on completion.", "source": "codesearchnet"} {"code": "def 
_send_offset_requests(self, timestamps):\n timestamps_by_node = collections.defaultdict(dict)\n for (partition, timestamp) in six.iteritems(timestamps):\n node_id = self._client.cluster.leader_for_partition(partition)\n if (node_id is None):\n self._client.add_topic(partition.topic)\n log.debug('Partition %s is unknown for fetching offset, wait for metadata refresh', partition)\n return Future().failure(Errors.StaleMetadata(partition))\n elif (node_id == (- 1)):\n log.debug('Leader for partition %s unavailable for fetching offset, wait for metadata refresh', partition)\n return Future().failure(Errors.LeaderNotAvailableError(partition))\n else:\n timestamps_by_node[node_id][partition] = timestamp\n list_offsets_future = Future()\n responses = []\n node_count = len(timestamps_by_node)\n\n def on_success(value):\n responses.append(value)\n if (len(responses) == node_count):\n offsets = {}\n for r in responses:\n offsets.update(r)\n list_offsets_future.success(offsets)\n\n def on_fail(err):\n if (not list_offsets_future.is_done):\n list_offsets_future.failure(err)\n for (node_id, timestamps) in six.iteritems(timestamps_by_node):\n _f = self._send_offset_request(node_id, timestamps)\n _f.add_callback(on_success)\n _f.add_errback(on_fail)\n return list_offsets_future", "docstring": "Fetch offsets for each partition in timestamps dict. This may send\nrequest to multiple nodes, based on who is Leader for partition.\n\nArguments:\ntimestamps (dict): {TopicPartition: int} mapping of fetching\ntimestamps.\n\nReturns:\nFuture: resolves to a mapping of retrieved offsets", "source": "codesearchnet"} {"code": "def interm_range_type(self) -> Sequence[str]:\n fluents = self.domain.intermediate_fluents\n ordering = self.domain.interm_fluent_ordering\n return self._fluent_range_type(fluents, ordering)", "docstring": "The range type of each intermediate fluent in canonical order.\n\nReturns:\nSequence[str]: A tuple of range types representing\nthe range of each fluent.", "source": "codesearchnet"} {"code": "def make_fixture(model_class, **kwargs):\n all_fields = get_fields(model_class)\n fields_for_random_generation = map((lambda x: getattr(model_class, x)), all_fields)\n overrides = {}\n for (kwarg, value) in kwargs.items():\n if (kwarg in all_fields):\n kwarg_field = getattr(model_class, kwarg)\n fields_for_random_generation.remove(kwarg_field)\n overrides.update({kwarg_field: value})\n random_values = get_random_values(fields_for_random_generation)\n values = dict(overrides, **random_values)\n assert (len(all_fields) == len(values)), 'Mismatch in values, {} != {}'.format(len(all_fields), len(values))\n data = {k.name: v for (k, v) in values.items()}\n return model_class(**data)", "docstring": "Take the model_klass and generate a fixure for it\n\nArgs:\nmodel_class (MongoEngine Document): model for which a fixture\nis needed\nkwargs (dict): any overrides instead of random values\n\nReturns:\ndict for now, other fixture types are not implemented yet", "source": "codesearchnet"} {"code": "def _create_simulator_trial_result(self, params: study.ParamResolver, measurements: Dict[(str, np.ndarray)], final_simulator_state: Any) -> 'SimulationTrialResult':\n return SimulationTrialResult(params=params, measurements=measurements, final_simulator_state=final_simulator_state)", "docstring": "This method can be overridden to creation of a trial result.\n\nArgs:\nparams: The ParamResolver for this trial.\nmeasurements: The measurement results for this trial.\nfinal_simulator_state: The final state of the simulator for 
the\nStepResult.\n\nReturns:\nThe SimulationTrialResult.", "source": "codesearchnet"} {"code": "def parse_data_types_and_routes_from_doc_ref(\n api,\n doc,\n namespace_context,\n ignore_missing_entries=False\n):\n \n assert doc is not None\n data_types = set()\n routes = defaultdict(set)\n\n for match in doc_ref_re.finditer(doc):\n try:\n tag = match.group('tag')\n val = match.group('val')\n supplied_namespace = api.namespaces[namespace_context]\n if tag == 'field':\n if '.' in val:\n type_name, __ = val.split('.', 1)\n doc_type = supplied_namespace.data_type_by_name[type_name]\n data_types.add(doc_type)\n else:\n pass \n elif tag == 'route':\n if '.' in val:\n namespace_name, val = val.split('.', 1)\n namespace = api.namespaces[namespace_name]\n else:\n namespace = supplied_namespace\n\n try:\n route_name, version = parse_route_name_and_version(val)\n except ValueError as ex:\n raise KeyError(str(ex))\n\n route = namespace.routes_by_name[route_name].at_version[version]\n routes[namespace.name].add(route)\n elif tag == 'type':\n if '.' in val:\n namespace_name, val = val.split('.', 1)\n doc_type = api.namespaces[namespace_name].data_type_by_name[val]\n data_types.add(doc_type)\n else:\n doc_type = supplied_namespace.data_type_by_name[val]\n data_types.add(doc_type)\n except KeyError:\n if not ignore_missing_entries:\n raise\n return data_types, routes", "docstring": "Given a documentation string, parse it and return all references to other\ndata types and routes.\n\nArgs:\n- api: The API containing this doc ref.\n- doc: The documentation string to parse.\n- namespace_context: The namespace name relative to this documentation.\n- ignore_missing_entries: If set, this will skip references to nonexistent data types instead\nof raising an exception.\n\nReturns:\n- a tuple of referenced data types and routes", "source": "juraj-google-style"} {"code": "def _process_allow_headers(self, req, resp, requested_headers):\n if (not requested_headers):\n return True\n elif self._cors_config['allow_all_headers']:\n self._set_allowed_headers(resp, requested_headers)\n return True\n approved_headers = []\n for header in requested_headers:\n if (header.lower() in self._cors_config['allow_headers_list']):\n approved_headers.append(header)\n elif self._cors_config.get('allow_headers_regex'):\n if self._cors_config['allow_headers_regex'].match(header):\n approved_headers.append(header)\n if (len(approved_headers) == len(requested_headers)):\n self._set_allowed_headers(resp, approved_headers)\n return True\n return False", "docstring": "Adds the Access-Control-Allow-Headers header to the response,\nusing the cors settings to determine which headers are allowed.\n\nReturns:\nTrue if all the headers the client requested are allowed.\nFalse if some or none of the headers the client requested are allowed.", "source": "codesearchnet"} {"code": "def ParseInput(self, a_file):\n input_lines = a_file.read().splitlines()\n self.ParseLines(input_lines)", "docstring": "Consumes input extracting definitions.\n\nArgs:\na_file: The file like stream to parse.\n\nRaises:\nPDDMError if there are any issues.", "source": "codesearchnet"} {"code": "def format_statevector(vec, decimals=None):\n \n num_basis = len(vec)\n vec_complex = np.zeros(num_basis, dtype=complex)\n for i in range(num_basis):\n vec_complex[i] = vec[i][0] + 1j * vec[i][1]\n if decimals:\n vec_complex = np.around(vec_complex, decimals=decimals)\n return vec_complex", "docstring": "Format statevector coming from the backend to present to the Qiskit 
user.\n\nArgs:\nvec (list): a list of [re, im] complex numbers.\ndecimals (int): the number of decimals in the statevector.\nIf None, no rounding is done.\n\nReturns:\nlist[complex]: a list of python complex numbers.", "source": "juraj-google-style"} {"code": "def update_detector(self, detector_id, detector):\n \n resp = self._put(self._u(self._DETECTOR_ENDPOINT_SUFFIX, detector_id),\n data=detector)\n resp.raise_for_status()\n return resp.json()", "docstring": "Update an existing detector.\n\nArgs:\ndetector_id (string): the ID of the detector.\ndetector (object): the detector model object. Will be serialized as\nJSON.\nReturns:\ndictionary of the response (updated detector model).", "source": "juraj-google-style"} {"code": "def create_binary_array(self, key, value):\n \n data = None\n if key is not None and value is not None:\n value_encoded = []\n for v in value:\n try:\n \n \n \n value_encoded.append(base64.b64encode(bytes(v)).decode('utf-8'))\n except TypeError:\n \n \n \n value_encoded.append(base64.b64encode(bytes(v, 'utf-8')).decode('utf-8'))\n data = self.db.create(key.strip(), json.dumps(value_encoded))\n else:\n self.tcex.log.warning(u'The key or value field was None.')\n return data", "docstring": "Create method of CRUD operation for binary array data.\n\nArgs:\nkey (string): The variable to write to the DB.\nvalue (any): The data to write to the DB.\n\nReturns:\n(string): Result of DB write.", "source": "juraj-google-style"} {"code": "def consumer_partitions_for_topic(consumer, topic):\n \n topic_partitions = []\n partitions = consumer.partitions_for_topic(topic)\n if partitions is not None:\n for partition in partitions:\n topic_partitions.append(TopicPartition(topic, partition))\n else:\n logging.error(\n \"No partitions found for topic {}. 
Maybe it doesn't exist?\".format(topic),\n )\n return topic_partitions", "docstring": "Returns a list of all TopicPartitions for a given topic.\n\nArguments:\nconsumer: an initialized KafkaConsumer\ntopic: a topic name to fetch TopicPartitions for\n\n:returns:\nlist(TopicPartition): A list of TopicPartitions that belong to the given topic", "source": "juraj-google-style"} {"code": "def PmfProbLess(pmf1, pmf2):\n \n total = 0.0\n for v1, p1 in pmf1.Items():\n for v2, p2 in pmf2.Items():\n if v1 < v2:\n total += p1 * p2\n return total", "docstring": "Probability that a value from pmf1 is less than a value from pmf2.\n\nArgs:\npmf1: Pmf object\npmf2: Pmf object\n\nReturns:\nfloat probability", "source": "juraj-google-style"} {"code": "def download(url, file=None):\n \n import urllib.request\n import shutil\n if isinstance(file, str):\n file = open(file, 'wb')\n try:\n with urllib.request.urlopen(url) as response:\n if file:\n shutil.copyfileobj(response, file)\n else:\n return response.read()\n finally:\n if file:\n file.close()", "docstring": "Pass file as a filename, open file object, or None to return the request bytes\n\nArgs:\nurl (str): URL of file to download\nfile (Union[str, io, None]): One of the following:\n- Filename of output file\n- File opened in binary write mode\n- None: Return raw bytes instead\n\nReturns:\nUnion[bytes, None]: Bytes of file if file is None", "source": "juraj-google-style"} {"code": "def get_image_embeddings(self, pixel_values, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):\n vision_output = self.vision_encoder(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n image_embeddings = vision_output[0]\n return image_embeddings", "docstring": "Returns the image embeddings by passing the pixel values through the vision encoder.\n\nArgs:\npixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\nInput pixel values\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers.\noutput_hidden_states (`bool`, *optional*):\nWhether or not to return the hidden states of all layers.\nreturn_dict (`bool`, *optional*):\nWhether or not to return a [`~utils.TFModelOutput`] instead of a plain tuple.", "source": "github-repos"} {"code": "def _get_stringlist_from_commastring(self, field):\n strings = self.data.get(field)\n if strings:\n return strings.split(',')\n else:\n return list()", "docstring": "Return list of strings from comma separated list\n\nArgs:\nfield (str): Field containing comma separated list\n\nReturns:\nList[str]: List of strings", "source": "codesearchnet"} {"code": "def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:\n \n logger.debug(\"Yielding from product iterator\")\n if isinstance(variables, list):\n raise ValueError(\n f\"Product only takes mappings of values, got {variables} of type {type(variables)}\"\n )\n\n yield list(variable_matrix(variables, parent, \"product\"))", "docstring": "Apply the product operator to a set of variables.\n\nThis uses the python itertools.product iterator to combine multiple variables\nsuch that all possible combinations are generated. 
This is the default iterator\nhowever this is a method of manually specifying the option.\n\nArgs:\nvariables: The variables object\nparent: Unused", "source": "juraj-google-style"} {"code": "def reverse_taskname(name: str) -> str:\n \n components = name.split('.')\n assert len(components) <= 3\n return '.'.join(components[::-1])", "docstring": "Reverses components in the name of task. Reversed convention is used for filenames since\nit groups log/scratch files of related tasks together\n\n0.somejob.somerun -> somerun.somejob.0\n0.somejob -> somejob.0\nsomename -> somename\n\nArgs:\nname: name of task", "source": "juraj-google-style"} {"code": "def delete_by_hash(self, file_hash):\n \n full_path = self.file_path_from_hash(file_hash)\n\n return self.delete_by_path(full_path)", "docstring": "Remove file/archive by its `file_hash`.\n\nArgs:\nfile_hash (str): Hash, which is used to find the file in storage.\n\nRaises:\nIOError: If the file for given `file_hash` was not found in \\\nstorage.", "source": "juraj-google-style"} {"code": "def _transform_local_field_to_expression(expression, node, context):\n \n column_name = expression.field_name\n column = sql_context_helpers.get_column(column_name, node, context)\n return column", "docstring": "Transform a LocalField compiler expression into its SQLAlchemy expression representation.\n\nArgs:\nexpression: expression, LocalField compiler expression.\nnode: SqlNode, the SqlNode the expression applies to.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nExpression, SQLAlchemy expression.", "source": "juraj-google-style"} {"code": "def __is_bound_method(method):\n if (not (hasattr(method, '__func__') and hasattr(method, '__self__'))):\n return False\n return (six.get_method_self(method) is not None)", "docstring": "Return ``True`` if the `method` is a bound method (attached to a class\ninstance).\n\nArgs:\nmethod: A method or function type object.", "source": "codesearchnet"} {"code": "def find_eq_stress(strains, stresses, tol=1e-10):\n \n stress_array = np.array(stresses)\n strain_array = np.array(strains)\n eq_stress = stress_array[np.all(abs(strain_array) < tol, axis=(1, 2))]\n if eq_stress.size != 0:\n all_same = (abs(eq_stress - eq_stress[0]) < 1e-8).all()\n if len(eq_stress) > 1 and not all_same:\n raise ValueError(\"Multiple stresses found for equilibrium strain\"\n \" state, please specify equilibrium stress or \"\n \" remove extraneous stresses.\")\n eq_stress = eq_stress[0]\n else:\n warnings.warn(\"No eq state found, returning zero voigt stress\")\n eq_stress = Stress(np.zeros((3, 3)))\n return eq_stress", "docstring": "Finds stress corresponding to zero strain state in stress-strain list\n\nArgs:\nstrains (Nx3x3 array-like): array corresponding to strains\nstresses (Nx3x3 array-like): array corresponding to stresses\ntol (float): tolerance to find zero strain state", "source": "juraj-google-style"} {"code": "def timestr2time(time_str):\n if any(((c not in '0123456789:') for c in time_str)):\n raise ValueError('Illegal character in time string')\n if (time_str.count(':') == 2):\n (h, m, s) = time_str.split(':')\n elif (time_str.count(':') == 1):\n (h, m) = time_str.split(':')\n s = '00'\n elif (len(time_str) == 6):\n h = time_str[:2]\n m = time_str[2:4]\n s = time_str[4:]\n else:\n raise ValueError('Time format not recognised. {}'.format(VALID_TIME_FORMATS_TEXT))\n if ((len(m) == 2) and (len(s) == 2)):\n mins = int(m)\n sec = int(s)\n else:\n raise ValueError('m and s must be 2 digits')\n try:\n return datetime.time(int(h), mins, sec)\n except ValueError:\n raise ValueError('Invalid time {}. 
{}'.format(time_str, VALID_TIME_FORMATS_TEXT))", "docstring": "Turns a string into a datetime.time object. This will only work if the\nformat can be \"guessed\", so the string must have one of the formats from\nVALID_TIME_FORMATS_TEXT.\n\nArgs:\ntime_str (str) a string that represents a date\nReturns:\ndatetime.time object\nRaises:\nValueError if the input string does not have a valid format.", "source": "codesearchnet"} {"code": "def can_user_access_build(param_name):\n \n build_id = (\n request.args.get(param_name, type=int) or\n request.form.get(param_name, type=int) or\n request.json[param_name])\n if not build_id:\n logging.debug('Build ID in param_name=%r was missing', param_name)\n abort(400)\n\n ops = operations.UserOps(current_user.get_id())\n build, user_is_owner = ops.owns_build(build_id)\n if not build:\n logging.debug('Could not find build_id=%r', build_id)\n abort(404)\n\n if current_user.is_authenticated() and not user_is_owner:\n \n \n \n \n ops.evict()\n claim_invitations(current_user)\n build, user_is_owner = ops.owns_build(build_id)\n\n if not user_is_owner:\n if current_user.is_authenticated() and current_user.superuser:\n pass\n elif request.method != 'GET':\n logging.debug('No way to log in user via modifying request')\n abort(403)\n elif build.public:\n pass\n elif current_user.is_authenticated():\n logging.debug('User does not have access to this build')\n abort(flask.Response('You cannot access this build', 403))\n else:\n logging.debug('Redirecting user to login to get build access')\n abort(login.unauthorized())\n elif not login_fresh():\n logging.debug('User login is old; forcing refresh')\n abort(login.needs_refresh())\n\n return build", "docstring": "Determines if the current user can access the build ID in the request.\n\nArgs:\nparam_name: Parameter name to use for getting the build ID from the\nrequest. Will fetch from GET or POST requests.\n\nReturns:\nThe build the user has access to.", "source": "juraj-google-style"} {"code": "def FileEntryExistsByPathSpec(self, path_spec):\n \n location = getattr(path_spec, 'location', None)\n\n if (location is None or\n not location.startswith(self.LOCATION_ROOT)):\n return False\n\n if len(location) == 1:\n return True\n\n try:\n self._zip_file.getinfo(location[1:])\n return True\n except KeyError:\n pass\n\n \n for name in iter(self._zip_file.namelist()):\n \n \n if name.startswith(location[1:]):\n return True\n\n return False", "docstring": "Determines if a file entry for a path specification exists.\n\nArgs:\npath_spec (PathSpec): path specification of the file entry.\n\nReturns:\nbool: True if the file entry exists.", "source": "juraj-google-style"} {"code": "def get_devices(ads, **kwargs):\n\n def _get_device_filter(ad):\n for (k, v) in kwargs.items():\n if (not hasattr(ad, k)):\n return False\n elif (getattr(ad, k) != v):\n return False\n return True\n filtered = filter_devices(ads, _get_device_filter)\n if (not filtered):\n raise Error(('Could not find a target device that matches condition: %s.' 
% kwargs))\n else:\n return filtered", "docstring": "Finds a list of AndroidDevice instance from a list that has specific\nattributes of certain values.\n\nExample:\nget_devices(android_devices, label='foo', phone_number='1234567890')\nget_devices(android_devices, model='angler')\n\nArgs:\nads: A list of AndroidDevice instances.\nkwargs: keyword arguments used to filter AndroidDevice instances.\n\nReturns:\nA list of target AndroidDevice instances.\n\nRaises:\nError: No devices are matched.", "source": "codesearchnet"} {"code": "def scale(self, replicas):\n if ('Global' in self.attrs['Spec']['Mode'].keys()):\n raise InvalidArgument('Cannot scale a global container')\n service_mode = ServiceMode('replicated', replicas)\n return self.client.api.update_service(self.id, self.version, mode=service_mode, fetch_current_spec=True)", "docstring": "Scale service container.\n\nArgs:\nreplicas (int): The number of containers that should be running.\n\nReturns:\nbool: ``True`` if successful.", "source": "codesearchnet"} {"code": "def delete(self, url, callback, json=None):\n return self.adapter.delete(url, callback, json=json)", "docstring": "Delete a URL.\n\nArgs:\n\nurl(string): URL for the request\n\ncallback(func): The response callback function\n\nKeyword Args:\n\njson(dict): JSON body for the request\n\nReturns:\n\nThe result of the callback handling the resopnse from the\nexecuted request", "source": "codesearchnet"} {"code": "def __init__(self,config_file):\n\t\t\n\t\tself.game = DoomGame()\n\n\t\t\n\t\tself.game.load_config(config_file)\n\t\tself.game.init()\n\n\t\tself.state_shape = self.featurize(self.game.get_state()).shape\n\t\tself.num_actions = len(self.game.get_available_buttons())", "docstring": "Initialize ViZDoom environment.\n\nArgs:\nconfig_file: .cfg file path, which defines how a world works and look like (maps)", "source": "juraj-google-style"} {"code": "def is_sample_set(self, md5):\n \n try:\n self.get_sample_set(md5)\n return True\n except WorkBench.DataNotFound:\n return False", "docstring": "Does the md5 represent a sample_set?\nArgs:\nmd5: the md5 of the sample_set\nReturns:\nTrue/False", "source": "juraj-google-style"} {"code": "def input_mask(self):\n inputs = self.input\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)", "docstring": "Retrieves the input mask tensor(s) of a layer.\n\nOnly applicable if the layer has exactly one inbound node,\ni.e. 
if it is connected to one incoming layer.\n\nReturns:\nInput mask tensor (potentially None) or list of input\nmask tensors.\n\nRaises:\nAttributeError: if the layer is connected to\nmore than one incoming layers.", "source": "github-repos"} {"code": "def load_data(self,\n data,\n datatype=\"ttl\",\n namespace=None,\n graph=None,\n is_file=False,\n **kwargs):\n \n if kwargs.get('debug'):\n log.setLevel(logging.DEBUG)\n time_start = datetime.datetime.now()\n datatype_map = {\n 'ttl': 'turtle',\n 'xml': 'xml',\n 'rdf': 'xml',\n 'nt': 'nt',\n 'n3': 'n3',\n 'nquads': 'nquads',\n 'hturtle': 'hturtle'\n }\n if is_file:\n datatype = data.split(os.path.extsep)[-1]\n file_name = data\n log.debug('starting data load of %s', file_name)\n data = open(data, 'rb').read()\n try:\n content_type = datatype_map[datatype]\n except KeyError:\n raise NotImplementedError(\"'%s' is not an implemented data fromat\",\n datatype)\n conn = self.conn\n if namespace:\n conn = self.tstore.get_namespace(namespace)\n else:\n namespace = self.namespace\n graph = pick(graph, self.graph)\n start = datetime.datetime.now()\n try:\n result = conn.parse(data=data, publicID=graph, format=content_type)\n except:\n if is_file:\n print(\"Datafile \", file_name)\n raise\n if is_file:\n log.info (\" loaded %s into rdflib namespace '%s'\",\n file_name,\n namespace)\n else:\n log.info(\" loaded data into rdflib namespace '%s' in time: %s\",\n namespace,\n (datetime.datetime.now() - start))\n return result", "docstring": "loads data via file stream from python to triplestore\n\nArgs:\ndata: The data or filepath to load\ndatatype(['ttl', 'xml', 'rdf']): the type of data to load\nnamespace: the namespace to use\ngraph: the graph to load the data to.\nis_file(False): If true python will read the data argument as a\nfilepath, determine the datatype from the file extension,\nread the file and send it to blazegraph as a datastream", "source": "juraj-google-style"} {"code": "def myGrades(year, candidateNumber, badFormat, length):\n \n\n weights1 = [1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5]\n weights2 = [1, 1, 1, 1, 1, 1, 0.5, 0.5]\n if year == 1:\n myFinalResult = sum([int(badFormat[candidateNumber][2*(i + 1)])\n * weights1[i] for i in range(length-1)]) / 6\n elif year == 2 or year == 3:\n myFinalResult = sum([int(badFormat[candidateNumber][2*(i + 1)])\n * weights2[i] for i in range(length-1)]) / 7\n elif year == 4:\n myFinalResult = sum([int(badFormat[candidateNumber][2*(i + 1)])\n for i in range(length-1)]) / 8\n\n return myFinalResult", "docstring": "returns final result of candidateNumber in year\n\nArguments:\nyear {int} -- the year candidateNumber is in\ncandidateNumber {str} -- the candidateNumber of candidateNumber\nbadFormat {dict} -- candNumber : [results for candidate]\nlength {int} -- length of each row in badFormat divided by 2\n\n\nReturns:\nint -- a weighted average for a specific candidate number and year", "source": "juraj-google-style"} {"code": "def __init__(self, group_id=None):\n \n super().__init__(action_type=ActionType.OFPAT_GROUP, length=8)\n self.group_id = group_id", "docstring": "Create an ActionGroup with the optional parameters below.\n\nArgs:\ngroup_id (int): The group_id indicates the group used to process\nthis packet. 
The set of buckets to apply depends on the group\ntype.", "source": "juraj-google-style"} {"code": "def __init__(self, input_set, contcar_only=True, **kwargs):\n \n self.input_set = input_set\n self.contcar_only = contcar_only\n self.kwargs = kwargs", "docstring": "Generates a VASP input based on an existing directory. This is typically\nused to modify the VASP input files before the next VaspJob.\n\nArgs:\ninput_set (str): Full path to the input set. E.g.,\n\"pymatgen.io.vasp.sets.MPNonSCFSet\".\ncontcar_only (bool): If True (default), only CONTCAR structures\nare used as input to the input set.", "source": "juraj-google-style"} {"code": "def post(self, resource):\n response = self.api.execute('POST', self.endpoint, json=resource.as_dict())\n if (not response.ok):\n raise Error.parse(response.json())\n return self._cls.parse(response.json())", "docstring": "Creates a new instance of the resource.\n\nArgs:\nresource - gophish.models.Model - The resource instance", "source": "codesearchnet"} {"code": "def has(self, url, xpath=None):\n \n if not path.exists(self.db_path):\n return False\n\n return self._query(url, xpath).count() > 0", "docstring": "Check if a URL (and xpath) exists in the cache\n\nIf DB has not been initialized yet, returns ``False`` for any URL.\n\nArgs:\nurl (str): If given, clear specific item only. Otherwise remove the DB file.\nxpath (str): xpath to search (may be ``None``)\n\nReturns:\nbool: ``True`` if URL exists, ``False`` otherwise", "source": "juraj-google-style"} {"code": "def to_hsl(self):\n from .hsl import HSL\n (h, l, s) = colorsys.rgb_to_hls((float(self.r) / 255), (float(self.g) / 255), (float(self.b) / 255))\n return HSL(round((h * 360)), s, l, self.a)", "docstring": "Return a corresponding HSL color for this RGB color.\n\nReturns:\n:class:`~bokeh.colors.rgb.RGB`", "source": "codesearchnet"} {"code": "def _decode_image(fobj, session, filename):\n buf = fobj.read()\n image = tfds.core.lazy_imports.cv2.imdecode(np.fromstring(buf, dtype=np.uint8), flags=3)\n if (image is None):\n logging.warning('Image %s could not be decoded by OpenCV, falling back to TF', filename)\n try:\n image = tf.image.decode_image(buf, channels=3)\n image = session.run(image)\n except tf.errors.InvalidArgumentError:\n logging.fatal('Image %s could not be decoded by Tensorflow', filename)\n if (len(image.shape) == 4):\n image = image.reshape(image.shape[1:])\n return image", "docstring": "Reads and decodes an image from a file object as a Numpy array.\n\nThe SUN dataset contains images in several formats (despite the fact that\nall of them have .jpg extension). 
Some of them are:\n- BMP (RGB)\n- PNG (grayscale, RGBA, RGB interlaced)\n- JPEG (RGB)\n- GIF (1-frame RGB)\nSince TFDS assumes that all images have the same number of channels, we\nconvert all of them to RGB.\n\nArgs:\nfobj: File object to read from.\nsession: TF session used to decode the images.\nfilename: Filename of the original image in the archive.\n\nReturns:\nNumpy array with shape (height, width, channels).", "source": "codesearchnet"} {"code": "def __init__(self, msg):\n \n super(BaseDownloadError, self).__init__(\n msg, exit_code=STATUSES['resource-unavailable']\n )", "docstring": "Initialize Download404.\n\nArgs:\nmsg (string): the error message", "source": "juraj-google-style"} {"code": "def get_users_by_email(cls, emails):\n \n users = User.objects.filter(email__in=emails)\n present_emails = users.values_list('email', flat=True)\n missing_emails = list(set(emails) - set(present_emails))\n return users, missing_emails", "docstring": "Accept a list of emails, and separate them into users that exist on OpenEdX and users who don't.\n\nArgs:\nemails: An iterable of email addresses to split between existing and nonexisting\n\nReturns:\nusers: Queryset of users who exist in the OpenEdX platform and who were in the list of email addresses\nmissing_emails: List of unique emails which were in the original list, but do not yet exist as users", "source": "juraj-google-style"} {"code": "def __setstate__(self, state):\n \n field_instance, message_class, number = state\n if field_instance is None:\n self.__field = message_class.field_by_number(number)\n else:\n self.__field = field_instance", "docstring": "Enable unpickling.\n\nArgs:\nstate: A 3-tuple containing:\n- The field instance, or None if it belongs to a Message class.\n- The Message class that the field instance belongs to, or None.\n- The field instance number of the Message class it belongs to, or\nNone.", "source": "juraj-google-style"} {"code": "def get_tool_filepath(self, tool_alias):\n \n tools_dict = self.get_tools()\n if tool_alias in tools_dict:\n if self.tools_path is None:\n return None\n else:\n return os.path.join(self.tools_path, tool_alias)\n else:\n return None", "docstring": "Given a visible tool alias, return the full path to the executable.\n\nArgs:\ntool_alias (str): Tool alias to search for.\n\nReturns:\n(str): Filepath of executable, or None if the tool is not in the\nsuite. 
May also return None because this suite has not been saved\nto disk, so a filepath hasn't yet been established.", "source": "juraj-google-style"} {"code": "def get(self, column, default_value=None):\n if isinstance(column, (list, tuple)):\n ret = []\n for col in column:\n ret.append(self.get(col, default_value))\n return ret\n try:\n return self._values[column]\n except (IndexError, TypeError):\n pass\n try:\n return self[column]\n except IndexError:\n return default_value", "docstring": "Get an item from the Row by column name.\n\nArgs:\ncolumn: Tuple of column names, or a (str) column name, or positional\ncolumn number, 0-indexed.\ndefault_value: The value to use if the key is not found.\n\nReturns:\nA list or string with column value(s) or default_value if not found.", "source": "codesearchnet"} {"code": "def waveform_to_examples(data, sample_rate):\n import resampy\n if (len(data.shape) > 1):\n data = np.mean(data, axis=1)\n if (sample_rate != vggish_params.SAMPLE_RATE):\n data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE)\n log_mel = mel_features.log_mel_spectrogram(data, audio_sample_rate=vggish_params.SAMPLE_RATE, log_offset=vggish_params.LOG_OFFSET, window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS, hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS, num_mel_bins=vggish_params.NUM_MEL_BINS, lower_edge_hertz=vggish_params.MEL_MIN_HZ, upper_edge_hertz=vggish_params.MEL_MAX_HZ)\n features_sample_rate = (1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS)\n example_window_length = int(round((vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate)))\n example_hop_length = int(round((vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate)))\n log_mel_examples = mel_features.frame(log_mel, window_length=example_window_length, hop_length=example_hop_length)\n return log_mel_examples", "docstring": "Converts audio waveform into an array of examples for VGGish.\n\nArgs:\ndata: np.array of either one dimension (mono) or two dimensions\n(multi-channel, with the outer dimension representing channels).\nEach sample is generally expected to lie in the range [-1.0, +1.0],\nalthough this is not required.\nsample_rate: Sample rate of data.\n\nReturns:\n3-D np.array of shape [num_examples, num_frames, num_bands] which represents\na sequence of examples, each of which contains a patch of log mel\nspectrogram, covering num_frames frames of audio and num_bands mel frequency\nbands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS.", "source": "codesearchnet"} {"code": "def ParseOptions(cls, options, configuration_object):\n \n if not isinstance(configuration_object, tools.CLITool):\n raise errors.BadConfigObject(\n 'Configuration object is not an instance of CLITool')\n\n parsers = cls._ParseStringOption(options, 'parsers', default_value='')\n parsers = parsers.replace('\\\\', '/')\n\n \n\n setattr(configuration_object, '_parser_filter_expression', parsers)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.", "source": "juraj-google-style"} {"code": "def pearson_correlation_coefficient(predictions, labels, weights_fn=None):\n \n del weights_fn\n _, pearson = tf.contrib.metrics.streaming_pearson_correlation(predictions,\n labels)\n return pearson, tf.constant(1.0)", "docstring": "Calculate pearson correlation coefficient.\n\nArgs:\npredictions: The 
raw predictions.\nlabels: The actual labels.\nweights_fn: Weighting function.\n\nReturns:\nThe pearson correlation coefficient.", "source": "juraj-google-style"} {"code": "def read_from_hdx(identifier, configuration=None):\n if (is_valid_uuid(identifier) is False):\n raise HDXError(('%s is not a valid resource id!' % identifier))\n resource = Resource(configuration=configuration)\n result = resource._load_from_hdx('resource', identifier)\n if result:\n return resource\n return None", "docstring": "Reads the resource given by identifier from HDX and returns Resource object\n\nArgs:\nidentifier (str): Identifier of resource\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\nReturns:\nOptional[Resource]: Resource object if successful read, None if not", "source": "codesearchnet"} {"code": "def get_channel(self, channel_name, project_name, dataset_name):\n \n url = self.url() + \"/nd/resource/dataset/{}\".format(dataset_name)\\\n + \"/project/{}\".format(project_name) + \\\n \"/channel/{}/\".format(channel_name)\n\n req = self.remote_utils.get_url(url)\n\n if req.status_code is not 200:\n raise RemoteDataNotFoundError('Could not find {}'.format(req.text))\n else:\n return req.json()", "docstring": "Gets info about a channel given its name, name of its project\n, and name of its dataset.\n\nArguments:\nchannel_name (str): Channel name\nproject_name (str): Project name\ndataset_name (str): Dataset name\n\nReturns:\ndict: Channel info", "source": "juraj-google-style"} {"code": "def process_data(self):\n further_processing = False\n if ((self.state == self.WaitingForReportType) and (len(self.raw_data) > 0)):\n self.current_type = self.raw_data[0]\n try:\n self.current_header_size = self.calculate_header_size(self.current_type)\n self.state = self.WaitingForReportHeader\n further_processing = True\n except Exception as exc:\n self.state = self.ErrorState\n if self.error_callback:\n self.error_callback(self.ErrorFindingReportType, str(exc), self.context)\n else:\n raise\n if ((self.state == self.WaitingForReportHeader) and (len(self.raw_data) >= self.current_header_size)):\n try:\n self.current_report_size = self.calculate_report_size(self.current_type, self.raw_data[:self.current_header_size])\n self.state = self.WaitingForCompleteReport\n further_processing = True\n except Exception as exc:\n self.state = self.ErrorState\n if self.error_callback:\n self.error_callback(self.ErrorParsingReportHeader, str(exc), self.context)\n else:\n raise\n if ((self.state == self.WaitingForCompleteReport) and (len(self.raw_data) >= self.current_report_size)):\n try:\n report_data = self.raw_data[:self.current_report_size]\n self.raw_data = self.raw_data[self.current_report_size:]\n report = self.parse_report(self.current_type, report_data)\n self._handle_report(report)\n self.state = self.WaitingForReportType\n further_processing = True\n except Exception as exc:\n self.state = self.ErrorState\n if self.error_callback:\n self.error_callback(self.ErrorParsingCompleteReport, str(exc), self.context)\n else:\n raise\n return further_processing", "docstring": "Attempt to extract a report from the current data stream contents\n\nReturns:\nbool: True if further processing is required and process_data should be\ncalled again.", "source": "codesearchnet"} {"code": "def start_file_logger(filename, rank, name='parsl', level=logging.DEBUG, format_string=None):\n \n\n try:\n os.makedirs(os.path.dirname(filename), 511, True)\n except Exception as e:\n print(\"Caught exception with 
trying to make log dirs: {}\".format(e))\n\n if format_string is None:\n format_string = \"%(asctime)s %(name)s:%(lineno)d Rank:{0} [%(levelname)s] %(message)s\".format(\n rank)\n global logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n handler = logging.FileHandler(filename)\n handler.setLevel(level)\n formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')\n handler.setFormatter(formatter)\n logger.addHandler(handler)", "docstring": "Add a stream log handler.\n\nArgs:\n- filename (string): Name of the file to write logs to\n- name (string): Logger name\n- level (logging.LEVEL): Set the logging level.\n- format_string (string): Set the format string\n\nReturns:\n- None", "source": "juraj-google-style"} {"code": "def on_epoch_end(self, epoch, logs=None):\n logs = self._process_logs(logs)\n for callback in self.callbacks:\n callback.on_epoch_end(epoch, logs)", "docstring": "Calls the `on_epoch_end` methods of its callbacks.\n\nThis function should only be called during TRAIN mode.\n\nArgs:\nepoch: Integer, index of epoch.\nlogs: Dict, metric results for this training epoch, and for the\nvalidation epoch if validation is performed. Validation result keys\nare prefixed with `val_`.", "source": "github-repos"} {"code": "def get_config_file(program, system_wide=False):\n program_config_homes = get_config_dir(program, system_wide)\n config_homes = get_config_dir(system_wide=system_wide)\n config_files = []\n for home in config_homes:\n for sub in os.listdir(home):\n if os.path.isfile(os.path.join(home, sub)):\n if sub.startswith(program):\n config_files.append(os.path.join(home, sub))\n if (not program.startswith('.')):\n config_files.extend(get_config_file(('.' + program), system_wide))\n for home in program_config_homes:\n for sub in os.listdir(home):\n if (os.path.isfile(os.path.join(home, sub)) and sub.startswith(program)):\n config_files.append(os.path.join(home, sub))\n return config_files", "docstring": "Get the configuration file for a program.\n\nGets the configuration file for a given program, assuming it stores it in\na standard location. See also :func:`get_config_dir()`.\n\nArgs:\nprogram\t (str): The program for which to get the configuration file.\nsystem_wide (bool):Whether to get the system-wide file for the program.\n\nReturns:\nlist: A list of all matching configuration files found.", "source": "codesearchnet"} {"code": "def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n self.input_dtype = hidden_states.dtype\n hidden_states = hidden_states.to(self.dtype)\n if self.training and self.jitter_noise > 0:\n hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)\n self._cast_classifier()\n router_logits = self.classifier(hidden_states)\n router_probabilities = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(self.input_dtype)\n return (router_probabilities, router_logits)", "docstring": "Computes router probabilities from input hidden states.\n\nArgs:\nhidden_states (`torch.Tensor`):\n(batch_size, sequence_length, hidden_dim) from which router probabilities are computed.\nReturns:\nrouter_probabilities (`torch.Tensor`):\nTensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each\ntoken and expert. 
Used for routing tokens to experts.\nrouter_logits (`torch.Tensor`):\nLogits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits.\nThis is used later for computing router z-loss.", "source": "github-repos"} {"code": "def _search_for_child_node(self, parent_id, path_to_child):\n if not path_to_child:\n return parent_id\n for child in self._proto.nodes[parent_id].children:\n if child.local_name == path_to_child[0]:\n return self._search_for_child_node(child.node_id, path_to_child[1:])\n return None", "docstring": "Returns node id of child node.\n\nA helper method for traversing the object graph proto.\n\nAs an example, say that the object graph proto in the SavedModel contains an\nobject with the following child and grandchild attributes:\n\n`parent.child_a.child_b`\n\nThis method can be used to retrieve the node id of `child_b` using the\nparent's node id by calling:\n\n`_search_for_child_node(parent_id, ['child_a', 'child_b'])`.\n\nArgs:\nparent_id: node id of parent node\npath_to_child: list of children names.\n\nReturns:\nnode_id of child, or None if child isn't found.", "source": "github-repos"} {"code": "def load(path: str, *args, **kwargs) -> Any:\n load_handler = flags.get_load_handler() or default_load_handler\n value = load_handler(path, *args, **kwargs)\n if flags.is_tracking_origin() and isinstance(value, Symbolic):\n value.sym_setorigin(path, 'load')\n return value", "docstring": "Load a symbolic value using the global load handler.\n\nExample::\n\n@pg.members([\n('x', pg.typing.Any())\n])\nclass A(pg.Object):\npass\n\na1 = A(1)\nfile = 'my_file.json'\na1.save(file)\na2 = pg.load(file)\nassert pg.eq(a1, a2)\n\nArgs:\npath: A path string for loading an object.\n*args: Positional arguments that will be passed through to the global\nload handler.\n**kwargs: Keyword arguments that will be passed through to the global\nload handler.\n\nReturns:\nReturn value from the global load handler.", "source": "github-repos"} {"code": "def setChannel(self, channel=11):\n \n print '%s call setChannel' % self.port\n print channel\n try:\n cmd = 'channel %s' % channel\n datasetCmd = 'dataset channel %s' % channel\n self.hasActiveDatasetToCommit = True\n return self.__sendCommand(cmd)[0] == 'Done' and self.__sendCommand(datasetCmd)[0] == 'Done'\n except Exception, e:\n ModuleHelper.WriteIntoDebugLogger(\"setChannel() Error: \" + str(e))", "docstring": "set channel of Thread device operates on.\n\nArgs:\nchannel:\n(0 - 10: Reserved)\n(11 - 26: 2.4GHz channels)\n(27 - 65535: Reserved)\n\nReturns:\nTrue: successful to set the channel\nFalse: fail to set the channel", "source": "juraj-google-style"} {"code": "def remove(self, force=False):\n \n return self.client.api.remove_volume(self.id, force=force)", "docstring": "Remove this volume.\n\nArgs:\nforce (bool): Force removal of volumes that were already removed\nout of band by the volume driver plugin.\nRaises:\n:py:class:`docker.errors.APIError`\nIf volume failed to remove.", "source": "juraj-google-style"} {"code": "def register(self, numerics_alert):\n \n key = (numerics_alert.device_name, numerics_alert.tensor_name)\n if key in self._data:\n self._data[key].add(numerics_alert)\n else:\n if len(self._data) < self._capacity:\n history = NumericsAlertHistory()\n history.add(numerics_alert)\n self._data[key] = history", "docstring": "Register an alerting numeric event.\n\nArgs:\nnumerics_alert: An instance of `NumericsAlert`.", "source": "juraj-google-style"} {"code": "def read(self, file_des, num_bytes):\n 
file_handle = self.filesystem.get_open_file(file_des)\n file_handle.raw_io = True\n return file_handle.read(num_bytes)", "docstring": "Read number of bytes from a file descriptor, returns bytes read.\n\nArgs:\nfile_des: An integer file descriptor for the file object requested.\nnum_bytes: Number of bytes to read from file.\n\nReturns:\nBytes read from file.\n\nRaises:\nOSError: bad file descriptor.\nTypeError: if file descriptor is not an integer.", "source": "codesearchnet"} {"code": "def _GetPathSegmentIndexForSimilarityWeights(\n self, similarity_weights, occurrence_weights, value_weights):\n \n largest_weight = similarity_weights.GetLargestWeight()\n\n if largest_weight > 0:\n similarity_weight_indexes = similarity_weights.GetIndexesForWeight(\n largest_weight)\n number_of_similarity_indexes = len(similarity_weight_indexes)\n else:\n number_of_similarity_indexes = 0\n\n path_segment_index = None\n if number_of_similarity_indexes == 0:\n path_segment_index = self._GetPathSegmentIndexForOccurrenceWeights(\n occurrence_weights, value_weights)\n\n elif number_of_similarity_indexes == 1:\n path_segment_index = similarity_weight_indexes[0]\n\n else:\n largest_weight = 0\n largest_value_weight = 0\n\n for similarity_index in similarity_weight_indexes:\n occurrence_weight = occurrence_weights.GetWeightForIndex(\n similarity_index)\n\n if largest_weight > 0 and largest_weight == occurrence_weight:\n value_weight = value_weights.GetWeightForIndex(similarity_index)\n\n if largest_value_weight < value_weight:\n largest_weight = 0\n\n if not path_segment_index or largest_weight < occurrence_weight:\n largest_weight = occurrence_weight\n path_segment_index = similarity_index\n\n largest_value_weight = value_weights.GetWeightForIndex(\n similarity_index)\n\n return path_segment_index", "docstring": "Retrieves the index of the path segment based on similarity weights.\n\nArgs:\nsimilarity_weights: the similarity weights object (instance of\n_PathSegmentWeights).\noccurrence_weights: the occurrence weights object (instance of\n_PathSegmentWeights).\nvalue_weights: the value weights object (instance of _PathSegmentWeights).\n\nReturns:\nAn integer containing the path segment index.", "source": "juraj-google-style"} {"code": "def db_wb010(self, value=None):\n \n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float '\n 'for field `db_wb010`'.format(value))\n\n self._db_wb010 = value", "docstring": "Corresponds to IDD Field `db_wb010`\nmean coincident dry-bulb temperature to\nWet-bulb temperature corresponding to 1.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `db_wb010`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"} {"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n hidden_states, attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n residual = hidden_states\n hidden_states = 
self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n if hidden_states.dtype == torch.float16:\n clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (attn_weights,)\n return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\nlayer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size\n`(encoder_attention_heads,)`.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"} {"code": "def _load_method(self, state, self_obj, name):\n state, result = self.load_attr(state, self_obj, name)\n state = self._push_null(state)\n return (state.push(result), result)", "docstring": "Loads and pushes a method on the stack.\n\nArgs:\nstate: the current VM state.\nself_obj: the `self` object of the method.\nname: the name of the method.\n\nReturns:\n(state, method) where `state` is the updated VM state and `method` is the\nmethod that was loaded. The method is already pushed onto the stack,\neither at the top or below the `self` object.", "source": "github-repos"} {"code": "def recommend_k_items_slow(self, test, top_k=10, remove_seen=True):\n \n\n \n if remove_seen:\n raise ValueError(\"Not implemented\")\n\n self.get_user_affinity(test)\\\n .write.mode(\"overwrite\")\\\n .saveAsTable(self.f(\"{prefix}user_affinity\"))\n\n \n \n query = self.f(\n ,\n top_k=top_k,\n )\n\n return self.spark.sql(query)", "docstring": "Recommend top K items for all users which are in the test set.\n\nArgs:\ntest: test Spark dataframe\ntop_k: top n items to return\nremove_seen: remove items test users have already seen in the past from the recommended set.", "source": "juraj-google-style"} {"code": "def get_path(self, path: str, data: dict) -> Tuple[(dict, dict)]:\n path = self._insert_vars(path, data)\n path = (self.BASE_URL + path)\n data = self.cache.check(path)\n if data:\n return data\n self._try_refresh_access_token()\n r = self.session.get(path)\n self.cache.set(r)\n return r.json()", "docstring": "Queries the ESI by an endpoint URL.\n\nThis method is not marked \"private\" as it _can_ be used\nby consuming code, but it's probably easier to call the\n`get_op` method instead.\n\nArgs:\npath: raw ESI URL path\ndata: data to insert into the URL\n\nReturns:\nESI data", "source": "codesearchnet"} {"code": "def get_task_param_string(task):\n param_dict = task.to_str_params()\n items = []\n for key in sorted(param_dict.keys()):\n items.append(\"'{:s}': '{:s}'\".format(key, param_dict[key]))\n return (('{' + ', '.join(items)) + '}')", "docstring": "Get all parameters of a task as one string\n\nReturns:\nstr: task parameter string", "source": "codesearchnet"} {"code": "def __init__(self, rate=None, burst_size=None):\n \n 
super().__init__(MeterBandType.OFPMBT_DROP, rate, burst_size)", "docstring": "Create a MeterBandDrop with the optional parameters below.\n\nArgs:\nrate (int): Rate for dropping packets.\nburst_size (int): Size of bursts.", "source": "juraj-google-style"} {"code": "def window_at(self, geom, window_shape):\n (y_size, x_size) = (window_shape[0], window_shape[1])\n bounds = box(*geom.bounds)\n px = ops.transform(self.__geo_transform__.rev, bounds).centroid\n (miny, maxy) = (int((px.y - (y_size / 2))), int((px.y + (y_size / 2))))\n (minx, maxx) = (int((px.x - (x_size / 2))), int((px.x + (x_size / 2))))\n (_, y_max, x_max) = self.shape\n if ((minx < 0) or (miny < 0) or (maxx > x_max) or (maxy > y_max)):\n raise ValueError('Input geometry resulted in a window outside of the image')\n return self[(:, miny:maxy, minx:maxx)]", "docstring": "Return a subsetted window of a given size, centered on a geometry object\n\nUseful for generating training sets from vector training data\nWill throw a ValueError if the window is not within the image bounds\n\nArgs:\ngeom (shapely,geometry): Geometry to center the image on\nwindow_shape (tuple): The desired shape of the image as (height, width) in pixels.\n\nReturns:\nimage: image object of same type", "source": "codesearchnet"} {"code": "def split(self, file):\n with open(file, 'rb') as f:\n for record in sagemaker.amazon.common.read_recordio(f):\n (yield record)", "docstring": "Split a file into records using a specific strategy\n\nThis RecordIOSplitter splits the data into individual RecordIO records.\n\nArgs:\nfile (str): path to the file to split\n\nReturns: generator for the individual records that were split from the file", "source": "codesearchnet"} {"code": "def assert_on_branch(branch_name):\n \n \n branch = git.current_branch(refresh=True)\n\n if branch.name != branch_name:\n if context.get('pretend', False):\n log.info(\"Would assert that you're on a <33>{}<32> branch\",\n branch_name)\n else:\n log.err(\"You're not on a <33>{}<31> branch!\", branch_name)\n sys.exit(1)", "docstring": "Print error and exit if *branch_name* is not the current branch.\n\nArgs:\nbranch_name (str):\nThe supposed name of the current branch.", "source": "juraj-google-style"} {"code": "def export_model(module_spec, class_count, saved_model_dir):\n \n \n sess, in_image, _, _, _, _ = build_eval_session(module_spec, class_count)\n with sess.graph.as_default() as graph:\n tf.saved_model.simple_save(\n sess,\n saved_model_dir,\n inputs={'image': in_image},\n outputs={'prediction': graph.get_tensor_by_name('final_result:0')},\n legacy_init_op=tf.group(tf.tables_initializer(), name='legacy_init_op')\n )", "docstring": "Exports model for serving.\n\nArgs:\nmodule_spec: The hub.ModuleSpec for the image module being used.\nclass_count: The number of classes.\nsaved_model_dir: Directory in which to save exported model and variables.", "source": "juraj-google-style"} {"code": "def get_kinks(self):\n c1_coord = self.pd.pd_coords(self.comp1)\n c2_coord = self.pd.pd_coords(self.comp2)\n n1 = self.comp1.num_atoms\n n2 = self.comp2.num_atoms\n critical_comp = self.pd.get_critical_compositions(self.comp1, self.comp2)\n (x_kink, energy_kink, react_kink, energy_per_rxt_formula) = ([], [], [], [])\n if all((c1_coord == c2_coord)):\n x_kink = [0, 1]\n energy_kink = [self._get_energy(x) for x in x_kink]\n react_kink = [self._get_reaction(x) for x in x_kink]\n num_atoms = [((x * self.comp1.num_atoms) + ((1 - x) * self.comp2.num_atoms)) for x in x_kink]\n energy_per_rxt_formula = [(((energy_kink[i] 
* self._get_elmt_amt_in_rxt(react_kink[i])) / num_atoms[i]) * InterfacialReactivity.EV_TO_KJ_PER_MOL) for i in range(2)]\n else:\n for i in reversed(critical_comp):\n c = self.pd.pd_coords(i)\n x = (np.linalg.norm((c - c2_coord)) / np.linalg.norm((c1_coord - c2_coord)))\n x = ((x * n2) / (n1 + (x * (n2 - n1))))\n n_atoms = ((x * self.comp1.num_atoms) + ((1 - x) * self.comp2.num_atoms))\n x_converted = InterfacialReactivity._convert(x, self.factor1, self.factor2)\n x_kink.append(x_converted)\n normalized_energy = self._get_energy(x)\n energy_kink.append(normalized_energy)\n rxt = self._get_reaction(x)\n react_kink.append(rxt)\n rxt_energy = ((normalized_energy * self._get_elmt_amt_in_rxt(rxt)) / n_atoms)\n energy_per_rxt_formula.append((rxt_energy * InterfacialReactivity.EV_TO_KJ_PER_MOL))\n index_kink = range(1, (len(critical_comp) + 1))\n return zip(index_kink, x_kink, energy_kink, react_kink, energy_per_rxt_formula)", "docstring": "Finds all the kinks in mixing ratio where reaction products changes\nalong the tie line of composition self.c1 and composition self.c2.\n\nReturns:\nZip object of tuples (index, mixing ratio,\nreaction energy per atom in eV/atom,\nreaction formula,\nreaction energy per mol of reaction\nformula in kJ/mol).", "source": "codesearchnet"} {"code": "def remove_father(self, father):\n \n self._fathers = [x for x in self._fathers if x.node_id != father.node_id]", "docstring": "Remove the father node. Do nothing if the node is not a father\n\nArgs:\nfathers: list of fathers to add", "source": "juraj-google-style"} {"code": "def run(self, args):\n \n jlink = self.create_jlink(args)\n if args.product:\n print('Product: %s' % jlink.product_name)\n\n manufacturer = 'SEGGER' if jlink.oem is None else jlink.oem\n print('Manufacturer: %s' % manufacturer)\n\n print('Hardware Version: %s' % jlink.hardware_version)\n print('Firmware: %s' % jlink.firmware_version)\n print('DLL Version: %s' % jlink.version)\n print('Features: %s' % ', '.join(jlink.features))\n elif args.jtag:\n status = jlink.hardware_status\n print('TCK Pin Status: %d' % status.tck)\n print('TDI Pin Status: %d' % status.tdi)\n print('TDO Pin Status: %d' % status.tdo)\n print('TMS Pin Status: %d' % status.tms)\n print('TRES Pin Status: %d' % status.tres)\n print('TRST Pin Status: %d' % status.trst)", "docstring": "Runs the information command.\n\nArgs:\nself (InfoCommand): the ``InfoCommand`` instance\nargs (Namespace): the arguments passed on the command-line\n\nReturns:\n``None``", "source": "juraj-google-style"} {"code": "def w8a8_block_fp8_matmul_compile(input_q: torch.Tensor, weight_q: torch.Tensor, input_scale: torch.Tensor, weight_scale: torch.Tensor, block_size: Optional[Tuple[int, int]]=None, output_dtype: torch.dtype=torch.float32) -> torch.Tensor:\n batch_size, seq_len, hidden_dim = input_q.shape if input_q.ndim == 3 else (1, input_q.shape[0], input_q.shape[1])\n out_features = weight_q.shape[0]\n input_reshaped = input_q.view(-1, hidden_dim)\n input_scale_reshaped = input_scale.view(input_scale.shape[0], -1)\n num_weight_blocks_m = out_features \n num_weight_blocks_n = hidden_dim \n output = torch.zeros((batch_size * seq_len, out_features), dtype=torch.float32, device=input_q.device)\n for i in range(num_weight_blocks_m):\n m_start = i * block_size[0]\n m_end = m_start + block_size[0]\n for j in range(num_weight_blocks_n):\n n_start = j * block_size[1]\n n_end = n_start + block_size[1]\n input_block = input_reshaped[:, n_start:n_end]\n weight_block = weight_q[m_start:m_end, n_start:n_end]\n 
curr_input_scale = input_scale_reshaped[:, j:j + 1]\n curr_weight_scale = weight_scale[i, j]\n block_result = torch._scaled_mm(input_block, weight_block.t(), scale_a=torch.tensor(1, dtype=torch.float32, device=input_q.device), scale_b=curr_weight_scale, out_dtype=output_dtype) * curr_input_scale\n output[:, m_start:m_end] += block_result\n output = output.view(batch_size, seq_len, out_features)\n return output.to(output_dtype)", "docstring": "Performs blocked matrix multiplication with FP8 quantized matrices.\n\nArgs:\ninput_q: Quantized input tensor with 1x128 block quantization\nweight_q: Quantized weight tensor with 128x128 block quantization\ninput_scale: Scaling factors for input blocks\nweight_scale: Scaling factors for weight blocks\nblock_size: Tuple of (M, N) for weight block dimensions\noutput_dtype: Desired output dtype", "source": "github-repos"} {"code": "def get_contents(self, path):\n try:\n if (not os.path.exists(path)):\n raise ConfigurationError(('specified path does not exist %s' % path))\n with open(path) as f:\n data = f.read()\n return data\n except (IOError, OSError) as exc:\n raise ConfigurationError(('error trying to load file contents: %s' % exc))", "docstring": "Loads the contents of the file specified by path\n\nArgs:\npath (string): The relative or absolute path to the file to\nbe loaded. If the path is relative, then it is combined\nwith the base_path to generate a full path string\n\nReturns:\nstring: The contents of the file as a string\n\nRaises:\nConfigurationError: If the file cannot be loaded", "source": "codesearchnet"} {"code": "def insert(self, index, value):\n\t\t\n\t\tif value in self:\n\t\t\traise ValueError\n\t\tindex = self._fix_neg_index(index)\n\t\tself._dict[value] = index\n\t\tfor elem in self._list[index:]:\n\t\t\tself._dict[elem] += 1\n\t\tself._list.insert(index, value)", "docstring": "Insert value at index.\n\nArgs:\nindex (int): Index to insert value at\nvalue: Value to insert\nRaises:\nValueError: If value already in self\nIndexError: If start or end are out of range", "source": "juraj-google-style"} {"code": "def get_files_in_tree(profile, sha):\n \n data = trees.get_tree(profile, sha)\n tree = data.get(\"tree\")\n blobs = [x for x in tree if x.get(\"type\") == \"blob\"]\n return blobs", "docstring": "Get the files (blobs) in a tree.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nsha\nThe SHA of a tree.\n\nReturns:\nA list of dicts containing info about each blob in the tree.", "source": "juraj-google-style"} {"code": "def get_attributes(self, uuid=None, attribute_names=None):\n batch_item = self._build_get_attributes_batch_item(uuid, attribute_names)\n request = self._build_request_message(None, [batch_item])\n response = self._send_and_receive_message(request)\n results = self._process_batch_items(response)\n return results[0]", "docstring": "Send a GetAttributes request to the server.\n\nArgs:\nuuid (string): The ID of the managed object with which the\nretrieved attributes should be associated. 
Optional, defaults\nto None.\nattribute_names (list): A list of AttributeName values indicating\nwhat object attributes the client wants from the server.\nOptional, defaults to None.\n\nReturns:\nresult (GetAttributesResult): A structure containing the results\nof the operation.", "source": "codesearchnet"} {"code": "def parse_cl_function(cl_code, dependencies=()):\n \n from mot.lib.cl_function import SimpleCLFunction\n\n def separate_cl_functions(input_str):\n \n class Semantics:\n\n def __init__(self):\n self._functions = []\n\n def result(self, ast):\n return self._functions\n\n def arglist(self, ast):\n return '({})'.format(', '.join(ast))\n\n def function(self, ast):\n def join(items):\n result = ''\n for item in items:\n if isinstance(item, str):\n result += item\n else:\n result += join(item)\n return result\n\n self._functions.append(join(ast).strip())\n return ast\n\n return _extract_cl_functions_parser.parse(input_str, semantics=Semantics())\n\n functions = separate_cl_functions(cl_code)\n return SimpleCLFunction.from_string(functions[-1], dependencies=list(dependencies or []) + [\n SimpleCLFunction.from_string(s) for s in functions[:-1]])", "docstring": "Parse the given OpenCL string to a single SimpleCLFunction.\n\nIf the string contains more than one function, we will return only the last, with all the other added as a\ndependency.\n\nArgs:\ncl_code (str): the input string containing one or more functions.\ndependencies (Iterable[CLCodeObject]): The list of CL libraries this function depends on\n\nReturns:\nmot.lib.cl_function.SimpleCLFunction: the CL function for the last function in the given strings.", "source": "juraj-google-style"} {"code": "def add(self, promise, bitoffset, *, _offsetideal=None):\n if (_offsetideal is None):\n _offsetideal = bitoffset\n if isinstance(promise, TDOPromise):\n newpromise = promise.makesubatoffset(bitoffset, _offsetideal=_offsetideal)\n self._promises.append(newpromise)\n elif isinstance(promise, TDOPromiseCollection):\n for p in promise._promises:\n self.add(p, bitoffset, _offsetideal=_offsetideal)", "docstring": "Add a promise to the promise collection at an optional offset.\n\nArgs:\npromise: A TDOPromise to add to this collection.\nbitoffset: An integer offset for this new promise in the collection.\n_offsetideal: An integer offset for this new promise in the collection if the associated primitive supports arbitrary TDO control.", "source": "codesearchnet"} {"code": "def FindFileContainingSymbol(self, symbol):\n symbol = _NormalizeFullyQualifiedName(symbol)\n try:\n return self._descriptors[symbol].file\n except KeyError:\n pass\n try:\n return self._enum_descriptors[symbol].file\n except KeyError:\n pass\n try:\n file_proto = self._internal_db.FindFileContainingSymbol(symbol)\n except KeyError as error:\n if self._descriptor_db:\n file_proto = self._descriptor_db.FindFileContainingSymbol(symbol)\n else:\n raise error\n if (not file_proto):\n raise KeyError(('Cannot find a file containing %s' % symbol))\n return self._ConvertFileProtoToFileDescriptor(file_proto)", "docstring": "Gets the FileDescriptor for the file containing the specified symbol.\n\nArgs:\nsymbol: The name of the symbol to search for.\n\nReturns:\nA FileDescriptor that contains the specified symbol.\n\nRaises:\nKeyError: if the file can not be found in the pool.", "source": "codesearchnet"} {"code": "def localize(dt, force_to_local=True):\n if (not isinstance(dt, datetime_tz)):\n if (not dt.tzinfo):\n return datetime_tz(dt, tzinfo=localtz())\n dt = datetime_tz(dt)\n if 
force_to_local:\n return dt.astimezone(localtz())\n return dt", "docstring": "Localize a datetime to the local timezone.\n\nIf dt is naive, returns the same datetime with the local timezone, otherwise\nuses astimezone to convert.\n\nArgs:\ndt: datetime object.\nforce_to_local: Force all results to be in local time.\n\nReturns:\nA datetime_tz object.", "source": "codesearchnet"} {"code": "def execute_forever(method, interval_s):\n interval = Interval(method)\n interval.start(interval_s)\n return interval", "docstring": "Executes a method forever at the specified interval.\n\nArgs:\nmethod: The callable to execute.\ninterval_s: The number of seconds to start the execution after each method\nfinishes.\nReturns:\nAn Interval object.", "source": "codesearchnet"} {"code": "def argmax(x, axis=-1):\n return math_ops.argmax(x, axis)", "docstring": "Returns the index of the maximum value along an axis.\n\nArgs:\nx: Tensor or variable.\naxis: axis along which to perform the reduction.\n\nReturns:\nA tensor.", "source": "github-repos"} {"code": "def set_of_vars(arg_plot):\n return set((var for var in arg_plot.split(',') if (var in phyvars.PLATES)))", "docstring": "Build set of needed variables.\n\nArgs:\narg_plot (str): string with variable names separated with ``,``.\nReturns:\nset of str: set of variables.", "source": "codesearchnet"} {"code": "def ChangeScaleFactor(self, newfactor):\n \n if float(newfactor) > 0 and float(newfactor) < self._MAX_ZOOM:\n self._zoomfactor = newfactor", "docstring": "Changes the zoom of the graph manually.\n\n1.0 is the original canvas size.\n\nArgs:\n# float value between 0.0 and 5.0\nnewfactor: 0.7", "source": "juraj-google-style"} {"code": "def get_image(verbose=False):\n base_data = tf.constant(image_data(verbose=verbose))\n base_image = tf.image.decode_image(base_data, channels=3)\n base_image.set_shape((IMAGE_HEIGHT, IMAGE_WIDTH, 3))\n parsed_image = tf.Variable(base_image, name='image', dtype=tf.uint8)\n return parsed_image", "docstring": "Get the image as a TensorFlow variable.\n\nReturns:\nA `tf.Variable`, which must be initialized prior to use:\ninvoke `sess.run(result.initializer)`.", "source": "codesearchnet"} {"code": "def _evaluate3(nodes, x_val, y_val):\n \n \n \n sylvester_mat = np.zeros((6, 6), order=\"F\")\n delta = nodes - np.asfortranarray([[x_val], [y_val]])\n delta[:, 1:3] *= 3.0\n \n \n sylvester_mat[:2, :4] = delta\n sylvester_mat[2:4, 1:5] = delta\n sylvester_mat[4:, 2:] = delta\n return np.linalg.det(sylvester_mat)", "docstring": "Helper for :func:`evaluate` when ``nodes`` is degree 3.\n\nArgs:\nnodes (numpy.ndarray): ``2 x 4`` array of nodes in a curve.\nx_val (float): ``x``-coordinate for evaluation.\ny_val (float): ``y``-coordinate for evaluation.\n\nReturns:\nfloat: The computed value of :math:`f(x, y)`.", "source": "juraj-google-style"} {"code": "def _make_query_from_terms(self, terms, limit=None):\n \n\n expanded_terms = self._expand_terms(terms)\n\n if expanded_terms['doc']:\n \n query_parts = [\"SELECT vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) as score\"]\n if expanded_terms['doc'] and expanded_terms['keywords']:\n query_parts = [\"SELECT vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) \"\n \" + ts_rank_cd(setweight(to_tsvector(coalesce(keywords::text,'')),'B'), to_tsquery(:keywords))\"\n ' as score']\n else:\n \n query_parts = ['SELECT vid, 1 as score']\n\n query_parts.append('FROM dataset_index')\n query_params = {}\n where_counter = 0\n\n if expanded_terms['doc']:\n where_counter += 1\n query_parts.append('WHERE doc 
@@ to_tsquery(:doc)')\n query_params['doc'] = self.backend._and_join(expanded_terms['doc'])\n\n if expanded_terms['keywords']:\n\n query_params['keywords'] = self.backend._and_join(expanded_terms['keywords'])\n\n kw_q = \"to_tsvector(coalesce(keywords::text,'')) @@ to_tsquery(:keywords)\"\n\n query_parts.append( (\"AND \" if where_counter else \"WHERE \") + kw_q )\n\n\n query_parts.append('ORDER BY score DESC')\n if limit:\n query_parts.append('LIMIT :limit')\n query_params['limit'] = limit\n\n query_parts.append(';')\n deb_msg = 'Dataset terms conversion: `{}` terms converted to `{}` with `{}` params query.'\\\n .format(terms, query_parts, query_params)\n logger.debug(deb_msg)\n\n\n q = text('\\n'.join(query_parts)), query_params\n logger.debug('Dataset search query: {}'.format(q))\n return q", "docstring": "Creates a query for dataset from decomposed search terms.\n\nArgs:\nterms (dict or unicode or string):\n\nReturns:\ntuple of (TextClause, dict): First element is FTS query, second is parameters\nof the query. Element of the execution of the query is pair: (vid, score).", "source": "juraj-google-style"} {"code": "class UnitNormalization(Layer):\n\n def __init__(self, axis=-1, **kwargs):\n super().__init__(**kwargs)\n if isinstance(axis, (list, tuple)):\n self.axis = list(axis)\n elif isinstance(axis, int):\n self.axis = axis\n else:\n raise TypeError(f'Invalid value for `axis` argument: expected an int or a list/tuple of ints. Received: axis={axis}')\n self.supports_masking = True\n self._build_at_init()\n\n def call(self, inputs):\n return ops.normalize(inputs, axis=self.axis, order=2, epsilon=1e-12)\n\n def compute_output_shape(self, input_shape):\n if isinstance(self.axis, int):\n axes = [self.axis]\n else:\n axes = self.axis\n for axis in axes:\n if axis >= len(input_shape) or axis < -len(input_shape):\n raise ValueError(f'Axis {self.axis} is out of bounds for input shape {input_shape}.')\n return input_shape\n\n def get_config(self):\n config = super().get_config()\n config.update({'axis': self.axis})\n return config", "docstring": "Unit normalization layer.\n\nNormalize a batch of inputs so that each input in the batch has a L2 norm\nequal to 1 (across the axes specified in `axis`).\n\nExample:\n\n>>> data = np.arange(6).reshape(2, 3)\n>>> normalized_data = keras.layers.UnitNormalization()(data)\n>>> np.sum(normalized_data[0, :] ** 2)\n1.0\n\nArgs:\naxis: Integer or list/tuple. The axis or axes to normalize across.\nTypically, this is the features axis or axes. The left-out axes are\ntypically the batch axis or axes. `-1` is the last dimension\nin the input. Defaults to `-1`.", "source": "github-repos"} {"code": "def decode(self, spec, encoded_value):\n return spec._from_components(encoded_value)", "docstring": "Decodes `value` from a batchable tensor encoding.\n\nSee `encode` for a description of the default encoding. Subclasses may\noverride this default definition, when necessary.\n\nArgs:\nspec: The TypeSpec for the result value. 
If encoded values with spec `s`\nwere batched, then `spec` should be `s.batch(batch_size)`; or if encoded\nvalues with spec `s` were unbatched, then `spec` should be\n`s.unbatch()`.\nencoded_value: A nest of values returned by `encode`; or a nest of values\nthat was formed by stacking, unstacking, or concatenating the\ncorresponding elements of values returned by `encode`.\n\nReturns:\nA value compatible with `type_spec`.", "source": "github-repos"} {"code": "def find_element(self, name, type=ElementType.ANY):\n for e in self.e_list:\n if (type.value and (not (e['elementType'] == type))):\n continue\n if (e['name'] == name):\n uri = self.uri\n uri.eid = e['id']\n return uri", "docstring": "Find an elemnent in the document with the given name - could be a PartStudio, Assembly or blob.\n\nArgs:\nname: str\nthe name of the element.\n\nReturns:\n- onshapepy.uri of the element", "source": "codesearchnet"} {"code": "def member_update(self, member_id, params):\n \n config = self.config\n config['members'][member_id].update(params.get(\"rsParams\", {}))\n return self.repl_update(config)", "docstring": "update member's values with reconfig replica\nArgs:\nmember_id - member index\nparams - updates member params\n\nreturn True if operation success otherwise False", "source": "juraj-google-style"} {"code": "def init_app(self, app):\n \n\n app.ldap3_login_manager = self\n\n servers = list(self._server_pool)\n for s in servers:\n self._server_pool.remove(s)\n\n self.init_config(app.config)\n\n if hasattr(app, 'teardown_appcontext'):\n app.teardown_appcontext(self.teardown)\n else: \n app.teardown_request(self.teardown)\n\n self.app = app", "docstring": "Configures this extension with the given app. This registers an\n``teardown_appcontext`` call, and attaches this ``LDAP3LoginManager``\nto it as ``app.ldap3_login_manager``.\n\nArgs:\napp (flask.Flask): The flask app to initialise with", "source": "juraj-google-style"} {"code": "def _default_tolerance(dtype):\n if dtype == np.float16:\n return 0.005\n elif dtype in (np.float32, np.complex64):\n return 0.001\n elif dtype in (np.float64, np.complex128):\n return 1e-05\n else:\n return None", "docstring": "Returns a sensible default tolerance for comparing results of a given type.\n\nArgs:\ndtype: A datatype.", "source": "github-repos"} {"code": "def _inputrc_enables_vi_mode():\n for filepath in (os.path.expanduser('~/.inputrc'), '/etc/inputrc'):\n try:\n with open(filepath) as f:\n for line in f:\n if _setre.fullmatch(line):\n return True\n except IOError:\n continue\n return False", "docstring": "Emulate a small bit of readline behavior.\n\nReturns:\n(bool) True if current user enabled vi mode (\"set editing-mode vi\") in .inputrc", "source": "codesearchnet"} {"code": "def send(signal):\n \n if hasattr(ray.worker.global_worker, \"actor_creation_task_id\"):\n source_key = ray.worker.global_worker.actor_id.hex()\n else:\n \n source_key = ray.worker.global_worker.current_task_id.hex()\n\n encoded_signal = ray.utils.binary_to_hex(cloudpickle.dumps(signal))\n ray.worker.global_worker.redis_client.execute_command(\n \"XADD \" + source_key + \" * signal \" + encoded_signal)", "docstring": "Send signal.\n\nThe signal has a unique identifier that is computed from (1) the id\nof the actor or task sending this signal (i.e., the actor or task calling\nthis function), and (2) an index that is incremented every time this\nsource sends a signal. 
This index starts from 1.\n\nArgs:\nsignal: Signal to be sent.", "source": "juraj-google-style"} {"code": "def dispose(json_str):\n result_str = list(json_str)\n escaped = False\n normal = True\n sl_comment = False\n ml_comment = False\n quoted = False\n a_step_from_comment = False\n a_step_from_comment_away = False\n former_index = None\n for (index, char) in enumerate(json_str):\n if escaped:\n escaped = False\n continue\n if a_step_from_comment:\n if ((char != '/') and (char != '*')):\n a_step_from_comment = False\n normal = True\n continue\n if a_step_from_comment_away:\n if (char != '/'):\n a_step_from_comment_away = False\n if (char == '\"'):\n if (normal and (not escaped)):\n quoted = True\n normal = False\n elif (quoted and (not escaped)):\n quoted = False\n normal = True\n elif (char == '\\\\'):\n if (normal or quoted):\n escaped = True\n elif (char == '/'):\n if a_step_from_comment:\n a_step_from_comment = False\n sl_comment = True\n normal = False\n former_index = (index - 1)\n elif a_step_from_comment_away:\n a_step_from_comment_away = False\n normal = True\n ml_comment = False\n for i in range(former_index, (index + 1)):\n result_str[i] = ''\n elif normal:\n a_step_from_comment = True\n normal = False\n elif (char == '*'):\n if a_step_from_comment:\n a_step_from_comment = False\n ml_comment = True\n normal = False\n former_index = (index - 1)\n elif ml_comment:\n a_step_from_comment_away = True\n elif (char == '\\n'):\n if sl_comment:\n sl_comment = False\n normal = True\n for i in range(former_index, (index + 1)):\n result_str[i] = ''\n elif ((char == ']') or (char == '}')):\n if normal:\n _remove_last_comma(result_str, index)\n return ('' if isinstance(json_str, str) else u'').join(result_str)", "docstring": "Clear all comments in json_str.\n\nClear JS-style comments like // and /**/ in json_str.\nAccept a str or unicode as input.\n\nArgs:\njson_str: A json string of str or unicode to clean up comment\n\nReturns:\nstr: The str without comments (or unicode if you pass in unicode)", "source": "codesearchnet"} {"code": "def _post_process(self, feed_item, item):\n pass", "docstring": "Provides an opportunity for sub classes to perform any required operations after the item has been processed.\n\nArgs:\nfeed_item: The Bulkdozer feed item that was processed.\nitem: The CM object resulting from the process operation.", "source": "github-repos"} {"code": "def add_user(self, username, raise_on_error=False, **kwargs):\n if ('password' not in kwargs):\n raise ValueError('missing password')\n if ('email' not in kwargs):\n raise ValueError('missing email')\n try:\n data = {'name': username, 'first-name': username, 'last-name': username, 'display-name': username, 'email': kwargs['email'], 'password': {'value': kwargs['password']}, 'active': True}\n except KeyError:\n return ValueError\n del kwargs['password']\n for (k, v) in kwargs.items():\n new_k = k.replace('_', '-')\n if (new_k not in data):\n raise ValueError(('invalid argument %s' % k))\n data[new_k] = v\n response = self._post((self.rest_url + '/user'), data=json.dumps(data))\n if (response.status_code == 201):\n return True\n if raise_on_error:\n raise RuntimeError(response.json()['message'])\n return False", "docstring": "Add a user to the directory\n\nArgs:\nusername: The account username\nraise_on_error: optional (default: False)\n**kwargs: key-value pairs:\npassword: mandatory\nemail: mandatory\nfirst_name: optional\nlast_name: optional\ndisplay_name: optional\nactive: optional (default True)\n\nReturns:\nTrue: 
Succeeded\nFalse: If unsuccessful", "source": "codesearchnet"} {"code": "def _parse_flowcontrol_send(self, config):\n value = 'off'\n match = re.search('flowcontrol send (\\\\w+)$', config, re.M)\n if match:\n value = match.group(1)\n return dict(flowcontrol_send=value)", "docstring": "Scans the config block and returns the flowcontrol send value\n\nArgs:\nconfig (str): The interface config block to scan\n\nReturns:\ndict: Returns a dict object with the flowcontrol send value\nretrieved from the config block. The returned dict object\nis intended to be merged into the interface resource dict", "source": "codesearchnet"} {"code": "def make_velocity_profile(self, v1, v2, distance, min_time):\n if (min_time > 0):\n min_time -= self.velocity_settle\n distance -= (self.velocity_settle * v2)\n ramp_time = self.acceleration_time(v1, v2)\n ramp_distance = self.ramp_distance(v1, v2, ramp_time)\n remaining_distance = (distance - ramp_distance)\n if (min_time > ramp_time):\n pad_velocity = (remaining_distance / (min_time - ramp_time))\n if (pad_velocity > max(v1, v2)):\n it = self._make_hat(v1, v2, self.acceleration, distance, min_time)\n elif (pad_velocity < min(v1, v2)):\n it = self._make_hat(v1, v2, (- self.acceleration), distance, min_time)\n else:\n it = self._make_padded_ramp(v1, v2, pad_velocity, min_time)\n elif (remaining_distance < 0):\n it = self._make_hat(v1, v2, (- self.acceleration), distance, min_time)\n else:\n it = self._make_hat(v1, v2, self.acceleration, distance, min_time)\n time_array = [0.0]\n velocity_array = [v1]\n for (t, v) in it:\n assert (t >= 0), ('Got negative t %s' % t)\n if (t == 0):\n assert (v == velocity_array[(- 1)]), \"Can't move velocity in zero time\"\n continue\n if ((v * velocity_array[(- 1)]) < 0):\n fraction = (velocity_array[(- 1)] / (velocity_array[(- 1)] - v))\n time_array.append((time_array[(- 1)] + (fraction * t)))\n velocity_array.append(0)\n t -= (fraction * t)\n time_array.append((time_array[(- 1)] + t))\n velocity_array.append(v)\n if (self.velocity_settle > 0):\n time_array.append((time_array[(- 1)] + self.velocity_settle))\n velocity_array.append(v2)\n return (time_array, velocity_array)", "docstring": "Calculate PVT points that will perform the move within motor params\n\nArgs:\nv1 (float): Starting velocity in EGUs/s\nv2 (float): Ending velocity in EGUs/s\ndistance (float): Relative distance to travel in EGUs\nmin_time (float): The minimum time the move should take\n\nReturns:\ntuple: (time_list, position_list) where time_list is a list of\nrelative time points in seconds, and position_list is the\nposition in EGUs that the motor should be", "source": "codesearchnet"} {"code": "def reply(self,message,message_type):\n \n if message_type == MULTIPART:\n raise Exception(\"Unsupported reply type\")\n \n super(Replier,self).send(message,message_type)", "docstring": "Send a reply message of the given type\n\nArgs:\n- message: the message to publish\n- message_type: the type of message being sent", "source": "juraj-google-style"} {"code": "def select_update_method(self, force_interactive, force_change_set):\n \n if self.interactive or force_interactive:\n return self.interactive_update_stack\n elif force_change_set:\n return self.noninteractive_changeset_update\n else:\n return self.default_update_stack", "docstring": "Select the correct update method when updating a stack.\n\nArgs:\nforce_interactive (str): Whether or not to force interactive mode\nno matter what mode the provider is in.\nforce_change_set (bool): Whether or not to force change set 
use.\n\nReturns:\nfunction: The correct object method to use when updating.", "source": "juraj-google-style"} {"code": "def extract_names(source):\n if (source is None):\n return None\n source = dedent(source)\n funcdef = find_funcdef(source)\n params = extract_params(source)\n names = []\n if isinstance(funcdef, ast.FunctionDef):\n stmts = funcdef.body\n elif isinstance(funcdef, ast.Lambda):\n stmts = [funcdef.body]\n else:\n raise ValueError('must not happen')\n for stmt in stmts:\n for node in ast.walk(stmt):\n if isinstance(node, ast.Name):\n if ((node.id not in names) and (node.id not in params)):\n names.append(node.id)\n return names", "docstring": "Extract names from a function definition\n\nLooks for a function definition in the source.\nOnly the first function definition is examined.\n\nReturns:\na list names(identifiers) used in the body of the function\nexcluding function parameters.", "source": "codesearchnet"} {"code": "def post_process_for_mask_generation(self, all_masks, all_scores, all_boxes, crops_nms_thresh, return_tensors='pt'):\n if return_tensors == 'pt':\n return _postprocess_for_mg(all_masks, all_scores, all_boxes, crops_nms_thresh)\n elif return_tensors == 'tf':\n return _postprocess_for_mg_tf(all_masks, all_scores, all_boxes, crops_nms_thresh)", "docstring": "Post processes mask that are generated by calling the Non Maximum Suppression algorithm on the predicted masks.\n\nArgs:\nall_masks (`Union[List[torch.Tensor], List[tf.Tensor]]`):\nList of all predicted segmentation masks\nall_scores (`Union[List[torch.Tensor], List[tf.Tensor]]`):\nList of all predicted iou scores\nall_boxes (`Union[List[torch.Tensor], List[tf.Tensor]]`):\nList of all bounding boxes of the predicted masks\ncrops_nms_thresh (`float`):\nThreshold for NMS (Non Maximum Suppression) algorithm.\nreturn_tensors (`str`, *optional*, defaults to `pt`):\nIf `pt`, returns `torch.Tensor`. 
If `tf`, returns `tf.Tensor`.", "source": "github-repos"} {"code": "def dict_to_pyxb(rp_dict):\n rp_pyxb = d1_common.types.dataoneTypes.replicationPolicy()\n rp_pyxb.replicationAllowed = rp_dict['allowed']\n rp_pyxb.numberReplicas = rp_dict['num']\n rp_pyxb.blockedMemberNode = rp_dict['block']\n rp_pyxb.preferredMemberNode = rp_dict['pref']\n normalize(rp_pyxb)\n return rp_pyxb", "docstring": "Convert dict to ReplicationPolicy PyXB object.\n\nArgs:\nrp_dict: Native Python structure representing a Replication Policy.\n\nExample::\n\n{\n'allowed': True,\n'num': 3,\n'blockedMemberNode': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'},\n'preferredMemberNode': {'urn:node:NODE4', 'urn:node:NODE5'},\n}\n\nReturns:\nReplicationPolicy PyXB object.", "source": "codesearchnet"} {"code": "def protorpc_to_endpoints_error(self, status, body):\n \n try:\n rpc_error = self.__PROTOJSON.decode_message(remote.RpcStatus, body)\n except (ValueError, messages.ValidationError):\n rpc_error = remote.RpcStatus()\n\n if rpc_error.state == remote.RpcStatus.State.APPLICATION_ERROR:\n\n \n error_class = _ERROR_NAME_MAP.get(rpc_error.error_name)\n if error_class:\n status, body = self.__write_error(error_class.http_status,\n rpc_error.error_message)\n return status, body", "docstring": "Convert a ProtoRPC error to the format expected by Google Endpoints.\n\nIf the body does not contain an ProtoRPC message in state APPLICATION_ERROR\nthe status and body will be returned unchanged.\n\nArgs:\nstatus: HTTP status of the response from the backend\nbody: JSON-encoded error in format expected by Endpoints frontend.\n\nReturns:\nTuple of (http status, body)", "source": "juraj-google-style"} {"code": "def _data_from_dotnotation(self, key, default=None):\n if (key is None):\n raise KeyError('NoneType is not a valid key!')\n doc = self._collection.find_one({'_id': ObjectId(self._workflow_id)})\n if (doc is None):\n return default\n for k in key.split('.'):\n doc = doc[k]\n return doc", "docstring": "Returns the MongoDB data from a key using dot notation.\n\nArgs:\nkey (str): The key to the field in the workflow document. Supports MongoDB's\ndot notation for embedded fields.\ndefault (object): The default value that is returned if the key\ndoes not exist.\n\nReturns:\nobject: The data for the specified key or the default value.", "source": "codesearchnet"} {"code": "def _parse_title_url(html_chunk):\n \n title = html_chunk.find(\"div\", {\"class\": \"comment\"})\n\n if not title:\n return _parse_alt_title(html_chunk), None\n\n title = title[0].find(\"h2\")\n if not title:\n return _parse_alt_title(html_chunk), None\n\n \n url = None\n url_tag = title[0].find(\"a\")\n if url_tag:\n url = url_tag[0].params.get(\"href\", None)\n title = url_tag\n\n return title[0].getContent(), normalize_url(BASE_URL, url)", "docstring": "Parse title/name of the book and URL of the book.\n\nArgs:\nhtml_chunk (obj): HTMLElement containing slice of the page with details.\n\nReturns:\ntuple: (title, url), both as strings.", "source": "juraj-google-style"} {"code": "def count_variables_by_type(variables=None):\n \n if variables is None:\n variables = tf.global_variables() + tf.local_variables()\n unique_types = set(v.dtype.base_dtype for v in variables)\n results_dict = {}\n for dtype in unique_types:\n if dtype == tf.string:\n tf.logging.warning(\n \"NB: string Variables present. 
The memory usage for these Variables \"\n \"will not be accurately computed as it depends on the exact strings \"\n \"stored in a particular session.\")\n vars_of_type = [v for v in variables if v.dtype.base_dtype == dtype]\n num_scalars = sum(v.shape.num_elements() for v in vars_of_type)\n results_dict[dtype] = {\n \"num_variables\": len(vars_of_type),\n \"num_scalars\": num_scalars\n }\n return results_dict", "docstring": "Returns a dict mapping dtypes to number of variables and scalars.\n\nArgs:\nvariables: iterable of `tf.Variable`s, or None. If None is passed, then all\nglobal and local variables in the current graph are used.\n\nReturns:\nA dict mapping tf.dtype keys to a dict containing the keys 'num_scalars' and\n'num_variables'.", "source": "juraj-google-style"} {"code": "def RemoveClass(self, class_name):\n if (class_name not in self._class_mapping):\n raise problems.NonexistentMapping(class_name)\n del self._class_mapping[class_name]", "docstring": "Removes an entry from the list of known classes.\n\nArgs:\nclass_name: A string with the class name that is to be removed.\nRaises:\nNonexistentMapping if there is no class with the specified class_name.", "source": "codesearchnet"} {"code": "def forward(self, past_values: torch.Tensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=None) -> Union[Tuple, PatchTSMixerEncoderOutput]:\n return_dict = return_dict if return_dict is not None else self.use_return_dict\n patches = self.patcher(past_values)\n if self.positional_encoder is not None:\n patches = self.positional_encoder(patches)\n last_hidden_state, hidden_states = self.mlp_mixer_encoder(patches, output_hidden_states=output_hidden_states)\n if not return_dict:\n return tuple((v for v in [last_hidden_state, hidden_states]))\n return PatchTSMixerEncoderOutput(last_hidden_state=last_hidden_state, hidden_states=hidden_states)", "docstring": "past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`):\nContext values of the time series. For a pretraining task, this denotes the input time series to\npredict the masked portion. For a forecasting task, this denotes the history/past time series values.\nSimilarly, for classification or regression tasks, it denotes the appropriate context values of the\ntime series.\n\nFor univariate time series, `num_input_channels` dimension should be 1. 
For multivariate time series,\nit is greater than 1.\n\nReturns:\n`torch.FloatTensor` of shape `(batch_size, n_vars, num_patches, d_model)`", "source": "github-repos"} {"code": "def disable(self, identity, params=None, headers=None):\n path = self._sub_url_params('/creditor_bank_accounts/:identity/actions/disable', {'identity': identity})\n if (params is not None):\n params = {'data': params}\n response = self._perform_request('POST', path, params, headers, retry_failures=False)\n return self._resource_for(response)", "docstring": "Disable a creditor bank account.\n\nImmediately disables the bank account, no money can be paid out to a\ndisabled account.\n\nThis will return a `disable_failed` error if the bank account has\nalready been disabled.\n\nA disabled bank account can be re-enabled by creating a new bank\naccount resource with the same details.\n\nArgs:\nidentity (string): Unique identifier, beginning with \"BA\".\nparams (dict, optional): Request body.\n\nReturns:\nListResponse of CreditorBankAccount instances", "source": "codesearchnet"} {"code": "def get_repo_data(saltenv='base'):\n \n \n \n \n repo_details = _get_repo_details(saltenv)\n\n if repo_details.winrepo_age == -1:\n \n log.debug('No winrepo.p cache file. Refresh pkg db now.')\n refresh_db(saltenv=saltenv)\n\n if 'winrepo.data' in __context__:\n log.trace('get_repo_data returning results from __context__')\n return __context__['winrepo.data']\n else:\n log.trace('get_repo_data called reading from disk')\n\n try:\n serial = salt.payload.Serial(__opts__)\n with salt.utils.files.fopen(repo_details.winrepo_file, 'rb') as repofile:\n try:\n repodata = salt.utils.data.decode(serial.loads(repofile.read()) or {})\n __context__['winrepo.data'] = repodata\n return repodata\n except Exception as exc:\n log.exception(exc)\n return {}\n except IOError as exc:\n log.error('Not able to read repo file')\n log.exception(exc)\n return {}", "docstring": "Returns the existing package metadata db. Will create it, if it does not\nexist, however will not refresh it.\n\nArgs:\nsaltenv (str): Salt environment. Default ``base``\n\nReturns:\ndict: A dict containing contents of metadata db.\n\nCLI Example:\n\n.. 
code-block:: bash\n\nsalt '*' pkg.get_repo_data", "source": "juraj-google-style"} {"code": "def validate_all_keys_in_obj(obj_name, obj, validation_fun):\n \n for key, value in obj.items():\n validation_fun(obj_name, key)\n if isinstance(value, dict):\n validate_all_keys_in_obj(obj_name, value, validation_fun)\n elif isinstance(value, list):\n validate_all_items_in_list(obj_name, value, validation_fun)", "docstring": "Validate all (nested) keys in `obj` by using `validation_fun`.\n\nArgs:\nobj_name (str): name for `obj` being validated.\nobj (dict): dictionary object.\nvalidation_fun (function): function used to validate the value\nof `key`.\n\nReturns:\nNone: indicates validation successful\n\nRaises:\nValidationError: `validation_fun` will raise this error on failure", "source": "juraj-google-style"} {"code": "def display_task_progress(\n self, instance, project, region, request_id=None, user=None,\n poll_interval=60):\n \n total_completed = 0\n\n while True:\n task_results = self.client.get_task_data(\n instance, project, region, request_id=request_id, user=user)\n tasks = {task['id']: task for task in task_results}\n completed_tasks = set()\n pending_tasks = set()\n\n for task in tasks.values():\n if task.get('successful') is not None:\n completed_tasks.add(task['id'])\n else:\n pending_tasks.add(task['id'])\n\n if len(completed_tasks) > total_completed or not completed_tasks:\n total_completed = len(completed_tasks)\n\n print('Task status update (completed: {0:d} | pending: {1:d})'.format(\n len(completed_tasks), len(pending_tasks)))\n\n print('Completed tasks:')\n for task_id in completed_tasks:\n self._print_task_data(tasks[task_id])\n\n print('Pending tasks:')\n for task_id in pending_tasks:\n self._print_task_data(tasks[task_id])\n\n if len(completed_tasks) == len(task_results) and completed_tasks:\n print('All {0:d} Tasks completed'.format(len(task_results)))\n return\n\n time.sleep(poll_interval)", "docstring": "Displays the overall progress of tasks in a Turbinia job.\n\nArgs:\ninstance (string): The name of the Turbinia instance\nproject (string): The project containing the disk to process\nregion (string): Region where turbinia is configured.\nrequest_id (string): The request ID provided by Turbinia.\nuser (string): The username to filter tasks by.\npoll_interval (int): The interval at which to poll for new results.", "source": "juraj-google-style"} {"code": "def set_privilege(self, name, value=None):\n cmd = ('username %s' % name)\n if (value is not None):\n if (not isprivilege(value)):\n raise TypeError('priviledge value must be between 0 and 15')\n cmd += (' privilege %s' % value)\n else:\n cmd += ' privilege 1'\n return self.configure(cmd)", "docstring": "Configures the user privilege value in EOS\n\nArgs:\nname (str): The name of the user to craete\n\nvalue (int): The privilege value to assign to the user. 
Valid\nvalues are in the range of 0 to 15\n\nReturns:\nTrue if the operation was successful otherwise False\n\nRaises:\nTypeError: if the value is not in the valid range", "source": "codesearchnet"} {"code": "def transpose(self, name=None):\n if any(((x > 1) for x in self._rate)):\n raise base.NotSupportedError('Cannot transpose a dilated convolution module.')\n if any(((p != self._conv_op_padding) for p in self._padding)):\n raise base.NotSupportedError('Cannot tranpose a convolution using mixed paddings or paddings other than SAME or VALID.')\n if (name is None):\n name = (self.module_name + '_transpose')\n\n def output_shape():\n if (self._data_format == DATA_FORMAT_NCHW):\n return self.input_shape[2:4]\n else:\n return self.input_shape[1:3]\n return Conv2DTranspose(output_channels=(lambda : self._input_channels), output_shape=output_shape, kernel_shape=self._kernel_shape, stride=self._stride, padding=self._conv_op_padding, use_bias=self._use_bias, initializers=self._initializers, partitioners=self._partitioners, regularizers=self._regularizers, data_format=self._data_format, custom_getter=self._custom_getter, name=name)", "docstring": "Returns matching `Conv2DTranspose` module.\n\nArgs:\nname: Optional string assigning name of transpose module. The default name\nis constructed by appending \"_transpose\" to `self.name`.\n\nReturns:\n`Conv2DTranspose` module.\n\nRaises:\nbase.NotSupportedError: If `rate` in any dimension > 1.", "source": "codesearchnet"} {"code": "def mahalanobis_distances(df, axis=0):\n \n df = df.transpose() if axis == 1 else df\n means = df.mean()\n try:\n inv_cov = np.linalg.inv(df.cov())\n except LinAlgError:\n return pd.Series([np.NAN] * len(df.index), df.index,\n name='Mahalanobis')\n dists = []\n for i, sample in df.iterrows():\n dists.append(mahalanobis(sample, means, inv_cov))\n\n return pd.Series(dists, df.index, name='Mahalanobis')", "docstring": "Returns a pandas Series with Mahalanobis distances for each sample on the\naxis.\n\nNote: does not work well when # of observations < # of dimensions\nWill either return NaN in answer\nor (in the extreme case) fail with a Singular Matrix LinAlgError\n\nArgs:\ndf: pandas DataFrame with columns to run diagnostics on\naxis: 0 to find outlier rows, 1 to find outlier columns", "source": "juraj-google-style"} {"code": "def output_shape(self):\n return nest.map_structure(backend.int_shape, self.output)", "docstring": "Retrieves the output shape(s) of a layer.\n\nOnly applicable if the layer has one output,\nor if all outputs have the same shape.\n\nReturns:\nOutput shape, as an integer shape tuple\n(or list of shape tuples, one tuple per output tensor).\n\nRaises:\nAttributeError: if the layer has no defined output shape.\nRuntimeError: if called in Eager mode.", "source": "github-repos"} {"code": "def locked_put(self, credentials):\n serialized = credentials.to_json()\n self._dictionary[self._key] = serialized", "docstring": "Save the credentials to the dictionary.\n\nArgs:\ncredentials: A :class:`oauth2client.client.OAuth2Credentials`\ninstance.", "source": "codesearchnet"} {"code": "def _HasExpectedLineLength(self, file_object):\n \n original_file_position = file_object.tell()\n line_reader = self._CreateLineReader(file_object)\n for _ in range(0, 20):\n \n \n sample_line = line_reader.readline(self._maximum_line_length + 1)\n if len(sample_line) > self._maximum_line_length:\n file_object.seek(original_file_position)\n return False\n file_object.seek(original_file_position)\n return True", "docstring": "Determines if a 
file begins with lines of the expected length.\n\nAs we know the maximum length of valid lines in the DSV file, the presence\nof lines longer than this indicates that the file will not be parsed\nsuccessfully, without reading excessive data from a large file.\n\nArgs:\nfile_object (dfvfs.FileIO): file-like object.\n\nReturns:\nbool: True if the file has lines of the expected length.", "source": "juraj-google-style"} {"code": "def parse_elements(elements):\n \n if not len(elements) == 5:\n raise ValueError('Invalid WPL waypoint data')\n \n \n latitude = parse_latitude(elements[0], elements[1])\n longitude = parse_longitude(elements[2], elements[3])\n name = elements[4]\n return Waypoint(latitude, longitude, name)", "docstring": "Parse waypoint data elements.\n\nArgs:\nelements (list): Data values for fix\n\nReturns:\nnmea.Waypoint: Object representing data", "source": "juraj-google-style"} {"code": "def write(self, mi_cmd_to_write, timeout_sec=DEFAULT_GDB_TIMEOUT_SEC, raise_error_on_timeout=True, read_response=True):\n self.verify_valid_gdb_subprocess()\n if (timeout_sec < 0):\n self.logger.warning('timeout_sec was negative, replacing with 0')\n timeout_sec = 0\n if (type(mi_cmd_to_write) in [str, unicode]):\n pass\n elif (type(mi_cmd_to_write) == list):\n mi_cmd_to_write = '\\n'.join(mi_cmd_to_write)\n else:\n raise TypeError(('The gdb mi command must a be str or list. Got ' + str(type(mi_cmd_to_write))))\n self.logger.debug('writing: %s', mi_cmd_to_write)\n if (not mi_cmd_to_write.endswith('\\n')):\n mi_cmd_to_write_nl = (mi_cmd_to_write + '\\n')\n else:\n mi_cmd_to_write_nl = mi_cmd_to_write\n if USING_WINDOWS:\n outputready = [self.stdin_fileno]\n else:\n (_, outputready, _) = select.select([], self.write_list, [], timeout_sec)\n for fileno in outputready:\n if (fileno == self.stdin_fileno):\n self.gdb_process.stdin.write(mi_cmd_to_write_nl.encode())\n self.gdb_process.stdin.flush()\n else:\n self.logger.error(('got unexpected fileno %d' % fileno))\n if (read_response is True):\n return self.get_gdb_response(timeout_sec=timeout_sec, raise_error_on_timeout=raise_error_on_timeout)\n else:\n return []", "docstring": "Write to gdb process. Block while parsing responses from gdb for a maximum of timeout_sec.\n\nArgs:\nmi_cmd_to_write (str or list): String to write to gdb. If list, it is joined by newlines.\ntimeout_sec (float): Maximum number of seconds to wait for response before exiting. Must be >= 0.\nraise_error_on_timeout (bool): If read_response is True, raise error if no response is received\nread_response (bool): Block and read response. If there is a separate thread running,\nthis can be false, and the reading thread read the output.\nReturns:\nList of parsed gdb responses if read_response is True, otherwise []\nRaises:\nNoGdbProcessError if there is no gdb subprocess running\nTypeError if mi_cmd_to_write is not valid", "source": "codesearchnet"} {"code": "def copy(self, no_overlap: bool = None,\n no_contiguous: bool = None) -> \"IntervalList\":\n \n if no_overlap is None:\n no_overlap = self.no_overlap\n if no_contiguous is None:\n no_contiguous = self.no_contiguous\n return IntervalList(self.intervals, no_overlap=no_overlap,\n no_contiguous=no_contiguous)", "docstring": "Makes and returns a copy of the :class:`IntervalList`. 
The\n``no_overlap``/``no_contiguous`` parameters can be changed.\n\nArgs:\nno_overlap: merge intervals that overlap (now and on subsequent\naddition)?\nno_contiguous: if ``no_overlap`` is set, merge intervals that are\ncontiguous too?", "source": "juraj-google-style"} {"code": "def _get_initial_request(self):\n if (self._leaser is not None):\n lease_ids = list(self._leaser.ack_ids)\n else:\n lease_ids = []\n request = types.StreamingPullRequest(modify_deadline_ack_ids=list(lease_ids), modify_deadline_seconds=([self.ack_deadline] * len(lease_ids)), stream_ack_deadline_seconds=self.ack_histogram.percentile(99), subscription=self._subscription)\n return request", "docstring": "Return the initial request for the RPC.\n\nThis defines the initial request that must always be sent to Pub/Sub\nimmediately upon opening the subscription.\n\nReturns:\ngoogle.cloud.pubsub_v1.types.StreamingPullRequest: A request\nsuitable for being the first request on the stream (and not\nsuitable for any other purpose).", "source": "codesearchnet"} {"code": "def _store_outputs_in_object_store(self, object_ids, outputs):\n for i in range(len(object_ids)):\n if isinstance(outputs[i], ray.actor.ActorHandle):\n raise Exception('Returning an actor handle from a remote function is not allowed).')\n if (outputs[i] is ray.experimental.no_return.NoReturn):\n if (not self.plasma_client.contains(pyarrow.plasma.ObjectID(object_ids[i].binary()))):\n raise RuntimeError(\"Attempting to return 'ray.experimental.NoReturn' from a remote function, but the corresponding ObjectID does not exist in the local object store.\")\n else:\n self.put_object(object_ids[i], outputs[i])", "docstring": "Store the outputs of a remote function in the local object store.\n\nThis stores the values that were returned by a remote function in the\nlocal object store. If any of the return values are object IDs, then\nthese object IDs are aliased with the object IDs that the scheduler\nassigned for the return values. This is called by the worker that\nexecutes the remote function.\n\nNote:\nThe arguments object_ids and outputs should have the same length.\n\nArgs:\nobject_ids (List[ObjectID]): The object IDs that were assigned to\nthe outputs of the remote function call.\noutputs (Tuple): The value returned by the remote function. If the\nremote function was supposed to only return one value, then its\noutput was wrapped in a tuple with one element prior to being\npassed into this function.", "source": "codesearchnet"} {"code": "def get_feature_variable_double(self, feature_key, variable_key, user_id, attributes=None):\n \n\n variable_type = entities.Variable.Type.DOUBLE\n return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)", "docstring": "Returns value for a certain double variable attached to a feature flag.\n\nArgs:\nfeature_key: Key of the feature whose variable's value is being accessed.\nvariable_key: Key of the variable whose value is to be accessed.\nuser_id: ID for user.\nattributes: Dict representing user attributes.\n\nReturns:\nDouble value of the variable. 
None if:\n- Feature key is invalid.\n- Variable key is invalid.\n- Mismatch with type of variable.", "source": "juraj-google-style"} {"code": "def AddArguments(cls, argument_group):\n \n argument_group.add_argument(\n '--nsrlsvr-hash', '--nsrlsvr_hash', dest='nsrlsvr_hash', type=str,\n action='store', choices=nsrlsvr.NsrlsvrAnalyzer.SUPPORTED_HASHES,\n default=cls._DEFAULT_HASH, metavar='HASH', help=(\n 'Type of hash to use to query nsrlsvr instance, the default is: '\n '{0:s}. Supported options: {1:s}'.format(\n cls._DEFAULT_HASH, ', '.join(\n nsrlsvr.NsrlsvrAnalyzer.SUPPORTED_HASHES))))\n\n argument_group.add_argument(\n '--nsrlsvr-host', '--nsrlsvr_host', dest='nsrlsvr_host', type=str,\n action='store', default=cls._DEFAULT_HOST, metavar='HOST',\n help=(\n 'Hostname or IP address of the nsrlsvr instance to query, the '\n 'default is: {0:s}').format(cls._DEFAULT_HOST))\n\n argument_group.add_argument(\n '--nsrlsvr-label', '--nsrlsvr_label', dest='nsrlsvr_label', type=str,\n action='store', default=cls._DEFAULT_LABEL, metavar='LABEL', help=(\n 'Label to apply to events, the default is: '\n '{0:s}.').format(cls._DEFAULT_LABEL))\n\n argument_group.add_argument(\n '--nsrlsvr-port', '--nsrlsvr_port', dest='nsrlsvr_port', type=int,\n action='store', default=cls._DEFAULT_PORT, metavar='PORT', help=(\n 'Port number of the nsrlsvr instance to query, the default is: '\n '{0:d}.').format(cls._DEFAULT_PORT))", "docstring": "Adds command line arguments the helper supports to an argument group.\n\nThis function takes an argument parser or an argument group object and adds\nto it all the command line arguments this helper supports.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser): group\nto append arguments to.", "source": "juraj-google-style"} {"code": "def from_file(cls, name: str, mod_path: Tuple[str]=('.',), description: str=None) -> 'DataModel':\n with open(name, encoding='utf-8') as infile:\n yltxt = infile.read()\n return cls(yltxt, mod_path, description)", "docstring": "Initialize the data model from a file with YANG library data.\n\nArgs:\nname: Name of a file with YANG library data.\nmod_path: Tuple of directories where to look for YANG modules.\ndescription: Optional description of the data model.\n\nReturns:\nThe data model instance.\n\nRaises:\nThe same exceptions as the class constructor above.", "source": "codesearchnet"} {"code": "class Flatten(Layer):\n\n def __init__(self, data_format=None, **kwargs):\n super().__init__(**kwargs)\n self.data_format = backend.standardize_data_format(data_format)\n self.input_spec = InputSpec(min_ndim=1)\n self._channels_first = self.data_format == 'channels_first'\n\n def call(self, inputs):\n input_shape = inputs.shape\n rank = len(input_shape)\n if self._channels_first and rank > 1:\n inputs = ops.transpose(inputs, axes=(0, *range(2, rank), 1))\n output_shape = tuple((dim if dim is not None else -1 for dim in self.compute_output_shape(input_shape)))\n return ops.reshape(inputs, output_shape)\n\n def compute_output_shape(self, input_shape):\n non_batch_dims = input_shape[1:]\n if len(non_batch_dims) == 0:\n flattened_dim = 1\n elif any((d is None for d in non_batch_dims)):\n flattened_dim = None\n else:\n flattened_dim = math.prod(non_batch_dims)\n return (input_shape[0], flattened_dim)\n\n def compute_output_spec(self, inputs):\n output_shape = self.compute_output_shape(inputs.shape)\n return KerasTensor(shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse)\n\n def get_config(self):\n config = {'data_format': 
self.data_format}\n base_config = super().get_config()\n return {**base_config, **config}", "docstring": "Flattens the input. Does not affect the batch size.\n\nNote: If inputs are shaped `(batch,)` without a feature axis, then\nflattening adds an extra channel dimension and output shape is `(batch, 1)`.\n\nArgs:\ndata_format: A string, one of `\"channels_last\"` (default) or\n`\"channels_first\"`. The ordering of the dimensions in the inputs.\n`\"channels_last\"` corresponds to inputs with shape\n`(batch, ..., channels)` while `\"channels_first\"` corresponds to\ninputs with shape `(batch, channels, ...)`.\nWhen unspecified, uses `image_data_format` value found in your Keras\nconfig file at `~/.keras/keras.json` (if exists). Defaults to\n`\"channels_last\"`.\n\nExample:\n\n>>> x = keras.Input(shape=(10, 64))\n>>> y = keras.layers.Flatten()(x)\n>>> y.shape\n(None, 640)", "source": "github-repos"} {"code": "def convert_matmul(params, w_name, scope_name, inputs, layers, weights, names):\n \n print('Converting matmul ...')\n\n if names == 'short':\n tf_name = 'MMUL' + random_string(4)\n elif names == 'keep':\n tf_name = w_name\n else:\n tf_name = w_name + str(random.random())\n\n if len(inputs) == 1:\n weights_name = '{0}.weight'.format(w_name)\n\n W = weights[weights_name].numpy().transpose()\n input_channels, output_channels = W.shape\n\n keras_weights = [W]\n\n dense = keras.layers.Dense(\n output_channels,\n weights=keras_weights, use_bias=False, name=tf_name, bias_initializer='zeros', kernel_initializer='zeros',\n )\n layers[scope_name] = dense(layers[inputs[0]])\n elif len(inputs) == 2:\n weights_name = '{0}.weight'.format(w_name)\n\n W = weights[weights_name].numpy().transpose()\n input_channels, output_channels = W.shape\n\n keras_weights = [W]\n\n dense = keras.layers.Dense(\n output_channels,\n weights=keras_weights, use_bias=False, name=tf_name, bias_initializer='zeros', kernel_initializer='zeros',\n )\n layers[scope_name] = dense(layers[inputs[0]])\n else:\n raise AssertionError('Cannot convert matmul layer')", "docstring": "Convert matmul layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "juraj-google-style"} {"code": "def if_then_else(cls, condition: 'TensorFluent', true_case: 'TensorFluent', false_case: 'TensorFluent') -> 'TensorFluent':\n true = TensorFluent.constant(True, tf.bool)\n false = TensorFluent.constant(False, tf.bool)\n ite = (((condition == true) * true_case) + ((condition == false) * false_case))\n if ((true_case.dtype == tf.bool) and (false_case.dtype == tf.bool)):\n ite = ite.cast(tf.bool)\n return ite", "docstring": "Returns a TensorFluent for the control op if-then-else.\n\nArgs:\ncondition: Boolean fluent for the if condition.\ntrue_case: Fluent returned in the true clause.\nfalse_case: Fluent returned in the false clause.\n\nReturns:\nA TensorFluent wrapping the if-then-else control statement.\n\nRaises:\nValueError: If cases don't have same shape.", "source": "codesearchnet"} {"code": "def replace_by_xml(self, xml_str, el_idx=0):\n \n root_el = self.parse_xml(xml_str)\n self.replace_by_etree(root_el, el_idx)", "docstring": "Replace element.\n\nSelect element that has the same name as ``xml_str``, then replace the selected\nelement with ``xml_str``\n\n- ``xml_str`` must have a single element in the root.\n- The root element in 
``xml_str`` can have an arbitrary number of children.\n\nArgs:\nxml_str : str\nNew element that will replace the existing element.", "source": "juraj-google-style"} {"code": "def get_status(self):\n self.__update_state()\n if self._state.active:\n return self.RUNNING\n else:\n return self._state.result_status", "docstring": "Get status enum.\n\nReturns:\nOne of the status enum.", "source": "codesearchnet"} {"code": "def _sparse_tensor(self, data, batch_size=-1):\n indices = []\n values = []\n max_col_count = 0\n for batch, batch_ix in zip(data, range(len(data))):\n for column, column_ix in zip(batch, range(len(batch))):\n indices.append([batch_ix, column_ix])\n values.append(column)\n max_col_count = max(max_col_count, column_ix + 1)\n shape = [batch_size if batch_size != -1 else len(data), max_col_count]\n value_type = dtypes.string if not values or isinstance(values[0], str) else dtypes.int64\n return sparse_tensor.SparseTensor(constant_op.constant(indices, dtypes.int64, [len(indices), 2]), constant_op.constant(values, value_type, [len(indices)]), constant_op.constant(shape, dtypes.int64))", "docstring": "Generates a SparseTensor.\n\nArgs:\ndata: Should be a list of list of strings or int64. Each item of the outer\nlist represents a batch. Each item of the batch is a feature of a\nspecific feature column.\nbatch_size: optional batch size, especially for cases when data has no\nentry for some batches.\n\nReturns:\nA SparseTensor.", "source": "github-repos"} {"code": "def is_link(url, processed, files):\n if (url not in processed):\n is_file = url.endswith(BAD_TYPES)\n if is_file:\n files.add(url)\n return False\n return True\n return False", "docstring": "Determine whether or not a link should be crawled\nA url should not be crawled if it\n- Is a file\n- Has already been crawled\n\nArgs:\nurl: str Url to be processed\nprocessed: list[str] List of urls that have already been crawled\n\nReturns:\nbool If `url` should be crawled", "source": "codesearchnet"} {"code": "def get_flights_from_to(self, origin, destination):\n url = AIRLINE_FLT_BASE_POINTS.format(origin, destination)\n return self._fr24.get_airline_flight_data(url, by_airports=True)", "docstring": "Get the flights for a particular origin and destination.\n\nGiven an origin and destination this method returns the upcoming scheduled flights between these two points.\nThe data returned has the airline, airport and schedule information - this is subject to change in future.\n\nArgs:\norigin (str): The origin airport code\ndestination (str): The destination airport code\n\nReturns:\nA list of dicts, one for each scheduled flight between the two points.\n\nExample::\nfrom pyflightdata import FlightData\nf=FlightData()\n#optional login\nf.login(myemail,mypassword)\nf.get_flights_from_to('SIN','HYD')", "source": "codesearchnet"} {"code": "def GetWindowsEventMessage(self, log_source, message_identifier):\n \n database_reader = self._GetWinevtRcDatabaseReader()\n if not database_reader:\n return None\n\n if self._lcid != self.DEFAULT_LCID:\n message_string = database_reader.GetMessage(\n log_source, self.lcid, message_identifier)\n if message_string:\n return message_string\n\n return database_reader.GetMessage(\n log_source, self.DEFAULT_LCID, message_identifier)", "docstring": "Retrieves the message string for a specific Windows Event Log source.\n\nArgs:\nlog_source (str): Event Log source, such as \"Application Error\".\nmessage_identifier (int): message identifier.\n\nReturns:\nstr: message string or None if not available.", "source": 
"juraj-google-style"} {"code": "def _define_step(self, done, score, summary):\n \n if done.shape.ndims == 0:\n done = done[None]\n if score.shape.ndims == 0:\n score = score[None]\n score_mean = streaming_mean.StreamingMean((), tf.float32)\n with tf.control_dependencies([done, score, summary]):\n done_score = tf.gather(score, tf.where(done)[:, 0])\n submit_score = tf.cond(\n tf.reduce_any(done), lambda: score_mean.submit(done_score), tf.no_op)\n with tf.control_dependencies([submit_score]):\n mean_score = tf.cond(self._report, score_mean.clear, float)\n steps_made = tf.shape(score)[0]\n next_step = self._step.assign_add(steps_made)\n with tf.control_dependencies([mean_score, next_step]):\n return tf.identity(summary), mean_score, next_step, steps_made", "docstring": "Combine operations of a phase.\n\nKeeps track of the mean score and when to report it.\n\nArgs:\ndone: Tensor indicating whether current score can be used.\nscore: Tensor holding the current, possibly intermediate, score.\nsummary: Tensor holding summary string to write if not an empty string.\n\nReturns:\nTuple of summary tensor, mean score, and new global step. The mean score\nis zero for non reporting steps.", "source": "juraj-google-style"} {"code": "def _run_simple_loop_test(self, mode, inp, body, out):\n self._maybe_skip(mode)\n with ops.device(_get_device(mode)):\n random_seed.set_random_seed(0)\n expected_types = []\n for section in [inp, body, out]:\n section_expected_types = []\n for color in section:\n if color.isupper():\n expected_type = self._lower_precision_dtype(mode).as_datatype_enum\n else:\n expected_type = types_pb2.DT_FLOAT\n section_expected_types.append(expected_type)\n expected_types.append(section_expected_types)\n a = _build_simple_loop_graph(inp, body, out)\n output_val_ref, output_val, cost_graph = self._run(mode, a)\n node_map = _build_node_map(cost_graph.node)\n section_names = ['input', 'while/body', 'output']\n all_types_correct = True\n for section_name, expected_types in zip(section_names, expected_types):\n for i, expected_type in enumerate(expected_types):\n node_name = section_name + '_%i' % i\n output_port = 0\n optimized_type = node_map[node_name].output_info[output_port].dtype\n if optimized_type != expected_type:\n print('Expected node %s to have type %s but got type %s' % (node_name, expected_type, optimized_type))\n all_types_correct = False\n self.assertTrue(all_types_correct)\n if mode == 'mkl':\n self.assertAllClose(output_val_ref, output_val, atol=0.02, rtol=0.02)\n else:\n self.assertAllClose(output_val_ref, output_val, atol=0.002, rtol=0.001)", "docstring": "Runs a test of a simple loop.\n\nThe loop has different node colors in different sections of the graph. The\narguments must be strings where each character represents the color of a\nnode in that section of the graph: w = allow, g = infer, c = clear,\nb = deny. 
CAPITALIZED characters indicate that the node is expected to be\nchanged to DT_HALF during graph optimization.\n\ninp -> loop [ body ] -> out.\n\nArgs:\nmode: Either 'cuda' or 'mkl'.\ninp: A string of letters indicating the colors and expected dtypes of the\ninput nodes.\nbody: A string of letters indicating the colors and expected dtypes of the\nbody nodes.\nout: A string of letters indicating the colors and expected dtypes of the\noutput nodes.", "source": "github-repos"} {"code": "def unpack(rv):\n \n status = headers = None\n if isinstance(rv, tuple):\n rv, status, headers = rv + (None,) * (3 - len(rv))\n if isinstance(status, (dict, list)):\n headers, status = status, headers\n return (rv, status, headers)", "docstring": "Convert rv to tuple(data, code, headers)\n\nArgs:\nrv: data or tuple that contain code and headers\nReturns:\ntuple (rv, status, headers)", "source": "juraj-google-style"} {"code": "def get_texture(self, label: str) -> Union[moderngl.Texture, moderngl.TextureArray,\n moderngl.Texture3D, moderngl.TextureCube]:\n \n return self._get_resource(label, self._textures, \"texture\")", "docstring": "Get a texture by label\n\nArgs:\nlabel (str): The label for the texture to fetch\n\nReturns:\nTexture instance", "source": "juraj-google-style"} {"code": "def _ParseNumericOption(cls, options, argument_name, default_value=None):\n \n argument_value = getattr(options, argument_name, None)\n if argument_value is None:\n return default_value\n\n if not isinstance(argument_value, py2to3.INTEGER_TYPES):\n raise errors.BadConfigOption(\n 'Unsupported option: {0:s} integer type required.'.format(\n argument_name))\n\n return argument_value", "docstring": "Parses a numeric command line argument.\n\nArgs:\noptions (argparse.Namespace): parser options.\nargument_name (str): name of the command line argument.\ndefault_value (Optional[int]): default value of the command line argument.\n\nReturns:\nint: command line argument value or the default value if the command line\nargument is not set\n\nRaises:\nBadConfigOption: if the command line argument value cannot be converted\nto a Unicode string.", "source": "juraj-google-style"} {"code": "def create_sp(operations, operation):\n operations.execute(('CREATE FUNCTION %s %s' % (operation.target.name, operation.target.sqltext)))", "docstring": "Implements ``CREATE FUNCTION``.\n\nArgs:\noperations: instance of ``alembic.operations.base.Operations``\noperation: instance of :class:`.ReversibleOp`\n\nReturns:\n``None``", "source": "codesearchnet"} {"code": "def join(input_layer, others, include_self=True, join_function=None):\n if include_self:\n list_of_tensors = [input_layer]\n list_of_tensors.extend(others)\n else:\n list_of_tensors = others\n return prettytensor.join_pretty_tensors(list_of_tensors, input_layer, join_function)", "docstring": "Joins the provided PrettyTensors with this using the join function.\n\nArgs:\ninput_layer: The input layer for this op.\nothers: Sequence of PrettyTensor objects.\ninclude_self: Whether or not this includes itself or if the value is only\nderived from others.\njoin_function: The function to use for joining, must accept a list of\ntensors. 
Use None for concat on the final dimension.\nReturns:\nself.", "source": "codesearchnet"} {"code": "def get_contained_resource(contained_resource: message.Message) -> message.Message:\n if contained_resource.DESCRIPTOR.name != 'ContainedResource':\n raise TypeError(f'Expected `ContainedResource` but got: {type(contained_resource)}.')\n oneof_field = contained_resource.WhichOneof('oneof_resource')\n if oneof_field is None:\n raise ValueError('`ContainedResource` oneof not set.')\n return proto_utils.get_value_at_field(contained_resource, oneof_field)", "docstring": "Returns the resource instance contained within `contained_resource`.\n\nArgs:\ncontained_resource: The containing `ContainedResource` instance.\n\nReturns:\nThe resource contained by `contained_resource`.\n\nRaises:\nTypeError: In the event that `contained_resource` is not of type\n`ContainedResource`.\nValueError: In the event that the oneof on `contained_resource` is not set.", "source": "github-repos"} {"code": "def batch_row_coder_benchmark_factory(generate_fn, use_batch):\n\n class CoderBenchmark(object):\n\n def __init__(self, num_elements_per_benchmark):\n self._use_batch = use_batch\n row_instance = generate_fn()\n row_type = trivial_inference.instance_to_type(row_instance)\n self._row_coder = get_row_coder(row_instance)\n self._batch_converter = DataFrameBatchConverterDropIndex(row_type)\n self._seq_coder = coders.IterableCoder(self._row_coder)\n self._data = self._batch_converter.produce_batch([generate_fn() for _ in range(num_elements_per_benchmark)])\n\n def __call__(self):\n if self._use_batch:\n impl = self._row_coder.get_impl()\n columnar = {col: self._data[col].to_numpy() for col in self._data.columns}\n output_stream = coder_impl.create_OutputStream()\n impl.encode_batch_to_stream(columnar, output_stream)\n impl.decode_batch_from_stream(columnar, coder_impl.create_InputStream(output_stream.get()))\n else:\n self._batch_converter.produce_batch(self._seq_coder.decode(self._seq_coder.encode(self._batch_converter.explode_batch(self._data))))\n CoderBenchmark.__name__ = '%s, BatchRowCoder%s' % (generate_fn.__name__, use_batch)\n return CoderBenchmark", "docstring": "Creates a benchmark that encodes and decodes a list of elements.\n\nArgs:\ncoder: coder to use to encode an element.\ngenerate_fn: a callable that generates an element.", "source": "github-repos"} {"code": "def reflect_static_member(cls, name):\n for scope in reversed(cls.scopes):\n try:\n return structured.reflect_static_member(scope, name)\n except (NotImplementedError, KeyError, AttributeError):\n continue\n return protocol.AnyType", "docstring": "Reflect 'name' using ONLY static reflection.\n\nYou most likely want to use ScopeStack.reflect instead.\n\nReturns:\nType of 'name', or protocol.AnyType.", "source": "codesearchnet"} {"code": "def put_key(self, source, rel_path):\n \n k = self._get_boto_key(rel_path)\n\n try:\n k.set_contents_from_file(source)\n except AttributeError:\n if os.path.getsize(source) > 4.8 * 1024 * 1024 * 1024:\n \n k.set_contents_from_filename(source)\n else:\n k.set_contents_from_filename(source)", "docstring": "Copy a file to the repository\n\nArgs:\nsource: Absolute path to the source file, or a file-like object\nrel_path: path relative to the root of the repository", "source": "juraj-google-style"} {"code": "def load_table_from_config(input_dir, config):\n path = pathlib.Path(input_dir).joinpath(config['path'])\n kwargs = config['pd_read_kwargs']\n return pd.read_csv(path, **kwargs)", "docstring": "Load table from table 
config dict\n\nArgs:\ninput_dir (path-like): directory containing input files\nconfig (dict): mapping with keys 'name', 'path', and 'pd_read_kwargs'.\n\nReturns:\npd.DataFrame", "source": "codesearchnet"} {"code": "def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int=3):\n super().__init__()\n in_dims = [input_dim] + [hidden_dim] * (num_layers - 1)\n out_dims = [hidden_dim] * (num_layers - 1) + [output_dim]\n self.layers = []\n for i, (in_dim, out_dim) in enumerate(zip(in_dims, out_dims)):\n activation = nn.ReLU() if i < num_layers - 1 else nn.Identity()\n layer = Mask2FormerPredictionBlock(in_dim, out_dim, activation=activation)\n self.layers.append(layer)\n self.add_module(str(i), layer)", "docstring": "A classic Multi Layer Perceptron (MLP).\n\nArgs:\ninput_dim (`int`):\nThe input dimensions.\nhidden_dim (`int`):\nThe hidden dimensions.\noutput_dim (`int`):\nThe output dimensions.\nnum_layers (int, *optional*, defaults to 3):\nThe number of layers.", "source": "github-repos"} {"code": "def get_uniquely_named_objects_by_name(object_list):\n \n if not object_list:\n return dict()\n\n result = dict()\n for obj in object_list:\n name = obj.name.value\n if name in result:\n raise GraphQLCompilationError(u'Found duplicate object key: '\n u'{} {}'.format(name, object_list))\n result[name] = obj\n\n return result", "docstring": "Return dict of name -> object pairs from a list of objects with unique names.\n\nArgs:\nobject_list: list of objects, each X of which has a unique name accessible as X.name.value\n\nReturns:\ndict, { X.name.value: X for x in object_list }\nIf the list is empty or None, returns an empty dict.", "source": "juraj-google-style"} {"code": "def itersplit_to_fields(str_,\n fsep=DEFAULT_FSEP,\n revtuple=None,\n fields=[],\n preparse=None):\n \n if preparse:\n str_ = preparse(str_)\n _fields = itersplit(str_, fsep)\n\n if revtuple is not None:\n try:\n values = (t[1] for t in izip_longest(revtuple._fields, _fields))\n return revtuple(*values)\n except Exception as e:\n log.error(revtuple)\n log.error(_fields)\n log.exception(e)\n raise\n\n return tuple(izip_longest(fields, _fields, fillvalue=None))", "docstring": "Itersplit a string into a (named, if specified) tuple.\n\nArgs:\nstr_ (str): string to split\nfsep (str): field separator (delimiter to split by)\nrevtuple (object): namedtuple (or class with a ``._fields`` attr)\n(optional)\nfields (list of str): field names (if revtuple is not specified)\npreparse (callable): function to parse str with before itersplitting\n\nReturns:\ntuple or revtuple: fields as a tuple or revtuple, if specified", "source": "juraj-google-style"} {"code": "def fit(self, X):\n LOGGER.debug('Fitting Gaussian Copula')\n column_names = self.get_column_names(X)\n distribution_class = import_object(self.distribution)\n for column_name in column_names:\n self.distribs[column_name] = distribution_class()\n column = self.get_column(X, column_name)\n self.distribs[column_name].fit(column)\n self.covariance = self._get_covariance(X)\n self.fitted = True", "docstring": "Compute the distribution for each variable and then its covariance matrix.\n\nArgs:\nX(numpy.ndarray or pandas.DataFrame): Data to model.\n\nReturns:\nNone", "source": "codesearchnet"} {"code": "def rollaxis(vari, axis, start=0):\n \n if isinstance(vari, Poly):\n core_old = vari.A.copy()\n core_new = {}\n for key in vari.keys:\n core_new[key] = rollaxis(core_old[key], axis, start)\n return Poly(core_new, vari.dim, None, vari.dtype)\n\n return numpy.rollaxis(vari, 
axis, start)", "docstring": "Roll the specified axis backwards, until it lies in a given position.\n\nArgs:\nvari (chaospy.poly.base.Poly, numpy.ndarray):\nInput array or polynomial.\naxis (int):\nThe axis to roll backwards. The positions of the other axes do not\nchange relative to one another.\nstart (int):\nThe axis is rolled until it lies before thes position.", "source": "juraj-google-style"} {"code": "def get_experiment_from_key(self, experiment_key):\n \n\n experiment = self.experiment_key_map.get(experiment_key)\n\n if experiment:\n return experiment\n\n self.logger.error('Experiment key \"%s\" is not in datafile.' % experiment_key)\n self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))\n return None", "docstring": "Get experiment for the provided experiment key.\n\nArgs:\nexperiment_key: Experiment key for which experiment is to be determined.\n\nReturns:\nExperiment corresponding to the provided experiment key.", "source": "juraj-google-style"} {"code": "def _do_serialize(struct, fmt, encoding):\n \n res = None\n _check_lib_installed(fmt, 'serialize')\n\n if fmt == 'ini':\n config = configobj.ConfigObj(encoding=encoding)\n for k, v in struct.items():\n config[k] = v\n res = b'\\n'.join(config.write())\n elif fmt in ['json', 'json5']:\n \n \n \n res = (json if fmt == 'json' else json5).dumps(struct,\n indent=2,\n separators=(',', ': '),\n ensure_ascii=False).encode(encoding)\n elif fmt == 'toml':\n if not _is_utf8(encoding):\n raise AnyMarkupError('toml must always be utf-8 encoded according to specification')\n res = toml.dumps(struct).encode(encoding)\n elif fmt == 'xml':\n \n res = xmltodict.unparse(struct, pretty=True, encoding='utf-8').encode('utf-8')\n elif fmt == 'yaml':\n res = yaml.safe_dump(struct, encoding='utf-8', default_flow_style=False)\n else:\n raise \n\n return res", "docstring": "Actually serialize input.\n\nArgs:\nstruct: structure to serialize to\nfmt: format to serialize to\nencoding: encoding to use while serializing\nReturns:\nencoded serialized structure\nRaises:\nvarious sorts of errors raised by libraries while serializing", "source": "juraj-google-style"} {"code": "def convert_to_list(self, item, separator):\n out = None\n if not isinstance(item, list):\n if 'range' in item:\n out = [item]\n else:\n out = item.split(separator)\n for i in range(len(out)):\n out[i] = out[i].replace(',', '')\n else:\n out = [item]\n return out", "docstring": "Converts a string into a list with a separator.\n\nArgs:\nitem: String that needs to be separated into a list by a given separator.\nList item is also accepted but will take no effect.\nseparator: String with which the `item` will be splited.\n\nReturns:\nList that is a splited version of a given input string.\ne.g. 
Input: `1.0, 2.0, 3.0` with `, ` separator\nOutput: [1.0, 2.0, 3.0]", "source": "github-repos"} {"code": "def build_listen(self, listen_node):\n proxy_name = listen_node.listen_header.proxy_name.text\n service_address_node = listen_node.listen_header.service_address\n config_block_lines = self.__build_config_block(listen_node.config_block)\n (host, port) = ('', '')\n if isinstance(service_address_node, pegnode.ServiceAddress):\n host = service_address_node.host.text\n port = service_address_node.port.text\n else:\n for line in config_block_lines:\n if isinstance(line, config.Bind):\n (host, port) = (line.host, line.port)\n break\n else:\n raise Exception('Not specify host and port in `listen` definition')\n return config.Listen(name=proxy_name, host=host, port=port, config_block=config_block_lines)", "docstring": "parse `listen` sections, and return a config.Listen\n\nArgs:\nlisten_node (TreeNode): Description\n\nReturns:\nconfig.Listen: an object", "source": "codesearchnet"} {"code": "def get_storage_account_keys(access_token, subscription_id, rgname, account_name):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.Storage/storageAccounts/', account_name, '/listKeys', '?api-version=', STORAGE_API])\n return do_post(endpoint, '', access_token)", "docstring": "Get the access keys for the specified storage account.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\naccount_name (str): Name of the new storage account.\n\nReturns:\nHTTP response. JSON body of storage account keys.", "source": "codesearchnet"} {"code": "async def bootstrap(self, addrs):\n \n log.debug(\"Attempting to bootstrap node with %i initial contacts\",\n len(addrs))\n cos = list(map(self.bootstrap_node, addrs))\n gathered = await asyncio.gather(*cos)\n nodes = [node for node in gathered if node is not None]\n spider = NodeSpiderCrawl(self.protocol, self.node, nodes,\n self.ksize, self.alpha)\n return await spider.find()", "docstring": "Bootstrap the server by connecting to other known nodes in the network.\n\nArgs:\naddrs: A `list` of (ip, port) `tuple` pairs. 
Note that only IP\naddresses are acceptable - hostnames will cause an error.", "source": "juraj-google-style"} {"code": "def _invalid_docstring_quote(self, quote, row, col=None):\n self.add_message('invalid-docstring-quote', line=row, args=(quote, TRIPLE_QUOTE_OPTS.get(self.config.docstring_quote)), **self.get_offset(col))", "docstring": "Add a message for an invalid docstring quote.\n\nArgs:\nquote: The quote characters that were found.\nrow: The row number the quote characters were found on.\ncol: The column the quote characters were found on.", "source": "codesearchnet"} {"code": "def _v2_get_resized_lm_head_bias(self, old_lm_head_bias: Dict[str, tf.Variable], new_num_tokens: int) -> Dict[str, tf.Tensor]:\n new_lm_head_bias = {}\n for attr, weight in old_lm_head_bias.items():\n first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight)\n size_diff = new_num_tokens - old_num_tokens\n if old_num_tokens > new_num_tokens:\n new_bias = weight.value()[..., :new_num_tokens]\n else:\n padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]]\n new_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape))\n new_lm_head_bias[attr] = new_bias\n return new_lm_head_bias", "docstring": "Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end.\nReducing the size will remove vectors from the end\n\nArgs:\nold_lm_head_bias (`Dict[str, tf.Variable]`):\nOld lm head bias to be resized.\nnew_num_tokens (`int`):\nNew number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at\nthe end. Reducing the size will remove vectors from the end.\n\nReturn:\n`tf.Tensor`: Values for the resized bias.", "source": "github-repos"} {"code": "def vsiprefix(path):\n \n vpath = path.lower()\n scheme = VSI_SCHEMES.get(urlparse(vpath).scheme, '')\n for ext in VSI_TYPES:\n if ext in vpath:\n filesys = VSI_TYPES[ext]\n break\n else:\n filesys = ''\n if filesys and scheme:\n filesys = filesys[:-1]\n return ''.join((filesys, scheme, path))", "docstring": "Returns a GDAL virtual filesystem prefixed path.\n\nArguments:\npath -- file path as str", "source": "juraj-google-style"} {"code": "def find_yang_file(profile, filename, path):\n \n \n module_dir = os.path.dirname(__file__)\n full_path = os.path.join(module_dir, \"mappings\", profile, path, filename)\n\n if os.path.exists(full_path):\n return full_path\n else:\n msg = \"Couldn't find parsing file: {}\".format(full_path)\n logger.error(msg)\n raise IOError(msg)", "docstring": "Find the necessary file for the given test case.\n\nArgs:\ndevice(napalm device connection): for which device\nfilename(str): file to find\npath(str): where to find it relative to where the module is installed", "source": "juraj-google-style"} {"code": "def clustering_factory(clf):\n required_methods = ['fit', 'fit_predict']\n for method in required_methods:\n if (not hasattr(clf, method)):\n raise TypeError('\"{}\" is not in clf. Did you pass a clusterer instance?'.format(method))\n additional_methods = {'plot_silhouette': plot_silhouette, 'plot_elbow_curve': plot_elbow_curve}\n for (key, fn) in six.iteritems(additional_methods):\n if hasattr(clf, key):\n warnings.warn('\"{}\" method already in clf. Overriding anyway. 
This may result in unintended behavior.'.format(key))\n setattr(clf, key, types.MethodType(fn, clf))\n return clf", "docstring": "Embeds scikit-plot plotting methods in an sklearn clusterer instance.\n\nArgs:\nclf: Scikit-learn clusterer instance\n\nReturns:\nThe same scikit-learn clusterer instance passed in **clf** with\nembedded scikit-plot instance methods.\n\nRaises:\nValueError: If **clf** does not contain the instance methods necessary\nfor scikit-plot instance methods.", "source": "codesearchnet"} {"code": "def uchroot(*args, **kwargs):\n \n uchroot_cmd = with_mounts(*args, uchroot_cmd_fn=no_llvm, **kwargs)\n return uchroot_cmd[\"--\"]", "docstring": "Return a customizable uchroot command.\n\nArgs:\nargs: List of additional arguments for uchroot (typical: mounts)\nReturn:\nchroot_cmd", "source": "juraj-google-style"} {"code": "def extrapolate_points(points, n_points):\n points = points[:n_points]\n lat = []\n lon = []\n last = None\n for point in points:\n if (last is not None):\n lat.append((last.lat - point.lat))\n lon.append((last.lon - point.lon))\n last = point\n dts = np.mean([p.dt for p in points])\n lons = np.mean(lon)\n lats = np.mean(lat)\n gen_sample = []\n last = points[0]\n for _ in range(n_points):\n point = Point((last.lat + lats), (last.lon + lons), None)\n point.dt = dts\n gen_sample.append(point)\n last = point\n return gen_sample", "docstring": "Extrapolate a number of points, based on the first ones\n\nArgs:\npoints (:obj:`list` of :obj:`Point`)\nn_points (int): number of points to extrapolate\nReturns:\n:obj:`list` of :obj:`Point`", "source": "codesearchnet"} {"code": "class RepeatVector(Layer):\n\n def __init__(self, n, **kwargs):\n super().__init__(**kwargs)\n self.n = n\n if not isinstance(n, int):\n raise TypeError(f'Expected an integer value for `n`, got {type(n)}.')\n self.input_spec = InputSpec(ndim=2)\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], self.n, input_shape[1])\n\n def call(self, inputs):\n input_shape = ops.shape(inputs)\n reshaped = ops.reshape(inputs, (input_shape[0], 1, input_shape[1]))\n return ops.repeat(reshaped, self.n, axis=1)\n\n def get_config(self):\n config = {'n': self.n}\n base_config = super().get_config()\n return {**base_config, **config}", "docstring": "Repeats the input n times.\n\nExample:\n\n>>> x = keras.Input(shape=(32,))\n>>> y = keras.layers.RepeatVector(3)(x)\n>>> y.shape\n(None, 3, 32)\n\nArgs:\nn: Integer, repetition factor.\n\nInput shape:\n2D tensor with shape `(batch_size, features)`.\n\nOutput shape:\n3D tensor with shape `(batch_size, n, features)`.", "source": "github-repos"} {"code": "def _get_covariance(self, X):\n \n result = pd.DataFrame(index=range(len(X)))\n column_names = self.get_column_names(X)\n for column_name in column_names:\n column = self.get_column(X, column_name)\n distrib = self.distribs[column_name]\n\n \n cdf = distrib.cumulative_distribution(column)\n\n if distrib.constant_value is not None:\n \n cdf = np.ones(column.shape) - EPSILON\n\n \n result = self.set_column(result, column_name, stats.norm.ppf(cdf))\n\n \n result = result[(result != np.inf).all(axis=1)]\n return pd.DataFrame(data=result).cov().values", "docstring": "Compute covariance matrix with transformed data.\n\nArgs:\nX: `numpy.ndarray` or `pandas.DataFrame`.\n\nReturns:\nnp.ndarray", "source": "juraj-google-style"} {"code": "def attach(self, **kwargs):\n return self.client.api.attach(self.id, **kwargs)", "docstring": "Attach to this container.\n\n:py:meth:`logs` is a wrapper around this method, 
which you can\nuse instead if you want to fetch/stream container output without first\nretrieving the entire backlog.\n\nArgs:\nstdout (bool): Include stdout.\nstderr (bool): Include stderr.\nstream (bool): Return container output progressively as an iterator\nof strings, rather than a single string.\nlogs (bool): Include the container's previous output.\n\nReturns:\nBy default, the container's output as a single string.\n\nIf ``stream=True``, an iterator of output strings.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"} {"code": "def send(self, request):\n \n if self.call is None:\n raise ValueError(\"Can not send() on an RPC that has never been open()ed.\")\n\n \n \n if self.call.is_active():\n self._request_queue.put(request)\n else:\n \n next(self.call)", "docstring": "Queue a message to be sent on the stream.\n\nSend is non-blocking.\n\nIf the underlying RPC has been closed, this will raise.\n\nArgs:\nrequest (protobuf.Message): The request to send.", "source": "juraj-google-style"} {"code": "def reverse_transform(self, col):\n \n\n output = pd.DataFrame()\n output[self.col_name] = self.get_category(col[self.col_name])\n\n return output", "docstring": "Converts data back into original format.\n\nArgs:\ncol(pandas.DataFrame): Data to transform.\n\nReturns:\npandas.DataFrame", "source": "juraj-google-style"} {"code": "def CreateSmartCampaign(client, budget_id, merchant_id):\n \n campaign_service = client.GetService('CampaignService', version='v201809')\n \n campaign = {\n 'name': 'Shopping campaign \n \n 'advertisingChannelType': 'SHOPPING',\n \n \n 'advertisingChannelSubType': 'SHOPPING_GOAL_OPTIMIZED_ADS',\n \n \n \n 'status': 'PAUSED',\n \n 'budget': {'budgetId': budget_id},\n \n 'biddingStrategyConfiguration': {\n 'biddingStrategyType': 'MAXIMIZE_CONVERSION_VALUE'\n },\n 'settings': [{\n \n 'xsi_type': 'ShoppingSetting',\n 'salesCountry': 'US',\n 'merchantId': merchant_id\n }]\n }\n\n campaign_operations = [{\n 'operator': 'ADD',\n 'operand': campaign\n }]\n\n result = campaign_service.mutate(campaign_operations)['value'][0]\n\n print ('Smart Shopping campaign with name \"%s\" and ID \"%s\" was added.'\n % (result['name'], result['id']))\n\n return result['id']", "docstring": "Adds a new Smart Shopping campaign.\n\nArgs:\nclient: an AdWordsClient instance.\nbudget_id: the str ID of the budget to be associated with the Shopping\ncampaign.\nmerchant_id: the str ID of the merchant account to be associated with the\nShopping campaign.\nReturns:\nA campaign ID.", "source": "juraj-google-style"} {"code": "def __init__(self, project=None, inspection_template_name=None, inspection_config=None, timeout=None):\n self.timeout = timeout\n self.config = {}\n self.project = project\n if inspection_config is None and inspection_template_name is None:\n raise ValueError('inspection_template_name or inspection_config must be specified')\n if inspection_template_name is not None:\n self.config['inspect_template_name'] = inspection_template_name\n if inspection_config is not None:\n self.config['inspect_config'] = inspection_config", "docstring": "Initializes a :class:`InspectForDetails` transform.\n\nArgs:\nproject: Optional. 
GCP project name in which inspection will be performed\ninspection_template_name (str): This or `inspection_config` required.\nName of inspection template to be used\nto detect sensitive data in text.\ninspection_config\n(``Union[dict, google.cloud.dlp_v2.types.InspectConfig]``):\nConfiguration for the inspector used to detect sensitive data in text.\nIf both template name and config are supplied,\nconfig takes precedence.\ntimeout (float): Optional. The amount of time, in seconds, to wait for\nthe request to complete.", "source": "github-repos"} {"code": "def _write_module_descriptor_file(handle, module_dir):\n readme = _module_descriptor_file(module_dir)\n readme_content = ('Module: %s\\nDownload Time: %s\\nDownloader Hostname: %s (PID:%d)' % (handle, str(datetime.datetime.today()), socket.gethostname(), os.getpid()))\n tf_utils.atomic_write_string_to_file(readme, readme_content, overwrite=True)", "docstring": "Writes a descriptor file about the directory containing a module.\n\nArgs:\nhandle: Module name/handle.\nmodule_dir: Directory where a module was downloaded.", "source": "codesearchnet"} {"code": "async def _send(self, request_bytes, body_bytes, h11_connection):\n (await self.sock.send_all(h11_connection.send(request_bytes)))\n if (body_bytes is not None):\n (await self.sock.send_all(h11_connection.send(body_bytes)))\n (await self.sock.send_all(h11_connection.send(h11.EndOfMessage())))", "docstring": "Takes a package and body, combines then, then shoots 'em off in to\nthe ether.\n\nArgs:\npackage (list of str): The header package.\nbody (str): The str representation of the body.", "source": "codesearchnet"} {"code": "def setPollingRate(self, iPollingRate):\n \n print '%s call setPollingRate' % self.port\n print iPollingRate\n try:\n cmd = WPANCTL_CMD + 'setprop NCP:SleepyPollInterval %s' % str(iPollingRate*1000)\n print cmd\n return self.__sendCommand(cmd)[0] != 'Fail'\n except Exception, e:\n ModuleHelper.WriteIntoDebugLogger('setPollingRate() Error: ' + str(e))", "docstring": "set data polling rate for sleepy end device\n\nArgs:\niPollingRate: data poll period of sleepy end device\n\nReturns:\nTrue: successful to set the data polling rate for sleepy end device\nFalse: fail to set the data polling rate for sleepy end device", "source": "juraj-google-style"} {"code": "def helper_delete(access_token, oid, path):\n \n full_path = ''.join([path, \"('\", oid, \"')\"])\n full_path_encoded = urllib.parse.quote(full_path, safe='')\n endpoint = ''.join([ams_rest_endpoint, full_path_encoded])\n return do_ams_delete(endpoint, full_path_encoded, access_token)", "docstring": "Helper Function to delete a Object at a URL path.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\noid (str): An OID.\npath (str): A URL Path.\n\nReturns:\nHTTP response. 
JSON body.", "source": "juraj-google-style"} {"code": "def platform(self):\n uname_sys = self._exec_command('uname -s')\n sysinfo = self._exec_command('sysinfo')\n if (uname_sys is not None):\n if (uname_sys == b'FreeBSD'):\n return 'freebsd'\n elif (uname_sys == b'Darwin'):\n return 'darwin'\n elif (uname_sys == b'Linux'):\n return 'linux'\n elif uname_sys.startswith(b'CYGWIN'):\n return 'cygwin'\n elif ((sysinfo is not None) and sysinfo):\n return 'win32'\n return 'unknown'", "docstring": "The platform the server is running on.\n\nReturns:\nstr: the platform of the remote server, as in `sys.platform`.", "source": "codesearchnet"} {"code": "def render(self, width: int, height: int) -> List[str]:\n if ((width == 0) or (height == 0)):\n return ([''] * height)\n out_chars = [([' '] * width) for _ in range(height)]\n mid_x = int(((width - 1) * self.horizontal_alignment))\n mid_y = ((height - 1) \n if self.left:\n out_chars[mid_y][:(mid_x + 1)] = (self.left * (mid_x + 1))\n if self.right:\n out_chars[mid_y][mid_x:] = (self.right * (width - mid_x))\n if self.top:\n for y in range((mid_y + 1)):\n out_chars[y][mid_x] = self.top\n if self.bottom:\n for y in range(mid_y, height):\n out_chars[y][mid_x] = self.bottom\n mid = (self.content or self.center)\n if (self.content or self.center):\n content_lines = mid.split('\\n')\n y = (mid_y - ((len(content_lines) - 1) \n for (dy, content_line) in enumerate(content_lines):\n s = int(((len(content_line) - 1) * self.horizontal_alignment))\n x = (mid_x - s)\n for (dx, c) in enumerate(content_line):\n out_chars[(y + dy)][(x + dx)] = c\n return [''.join(line) for line in out_chars]", "docstring": "Returns a list of text lines representing the block's contents.\n\nArgs:\nwidth: The width of the output text. Must be at least as large as\nthe block's minimum width.\nheight: The height of the output text. Must be at least as large as\nthe block's minimum height.\n\nReturns:\nText pre-split into lines.", "source": "codesearchnet"} {"code": "def acquire(self, host: str, port: int, use_ssl: bool=False, host_key: Optional[Any]=None) -> Union[(Connection, SSLConnection)]:\n assert isinstance(port, int), 'Expect int. Got {}'.format(type(port))\n assert (not self._closed)\n (yield from self._process_no_wait_releases())\n if use_ssl:\n connection_factory = functools.partial(self._ssl_connection_factory, hostname=host)\n else:\n connection_factory = functools.partial(self._connection_factory, hostname=host)\n connection_factory = functools.partial(HappyEyeballsConnection, (host, port), connection_factory, self._resolver, self._happy_eyeballs_table, is_ssl=use_ssl)\n key = (host_key or (host, port, use_ssl))\n with (yield from self._host_pools_lock):\n if (key not in self._host_pools):\n host_pool = self._host_pools[key] = HostPool(connection_factory, max_connections=self._max_host_count)\n self._host_pool_waiters[key] = 1\n else:\n host_pool = self._host_pools[key]\n self._host_pool_waiters[key] += 1\n _logger.debug('Check out %s', key)\n connection = (yield from host_pool.acquire())\n connection.key = key\n with (yield from self._host_pools_lock):\n self._host_pool_waiters[key] -= 1\n return connection", "docstring": "Return an available connection.\n\nArgs:\nhost: A hostname or IP address.\nport: Port number.\nuse_ssl: Whether to return a SSL connection.\nhost_key: If provided, it overrides the key used for per-host\nconnection pooling. 
This is useful for proxies for example.\n\nCoroutine.", "source": "codesearchnet"} {"code": "def weighted_choice(item_probabilities):\n \n probability_sum = sum(x[1] for x in item_probabilities)\n assert probability_sum > 0\n random_value = random.random() * probability_sum\n summed_probability = 0\n for item, value in item_probabilities:\n summed_probability += value\n if summed_probability > random_value:\n return item", "docstring": "Randomly choses an item according to defined weights\nArgs:\nitem_probabilities: list of (item, probability)-tuples\nReturns:\nrandom item according to the given weights", "source": "juraj-google-style"} {"code": "def is_registered(self, cuuid, host):\n if ((cuuid in self.registry) and (self.registry[cuuid]['host'] == host)):\n return True\n else:\n return False", "docstring": "This function will check to see if a given host with client uuid is\ncurrently registered.\n\nArgs:\ncuuid (string): The client uuid that wishes to register.\nhost (tuple): The (address, port) tuple of the client that is\nregistering.\n\nReturns:\nWill return True if the client is registered and will return False if\nit is not.", "source": "codesearchnet"} {"code": "def _sliding_cache_update(k_cache: torch.Tensor, v_cache: torch.Tensor, key_states: torch.Tensor, value_states: torch.Tensor, cache_position: torch.LongTensor, max_cache_len: int) -> Tuple[torch.Tensor, torch.Tensor]:\n if cache_position.shape[0] > max_cache_len:\n new_k = key_states[:, :, -max_cache_len:, :]\n new_v = value_states[:, :, -max_cache_len:, :]\n k_cache.copy_(new_k)\n v_cache.copy_(new_v)\n return (key_states, value_states)\n slicing = torch.arange(max_cache_len, device=value_states.device)\n current_seq_len = cache_position[-1] + 1\n to_shift = current_seq_len > max_cache_len\n indices = (slicing + to_shift.sum()) % max_cache_len\n k_out_shifted = k_cache[:, :, indices]\n v_out_shifted = v_cache[:, :, indices]\n update_position = cache_position.clamp(min=0, max=max_cache_len - 1)\n try:\n k_out_updated = k_out_shifted.index_copy(2, update_position, key_states)\n v_out_updated = v_out_shifted.index_copy(2, update_position, value_states)\n except NotImplementedError:\n k_out_updated = k_out_shifted.clone()\n v_out_updated = v_out_shifted.clone()\n k_out_updated[:, :, update_position] = key_states\n v_out_updated[:, :, update_position] = value_states\n k_cache.copy_(k_out_updated)\n v_cache.copy_(v_out_updated)\n return (k_out_updated, v_out_updated)", "docstring": "Updates the sliding window cache tensors, returning the potentially modified tensors.\n\nArgs:\nk_cache (`torch.Tensor`): The key cache tensor to update.\nv_cache (`torch.Tensor`): The value cache tensor to update.\nkey_states (`torch.Tensor`): The new key states to add.\nvalue_states (`torch.Tensor`): The new value states to add.\ncache_position (`torch.LongTensor`): The position indices where the new states should be inserted.\nmax_cache_len (`int`): The maximum length of the sliding window cache.\n\nReturns:\nTuple[`torch.Tensor`, `torch.Tensor`]: The key and value tensors representing the cache state after the update.\nFor prefill > window, these are the full input states.\nOtherwise, they are the updated cache tensors.", "source": "github-repos"} {"code": "def ReadFromJson(path: str, *, orient: str='records', lines: bool=True, dtype: Union[bool, Dict[str, Any]]=False, **kwargs):\n from apache_beam.dataframe.io import ReadViaPandas\n return 'ReadFromJson' >> ReadViaPandas('json', path, orient=orient, lines=lines, dtype=dtype, **kwargs)", 
"docstring": "A PTransform for reading json values from files into a PCollection.\n\nArgs:\npath (str): The file path to read from. The path can contain glob\ncharacters such as ``*`` and ``?``.\norient (str): Format of the json elements in the file.\nDefault to 'records', meaning the file is expected to contain a list\nof json objects like `{field1: value1, field2: value2, ...}`.\nlines (bool): Whether each line should be considered a separate record,\nas opposed to the entire file being a valid JSON object or list.\nDefaults to True (unlike Pandas).\ndtype (bool): If True, infer dtypes; if a dict of column to dtype,\nthen use those; if False, then don’t infer dtypes at all.\nDefaults to False (unlike Pandas).\n**kwargs: Extra arguments passed to `pandas.read_json` (see below).", "source": "github-repos"} {"code": "async def _get_statuses(self, client: GRPCClient, examples: List[Example], concurrency: int=10):\n tasks = []\n try:\n concurrency = int(os.environ['BEAM_CONCURRENCY'])\n logging.info('override default concurrency: %d', concurrency)\n except (KeyError, ValueError):\n pass\n semaphore = asyncio.Semaphore(concurrency)\n\n async def _semaphored_task(example):\n await semaphore.acquire()\n try:\n await update_example_status(example, client)\n await self._populate_fields(example, client)\n finally:\n semaphore.release()\n for example in examples:\n if example.tag.never_run:\n logging.info('skipping non runnable example %s', example.filepath)\n else:\n tasks.append(_semaphored_task(example))\n await tqdm.gather(*tasks)", "docstring": "Receive status and update example.status and example.pipeline_id for\neach example\n\nArgs:\nexamples: beam examples for processing and updating statuses and\npipeline_id values.", "source": "github-repos"} {"code": "def close_holes(script, hole_max_edge=30, selected=False, sel_new_face=True, self_intersection=True):\n filter_xml = ''.join([' \\n', ' \\n', ' \\n', ' \\n', ' \\n', ' \\n'])\n util.write_filter(script, filter_xml)\n return None", "docstring": "Close holes smaller than a given threshold\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\nhole_max_edge (int): The size is expressed as number of edges composing\nthe hole boundary.\nselected (bool): Only the holes with at least one of the boundary faces\nselected are closed.\nsel_new_face (bool): After closing a hole the faces that have been\ncreated are left selected. Any previous selection is lost. Useful\nfor example for smoothing or subdividing the newly created holes.\nself_intersection (bool): When closing an holes it tries to prevent the\ncreation of faces that intersect faces adjacent to the boundary of\nthe hole. 
It is an heuristic, non intersecting hole filling can be\nNP-complete.\n\nLayer stack:\nNo impacts\n\nMeshLab versions:\n2016.12\n1.3.4BETA", "source": "codesearchnet"} {"code": "def make_author(self, full_name, affiliations=(), roles=(), raw_affiliations=(), source=None, ids=(), emails=(), alternative_names=()):\n builder = SignatureBuilder()\n builder.set_full_name(full_name)\n for affiliation in affiliations:\n builder.add_affiliation(affiliation)\n for role in roles:\n builder.add_inspire_role(role)\n for raw_affiliation in raw_affiliations:\n builder.add_raw_affiliation(raw_affiliation, (source or self.source))\n for (id_schema, id_value) in ids:\n if (id_schema and id_value):\n builder.set_uid(id_value, schema=id_schema)\n for email in emails:\n builder.add_email(email)\n for alternative_name in alternative_names:\n builder.add_alternative_name(alternative_name)\n return builder.obj", "docstring": "Make a subrecord representing an author.\n\nArgs:\nfull_name(str): full name of the author. If not yet in standard\nInspire form, it will be normalized.\naffiliations(List[str]): Inspire normalized affiliations of the\nauthor.\nroles(List[str]): Inspire roles of the author.\nraw_affiliations(List[str]): raw affiliation strings of the author.\nsource(str): source for the affiliations when\n``affiliations_normalized`` is ``False``.\nids(List[Tuple[str,str]]): list of ids of the author, whose\nelements are of the form ``(schema, value)``.\nemails(List[str]): email addresses of the author.\nalternative_names(List[str]): alternative names of the author.\n\nReturns:\ndict: a schema-compliant subrecord.", "source": "codesearchnet"} {"code": "async def pipe_to_log(pipe, filehandles=(), level=logging.INFO):\n while True:\n line = (await pipe.readline())\n if line:\n line = to_unicode(line)\n log.log(level, line.rstrip())\n for filehandle in filehandles:\n print(line, file=filehandle, end='')\n else:\n break", "docstring": "Log from a subprocess PIPE.\n\nArgs:\npipe (filehandle): subprocess process STDOUT or STDERR\nfilehandles (list of filehandles, optional): the filehandle(s) to write\nto. If empty, don't write to a separate file. Defaults to ().\nlevel (int, optional): the level to log to. Defaults to ``logging.INFO``.", "source": "codesearchnet"} {"code": "def save_image(tensor, filename, nrow=8, padding=2, normalize=False, range=None, scale_each=False, pad_value=0):\n from PIL import Image\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value, normalize=normalize, range=range, scale_each=scale_each)\n ndarr = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n im = Image.fromarray(ndarr)\n im.save(filename)", "docstring": "Save a given Tensor into an image file.\n\nArgs:\ntensor (Tensor or list): Image to be saved. 
If given a mini-batch tensor,\nsaves the tensor as a grid of images by calling ``make_grid``.\n**kwargs: Other arguments are documented in ``make_grid``.", "source": "codesearchnet"} {"code": "def SetSourceInformation(\n self, source_path, source_type, artifact_filters=None, filter_file=None):\n \n self._artifact_filters = artifact_filters\n self._filter_file = filter_file\n self._source_path = source_path\n self._source_type = self._SOURCE_TYPES.get(source_type, 'UNKNOWN')", "docstring": "Sets the source information.\n\nArgs:\nsource_path (str): path of the source.\nsource_type (str): source type.\nartifact_filters (Optional[list[str]]): names of artifact definitions to\nuse as filters.\nfilter_file (Optional[str]): filter file.", "source": "juraj-google-style"} {"code": "def email(self, name, to, from_addr, subject, body, header, owner=None, **kwargs):\n \n return Email(self.tcex, name, to, from_addr, subject, body, header, owner=owner, **kwargs)", "docstring": "Create the Email TI object.\n\nArgs:\nowner:\nto:\nfrom_addr:\nname:\nsubject:\nheader:\nbody:\n**kwargs:\n\nReturn:", "source": "juraj-google-style"} {"code": "def newPacker(mode=PackingMode.Offline, \n bin_algo=PackingBin.BBF, \n pack_algo=MaxRectsBssf,\n sort_algo=SORT_AREA, \n rotation=True):\n \n packer_class = None\n\n \n if mode == PackingMode.Online:\n sort_algo=None\n if bin_algo == PackingBin.BNF:\n packer_class = PackerOnlineBNF\n elif bin_algo == PackingBin.BFF:\n packer_class = PackerOnlineBFF\n elif bin_algo == PackingBin.BBF:\n packer_class = PackerOnlineBBF\n else:\n raise AttributeError(\"Unsupported bin selection heuristic\")\n\n \n elif mode == PackingMode.Offline:\n if bin_algo == PackingBin.BNF:\n packer_class = PackerBNF\n elif bin_algo == PackingBin.BFF:\n packer_class = PackerBFF\n elif bin_algo == PackingBin.BBF:\n packer_class = PackerBBF\n elif bin_algo == PackingBin.Global:\n packer_class = PackerGlobal\n sort_algo=None\n else:\n raise AttributeError(\"Unsupported bin selection heuristic\")\n\n else:\n raise AttributeError(\"Unknown packing mode.\")\n\n if sort_algo:\n return packer_class(pack_algo=pack_algo, sort_algo=sort_algo, \n rotation=rotation)\n else:\n return packer_class(pack_algo=pack_algo, rotation=rotation)", "docstring": "Packer factory helper function\n\nArguments:\nmode (PackingMode): Packing mode\nOnline: Rectangles are packed as soon are they are added\nOffline: Rectangles aren't packed untils pack() is called\nbin_algo (PackingBin): Bin selection heuristic\npack_algo (PackingAlgorithm): Algorithm used\nrotation (boolean): Enable or disable rectangle rotation.\n\nReturns:\nPacker: Initialized packer instance.", "source": "juraj-google-style"} {"code": "def apply_transformation(self, structure):\n \n new_structure = structure.copy()\n for prop in self.site_properties.keys():\n new_structure.add_site_property(prop, self.site_properties[prop])\n return new_structure", "docstring": "apply the transformation\n\nArgs:\nstructure (Structure): structure to add site properties to", "source": "juraj-google-style"} {"code": "def _log_to_stderr(self, record):\n \n \n \n old_stream = self.stream\n self.stream = sys.stderr\n try:\n super(PythonHandler, self).emit(record)\n finally:\n self.stream = old_stream", "docstring": "Emits the record to stderr.\n\nThis temporarily sets the handler stream to stderr, calls\nStreamHandler.emit, then reverts the stream back.\n\nArgs:\nrecord: logging.LogRecord, the record to log.", "source": "juraj-google-style"} {"code": "def merge(self, other):\n if 
other.number_of_shards is not None:\n self.set_number_of_shards(other.number_of_shards)\n if other.shard_dimension is not None:\n self.set_shard_dimension(other.shard_dimension)", "docstring": "Merges the policy of another policy into the current policy.\n\nArgs:\nother: The policy to merge into this one.\n\nRaises:\nValueError: If this policy has been frozen and the merge conflicts with\nthe frozen policy.", "source": "github-repos"} {"code": "def to_dict(self) -> dict[str, Any]:\n output = copy.deepcopy(self.__dict__)\n sig = inspect.signature(self.__init__)\n attrs_to_save = sig.parameters\n attrs_to_save = [x for x in attrs_to_save if x not in self.__class__.attributes]\n attrs_to_save += ['auto_map']\n output = {k: v for k, v in output.items() if k in attrs_to_save}\n output['processor_class'] = self.__class__.__name__\n if 'tokenizer' in output:\n del output['tokenizer']\n if 'image_processor' in output:\n del output['image_processor']\n if 'video_processor' in output:\n del output['video_processor']\n if 'feature_extractor' in output:\n del output['feature_extractor']\n if 'chat_template' in output:\n del output['chat_template']\n output = {k: v for k, v in output.items() if not (isinstance(v, PushToHubMixin) or v.__class__.__name__ == 'BeamSearchDecoderCTC')}\n return output", "docstring": "Serializes this instance to a Python dictionary.\n\nReturns:\n`Dict[str, Any]`: Dictionary of all the attributes that make up this processor instance.", "source": "github-repos"} {"code": "def __verify_ready(self, creating=False):\n \n\n if len(self._value_ranges) == 0:\n self._logger.log(\n 'crit',\n 'Attribute value_ranges must have at least one value'\n )\n raise RuntimeWarning(\n 'Attribute value_ranges must have at least one value'\n )\n if len(self._employers) == 0 and creating is False:\n self._logger.log('crit', 'Need to create employers')\n raise RuntimeWarning('Need to create employers')", "docstring": "Some cleanup, ensures that everything is set up properly to avoid\nrandom errors during execution\n\nArgs:\ncreating (bool): True if currently creating employer bees, False\nfor checking all other operations", "source": "juraj-google-style"} {"code": "def add_listener(self, listener_type, callback):\n \n self.listener_type= listener_type\n if listener_type == 'objectHovered':\n self.listener_callback_source_hover= callback\n elif listener_type == 'objectClicked':\n self.listener_callback_source_click= callback\n elif listener_type == 'click':\n self.listener_callback_click= callback\n elif listener_type == 'select':\n self.listener_callback_select= callback\n\n self.listener_flag= not self.listener_flag", "docstring": "add a listener to the widget\nArgs:\nlistener_type: string that can either be 'objectHovered' or 'objClicked'\ncallback: python function", "source": "juraj-google-style"} {"code": "def supported_languages(self, task=None):\n \n if task:\n collection = self.get_collection(task=task)\n return [isoLangs[x.id.split('.')[1]][\"name\"]\n for x in collection.packages]\n else:\n return [x.name.split()[0] for x in self.collections()\n if Downloader.LANG_PREFIX in x.id]", "docstring": "Languages that are covered by a specific task.\n\nArgs:\ntask (string): Task name.", "source": "juraj-google-style"} {"code": "def serialize(metric):\n return serialize_keras_object(metric)", "docstring": "Serializes metric function or `Metric` instance.\n\nArgs:\nmetric: A Keras `Metric` instance or a metric function.\n\nReturns:\nMetric configuration dictionary.", "source": "github-repos"} {"code": 
"def __init__(self, project: str=None, retry: Retry=None, timeout: float=120, metadata: Sequence[Tuple[str, str]]=(), catalog_name: str='default_catalog'):\n self.project = project\n self.retry = retry\n self.timeout = timeout\n self.metadata = metadata\n self.catalog_name = catalog_name", "docstring": "Initializes a :class:`CreateCatalogItem` transform.\n\nArgs:\nproject (str): Optional. GCP project name in which the catalog\ndata will be imported.\nretry: Optional. Designation of what\nerrors, if any, should be retried.\ntimeout (float): Optional. The amount of time, in seconds, to wait\nfor the request to complete.\nmetadata: Optional. Strings which\nshould be sent along with the request as metadata.\ncatalog_name (str): Optional. Name of the catalog.\nDefault: 'default_catalog'", "source": "github-repos"} {"code": "def get_params(img, scale, ratio):\n \n area = img.size[0] * img.size[1]\n\n for attempt in range(10):\n target_area = random.uniform(*scale) * area\n log_ratio = (math.log(ratio[0]), math.log(ratio[1]))\n aspect_ratio = math.exp(random.uniform(*log_ratio))\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if w <= img.size[0] and h <= img.size[1]:\n i = random.randint(0, img.size[1] - h)\n j = random.randint(0, img.size[0] - w)\n return i, j, h, w\n\n \n in_ratio = img.size[0] / img.size[1]\n if (in_ratio < min(ratio)):\n w = img.size[0]\n h = w / min(ratio)\n elif (in_ratio > max(ratio)):\n h = img.size[1]\n w = h * max(ratio)\n else: \n w = img.size[0]\n h = img.size[1]\n i = (img.size[1] - h) \n j = (img.size[0] - w) \n return i, j, h, w", "docstring": "Get parameters for ``crop`` for a random sized crop.\n\nArgs:\nimg (PIL Image): Image to be cropped.\nscale (tuple): range of size of the origin size cropped\nratio (tuple): range of aspect ratio of the origin aspect ratio cropped\n\nReturns:\ntuple: params (i, j, h, w) to be passed to ``crop`` for a random\nsized crop.", "source": "juraj-google-style"} {"code": "def _finish_disconnection_action(self, action):\n \n\n success = action.data['success']\n conn_key = action.data['id']\n\n if self._get_connection_state(conn_key) != self.Disconnecting:\n self._logger.error(\n \"Invalid finish_disconnection action on a connection whose state is not Disconnecting, conn_key={}\"\n .format(str(conn_key))\n )\n return\n\n \n data = self._get_connection(conn_key)\n connection_id = data['connection_id']\n internal_id = data['internal_id']\n\n last_action = data['action']\n callback = last_action.data['callback']\n\n if success is False:\n failure_reason = action.data['failure_reason']\n if failure_reason is None:\n failure_reason = \"No reason was given\"\n\n data['state'] = self.Idle\n data['microstate'] = None\n del data['action']\n callback(connection_id, self.id, False, failure_reason)\n else:\n del self._connections[connection_id]\n del self._int_connections[internal_id]\n callback(connection_id, self.id, True, None)", "docstring": "Finish a disconnection attempt\n\nThere are two possible outcomes:\n- if we were successful at disconnecting, we transition to disconnected\n- if we failed at disconnecting, we transition back to idle\n\nArgs:\naction (ConnectionAction): the action object describing what we are\ndisconnecting from and what the result of the operation was", "source": "juraj-google-style"} {"code": "def __get_unused_context(self, parse_result, context):\n tags_keys = set([t['key'] for t in parse_result['tags'] if t['from_context']])\n result_context = [c 
for c in context if (c['key'] not in tags_keys)]\n return result_context", "docstring": "Used to get unused context from context. Any keys not in\nparse_result are considered unused.\n\nArgs:\nparse_results(list): parsed results used to identify what keys\nin the context are used.\ncontext(list): this is the context used to match with parsed results\nkeys missing in the parsed results are the unused context\n\nReturns:\nlist: A list of the unused context results.", "source": "codesearchnet"} {"code": "def control_flow_op(op):\n return control_flow_util.IsSwitch(op) or control_flow_util.IsMerge(op)", "docstring": "Returns true if op is one of the special ops in a while loop.\n\nArgs:\nop: A tf.Operation.\n\nReturns:\nTrue if the given op is one of [Switch, Merge, Enter, Exit,\nNextIteration, LoopCond], which are all building blocks for TF while\nloops.", "source": "github-repos"} {"code": "def remove_sonos_playlist(self, sonos_playlist):\n object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)\n return self.contentDirectory.DestroyObject([('ObjectID', object_id)])", "docstring": "Remove a Sonos playlist.\n\nArgs:\nsonos_playlist (DidlPlaylistContainer): Sonos playlist to remove\nor the item_id (str).\n\nReturns:\nbool: True if successful, False otherwise\n\nRaises:\nSoCoUPnPException: If sonos_playlist does not point to a valid\nobject.", "source": "codesearchnet"} {"code": "def monoclinic(a: float, b: float, c: float, beta: float):\n return Lattice.from_parameters(a, b, c, 90, beta, 90)", "docstring": "Convenience constructor for a monoclinic lattice.\n\nArgs:\na (float): *a* lattice parameter of the monoclinic cell.\nb (float): *b* lattice parameter of the monoclinic cell.\nc (float): *c* lattice parameter of the monoclinic cell.\nbeta (float): *beta* angle between lattice vectors b and c in\ndegrees.\n\nReturns:\nMonoclinic lattice of dimensions a x b x c with non right-angle\nbeta between lattice vectors a and c.", "source": "codesearchnet"} {"code": "def are_collections_aligned(data_collections, raise_exception=True):\n \n if len(data_collections) > 1:\n first_coll = data_collections[0]\n for coll in data_collections[1:]:\n if not first_coll.is_collection_aligned(coll):\n if raise_exception is True:\n error_msg = '{} Data Collection is not aligned with '\\\n '{} Data Collection.'.format(\n first_coll.header.data_type, coll.header.data_type)\n raise ValueError(error_msg)\n return False\n return True", "docstring": "Test if a series of Data Collections are aligned with one another.\n\nAligned Data Collections are of the same Data Collection class, have the\nsame number of values and have matching datetimes.\n\nArgs:\ndata_collections: A list of Data Collections for which you want to\ntest if they are all aligned with one another.\n\nReturn:\nTrue if collections are aligned, False if not aligned", "source": "juraj-google-style"} {"code": "def __init__(self, xid=None, port=None, queues=None):\n \n super().__init__(xid)\n self.port = port\n self.queues = [] if queues is None else queues", "docstring": "Create a QueueGetConfigReply with the optional parameters below.\n\nArgs:\nxid (int): xid of OpenFlow header.\nport (:class:`~pyof.v0x04.common.port.PortNo`):\nTarget port for the query.\nqueue (:class:`~pyof.v0x04.common.queue.ListOfQueues`):\nList of configured queues.", "source": "juraj-google-style"} {"code": "def WaitUntilDone(self, timeout=None):\n \n\n f = utils.Poll(\n generator=self.Get,\n condition=lambda f: f.data.state != f.data.RUNNING,\n timeout=timeout)\n if f.data.state != f.data.TERMINATED:\n 
raise errors.FlowFailedError(\n \"Flow %s (%s) failed: %s\" %\n (self.flow_id, self.client_id, f.data.context.current_state))\n return f", "docstring": "Wait until the flow completes.\n\nArgs:\ntimeout: timeout in seconds. None means default timeout (1 hour). 0 means\nno timeout (wait forever).\n\nReturns:\nFresh flow object.\nRaises:\nPollTimeoutError: if timeout is reached.\nFlowFailedError: if the flow is not successful.", "source": "juraj-google-style"} {"code": "def im_open(self, *, user: str, **kwargs) -> SlackResponse:\n kwargs.update({'user': user})\n return self.api_call('im.open', json=kwargs)", "docstring": "Opens a direct message channel.\n\nArgs:\nuser (str): The user id to open a DM with. e.g. 'W1234567890'", "source": "codesearchnet"} {"code": "def __init__(self, rfile, content_length):\n \n self.rfile = rfile\n self.remaining = content_length", "docstring": "Initialize KnownLengthRFile instance.\n\nArgs:\nrfile (file): file of a known size\ncontent_length (int): length of the file being read", "source": "juraj-google-style"} {"code": "def get_url_reports(self, resources):\n api_name = 'virustotal-url-reports'\n (all_responses, resources) = self._bulk_cache_lookup(api_name, resources)\n resource_chunks = self._prepare_resource_chunks(resources, '\\n')\n response_chunks = self._request_reports('resource', resource_chunks, 'url/report')\n self._extract_response_chunks(all_responses, response_chunks, api_name)\n return all_responses", "docstring": "Retrieves a scan report on a given URL.\n\nArgs:\nresources: list of URLs.\nReturns:\nA dict with the URL as key and the VT report as value.", "source": "codesearchnet"} {"code": "def loadJsonValueFromFile(inputFilePath):\n with open(inputFilePath) as fileObj:\n value = json.load(fileObj)\n return value", "docstring": "Loads a json value from a file and converts it to the corresponding python\nobject.\n\ninputFilePath:\nPath of the json file;\n\nReturns:\npython value that represents the loaded json value", "source": "codesearchnet"} {"code": "def load_csv(path):\n with open(path) as f:\n line = f.readline().strip()\n X = np.loadtxt(path, delimiter=',', skiprows=(0 if is_number(line.split(',')[0]) else 1))\n y = np.array(X[:, 0]).flatten()\n X = X[:, 1:]\n return (X, y)", "docstring": "Load data from a CSV file.\n\nArgs:\npath (str): A path to the CSV format file containing data.\ndense (boolean): An optional variable indicating if the return matrix\nshould be dense. 
By default, it is false.\n\nReturns:\nData matrix X and target vector y", "source": "codesearchnet"} {"code": "def external_ids(self, **kwargs):\n \n path = self._get_series_id_season_number_episode_number_path(\n 'external_ids')\n\n response = self._GET(path, kwargs)\n self._set_attrs_to_values(response)\n return response", "docstring": "Get the external ids for a TV episode by combination of a season and\nepisode number.\n\nArgs:\nlanguage: (optional) ISO 639 code.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"} {"code": "def fit(self, X, y, **kwargs):\n X, y = _validate_data(self, X, y)\n y = self._process_target(y, reset=True)\n model = self._get_model(X, y)\n _check_model(model)\n fit_kwargs = self.fit_kwargs or {}\n fit_kwargs.update(kwargs)\n self.history_ = model.fit(X, y, **fit_kwargs)\n self.model_ = model\n return self", "docstring": "Fit the model.\n\nArgs:\nX: array-like, shape=(n_samples, n_features)\nThe input samples.\ny: array-like, shape=(n_samples,) or (n_samples, n_outputs)\nThe targets.\n**kwargs: keyword arguments passed to `model.fit`", "source": "github-repos"} {"code": "def SearchDependencies(self, os_name, artifact_name_list, existing_artifact_deps=None, existing_expansion_deps=None):\n artifact_deps = (existing_artifact_deps or set())\n expansion_deps = (existing_expansion_deps or set())\n artifact_objs = self.GetArtifacts(os_name=os_name, name_list=artifact_name_list)\n artifact_deps = artifact_deps.union([a.name for a in artifact_objs])\n for artifact in artifact_objs:\n expansions = GetArtifactPathDependencies(artifact)\n if expansions:\n expansion_deps = expansion_deps.union(set(expansions))\n new_artifact_names = self.GetArtifactNames(os_name=os_name, provides=expansions)\n missing_artifacts = (new_artifact_names - artifact_deps)\n if missing_artifacts:\n (new_artifacts, new_expansions) = self.SearchDependencies(os_name, new_artifact_names, existing_artifact_deps=artifact_deps, existing_expansion_deps=expansion_deps)\n artifact_deps = artifact_deps.union(new_artifacts)\n expansion_deps = expansion_deps.union(new_expansions)\n return (artifact_deps, expansion_deps)", "docstring": "Return a set of artifact names needed to fulfill dependencies.\n\nSearch the path dependency tree for all artifacts that can fulfill\ndependencies of artifact_name_list. If multiple artifacts provide a\ndependency, they are all included.\n\nArgs:\nos_name: operating system string\nartifact_name_list: list of artifact names to find dependencies for.\nexisting_artifact_deps: existing dependencies to add to, for recursion,\ne.g. set([\"WindowsRegistryProfiles\", \"WindowsEnvironmentVariablePath\"])\nexisting_expansion_deps: existing expansion dependencies to add to, for\nrecursion, e.g. 
set([\"users.userprofile\", \"users.homedir\"])\n\nReturns:\n(artifact_names, expansion_names): a tuple of sets, one with artifact\nnames, the other expansion names", "source": "codesearchnet"} {"code": "def enbase64(byte_str):\n \n\n \n if isinstance(byte_str, str) and not PYTHON2:\n byte_str = bytes(byte_str, 'utf-8')\n return base64.b64encode(byte_str)", "docstring": "Encode bytes/strings to base64.\n\nArgs:\n- ``byte_str``: The string or bytes to base64 encode.\n\nReturns:\n- byte_str encoded as base64.", "source": "juraj-google-style"} {"code": "def report_to_rows(report):\n if type(report) is GeneratorType:\n leftovers = ''\n for chunk in report:\n data, extra = chunk.rsplit('\\n', 1)\n for row in csv_to_rows(leftovers + data):\n yield row\n leftovers = extra\n else:\n for row in csv_to_rows(report):\n yield row", "docstring": "Helper to convert DCM files into iterator of rows, memory efficient.\n\nUsage example:\n\n```\nfilename, report = report_file(...)\nrows = report_to_rows(report)\n```\n\nArgs:\n* report: (iterator or file) Either an iterator or file that will be\nconverted to rows.\n\nReturns:\n* Iterator of lists representing each row.", "source": "github-repos"} {"code": "def generate_blocks(self, assume_complete_blocks=None):\n _track_assume_blocks = self.assume_complete_blocks\n try:\n if (assume_complete_blocks != None):\n self.assume_complete_blocks = assume_complete_blocks\n if (self.processed_tables == None):\n self.preprocess()\n self.processed_blocks = []\n for worksheet in range(len(self.processed_tables)):\n ptable = self.processed_tables[worksheet]\n flags = self.flags_by_table[worksheet]\n units = self.units_by_table[worksheet]\n if (not self.assume_complete_blocks):\n self.fill_in_table(ptable, worksheet, flags)\n self.processed_blocks.extend(self._find_blocks(ptable, worksheet, flags, units, {'worksheet': worksheet}))\n return self.processed_blocks\n finally:\n self.assume_complete_blocks = _track_assume_blocks", "docstring": "Identifies and extracts all blocks from the input tables. These blocks are logical\nidentifiers for where related information resides in the original table. Any block can be\nconverted into a row-titled table which can then be stitched together with other tables from\nother blocks to form a fully converted data set.\n\nArgs:\nassume_complete_blocks: Optimizes block loopups by not allowing titles to be extended.\nBlocks should be perfectly dense to be found when active. 
Optional, defaults to\nconstructor value.", "source": "codesearchnet"} {"code": "def find(self, id):\n url = '{}/{}/{}'.format(__endpoint__, self.type.RESOURCE, id)\n response = RestClient.get(url)[self.type.RESOURCE[:(- 1)]]\n return self.type(response)", "docstring": "Get a resource by its id\n\nArgs:\nid (string): Resource id\nReturns:\nobject: Instance of the resource type", "source": "codesearchnet"} {"code": "def get_int_or_float_list(self, min_length=_MIN_LENGTH, max_length=_MAX_LENGTH):\n if self.get_bool():\n return self.get_int_list(min_length, max_length)\n else:\n return self.get_float_list(min_length, max_length)", "docstring": "Consume a signed integer or float list with given constraints based on a consumed bool.\n\nArgs:\nmin_length: The minimum length of the list.\nmax_length: The maximum length of the list.\n\nReturns:\nConsumed integer or float list based on input bytes and constraints.", "source": "github-repos"} {"code": "def _convert_update_row(row):\n after_values = row['after_values']\n before_values = row['before_values']\n values = after_values\n return {'values': values, 'updated_values': _get_updated_values(before_values, after_values)}", "docstring": "Convert a row for update event\n\nArgs:\nrow (dict): event row data", "source": "codesearchnet"} {"code": "def slice_naive(self, key):\n \n kwargs = {'name': self.name, 'description': self.description, 'meta': self.meta}\n for name, data in self._data().items():\n k = name[1:] if name.startswith('_') else name\n kwargs[k] = data.slice_naive(key)\n return self.__class__(**kwargs)", "docstring": "Naively slice each data object in the container by the object's index.\n\nArgs:\nkey: Int, slice, or list by which to extra \"sub\"-container\n\nReturns:\nsub: Sub container of the same format with a view of the data\n\nWarning:\nTo ensure that a new container is created, use the copy method.\n\n.. 
code-block:: Python\n\nmycontainer[slice].copy()", "source": "juraj-google-style"} {"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n \n\n local_stream = utils.BytearrayStream()\n\n if self._unique_identifier:\n self._unique_identifier.write(\n local_stream,\n kmip_version=kmip_version\n )\n else:\n raise ValueError(\n \"invalid payload missing the unique identifier attribute\"\n )\n\n if self._signature_data:\n self._signature_data.write(\n local_stream,\n kmip_version=kmip_version\n )\n else:\n raise ValueError(\n \"invalid payload missing the signature attribute\"\n )\n\n self.length = local_stream.length()\n super(SignResponsePayload, self).write(\n output_stream,\n kmip_version=kmip_version\n )\n output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the Sign response to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\n\nRaises:\nValueError: Raised if the unique_identifier or signature\nattributes are not defined.", "source": "juraj-google-style"} {"code": "def remove_sites(self, indices):\n \n self._sites = [self._sites[i] for i in range(len(self._sites))\n if i not in indices]", "docstring": "Delete sites with at indices.\n\nArgs:\nindices: Sequence of indices of sites to delete.", "source": "juraj-google-style"} {"code": "def isPointInside(self, xp, yp):\n point = np.array([xp, yp]).transpose()\n polygon = self.polygon\n (numVert, numDim) = polygon.shape\n polyVec = (np.roll(polygon, (- 1), 0) - polygon)\n pointVec = (point - polygon)\n crossProduct = np.cross(polyVec, pointVec)\n if (np.all((crossProduct < 0)) or np.all((crossProduct > 0))):\n return True\n return False", "docstring": "Is the given point inside the polygon?\n\nInput:\n------------\nxp, yp\n(floats) Coordinates of point in same units that\narray vertices are specified when object created.\nReturns:\n-----------\n**True** / **False**", "source": "codesearchnet"} {"code": "def orphan_entry(self, rval: RawObject) -> \"ArrayEntry\":\n \n val = self.entry_from_raw(rval)\n return ArrayEntry(0, EmptyList(), EmptyList(), val, None, self,\n val.timestamp)", "docstring": "Return an isolated entry of the receiver.\n\nArgs:\nrval: Raw object to be used for the returned entry.", "source": "juraj-google-style"} {"code": "def identifiers(config):\n ids = []\n if (config.klass_name == 'gen'):\n for generator in os.listdir(config.generator_dir):\n if (generator == '__init__.py'):\n continue\n (gid, ext) = os.path.splitext(generator)\n if ((ext == '.py') and os.path.isfile(os.path.join(config.generator_dir, generator))):\n ids.append(gid)\n else:\n for image_file in os.listdir(config.image_dir):\n (iid, ext) = os.path.splitext(image_file)\n if ((ext in ['.jpg', '.png', '.tif']) and os.path.isfile(os.path.join(config.image_dir, image_file))):\n ids.append(iid)\n return ids", "docstring": "Show list of identifiers for this prefix.\n\nHandles both the case of local file based identifiers and\nalso image generators.\n\nArguments:\nconfig - configuration object in which:\nconfig.klass_name - 'gen' if a generator function\nconfig.generator_dir - directory for generator code\nconfig.image_dir - directory for images\n\nReturns:\nids - a list of ids", "source": "codesearchnet"} {"code": "def __init__(self, channel):\n \n self.Check = channel.unary_unary(\n '/grpc.health.v1.Health/Check',\n request_serializer=Health__pb2.HealthCheckRequest.SerializeToString,\n 
response_deserializer=Health__pb2.HealthCheckResponse.FromString,\n )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"} {"code": "def setSeasonSchedules(self, cmd_dict=None, password='00000000'):\n result = False\n self.setContext('setSeasonSchedules')\n if (not cmd_dict):\n cmd_dict = self.m_seasons_sched_params\n try:\n if (not self.request(False)):\n self.writeCmdMsg('Bad read CRC on setting')\n elif (not self.serialCmdPwdAuth(password)):\n self.writeCmdMsg('Password failure')\n else:\n req_table = ''\n req_table += binascii.hexlify(str(cmd_dict['Season_1_Start_Month']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_1_Start_Day']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_1_Schedule']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_2_Start_Month']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_2_Start_Day']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_2_Schedule']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_3_Start_Month']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_3_Start_Day']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_3_Schedule']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_4_Start_Month']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_4_Start_Day']).zfill(2))\n req_table += binascii.hexlify(str(cmd_dict['Season_4_Schedule']).zfill(2))\n req_table += binascii.hexlify(str(0).zfill(24))\n req_str = (('015731023030383028' + req_table) + '2903')\n req_str += self.calc_crc16(req_str[2:].decode('hex'))\n self.m_serial_port.write(req_str.decode('hex'))\n if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'):\n self.writeCmdMsg('Success(setSeasonSchedules): 06 returned.')\n result = True\n self.serialPostEnd()\n except:\n ekm_log(traceback.format_exc(sys.exc_info()))\n self.setContext('')\n return result", "docstring": "Serial command to set seasons table.\n\nIf no dictionary is passed, the meter object buffer is used.\n\nArgs:\ncmd_dict (dict): Optional dictionary of season schedules.\npassword (str): Optional password\n\nReturns:\nbool: True on completion and ACK.", "source": "codesearchnet"} {"code": "def pager(__text: str, *, pager: Optional[str]='less'):\n if pager:\n run([pager], input=__text.encode())\n else:\n print(__text)", "docstring": "Pass output through pager.\n\nSee :manpage:`less(1)`, if you wish to configure the default pager. For\nexample, you may wish to check ``FRSX`` options.\n\nArgs:\n__text: Text to page\npager: Pager to use", "source": "codesearchnet"} {"code": "def supports_suggested_actions(channel_id: str, button_cnt: int = 100) -> bool:\n \n\n max_actions = {\n \n Channels.facebook: 10,\n Channels.skype: 10,\n \n Channels.line: 13,\n \n Channels.kik: 20,\n Channels.telegram: 100,\n Channels.slack: 100,\n Channels.emulator: 100,\n Channels.direct_line: 100,\n Channels.webchat: 100,\n }\n return button_cnt <= max_actions[channel_id] if channel_id in max_actions else False", "docstring": "Determine if a number of Suggested Actions are supported by a Channel.\n\nArgs:\nchannel_id (str): The Channel to check the if Suggested Actions are supported in.\nbutton_cnt (int, optional): Defaults to 100. 
The number of Suggested Actions to check for the Channel.\n\nReturns:\nbool: True if the Channel supports the button_cnt total Suggested Actions, False if the Channel does not support that number of Suggested Actions.", "source": "juraj-google-style"} {"code": "def get_name(self, tag):\n name = super(functionTagProcessor, self).get_name(tag)\n if self.include_function_signatures:\n func_args = tag.findChild('arglist')\n if (func_args and len(func_args.contents)):\n name += func_args.contents[0]\n ret_type = tag.findChild('type')\n if (ret_type and len(ret_type.contents)):\n name += (' -> ' + ret_type.contents[0])\n return name", "docstring": "Override. Extract a representative \"name\" from a function tag.\n\nget_name's output can be controlled through keyword arguments that are\nprovided when initializing a functionTagProcessor. For instance,\nfunction arguments and return types can be included by passing\ninclude_function_signatures=True to __init__().\n\nArgs:\ntag: A BeautifulSoup Tag for a function.\n\nReturns:\nA string that would be appropriate to use as an entry name for a\nfunction in a Zeal database.", "source": "codesearchnet"} {"code": "def from_dict(event_dict):\n return SnippetEvent(callback_id=event_dict['callbackId'], name=event_dict['name'], creation_time=event_dict['time'], data=event_dict['data'])", "docstring": "Create a SnippetEvent object from a dictionary.\n\nDEPRECATED: Use mobly.snippet.callback_event.from_dict instead.\n\nArgs:\nevent_dict: a dictionary representing an event.\n\nReturns:\nA SnippetEvent object.", "source": "github-repos"} {"code": "def wb004(self, value=None):\n \n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float '\n 'for field `wb004`'.format(value))\n\n self._wb004 = value", "docstring": "Corresponds to IDD Field `wb004`\nWet-bulb temperature corresponding to 0.4% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `wb004`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"} {"code": "def set_banner(self, banner_type, value=None, default=False,\n disable=False):\n \n\n command_string = \"banner %s\" % banner_type\n if default is True or disable is True:\n cmd = self.command_builder(command_string, value=None,\n default=default, disable=disable)\n return self.configure(cmd)\n else:\n if not value.endswith(\"\\n\"):\n value = value + \"\\n\"\n command_input = dict(cmd=command_string, input=value)\n return self.configure([command_input])", "docstring": "Configures system banners\n\nArgs:\nbanner_type(str): banner to be changed (likely login or motd)\nvalue(str): value to set for the banner\ndefault (bool): Controls the use of the default keyword\ndisable (bool): Controls the use of the no keyword`\n\nReturns:\nbool: True if the commands completed successfully otherwise False", "source": "juraj-google-style"} {"code": "def stage_redis(self, variable, data):\n if isinstance(data, int):\n data = str(data)\n if variable.endswith('Binary'):\n try:\n data = base64.b64decode(data)\n except binascii.Error:\n msg = 'The Binary staging data for variable {} is not properly base64 encoded.'\n msg = msg.format(variable)\n sys.exit(msg)\n elif variable.endswith('BinaryArray'):\n if isinstance(data, string_types):\n data = json.loads(data)\n try:\n decoded_data = []\n for d in data:\n d_decoded = 
base64.b64decode(d)\n decoded_data.append(d_decoded)\n data = decoded_data\n except binascii.Error:\n msg = 'The BinaryArray staging data for variable {} is not properly base64 encoded.'\n msg = msg.format(variable)\n sys.exit(msg)\n self.log.info(u'[stage] Creating variable {}'.format(variable))\n self.tcex.playbook.create(variable, data)", "docstring": "Stage data in Redis.\n\nArgs:\nvariable (str): The Redis variable name.\ndata (dict|list|str): The data to store in Redis.", "source": "codesearchnet"} {"code": "def EnumerateAllConfigs(self, stats, file_objects):\n cache = {}\n for (stat_obj, file_obj) in zip(stats, file_objects):\n cache[stat_obj.pathspec.path] = utils.ReadFileBytesAsUnicode(file_obj)\n result = []\n external = []\n if (self.OLD_PAMCONF_FILENAME in cache):\n (result, external) = self.EnumerateConfig(None, self.OLD_PAMCONF_FILENAME, cache)\n if result:\n return (result, external)\n for path in cache:\n service = os.path.basename(path)\n (r, e) = self.EnumerateConfig(service, path, cache)\n result.extend(r)\n external.extend(e)\n return (result, external)", "docstring": "Generate RDFs for the fully expanded configs.\n\nArgs:\nstats: A list of RDF StatEntries corresponding to the file_objects.\nfile_objects: A list of file handles.\n\nReturns:\nA tuple of a list of RDFValue PamConfigEntries found & a list of strings\nwhich are the external config references found.", "source": "codesearchnet"} {"code": "def repair(self, volume_id_or_uri, timeout=(- 1)):\n data = {'type': 'ExtraManagedStorageVolumePaths', 'resourceUri': self._client.build_uri(volume_id_or_uri)}\n custom_headers = {'Accept-Language': 'en_US'}\n uri = (self.URI + '/repair')\n return self._client.create(data, uri=uri, timeout=timeout, custom_headers=custom_headers)", "docstring": "Removes extra presentations from a specified volume on the storage system.\n\nArgs:\nvolume_id_or_uri:\nCan be either the volume id or the volume uri.\ntimeout:\nTimeout in seconds. Wait for task completion by default. 
The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns:\ndict: Storage volume.", "source": "codesearchnet"} {"code": "def __init__(self, X, y, batch_size, process_fn=None):\n \n self.X = X\n self.y = y\n self.batch_size = batch_size\n self.process_fn = process_fn or (lambda x: x)", "docstring": "A `Sequence` implementation that can pre-process a mini-batch via `process_fn`\n\nArgs:\nX: The numpy array of inputs.\ny: The numpy array of targets.\nbatch_size: The generator mini-batch size.\nprocess_fn: The preprocessing function to apply on `X`", "source": "juraj-google-style"} {"code": "def _RecurseOverObject(obj, factory, parent=None):\n if _IsSudsIterable(obj):\n copy_of_obj = tuple(obj)\n for item in copy_of_obj:\n if _IsSudsIterable(item):\n if ('xsi_type' in item):\n if isinstance(obj, tuple):\n parent[obj[0]] = _PackForSuds(obj[1], factory)\n else:\n obj.remove(item)\n obj.append(_PackForSuds(item, factory))\n _RecurseOverObject(item, factory, obj)", "docstring": "Recurses over a nested structure to look for changes in Suds objects.\n\nArgs:\nobj: A parameter for a SOAP request field which is to be inspected and\nwill be packed for Suds if an xsi_type is specified, otherwise will be\nleft unaltered.\nfactory: The suds.client.Factory object which can create instances of the\nclasses generated from the WSDL.\nparent: The parent object that contains the obj parameter to be inspected.", "source": "codesearchnet"} {"code": "def register(self, identified_with, identifier, user):\n self.kv_store.set(self._get_storage_key(identified_with, identifier), self.serialization.dumps(user).encode())", "docstring": "Register new key for given client identifier.\n\nThis is only a helper method that allows to register new\nuser objects for client identities (keys, tokens, addresses etc.).\n\nArgs:\nidentified_with (object): authentication middleware used\nto identify the user.\nidentifier (str): user identifier.\nuser (str): user object to be stored in the backend.", "source": "codesearchnet"} {"code": "def resample(df, rule, time_index, groupby=None, aggregation='mean'):\n if groupby:\n df = df.groupby(groupby)\n df = df.resample(rule, on=time_index)\n df = getattr(df, aggregation)()\n for column in groupby:\n del df[column]\n return df", "docstring": "pd.DataFrame.resample adapter.\n\nCall the `df.resample` method on the given time_index\nand afterwards call the indicated aggregation.\n\nOptionally group the dataframe by the indicated columns before\nperforming the resampling.\n\nIf groupby option is used, the result is a multi-index datagrame.\n\nArgs:\ndf (pandas.DataFrame): DataFrame to resample.\nrule (str): The offset string or object representing target conversion.\ngroupby (list): Optional list of columns to group by.\ntime_index (str): Name of the column to use as the time index.\naggregation (str): Name of the aggregation function to use.\n\nReturns:\npandas.Dataframe: resampled dataframe", "source": "codesearchnet"} {"code": "def get_user(self, key = None):\n\t\t \t\n\t\tif key:\n\t\t\turi = self.api_uri + \"/users/\" + key\n\t\telse:\n\t\t\turi = self.api_uri + \"/users/me\"\n\n\t\treturn self._req('get', uri)", "docstring": "Get user information from the server and update the attribute\nArgs:\nkey\t\t\tuser key (default: me)\nreturn\t\t(status code for the get request, dict user data)", "source": "juraj-google-style"} {"code": "def list_files(d, extension=None):\n if os.path.isdir(d):\n expanded_dir = os.path.expanduser(d)\n files = 
sorted(glob.glob((expanded_dir + '/*')))\n else:\n files = [d]\n if (extension is not None):\n if (type(extension) in STR_TYPES):\n extension = [extension]\n files = [f for f in files if any([(f.split('.')[(- 1)] in extension), (f.split('.')[(- 1)].upper() in extension), (f.split('.')[(- 1)].lower() in extension)])]\n return files", "docstring": "Lists files in a given directory.\n\nArgs:\n\nd (str): Path to a directory.\n\nextension (str): If supplied, only files that contain the\nspecificied extension will be returned. Default is ``False``,\nwhich returns all files in ``d``.\n\nReturns:\n\nlist: A sorted list of file paths.", "source": "codesearchnet"} {"code": "def marshal_bson(obj, types=BSON_TYPES, fields=None):\n return marshal_dict(obj, types, fields=fields)", "docstring": "Recursively marshal a Python object to a BSON-compatible dict\nthat can be passed to PyMongo, Motor, etc...\n\nArgs:\nobj: object, It's members can be nested Python\nobjects which will be converted to dictionaries\ntypes: tuple-of-types, The BSON primitive types, typically\nyou would not change this\nfields: None-list-of-str, Explicitly marshal only these fields\nReturns:\ndict", "source": "codesearchnet"} {"code": "def StartFlowAndWait(client_id, token=None, timeout=DEFAULT_TIMEOUT, **flow_args):\n flow_urn = flow.StartAFF4Flow(client_id=client_id, token=token, sync=True, **flow_args)\n WaitForFlow(flow_urn, token=token, timeout=timeout)\n return flow_urn", "docstring": "Runs a flow and waits for it to finish.\n\nArgs:\nclient_id: The client id of the client to run on.\ntoken: The datastore access token.\ntimeout: How long to wait for a flow to complete, maximum.\n**flow_args: Pass through to flow.\n\nReturns:\nThe urn of the flow that was run.", "source": "codesearchnet"} {"code": "def subtract_period_and_roll(self, date_tensor, period_tensor, roll_convention=constants.BusinessDayConvention.NONE):\n minus_period_tensor = periods.PeriodTensor(-period_tensor.quantity(), period_tensor.period_type())\n return self.add_period_and_roll(date_tensor, minus_period_tensor, roll_convention)", "docstring": "Subtracts given periods from given dates and rolls to business days.\n\nThe original dates are not rolled prior to subtraction.\n\nArgs:\ndate_tensor: `DateTensor` of dates to subtract from.\nperiod_tensor: PeriodTensor broadcastable to `date_tensor`.\nroll_convention: BusinessDayConvention. 
Determines how to roll a date that\nfalls on a holiday.\n\nReturns:\nThe resulting `DateTensor`.", "source": "github-repos"} {"code": "def set(self, refresh_token):\n \n logger.info('Saving refresh_token to %s', repr(self._filename))\n try:\n with open(self._filename, 'w') as f:\n f.write(refresh_token)\n except IOError as e:\n logger.warning('Failed to save refresh_token: %s', e)", "docstring": "Cache a refresh token, ignoring any failure.\n\nArgs:\nrefresh_token (str): Refresh token to cache.", "source": "juraj-google-style"} {"code": "def _delete_blob(self, bucket, blob_name):\n if self._use_blob_generation:\n blob = bucket.get_blob(blob_name, retry=self._storage_client_retry)\n generation = getattr(blob, 'generation', None)\n else:\n generation = None\n try:\n bucket.delete_blob(blob_name, if_generation_match=generation, retry=self._storage_client_retry)\n except NotFound:\n return", "docstring": "Helper method to delete a single blob from GCS.\n\nArgs:\nbucket: The GCS bucket object.\nblob_name: The name of the blob to delete under the bucket.", "source": "github-repos"} {"code": "def verify_controller_module(module):\n required_attributes = ('create', 'destroy', 'MOBLY_CONTROLLER_CONFIG_NAME')\n for attr in required_attributes:\n if not hasattr(module, attr):\n raise signals.ControllerError('Module %s missing required controller module attribute %s.' % (module.__name__, attr))\n if not getattr(module, attr):\n raise signals.ControllerError('Controller interface %s in %s cannot be null.' % (attr, module.__name__))", "docstring": "Verifies a module object follows the required interface for\ncontrollers.\n\nThe interface is explained in the docstring of\n`base_test.BaseTestClass.register_controller`.\n\nArgs:\nmodule: An object that is a controller module. 
This is usually\nimported with import statements or loaded by importlib.\n\nRaises:\nControllerError: if the module does not match the Mobly controller\ninterface, or one of the required members is null.", "source": "github-repos"} {"code": "async def delete(self, service_id: str) -> bool:\n (await self.docker._query('services/{service_id}'.format(service_id=service_id), method='DELETE'))\n return True", "docstring": "Remove a service\n\nArgs:\nservice_id: ID or name of the service\n\nReturns:\nTrue if successful", "source": "codesearchnet"} {"code": "def toggle_NV(self, pt):\n \n\n if not self.data['nv_locations']: \n self.data['nv_locations'].append(pt)\n self.data['image_data'] = None \n\n else:\n \n tree = scipy.spatial.KDTree(self.data['nv_locations'])\n \n d, i = tree.query(pt,k = 1, distance_upper_bound = self.settings['patch_size'])\n\n \n if d is not np.inf:\n self.data['nv_locations'].pop(i)\n \n else:\n self.data['nv_locations'].append(pt)\n\n \n if self.settings['type'] == 'square' and len(self.data['nv_locations'])>1:\n \n Nx, Ny = self.settings['Nx'], self.settings['Ny']\n pta = self.data['nv_locations'][0]\n ptb = self.data['nv_locations'][1]\n tmp = np.array([[[pta[0] + 1.0*i*(ptb[0]-pta[0])/(Nx-1), pta[1] + 1.0*j*(ptb[1]-pta[1])/(Ny-1)] for i in range(Nx)] for j in range(Ny)])\n self.data['nv_locations'] = np.reshape(tmp, (Nx * Ny, 2))\n self.stop()\n\n\n elif self.settings['type'] == 'line' and len(self.data['nv_locations'])>1:\n \n N = self.settings['Nx']\n pta = self.data['nv_locations'][0]\n ptb = self.data['nv_locations'][1]\n self.data['nv_locations'] = [np.array([pta[0] + 1.0*i*(ptb[0]-pta[0])/(N-1), pta[1] + 1.0*i*(ptb[1]-pta[1])/(N-1)]) for i in range(N)]\n self.stop()\n\n elif self.settings['type'] == 'ring' and len(self.data['nv_locations'])>1:\n \n Nx, Ny = self.settings['Nx'], self.settings['Ny']\n\n pta = self.data['nv_locations'][0] \n ptb = self.data['nv_locations'][1] \n\n \n rmax = np.sqrt((pta[0] - ptb[0]) ** 2 + (pta[1] - ptb[1]) ** 2)\n\n \n tmp = []\n for r in np.linspace(rmax, 0, Ny + 1)[0:-1]:\n for theta in np.linspace(0, 2 * np.pi, Nx+1)[0:-1]:\n tmp += [[r * np.sin(theta)+pta[0], r * np.cos(theta)+pta[1]]]\n\n self.data['nv_locations'] = np.array(tmp)\n self.stop()", "docstring": "If there is not currently a selected NV within self.settings[patch_size] of pt, adds it to the selected list. If\nthere is, removes that point from the selected list.\nArgs:\npt: the point to add or remove from the selected list\n\nPoststate: updates selected list", "source": "juraj-google-style"} {"code": "def find_mice(self, direction, mechanism, purviews=False):\n purviews = self.potential_purviews(direction, mechanism, purviews)\n if (not purviews):\n max_mip = _null_ria(direction, mechanism, ())\n else:\n max_mip = max((self.find_mip(direction, mechanism, purview) for purview in purviews))\n if (direction == Direction.CAUSE):\n return MaximallyIrreducibleCause(max_mip)\n elif (direction == Direction.EFFECT):\n return MaximallyIrreducibleEffect(max_mip)\n return validate.direction(direction)", "docstring": "Return the |MIC| or |MIE| for a mechanism.\n\nArgs:\ndirection (Direction): :|CAUSE| or |EFFECT|.\nmechanism (tuple[int]): The mechanism to be tested for\nirreducibility.\n\nKeyword Args:\npurviews (tuple[int]): Optionally restrict the possible purviews\nto a subset of the subsystem. 
This may be useful for _e.g._\nfinding only concepts that are \"about\" a certain subset of\nnodes.\n\nReturns:\nMaximallyIrreducibleCauseOrEffect: The |MIC| or |MIE|.", "source": "codesearchnet"} {"code": "def feed(self, byts):\n \n self.unpk.feed(byts)\n\n retn = []\n\n while True:\n\n try:\n item = self.unpk.unpack()\n tell = self.unpk.tell()\n retn.append((tell - self.size, item))\n self.size = tell\n\n except msgpack.exceptions.OutOfData:\n break\n\n return retn", "docstring": "Feed bytes to the unpacker and return completed objects.\n\nArgs:\nbyts (bytes): Bytes to unpack.\n\nNotes:\nIt is intended that this function is called multiple times with\nbytes from some sort of a stream, as it will unpack and return\nobjects as they are available.\n\nReturns:\nlist: List of tuples containing the item size and the unpacked item.", "source": "juraj-google-style"} {"code": "def lint(ctx: click.Context, amend: bool=False, stage: bool=False):\n _lint(ctx, amend, stage)", "docstring": "Runs all linters\n\nArgs:\nctx: click context\namend: whether or not to commit results\nstage: whether or not to stage changes", "source": "codesearchnet"} {"code": "def list_adb_devices_by_usb_id():\n out = adb.AdbProxy().devices(['-l'])\n clean_lines = str(out, 'utf-8').strip().split('\\n')\n results = []\n for line in clean_lines:\n tokens = line.strip().split()\n if len(tokens) > 2 and tokens[1] == 'device':\n results.append(tokens[2])\n return results", "docstring": "List the usb id of all android devices connected to the computer that\nare detected by adb.\n\nReturns:\nA list of strings that are android device usb ids. Empty if there's\nnone.", "source": "github-repos"} {"code": "def set_scf_initial_guess(self, guess=\"SAD\"):\n \n availabel_guesses = {\"core\", \"sad\", \"gwh\", \"read\", \"fragmo\"}\n if guess.lower() not in availabel_guesses:\n raise ValueError(\"The guess method \" + guess + \" is not supported \"\n \"yet\")\n self.params[\"rem\"][\"scf_guess\"] = guess.lower()", "docstring": "Set initial guess method to be used for SCF\n\nArgs:\nguess: The initial guess method. 
(str)", "source": "juraj-google-style"} {"code": "def get_customer(self, customer_id):\n return self.client._get((self.url + 'customers/{}'.format(customer_id)), headers=self.get_headers())", "docstring": "Queries the information related to the customer.\n\nArgs:\ncustomer_id: Identifier of the client from which you want to find the associated information.\n\nReturns:", "source": "codesearchnet"} {"code": "def _receive(self):\n (message, payload) = self._conn.recv()\n if (message == self._EXCEPTION):\n stacktrace = payload\n raise Exception(stacktrace)\n if (message == self._RESULT):\n return payload\n raise KeyError('Received message of unexpected type {}'.format(message))", "docstring": "Wait for a message from the worker process and return its payload.\n\nRaises:\nException: An exception was raised inside the worker process.\nKeyError: The received message is of an unknown type.\n\nReturns:\nPayload object of the message.", "source": "codesearchnet"} {"code": "def split(x, split_dim, num_or_size_splits, name=None):\n \n return SplitOperation(x, split_dim, num_or_size_splits, name=name).outputs", "docstring": "Like tf.split.\n\nArgs:\nx: a Tensor\nsplit_dim: a Dimension in x.shape.dims\nnum_or_size_splits: either an integer dividing split_dim.size\nor a list of integers adding up to split_dim.size\nname: an optional string\nReturns:\na list of Tensors.", "source": "juraj-google-style"} {"code": "def _kl_joint_joint(d0, d1, name=None):\n if (len(d0._dist_fn_wrapped) != len(d1._dist_fn_wrapped)):\n raise ValueError('Can only compute KL divergence between when each has thesame number of component distributions.')\n if ((not all(((a is None) for a in d0._dist_fn_args))) or (not all(((a is None) for a in d1._dist_fn_args)))):\n raise ValueError('Can only compute KL divergence when all distributions are independent.')\n with tf.name_scope((name or 'kl_jointseq_jointseq')):\n return sum((kullback_leibler.kl_divergence(d0_(), d1_()) for (d0_, d1_) in zip(d0._dist_fn_wrapped, d1._dist_fn_wrapped)))", "docstring": "Calculate the KL divergence between two `JointDistributionSequential`s.\n\nArgs:\nd0: instance of a `JointDistributionSequential` object.\nd1: instance of a `JointDistributionSequential` object.\nname: (optional) Name to use for created operations.\nDefault value: `\"kl_joint_joint\"`.\n\nReturns:\nkl_joint_joint: `Tensor` The sum of KL divergences between elemental\ndistributions of two joint distributions.\n\nRaises:\nValueError: when joint distributions have a different number of elemental\ndistributions.\nValueError: when either joint distribution has a distribution with dynamic\ndependency, i.e., when either joint distribution is not a collection of\nindependent distributions.", "source": "codesearchnet"} {"code": "def load_partition_data(self, index):\n \n\n info = self.partitions[index]\n data = PartitionData(info)\n\n for utt_id in info.utt_ids:\n utt_data = [c._file[utt_id][:] for c in self.containers]\n data.utt_data.append(utt_data)\n\n return data", "docstring": "Load and return the partition with the given index.\n\nArgs:\nindex (int): The index of partition, that refers to the index in ``self.partitions``.\n\nReturns:\nPartitionData: A PartitionData object containing the data for the partition with the given index.", "source": "juraj-google-style"} {"code": "def DisplayEstimate(message, min_estimate, max_estimate):\n \n \n mean_avg_cpc = (_CalculateMean(min_estimate['averageCpc']['microAmount'],\n max_estimate['averageCpc']['microAmount'])\n if 'averageCpc' in 
min_estimate\n and min_estimate['averageCpc'] else None)\n mean_avg_pos = (_CalculateMean(min_estimate['averagePosition'],\n max_estimate['averagePosition'])\n if 'averagePosition' in min_estimate\n and min_estimate['averagePosition'] else None)\n mean_clicks = _CalculateMean(min_estimate['clicksPerDay'],\n max_estimate['clicksPerDay'])\n mean_total_cost = _CalculateMean(min_estimate['totalCost']['microAmount'],\n max_estimate['totalCost']['microAmount'])\n\n print message\n print ' Estimated average CPC: %s' % _FormatMean(mean_avg_cpc)\n print ' Estimated ad position: %s' % _FormatMean(mean_avg_pos)\n print ' Estimated daily clicks: %s' % _FormatMean(mean_clicks)\n print ' Estimated daily cost: %s' % _FormatMean(mean_total_cost)", "docstring": "Displays mean average cpc, position, clicks, and total cost for estimate.\n\nArgs:\nmessage: str message to display for the given estimate.\nmin_estimate: sudsobject containing a minimum estimate from the\nTrafficEstimatorService response.\nmax_estimate: sudsobject containing a maximum estimate from the\nTrafficEstimatorService response.", "source": "juraj-google-style"} {"code": "def s3_app_bucket(self, include_region=False):\n \n if include_region:\n s3_app_bucket = self.format['s3_app_region_bucket'].format(**self.data)\n else:\n s3_app_bucket = self.format['s3_app_bucket'].format(**self.data)\n return s3_app_bucket", "docstring": "Generate s3 application bucket name.\n\nArgs:\ninclude_region (bool): Include region in the name generation.", "source": "juraj-google-style"} {"code": "def build(self):\n for node in self.node_index.values():\n node.freeze()\n stmt_next = {}\n stmt_prev = {}\n for node in self.node_index.values():\n for stmt in self.owners[node]:\n if stmt not in stmt_prev:\n stmt_prev[stmt] = set()\n if stmt not in stmt_next:\n stmt_next[stmt] = set()\n for first, second in self.forward_edges:\n stmts_exited = self.owners[first] - self.owners[second]\n for stmt in stmts_exited:\n stmt_next[stmt].add(second)\n stmts_entered = self.owners[second] - self.owners[first]\n for stmt in stmts_entered:\n stmt_prev[stmt].add(first)\n for stmt in stmt_next:\n stmt_next[stmt] = frozenset(stmt_next[stmt])\n for stmt in stmt_prev:\n stmt_prev[stmt] = frozenset(stmt_prev[stmt])\n result = Graph(entry=self.head, exit=self.leaves, error=self.errors, index=self.node_index, stmt_prev=stmt_prev, stmt_next=stmt_next)\n self.reset()\n return result", "docstring": "Returns the CFG accumulated so far and resets the builder.\n\nReturns:\nGraph", "source": "github-repos"} {"code": "def _ed25519_key_from_file(fn, path):\n \n try:\n return fn(read_from_file(path, exception=ScriptWorkerEd25519Error))\n except ScriptWorkerException as exc:\n raise ScriptWorkerEd25519Error(\"Failed calling {} for {}: {}!\".format(fn, path, str(exc)))", "docstring": "Create an ed25519 key from the contents of ``path``.\n\n``path`` is a filepath containing a base64-encoded ed25519 key seed.\n\nArgs:\nfn (callable): the function to call with the contents from ``path``\npath (str): the file path to the base64-encoded key seed.\n\nReturns:\nobj: the appropriate key type from ``path``\n\nRaises:\nScriptWorkerEd25519Error", "source": "juraj-google-style"} {"code": "def get_heading_encoding(response):\n encoding = wpull.protocol.http.util.parse_charset(response.fields.get('content-type', ''))\n if encoding:\n return wpull.string.normalize_codec_name(encoding)\n else:\n return None", "docstring": "Return the document encoding from a HTTP header.\n\nArgs:\nresponse (Response): An instance 
of :class:`.http.Response`.\n\nReturns:\n``str``, ``None``: The codec name.", "source": "codesearchnet"} {"code": "def remote_urls(self):\n cmd = 'git config -l | grep \"url\"'\n return self.sh(cmd, shell=True, ignore_error=True).strip()", "docstring": "Get all configured remote urls for this Repository\n\nReturns:\nstr: primary remote url for this Repository\n(``git config -l | grep \"url\"``)", "source": "codesearchnet"} {"code": "def _internal_operation_seed(self):\n return self._rng.randint(0, _MAXINT32)", "docstring": "Returns a fake operation seed.\n\nIn eager mode, user shouldn't set or depend on operation seed.\nHere, we generate a random seed based on global seed to make\noperation's randomness different and depend on the global seed.\n\nReturns:\nA fake operation seed based on global seed.", "source": "github-repos"} {"code": "def up_to(self, term: str) -> str:\n \n end = self.input.find(term, self.offset)\n if end < 0:\n raise EndOfInput(self)\n res = self.input[self.offset:end]\n self.offset = end + 1\n return res", "docstring": "Parse and return segment terminated by the first occurence of a string.\n\nArgs:\nterm: Terminating string.\n\nRaises:\nEndOfInput: If `term` does not occur in the rest of the input text.", "source": "juraj-google-style"} {"code": "def remove_undocumented(module_name, allowed_exception_list=None, doc_string_modules=None):\n current_symbols = set(dir(_sys.modules[module_name]))\n should_have = make_all(module_name, doc_string_modules)\n should_have += allowed_exception_list or []\n extra_symbols = current_symbols - set(should_have)\n target_module = _sys.modules[module_name]\n for extra_symbol in extra_symbols:\n if extra_symbol.startswith('_'):\n continue\n fully_qualified_name = module_name + '.' + extra_symbol\n _HIDDEN_ATTRIBUTES[fully_qualified_name] = (target_module, getattr(target_module, extra_symbol))\n delattr(target_module, extra_symbol)", "docstring": "Removes symbols in a module that are not referenced by a docstring.\n\nArgs:\nmodule_name: the name of the module (usually `__name__`).\nallowed_exception_list: a list of names that should not be removed.\ndoc_string_modules: a list of modules from which to take the docstrings.\nIf None, then a list containing only the module named `module_name` is used.\n\nFurthermore, if a symbol previously added with `add_to_global_allowlist`,\nthen it will always be allowed. This is useful for internal tests.\n\nReturns:\nNone", "source": "github-repos"} {"code": "def _run(self, cmd):\n \n\n if isinstance(cmd, six.string_types):\n cmd = salt.utils.args.shlex_split(cmd)\n\n try:\n log.debug(cmd)\n p = subprocess.Popen(\n cmd,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return p.communicate()\n\n except (OSError, IOError) as exc:\n log.debug('Command Failed: %s', ' '.join(cmd))\n log.debug('Error: %s', exc)\n raise CommandExecutionError(exc)", "docstring": "Internal function for running commands. 
Used by the uninstall function.\n\nArgs:\ncmd (str, list): The command to run\n\nReturns:\nstr: The stdout of the command", "source": "juraj-google-style"} {"code": "def console_set_char(con: tcod.console.Console, x: int, y: int, c: Union[(int, str)]) -> None:\n lib.TCOD_console_set_char(_console(con), x, y, _int(c))", "docstring": "Change the character at x,y to c, keeping the current colors.\n\nArgs:\ncon (Console): Any Console instance.\nx (int): Character x position from the left.\ny (int): Character y position from the top.\nc (Union[int, AnyStr]): Character to draw, can be an integer or string.\n\n.. deprecated:: 8.4\nArray access performs significantly faster than using this function.\nSee :any:`Console.ch`.", "source": "codesearchnet"} {"code": "def repertoire(self, direction, mechanism, purview):\n system = self.system[direction]\n node_labels = system.node_labels\n if (not set(purview).issubset(self.purview_indices(direction))):\n raise ValueError('{} is not a {} purview in {}'.format(fmt.fmt_mechanism(purview, node_labels), direction, self))\n if (not set(mechanism).issubset(self.mechanism_indices(direction))):\n raise ValueError('{} is no a {} mechanism in {}'.format(fmt.fmt_mechanism(mechanism, node_labels), direction, self))\n return system.repertoire(direction, mechanism, purview)", "docstring": "Return the cause or effect repertoire function based on a direction.\n\nArgs:\ndirection (str): The temporal direction, specifiying the cause or\neffect repertoire.", "source": "codesearchnet"} {"code": "def ws050(self, value=None):\n \n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float '\n 'for field `ws050`'.format(value))\n\n self._ws050 = value", "docstring": "Corresponds to IDD Field `ws050`\nWind speed corresponding 5.0% annual cumulative frequency of occurrence\n\nArgs:\nvalue (float): value for IDD Field `ws050`\nUnit: m/s\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"} {"code": "def reraise(e: Exception, prefix: Optional[_Str]=None, suffix: Optional[_Str]=None) -> NoReturn:\n __tracebackhide__ = True\n prefix = prefix() if callable(prefix) else prefix\n suffix = suffix() if callable(suffix) else suffix\n prefix = prefix or ''\n suffix = '\\n' + suffix if suffix else ''\n msg = f'{prefix}{e}{suffix}'\n\n class WrappedException(type(e)):\n \n\n def __init__(self, msg):\n Exception.__init__(self, msg)\n\n def __getattr__(self, name: str):\n return getattr(e, name)\n __repr__ = BaseException.__repr__\n __str__ = BaseException.__str__\n WrappedException.__name__ = type(e).__name__\n WrappedException.__qualname__ = type(e).__qualname__\n WrappedException.__module__ = type(e).__module__\n new_exception = WrappedException(msg)\n raise new_exception.with_traceback(e.__traceback__) from e.__cause__", "docstring": "Reraise an exception with an additional message.\n\nBenefit: Contrary to `raise ... from ...` and\n`raise Exception().with_traceback(tb)`, this function will:\n\n* Keep the original exception type, attributes,...\n* Avoid multi-nested `During handling of the above exception, another\nexception occurred`. 
Only the single original stacktrace is displayed.\n\nThis result in cleaner and more compact error messages.\n\nUsage:\n\n```\ntry:\nfn(x)\nexcept Exception as e:\nepy.reraise(e, prefix=f'Error for {x}: ')\n```\n\nArgs:\ne: Exception to reraise\nprefix: Prefix to add to the exception message.\nsuffix: Suffix to add to the exception message.", "source": "github-repos"} {"code": "def parent_id(self, value):\n \n if value == self._defaults['ai.operation.parentId'] and 'ai.operation.parentId' in self._values:\n del self._values['ai.operation.parentId']\n else:\n self._values['ai.operation.parentId'] = value", "docstring": "The parent_id property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"} {"code": "def get_embeddings_index(embedding_type='glove.42B.300d', embedding_dims=None, embedding_path=None, cache=True):\n \n\n if embedding_path is not None:\n embedding_type = embedding_path \n\n embeddings_index = _EMBEDDINGS_CACHE.get(embedding_type)\n if embeddings_index is not None:\n return embeddings_index\n\n if embedding_path is None:\n embedding_type_obj = get_embedding_type(embedding_type)\n\n \n \n extract = embedding_type_obj.get('extract', True)\n file_path = get_file(\n embedding_type_obj['file'], origin=embedding_type_obj['url'], extract=extract, cache_subdir='embeddings', file_hash=embedding_type_obj.get('file_hash',))\n\n if 'file_in_zip' in embedding_type_obj:\n zip_folder = file_path.split('.zip')[0]\n with ZipFile(file_path, 'r') as zf:\n zf.extractall(zip_folder)\n file_path = os.path.join(\n zip_folder, embedding_type_obj['file_in_zip'])\n else:\n if extract:\n if file_path.endswith('.zip'):\n file_path = file_path.split('.zip')[0]\n \n \n else:\n file_path = embedding_path\n\n embeddings_index = _build_embeddings_index(file_path, embedding_dims)\n\n if cache:\n _EMBEDDINGS_CACHE[embedding_type] = embeddings_index\n return embeddings_index", "docstring": "Retrieves embeddings index from embedding name or path. Will automatically download and cache as needed.\n\nArgs:\nembedding_type: The embedding type to load.\nembedding_path: Path to a local embedding to use instead of the embedding type. Ignores `embedding_type` if specified.\n\nReturns:\nThe embeddings indexed by word.", "source": "juraj-google-style"} {"code": "def traverse_inorder(self, leaves=True, internal=True):\n for node in self.root.traverse_inorder(leaves=leaves, internal=internal):\n (yield node)", "docstring": "Perform an inorder traversal of the ``Node`` objects in this ``Tree``\n\nArgs:\n``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``", "source": "codesearchnet"} {"code": "def from_string(cls, dataset_id, default_project=None):\n output_dataset_id = dataset_id\n output_project_id = default_project\n parts = dataset_id.split('.')\n if ((len(parts) == 1) and (not default_project)):\n raise ValueError('When default_project is not set, dataset_id must be a fully-qualified dataset ID in standard SQL format. e.g. \"project.dataset_id\", got {}'.format(dataset_id))\n elif (len(parts) == 2):\n (output_project_id, output_dataset_id) = parts\n elif (len(parts) > 2):\n raise ValueError('Too many parts in dataset_id. Expected a fully-qualified dataset ID in standard SQL format. e.g. 
\"project.dataset_id\", got {}'.format(dataset_id))\n return cls(output_project_id, output_dataset_id)", "docstring": "Construct a dataset reference from dataset ID string.\n\nArgs:\ndataset_id (str):\nA dataset ID in standard SQL format. If ``default_project``\nis not specified, this must included both the project ID and\nthe dataset ID, separated by ``.``.\ndefault_project (str):\nOptional. The project ID to use when ``dataset_id`` does not\ninclude a project ID.\n\nReturns:\nDatasetReference:\nDataset reference parsed from ``dataset_id``.\n\nExamples:\n>>> DatasetReference.from_string('my-project-id.some_dataset')\nDatasetReference('my-project-id', 'some_dataset')\n\nRaises:\nValueError:\nIf ``dataset_id`` is not a fully-qualified dataset ID in\nstandard SQL format.", "source": "codesearchnet"} {"code": "def FilterItem(self, launchditem):\n \n for regex in self.blacklist_regex:\n if regex.match(launchditem.get(\"Label\", \"\")):\n return True\n return False", "docstring": "Should this job be filtered.\n\nArgs:\nlaunchditem: job NSCFDictionary\nReturns:\nTrue if the item should be filtered (dropped)", "source": "juraj-google-style"} {"code": "def execute_until_false(method, interval_s): \n \n interval = Interval(method, stop_if_false=True)\n interval.start(interval_s)\n return interval", "docstring": "Executes a method forever until the method returns a false value.\n\nArgs:\nmethod: The callable to execute.\ninterval_s: The number of seconds to start the execution after each method\nfinishes.\nReturns:\nAn Interval object.", "source": "juraj-google-style"} {"code": "def legal_check(self, message):\n if (message['method'] == 'LEGAL'):\n logger.debug(('<%s> Event LEGAL' % (str(self.cuuid), message['euuid'])))\n logger.debug(('<%s> Removing event from event buffer.' % (str(self.cuuid), message['euuid'])))\n if (message['priority'] == 'high'):\n self.event_confirmations[message['euuid']] = self.event_uuids[message['euuid']]\n logger.debug(('<%s> Event was high priority. Adding to confirmations buffer.' % (str(self.cuuid), message['euuid'])))\n logger.debug(('<%s> Current event confirmation buffer: %s' % (str(self.cuuid), message['euuid'], pformat(self.event_confirmations))))\n try:\n del self.event_uuids[message['euuid']]\n except KeyError:\n logger.warning(('<%s> Euuid does not exist in event buffer. Key was removed before we could process it.' % (str(self.cuuid), message['euuid'])))\n elif (message['method'] == 'ILLEGAL'):\n logger.debug(('<%s> Event ILLEGAL' % (str(self.cuuid), message['euuid'])))\n logger.debug(('<%s> Removing event from event buffer and adding to rollback buffer.' 
% (str(self.cuuid), message['euuid'])))\n self.event_rollbacks[message['euuid']] = self.event_uuids[message['euuid']]\n del self.event_uuids[message['euuid']]", "docstring": "This method handles event legality check messages from the server.\n\nArgs:\nmessage (dict): The unserialized legality dictionary received from\nthe server.\n\nReturns:\nNone\n\nExamples:\n>>> message", "source": "codesearchnet"} {"code": "def login_details(self):\n if (not self.__login_details):\n self.__login_details = LoginDetails(self.__connection)\n return self.__login_details", "docstring": "Gets the login details\n\nReturns:\nList of login details", "source": "codesearchnet"} {"code": "def get_java_remote_console_url(self, ip=None):\n \n uri = \"{}/javaRemoteConsoleUrl\".format(self.data[\"uri\"])\n\n if ip:\n uri = \"{}?ip={}\".format(uri, ip)\n\n return self._helper.do_get(uri)", "docstring": "Generates a Single Sign-On (SSO) session for the iLO Java Applet console and returns the URL to launch it.\nIf the server hardware is unmanaged or unsupported, the resulting URL will not use SSO and the iLO Java Applet\nwill prompt for credentials. This is not supported on G7/iLO3 or earlier servers.\n\nArgs:\nip: IP address or host name of the server's iLO management processor\n\nReturns:\nURL", "source": "juraj-google-style"} {"code": "def create_sqlite_connection_provider(db_uri):\n \n uri = urlparse.urlparse(db_uri)\n if uri.scheme != 'sqlite':\n raise ValueError('Scheme is not sqlite: ' + db_uri)\n if uri.netloc:\n raise ValueError('Can not connect to SQLite over network: ' + db_uri)\n if uri.path == ':memory:':\n raise ValueError('Memory mode SQLite not supported: ' + db_uri)\n path = os.path.expanduser(uri.path)\n params = _get_connect_params(uri.query)\n \n return lambda: sqlite3.connect(path, **params)", "docstring": "Returns function that returns SQLite Connection objects.\n\nArgs:\ndb_uri: A string URI expressing the DB file, e.g. \"sqlite:~/tb.db\".\n\nReturns:\nA function that returns a new PEP-249 DB Connection, which must be closed,\neach time it is called.\n\nRaises:\nValueError: If db_uri is not a valid sqlite file URI.", "source": "juraj-google-style"} {"code": "def log1p(x):\n if any_symbolic_tensors((x,)):\n return Log1p().symbolic_call(x)\n return backend.numpy.log1p(x)", "docstring": "Returns the natural logarithm of one plus the `x`, element-wise.\n\nCalculates `log(1 + x)`.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput tensor, element-wise natural logarithm of `1 + x`.", "source": "github-repos"} {"code": "def gql(query_string, *args, **kwds):\n \n qry = _gql(query_string)\n if args or kwds:\n qry = qry._bind(args, kwds)\n return qry", "docstring": "Parse a GQL query string.\n\nArgs:\nquery_string: Full GQL query, e.g. 
'SELECT * FROM Kind WHERE prop = 1'.\n*args, **kwds: If present, used to call bind().\n\nReturns:\nAn instance of query_class.", "source": "juraj-google-style"} {"code": "def final_block(x1, x2, dim='2d', training=True, scope='final_block'):\n with tf.variable_scope(scope):\n y = tf.concat([x1, x2], axis=CONFIG[dim]['split_axis'])\n y = tf.layers.batch_normalization(y, training=training)\n y = tf.nn.relu(y)\n net = tf.reduce_mean(y, CONFIG[dim]['reduction_dimensions'], name='final_pool', keep_dims=True)\n return net", "docstring": "Converts activations from last RevNet block to pre-logits.\n\nArgs:\nx1: [NxHxWxC] tensor of network activations.\nx2: [NxHxWxC] tensor of network activations.\ndim: '2d' if 2-dimensional, '3d' if 3-dimensional.\ntraining: True for train phase, False for eval phase.\nscope: Optional variable scope for the final block.\n\nReturns:\n[N, hidden_dim] pre-logits tensor from activations x1 and x2.", "source": "codesearchnet"} {"code": "def GetFeedItemIdsForCampaign(campaign_feed):\n feed_item_ids = set()\n try:\n lhs_operand = campaign_feed['matchingFunction']['lhsOperand']\n except KeyError:\n lhs_operand = None\n if (lhs_operand and (lhs_operand[0]['FunctionArgumentOperand.Type'] == 'RequestContextOperand')):\n request_context_operand = lhs_operand[0]\n if ((request_context_operand['contextType'] == 'FEED_ITEM_ID') and (campaign_feed['matchingFunction']['operator'] == 'IN')):\n for argument in campaign_feed['matchingFunction']['rhsOperand']:\n if (argument['xsi_type'] == 'ConstantOperand'):\n feed_item_ids.add(argument['longValue'])\n return feed_item_ids", "docstring": "Gets the Feed Item Ids used by a campaign through a given Campaign Feed.\n\nArgs:\ncampaign_feed: the Campaign Feed we are retrieving Feed Item Ids from.\n\nReturns:\nA list of Feed Item IDs.", "source": "codesearchnet"} {"code": "def add_prefix(self, prefix, flags, prf):\n \n self._req('prefix add %s %s %s' % (prefix, flags, prf))\n time.sleep(1)\n self._req('netdataregister')", "docstring": "Add network prefix.\n\nArgs:\nprefix (str): network prefix.\nflags (str): network prefix flags, please refer thread documentation for details\nprf (str): network prf, please refer thread documentation for details", "source": "juraj-google-style"} {"code": "def get_handler(self):\n gcl_client = gcl_logging.Client(project=self.project_id, credentials=self.credentials)\n handler = gcl_handlers.CloudLoggingHandler(gcl_client, resource=self.resource, labels={'resource_id': self.instance_id, 'resource_project': self.project_id, 'resource_zone': self.zone, 'resource_host': self.hostname})\n handler.setFormatter(self.get_formatter())\n self._set_worker_thread_level()\n return handler", "docstring": "Create a fully configured CloudLoggingHandler.\n\nReturns:\n(obj): Instance of `google.cloud.logging.handlers.\nCloudLoggingHandler`", "source": "codesearchnet"} {"code": "def doc_private(obj: T) -> T:\n setattr(obj, _DOC_PRIVATE, None)\n return obj", "docstring": "A decorator: Generates docs for private methods/functions.\n\nFor example:\n\n```\nclass Try:\n\n@doc_controls.doc_private\ndef _private(self):\n...\n```\n\nAs a rule of thumb, private(beginning with `_`) methods/functions are\nnot documented.\n\nThis decorator allows to force document a private method/function.\n\nArgs:\nobj: The class-attribute to hide from the generated docs.\n\nReturns:\nobj", "source": "github-repos"} {"code": "def create_config(self, name, data, labels=None):\n if (not isinstance(data, bytes)):\n data = data.encode('utf-8')\n data = 
base64.b64encode(data)\n if six.PY3:\n data = data.decode('ascii')\n body = {'Data': data, 'Name': name, 'Labels': labels}\n url = self._url('/configs/create')\n return self._result(self._post_json(url, data=body), True)", "docstring": "Create a config\n\nArgs:\nname (string): Name of the config\ndata (bytes): Config data to be stored\nlabels (dict): A mapping of labels to assign to the config\n\nReturns (dict): ID of the newly created config", "source": "codesearchnet"} {"code": "def plot_pair(df, feature_name_1, feature_name_2, kind='scatter', alpha=0.01, **kwargs):\n \n\n plt.figure()\n sns.jointplot(\n feature_name_1,\n feature_name_2,\n df,\n alpha=alpha,\n kind=kind,\n **kwargs\n )\n plt.show()", "docstring": "Plot a scatterplot of two features against one another,\nand calculate Pearson correlation coefficient.\n\nExamples:\n`plot_pair(X, 'emb_mean_euclidean', 'emb_mean_cosine')`\n\nArgs:\ndf:\nfeature_name_1: The name of the first feature.\nfeature_name_2: The name of the second feature.\nkind: One of the values { 'scatter' | 'reg' | 'resid' | 'kde' | 'hex' }.\nalpha: Alpha channel value.\n**kwargs: Additional argument to 'sns.jointplot'.", "source": "juraj-google-style"} {"code": "def convertTimestamps(column):\n tempColumn = column\n try:\n tempValue = np.datetime64(column[randint(0, (len(column.index) - 1))])\n tempColumn = column.apply(to_datetime)\n except Exception:\n pass\n return tempColumn", "docstring": "Convert a dtype of a given column to a datetime.\n\nThis method tries to do this by brute force.\n\nArgs:\ncolumn (pandas.Series): A Series object with all rows.\n\nReturns:\ncolumn: Converted to datetime if no errors occured, else the\noriginal column will be returned.", "source": "codesearchnet"} {"code": "def __init__(self, logdir, max_queue=10, flush_secs=120, filename_suffix=None):\n self._logdir = str(logdir)\n gfile.MakeDirs(self._logdir)\n self._max_queue = max_queue\n self._flush_secs = flush_secs\n self._flush_complete = threading.Event()\n self._flush_sentinel = object()\n self._close_sentinel = object()\n self._ev_writer = _pywrap_events_writer.EventsWriter(compat.as_bytes(os.path.join(self._logdir, 'events')))\n if filename_suffix:\n self._ev_writer.InitWithSuffix(compat.as_bytes(filename_suffix))\n self._initialize()\n self._closed = False", "docstring": "Creates a `EventFileWriter` and an event file to write to.\n\nOn construction the summary writer creates a new event file in `logdir`.\nThis event file will contain `Event` protocol buffers, which are written to\ndisk via the add_event method.\n\nThe other arguments to the constructor control the asynchronous writes to\nthe event file:\n\n* `flush_secs`: How often, in seconds, to flush the added summaries\nand events to disk.\n* `max_queue`: Maximum number of summaries or events pending to be\nwritten to disk before one of the 'add' calls block.\n\nArgs:\nlogdir: A string. Directory where event file will be written.\nmax_queue: Integer. Size of the queue for pending events and summaries.\nflush_secs: Number. How often, in seconds, to flush the\npending events and summaries to disk.\nfilename_suffix: A string. 
Every event file's name is suffixed with\n`filename_suffix`.", "source": "github-repos"} {"code": "def _fill_and_verify_padding(padding, n):\n if ((not isinstance(n, numbers.Integral)) or (n < 1)):\n raise TypeError('n must be a positive integer')\n if (isinstance(padding, six.string_types) and (padding in ALLOWED_PADDINGS)):\n return ((padding,) * n)\n try:\n if ((len(padding) == n) and all(((p in ALLOWED_PADDINGS) for p in padding))):\n return tuple(padding)\n except TypeError:\n pass\n raise TypeError(\"padding is {}, must be member of '{}' or an iterable of these of size {}\".format(padding, ALLOWED_PADDINGS, n))", "docstring": "Verifies that the provided padding is supported and expands to size n.\n\nArgs:\npadding: One of ALLOWED_PADDINGS, or an iterable of them.\nn: An integer, the size of the desired output list.\n\nReturns:\nIf `padding` is one of ALLOWED_PADDINGS, a tuple of size `n` containing `n`\ncopies of `padding`.\nIf `padding` is an iterable of ALLOWED_PADDINGS of size `n`, it returns\n`padding(x)`.\n\nRaises:\nTypeError: If n is not a positive integer; if padding is neither one of\nALLOWED_PADDINGS nor an iterable of ALLOWED_PADDINGS of size n.", "source": "codesearchnet"} {"code": "def set_unique_child_node(self, name, node):\n \n try:\n temp = self._nodes[name]\n raise RuntimeError(\"Name '%s' is already used for child node\" % name)\n except KeyError:\n pass\n\n self.set_child_node(name, node)", "docstring": "Add one child node to this node.\n\nArgs:\nname (str): Name of the child.\nnode (TreeMapNode): Node to add.\n\nNote:\nThe name must **not** be in use.", "source": "juraj-google-style"} {"code": "def get_read_strategy(cls, response):\n chunked_match = re.match('chunked($|;)', response.fields.get('Transfer-Encoding', ''))\n if chunked_match:\n return 'chunked'\n elif ('Content-Length' in response.fields):\n return 'length'\n else:\n return 'close'", "docstring": "Return the appropriate algorithm of reading response.\n\nReturns:\nstr: ``chunked``, ``length``, ``close``.", "source": "codesearchnet"} {"code": "def isNodeAuthorized(self, targetNodeSubject, pid, vendorSpecific=None):\n \n response = self.isNodeAuthorizedResponse(targetNodeSubject, pid, vendorSpecific)\n return self._read_boolean_401_response(response)", "docstring": "See Also: isNodeAuthorizedResponse()\n\nArgs:\ntargetNodeSubject:\npid:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"} {"code": "def requests(self):\n requests = []\n for json in self.skype.conn('GET', '{0}/users/{1}/invites'.format(SkypeConnection.API_CONTACTS, self.skype.userId), auth=SkypeConnection.Auth.SkypeToken).json().get('invite_list', []):\n for invite in json.get('invites', []):\n invite['userId'] = SkypeUtils.noPrefix(json.get('mri'))\n requests.append(SkypeRequest.fromRaw(self.skype, invite))\n return requests", "docstring": "Retrieve any pending contact requests.\n\nReturns:\n:class:`SkypeRequest` list: collection of requests", "source": "codesearchnet"} {"code": "def _is_function(self, name) -> bool:\n return compat.as_str(name) in self._functions", "docstring": "Tests whether 'name' is registered in this graph's function library.\n\nArgs:\nname: string op name.\n\nReturns:\nbool indicating whether or not 'name' is registered in function library.", "source": "github-repos"} {"code": "def trace(state: State, fn: TransitionOperator, num_steps: IntTensor,\n trace_fn: Callable[[State, TensorNest], TensorNest]\n ) -> Tuple[State, TensorNest]:\n \n\n def fn_wrapper(args, _):\n return 
tf.nest.map_structure(tf.convert_to_tensor, call_fn(fn, args[0]))\n\n def trace_fn_wrapper(args):\n return tf.nest.map_structure(tf.convert_to_tensor, call_fn(trace_fn, args))\n\n state = call_fn(fn, state)\n first_trace = trace_fn_wrapper(state)\n\n state, full_trace = mcmc_util.trace_scan(\n fn_wrapper, state, tf.ones(num_steps - 1), trace_fn=trace_fn_wrapper)\n\n prepend = lambda x, y: tf.concat( \n [tf.convert_to_tensor(value=x)[tf.newaxis], y], 0)\n\n return state, tf.nest.map_structure(prepend, first_trace, full_trace)", "docstring": "`TransitionOperator` that runs `fn` repeatedly and traces its outputs.\n\nArgs:\nstate: A nest of `Tensor`s or None.\nfn: A `TransitionOperator`.\nnum_steps: Number of steps to run the function for. Must be greater than 1.\ntrace_fn: Callable that takes the unpacked outputs of `fn` and returns a nest of\n`Tensor`s. These will be stacked and returned.\n\nReturns:\nstate: The final state returned by `fn`.\ntraces: Stacked outputs of `trace_fn`.", "source": "juraj-google-style"} {"code": "def forward_event_shape(self, input_shape):\n return self._forward_event_shape(tensor_shape.TensorShape(input_shape))", "docstring": "Shape of a single sample from a single batch as a `TensorShape`.\n\nSame meaning as `forward_event_shape_tensor`. May be only partially defined.\n\nArgs:\ninput_shape: `TensorShape` indicating event-portion shape passed into\n`forward` function.\n\nReturns:\nforward_event_shape_tensor: `TensorShape` indicating event-portion shape\nafter applying `forward`. Possibly unknown.", "source": "github-repos"} {"code": "def read_links_file(self, file_path):\n articles = []\n with open(file_path) as f:\n for line in f:\n line = line.strip()\n if (len(line) != 0):\n (link, category) = line.split(' ')\n articles.append((category.rstrip(), link.strip()))\n return articles", "docstring": "Read links and associated categories for specified articles\nin text file separated by a space\n\nArgs:\nfile_path (str): The path to text file with news article links\nand category\n\nReturns:\narticles: Array of tuples that contains article link & category\nex. 
[('IPO','www.cs.columbia.edu')]", "source": "codesearchnet"} {"code": "def process_feed(config, task, feed_name, dao, print_field, msg='Processing'):\n feed = Feed(config, task['auth'], task['sheet_id'], feed_name, spreadsheet=spreadsheet, timezone=task.get('timezone', None))\n execute_feed(feed, dao, print_field, msg)", "docstring": "Processes a feed that represents a specific entity in the Bulkdozer feed.\n\nArgs:\nfeed_name: Name of the feed to process, refer to feed.py for the supported\nfeed names.\ndao: The data access object to be used to interact with the CM API and\nupdate, must match the entity being updated in CM, in the sense that the\nrequired fields to fetch, create, and update the entity in CM must be\nincluded in the feed.\nprint_field: Field that identifies the item, used to print status messages\nto the Log tab of the Bulkdozer feed.\nmsg: Prefix message to use when writing to the Log tab of the Bulkdozer\nfeed, for instance we display Processing Campaign for campaign, and\nUploading Asset for assets.", "source": "github-repos"} {"code": "def omit_wells(self, uwis=None):\n if (uwis is None):\n raise ValueError('Must specify at least one uwi')\n return Project([w for w in self if (w.uwi not in uwis)])", "docstring": "Returns a new project where wells with specified uwis have been omitted\n\nArgs:\nuwis (list): list or tuple of UWI strings.\n\nReturns:\nproject", "source": "codesearchnet"} {"code": "def __init__(self, callback_id, event_client, ret_value, method_name, device, rpc_max_timeout_sec, default_timeout_sec=120):\n self._id = callback_id\n self.ret_value = ret_value\n self._device = device\n self._event_client = event_client\n self._method_name = method_name\n if rpc_max_timeout_sec < default_timeout_sec:\n raise ValueError(f'The max timeout of a single RPC must be no smaller than the default timeout of the callback handler. Got rpc_max_timeout_sec={rpc_max_timeout_sec}, default_timeout_sec={default_timeout_sec}.')\n self._rpc_max_timeout_sec = rpc_max_timeout_sec\n self._default_timeout_sec = default_timeout_sec", "docstring": "Initializes a callback handler base object.\n\nArgs:\ncallback_id: str, the callback ID which associates with a group of\ncallback events.\nevent_client: SnippetClientV2, the client object used to send RPC to the\nserver and receive response.\nret_value: any, the direct return value of the async RPC call.\nmethod_name: str, the name of the executed Async snippet function.\ndevice: DeviceController, the device object associated with this handler.\nrpc_max_timeout_sec: float, maximum time for sending a single RPC call.\ndefault_timeout_sec: float, the default timeout for this handler. It\nmust be no longer than rpc_max_timeout_sec.", "source": "github-repos"} {"code": "def compute_fat_line(nodes):\n (coeff_a, coeff_b, coeff_c) = compute_implicit_line(nodes)\n (_, num_nodes) = nodes.shape\n d_min = 0.0\n d_max = 0.0\n for index in six.moves.xrange(1, (num_nodes - 1)):\n curr_dist = (((coeff_a * nodes[(0, index)]) + (coeff_b * nodes[(1, index)])) + coeff_c)\n if (curr_dist < d_min):\n d_min = curr_dist\n elif (curr_dist > d_max):\n d_max = curr_dist\n return (coeff_a, coeff_b, coeff_c, d_min, d_max)", "docstring": "Compute the \"fat line\" around a B |eacute| zier curve.\n\nBoth computes the implicit (normalized) form\n\n.. 
math::\n\nax + by + c = 0\n\nfor the line connecting the first and last node in ``nodes``.\nAlso computes the maximum and minimum distances to that line\nfrom each control point.\n\nArgs:\nnodes (numpy.ndarray): ``2 x N`` array of nodes in a curve.\n\nReturns:\nTuple[float, float, float, float, float]: The 5-tuple of\n\n* The :math:`x` coefficient :math:`a`\n* The :math:`y` coefficient :math:`b`\n* The constant :math:`c`\n* The \"minimum\" distance to the fat line among the control points.\n* The \"maximum\" distance to the fat line among the control points.", "source": "codesearchnet"} {"code": "def get_energy_buckingham(structure, gulp_cmd='gulp',\n keywords=('optimise', 'conp', 'qok'),\n valence_dict=None):\n \n gio = GulpIO()\n gc = GulpCaller(gulp_cmd)\n gin = gio.buckingham_input(\n structure, keywords, valence_dict=valence_dict\n )\n gout = gc.run(gin)\n return gio.get_energy(gout)", "docstring": "Compute the energy of a structure using Buckingham potential.\n\nArgs:\nstructure: pymatgen.core.structure.Structure\ngulp_cmd: GULP command if not in standard place\nkeywords: GULP first line keywords\nvalence_dict: {El: valence}. Needed if the structure is not charge\nneutral.", "source": "juraj-google-style"} {"code": "def get_definition(self, task_name):\n r = self.gbdx_connection.get(((self._base_url + '/') + task_name))\n raise_for_status(r)\n return r.json()", "docstring": "Gets definition of a registered GBDX task.\n\nArgs:\ntask_name (str): Task name.\n\nReturns:\nDictionary representing the task definition.", "source": "codesearchnet"} {"code": "def _raise_if_annotated(self, func):\n \n if hasattr(func, ANNOTATED) and getattr(func, ANNOTATED):\n msg = ('Functions decorated with {!r} '\n 'should not be decorated with {!r}.\\n'\n 'Please reverse the order of the decorators!'.format(\n self.__class__.__name__, Annotate.__name__))\n raise TypeError(msg)", "docstring": "Raise TypeError if a function is decorated with Annotate, as such\nfunctions cause visual bugs when decorated with Animate.\n\nAnimate should be wrapped by Annotate instead.\n\nArgs:\nfunc (function): Any callable.\nRaises:\nTypeError", "source": "juraj-google-style"} {"code": "def build_pos_args_table(full_alias, args, start_index):\n pos_args_placeholder = get_placeholders(full_alias, check_duplicates=True)\n pos_args = args[start_index:(start_index + len(pos_args_placeholder))]\n if (len(pos_args_placeholder) != len(pos_args)):\n error_msg = INSUFFICIENT_POS_ARG_ERROR.format(full_alias, len(pos_args_placeholder), ('' if (len(pos_args_placeholder) == 1) else 's'), len(pos_args))\n raise CLIError(error_msg)\n for (i, pos_arg) in enumerate(pos_args):\n pos_args[i] = pos_arg.replace('\"', '\\\\\"')\n return dict(zip(pos_args_placeholder, pos_args))", "docstring": "Build a dictionary where the key is placeholder name and the value is the position argument value.\n\nArgs:\nfull_alias: The full alias (including any placeholders).\nargs: The arguments that the user inputs in the terminal.\nstart_index: The index at which we start ingesting position arguments.\n\nReturns:\nA dictionary with the key beign the name of the placeholder and its value\nbeing the respective positional argument.", "source": "codesearchnet"} {"code": "def alleles_to_retrieve(df):\n contig_blastn_records = defaultdict(list)\n markers = df.marker.unique()\n for m in markers:\n dfsub = df[(df.marker == m)]\n for (i, r) in dfsub.iterrows():\n if (r.coverage < 1.0):\n contig_blastn_records[r.stitle].append(r)\n break\n return contig_blastn_records", 
"docstring": "Alleles to retrieve from genome fasta\n\nGet a dict of the genome fasta contig title to a list of blastn results of the allele sequences that must be\nretrieved from the genome contig.\n\nArgs:\ndf (pandas.DataFrame): blastn results dataframe\n\nReturns:\n{str:[pandas.Series]}: dict of contig title (header name) to list of top blastn result records for each marker\nfor which the allele sequence must be retrieved from the original sequence.", "source": "codesearchnet"} {"code": "def add(self, X, y):\n if isinstance(X, dict):\n X = [X]\n y = [y]\n for i in range(len(X)):\n each = X[i]\n if (y[i] > self._best_score):\n self._best_score = y[i]\n self._best_hyperparams = X[i]\n vectorized = []\n for tunable in self.tunables:\n vectorized.append(each[tunable[0]])\n if (self.X_raw is not None):\n self.X_raw = np.append(self.X_raw, np.array([vectorized], dtype=object), axis=0)\n else:\n self.X_raw = np.array([vectorized], dtype=object)\n self.y_raw = np.append(self.y_raw, y)\n x_transformed = np.array([], dtype=np.float64)\n if ((len(self.X_raw.shape) > 1) and (self.X_raw.shape[1] > 0)):\n x_transformed = self.tunables[0][1].fit_transform(self.X_raw[(:, 0)], self.y_raw).astype(float)\n for i in range(1, self.X_raw.shape[1]):\n transformed = self.tunables[i][1].fit_transform(self.X_raw[(:, i)], self.y_raw).astype(float)\n x_transformed = np.column_stack((x_transformed, transformed))\n self.fit(x_transformed, self.y_raw)", "docstring": "Add data about known tunable hyperparameter configurations and scores.\n\nRefits model with all data.\n\nArgs:\nX (Union[Dict[str, object], List[Dict[str, object]]]): dict or list of dicts of\nhyperparameter combinations. Keys may only be the name of a tunable, and the\ndictionary must contain values for all tunables.\ny (Union[float, List[float]]): float or list of floats of scores of the hyperparameter\ncombinations. Order of scores must match the order of the hyperparameter\ndictionaries that the scores corresponds", "source": "codesearchnet"} {"code": "def quality(self, tests, alias=None):\n \n \n \n \n \n \n this_tests =\\\n tests.get('each', [])+tests.get('Each', [])+tests.get('EACH', [])\\\n + tests.get(self.mnemonic, [])\\\n + utils.flatten_list([tests.get(a) for a in self.get_alias(alias=alias)])\n this_tests = filter(None, this_tests)\n\n \n \n if not tests.get(self.mnemonic, 1):\n this_tests = []\n\n return {test.__name__: test(self) for test in this_tests}", "docstring": "Run a series of tests and return the corresponding results.\n\nArgs:\ntests (list): a list of functions.\nalias (dict): a dictionary mapping mnemonics to lists of mnemonics.\n\nReturns:\nlist. The results. 
Stick to booleans (True = pass) or ints.", "source": "juraj-google-style"} {"code": "def filter_by_slurry(self, slurry, appender=\"_\"):\n \n\n sheet = self.table\n identity = self.db_sheet_cols.id\n exists = self.db_sheet_cols.exists\n cellname = self.db_sheet_cols.cell_name\n search_string = \"\"\n\n if not isinstance(slurry, (list, tuple)):\n slurry = [slurry, ]\n\n first = True\n for slur in slurry:\n s_s = appender + slur + appender\n if first:\n search_string = s_s\n first = False\n else:\n search_string += \"|\"\n search_string += s_s\n\n criterion = sheet.loc[:, cellname].str.contains(\n search_string\n )\n exists = sheet.loc[:, exists] > 0\n sheet = sheet[criterion & exists]\n\n return sheet.loc[:, identity].values.astype(int)", "docstring": "Filters sheet/table by slurry name.\n\nInput is slurry name or list of slurry names, for example 'es030' or\n[\"es012\",\"es033\",\"es031\"].\n\nArgs:\nslurry (str or list of strings): slurry names.\nappender (chr): char that surrounds slurry names.\n\nReturns:\nList of serial_number (ints).", "source": "juraj-google-style"} {"code": "def simulate_moment_steps(self, circuit: circuits.Circuit, param_resolver: 'study.ParamResolverOrSimilarType'=None, qubit_order: ops.QubitOrderOrList=ops.QubitOrder.DEFAULT, initial_state: Any=None) -> Iterator:\n return self._simulator_iterator(circuit, study.ParamResolver(param_resolver), qubit_order, initial_state)", "docstring": "Returns an iterator of StepResults for each moment simulated.\n\nIf the circuit being simulated is empty, a single step result should\nbe returned with the state being set to the initial state.\n\nArgs:\ncircuit: The Circuit to simulate.\nparam_resolver: A ParamResolver for determining values of Symbols.\nqubit_order: Determines the canonical ordering of the qubits. This\nis often used in specifying the initial state, i.e. the\nordering of the computational basis states.\ninitial_state: The initial state for the simulation. The form of\nthis state depends on the simulation implementation. See\ndocumentation of the implementing class for details.\n\nReturns:\nIterator that steps through the simulation, simulating each\nmoment and returning a StepResult for each moment.", "source": "codesearchnet"} {"code": "def get_metrics_for_resource(access_token, subscription_id, resource_group, resource_provider,\n resource_type, resource_name):\n \n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/resourceGroups/', resource_group,\n '/providers/', resource_provider,\n '/', resource_type,\n '/', resource_name,\n '/providers/microsoft.insights',\n '/metrics?api-version=', INSIGHTS_PREVIEW_API])\n return do_get(endpoint, access_token)", "docstring": "Get the monitoring metrics for a resource.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nresource_type (str): Type of resource.\nresource_name (str): Name of resource.\n\nReturns:\nHTTP response. JSON body of resource metrics.", "source": "juraj-google-style"} {"code": "def execute(self, shell=True):\n process = Popen(self.command, stdout=PIPE, stderr=PIPE, shell=shell)\n (self.output, self.errors) = process.communicate()", "docstring": "Executes the command setted into class\n\nArgs:\nshell (boolean): Set True if command is a shell command. 
Default: True", "source": "codesearchnet"} {"code": "def loop_until_valid_response(prompt):\n responses = {'Y': True, 'YES': True, 'TRUE': True, 'N': False, 'NO': False, 'FALSE': False}\n response = ''\n while (response.upper() not in responses):\n response = raw_input(prompt)\n return responses[response.upper()]", "docstring": "Loop over entering input until it is a valid bool-ish response.\n\nArgs:\nprompt: Text presented to user.\n\nReturns:\nThe bool value equivalent of what was entered.", "source": "codesearchnet"} {"code": "def _BashScript(name, commands, default_options=None):\n default_options = default_options or set()\n global_options, options_map, subcommands_map = _GetMaps(name, commands, default_options)\n bash_completion_template = '\n check_wrapper = '\\n case \"${{lastcommand}}\" in\\n {lastcommand_checks}\\n esac'\n lastcommand_check_template = '\\n {command})\\n {opts_assignment}\\n opts=$(filter_options $opts)\\n ;;'\n opts_assignment_subcommand_template = '\\n if is_prev_global; then\\n opts=\"${{GLOBAL_OPTIONS}}\"\\n else\\n opts=\"{options} ${{GLOBAL_OPTIONS}}\"\\n fi'\n opts_assignment_main_command_template = '\\n opts=\"{options} ${{GLOBAL_OPTIONS}}\" '\n\n def _GetOptsAssignmentTemplate(command):\n if command == name:\n return opts_assignment_main_command_template\n else:\n return opts_assignment_subcommand_template\n lines = []\n commands_set = set()\n commands_set.add(name)\n commands_set = commands_set.union(set(subcommands_map.keys()))\n commands_set = commands_set.union(set(options_map.keys()))\n for command in commands_set:\n opts_assignment = _GetOptsAssignmentTemplate(command).format(options=' '.join(sorted(options_map[command].union(subcommands_map[command]))))\n lines.append(lastcommand_check_template.format(command=command, opts_assignment=opts_assignment))\n lastcommand_checks = '\\n'.join(lines)\n checks = check_wrapper.format(lastcommand_checks=lastcommand_checks)\n return bash_completion_template.format(name=name, command=name, checks=checks, default_options=' '.join(default_options), identifier=name.replace('/', '').replace('.', '').replace(',', ''), global_options=' '.join(global_options))", "docstring": "Returns a Bash script registering a completion function for the commands.\n\nArgs:\nname: The first token in the commands, also the name of the command.\ncommands: A list of all possible commands that tab completion can complete\nto. Each command is a list or tuple of the string tokens that make up\nthat command.\ndefault_options: A dict of options that can be used with any command. Use\nthis if there are flags that can always be appended to a command.\nReturns:\nA string which is the Bash script. 
Source the bash script to enable tab\ncompletion in Bash.", "source": "github-repos"} {"code": "def put_task(self, func, args, response):\n \n\n self._rpc_queue.put_nowait((func, args, response))", "docstring": "Place a task onto the RPC queue.\n\nThis temporary functionality will go away but it lets you run a\ntask synchronously with RPC dispatch by placing it onto the\nRPC queue.\n\nArgs:\nfunc (callable): The function to execute\nargs (iterable): The function arguments\nresponse (GenericResponse): The response object to signal the\nresult on.", "source": "juraj-google-style"} {"code": "def altitude_diff(msg):\n \n tc = common.typecode(msg)\n\n if tc != 19:\n raise RuntimeError(\"%s: Not a airborne velocity message, expecting TC=19\" % msg)\n\n msgbin = common.hex2bin(msg)\n sign = -1 if int(msgbin[80]) else 1\n value = common.bin2int(msgbin[81:88])\n\n if value == 0 or value == 127:\n return None\n else:\n return sign * (value - 1) * 25", "docstring": "Decode the difference between GNSS and barometric altitude\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string, TC=19\n\nReturns:\nint: Altitude difference in ft. Negative value indicates GNSS altitude\nbelow barometric altitude.", "source": "juraj-google-style"} {"code": "def _expand_param_on_rank(self, param, expand_rank, axis):\n param_tensor = tf.convert_to_tensor(param, dtype=self._dtype)\n param_expand = param_tensor\n for _ in range(expand_rank):\n param_expand = tf.expand_dims(param_expand, axis)\n return param_expand", "docstring": "Adds dimensions to `param`, not inplace.\n\nArgs:\nparam: initial element.\nexpand_rank: is amount of dimensions that need to be added.\naxis: is axis where to place these dimensions.\n\nReturns:\nNew `Tensor`.", "source": "github-repos"} {"code": "def process_alias_import_namespace(namespace):\n if is_url(namespace.alias_source):\n alias_source = retrieve_file_from_url(namespace.alias_source)\n _validate_alias_file_content(alias_source, url=namespace.alias_source)\n else:\n namespace.alias_source = os.path.abspath(namespace.alias_source)\n _validate_alias_file_path(namespace.alias_source)\n _validate_alias_file_content(namespace.alias_source)", "docstring": "Validate input arguments when the user invokes 'az alias import'.\n\nArgs:\nnamespace: argparse namespace object.", "source": "codesearchnet"} {"code": "def get_lists(self, **query_params):\n lists = self.get_lists_json(self.base_uri, query_params=query_params)\n lists_list = []\n for list_json in lists:\n lists_list.append(self.create_list(list_json))\n return lists_list", "docstring": "Get the lists attached to this board. 
Returns a list of List objects.\n\nReturns:\nlist(List): The lists attached to this board", "source": "codesearchnet"} {"code": "def get_by_name(self, name):\n result = self.get_by('name', name)\n if result:\n data = result[0]\n new_resource = self.new(self._connection, data)\n else:\n new_resource = None\n return new_resource", "docstring": "Retrieves a resource by its name.\n\nArgs:\nname: Resource name.\n\nReturns:\nResource object or None if resource does not exist.", "source": "codesearchnet"} {"code": "def build_csv_transforming_training_input_fn(schema, features, stats, analysis_output_dir, raw_data_file_pattern, training_batch_size, num_epochs=None, randomize_input=False, min_after_dequeue=1, reader_num_threads=1, allow_smaller_final_batch=True):\n\n def raw_training_input_fn():\n 'Training input function that reads raw data and applies transforms.'\n if isinstance(raw_data_file_pattern, six.string_types):\n filepath_list = [raw_data_file_pattern]\n else:\n filepath_list = raw_data_file_pattern\n files = []\n for path in filepath_list:\n files.extend(file_io.get_matching_files(path))\n filename_queue = tf.train.string_input_producer(files, num_epochs=num_epochs, shuffle=randomize_input)\n (csv_id, csv_lines) = tf.TextLineReader().read_up_to(filename_queue, training_batch_size)\n queue_capacity = (((reader_num_threads + 3) * training_batch_size) + min_after_dequeue)\n if randomize_input:\n (_, batch_csv_lines) = tf.train.shuffle_batch(tensors=[csv_id, csv_lines], batch_size=training_batch_size, capacity=queue_capacity, min_after_dequeue=min_after_dequeue, enqueue_many=True, num_threads=reader_num_threads, allow_smaller_final_batch=allow_smaller_final_batch)\n else:\n (_, batch_csv_lines) = tf.train.batch(tensors=[csv_id, csv_lines], batch_size=training_batch_size, capacity=queue_capacity, enqueue_many=True, num_threads=reader_num_threads, allow_smaller_final_batch=allow_smaller_final_batch)\n (csv_header, record_defaults) = csv_header_and_defaults(features, schema, stats, keep_target=True)\n parsed_tensors = tf.decode_csv(batch_csv_lines, record_defaults, name='csv_to_tensors')\n raw_features = dict(zip(csv_header, parsed_tensors))\n transform_fn = make_preprocessing_fn(analysis_output_dir, features, keep_target=True)\n transformed_tensors = transform_fn(raw_features)\n transformed_features = {}\n for (k, v) in six.iteritems(transformed_tensors):\n if (isinstance(v, tf.Tensor) and (v.get_shape().ndims == 1)):\n transformed_features[k] = tf.expand_dims(v, (- 1))\n else:\n transformed_features[k] = v\n target_name = get_target_name(features)\n if ((not target_name) or (target_name not in transformed_features)):\n raise ValueError('Cannot find target transform in features')\n transformed_target = transformed_features.pop(target_name)\n return (transformed_features, transformed_target)\n return raw_training_input_fn", "docstring": "Creates training input_fn that reads raw csv data and applies transforms.\n\nArgs:\nschema: schema list\nfeatures: features dict\nstats: stats dict\nanalysis_output_dir: output folder from analysis\nraw_data_file_pattern: file path, or list of files\ntraining_batch_size: An int specifying the batch size to use.\nnum_epochs: number of epochs to read from the files. Use None to read forever.\nrandomize_input: If true, the input rows are read out of order. This\nrandomness is limited by the min_after_dequeue value.\nmin_after_dequeue: Minimum number of elements in the reading queue after a\ndequeue, used to ensure a level of mixing of elements. 
Only used if\nrandomize_input is True.\nreader_num_threads: The number of threads enqueuing data.\nallow_smaller_final_batch: If false, fractional batches at the end of\ntraining or evaluation are not used.\n\nReturns:\nAn input_fn suitable for training that reads raw csv training data and\napplies transforms.", "source": "codesearchnet"} {"code": "def on_hello(self, message):\n \n\n logger.info(\"Got a hello\")\n self.identify(self.token)\n self.heartbeat_thread = Heartbeat(self.ws,\n message['d']['heartbeat_interval'])\n self.heartbeat_thread.start()\n return", "docstring": "Runs on a hello event from websocket connection\n\nArgs:\nmessage (dict): Full message from Discord websocket connection\"", "source": "juraj-google-style"} {"code": "def build_prefixes(namespaces=None):\n if (namespaces is None):\n namespaces = [('bf', str(BIBFRAME)), ('schema', str(SCHEMA_ORG))]\n output = 'PREFIX {}: <{}>\\n'.format(namespaces[0][0], namespaces[0][1])\n if (len(namespaces) == 1):\n return output\n else:\n for namespace in namespaces[1:]:\n output += 'PREFIX {}: <{}>\\n'.format(namespace[0], namespace[1])\n return output", "docstring": "Internal function takes a list of prefix, namespace uri tuples and\ngenerates a SPARQL PREFIX string.\n\nArgs:\nnamespaces(list): List of tuples, defaults to BIBFRAME and\nSchema.org\n\nReturns:\nstring", "source": "codesearchnet"} {"code": "def __init__(self, key_path_suffix):\n \n super(WindowsRegistryKeyPathSuffixFilter, self).__init__()\n self._key_path_suffix = key_path_suffix", "docstring": "Initializes a Windows Registry key filter.\n\nArgs:\nkey_path_suffix (str): the key path suffix.", "source": "juraj-google-style"} {"code": "def handle_typeguard(node: cfg.CFGNode, ret: _ReturnType, first_arg: cfg.Variable, ctx: 'context.Context', func_name: str | None=None) -> cfg.Variable | None:\n frame = ctx.vm.frame\n if not hasattr(frame, 'f_locals'):\n return None\n if ret.name == 'typing.TypeIs':\n match_result = ctx.matcher(node).compute_one_match(first_arg, ret.get_parameter(node, abstract_utils.T))\n matched = [m.view[first_arg] for m in match_result.good_matches]\n unmatched = [m.view[first_arg] for m in match_result.bad_matches]\n elif ret.name == 'typing.TypeGuard':\n matched = []\n unmatched = first_arg.bindings\n else:\n return None\n if matched:\n typeis_return = ctx.program.NewVariable()\n for b in matched:\n typeis_return.AddBinding(ctx.convert.true, {b}, node)\n for b in unmatched:\n typeis_return.AddBinding(ctx.convert.false, {b}, node)\n return typeis_return\n target_name = ctx.vm.get_var_name(first_arg)\n if not target_name:\n desc = f' function {func_name!r}' if func_name else ''\n ctx.errorlog.not_supported_yet(ctx.vm.frames, f'Calling {ret.name}{desc} with an arbitrary expression', 'Please assign the expression to a local variable.')\n return None\n target = frame.lookup_name(target_name)\n for b in target.Bindings(node):\n target.PasteBinding(b, node)\n old_data = set(target.Data(node))\n new_instance = ret.instantiate_parameter(node, abstract_utils.T)\n new_data = set(new_instance.data)\n for b in new_instance.bindings:\n if b.data not in old_data:\n target.PasteBinding(b, node)\n typeguard_return = ctx.program.NewVariable()\n for b in target.Bindings(node):\n boolvals = {b.data not in old_data} | {b.data in new_data}\n for v in boolvals:\n typeguard_return.AddBinding(ctx.convert.bool_values[v], {b}, node)\n return typeguard_return", "docstring": "Returns a variable of the return value of a type guard function.\n\nArgs:\nnode: The current 
node.\nret: The function's return value.\nfirst_arg: The first argument to the function.\nctx: The current context.\nfunc_name: Optionally, the function name, for better error messages.", "source": "github-repos"} {"code": "def collective_diffusion_coefficient(self):\n if self.has_run:\n return (self.atoms.collective_dr_squared() / (6.0 * self.lattice.time))\n else:\n return None", "docstring": "Returns the collective or \"jump\" diffusion coefficient, D_J.\n\nArgs:\nNone\n\nReturns:\n(Float): The collective diffusion coefficient, D_J.", "source": "codesearchnet"} {"code": "def to_dict(self):\n return {'name': self.name, 'broker': self.broker.to_dict(), 'pid': self.pid, 'process_pids': self.process_pids, 'concurrency': self.concurrency, 'job_count': self.job_count, 'queues': [q.to_dict() for q in self.queues]}", "docstring": "Return a dictionary of the worker stats.\n\nReturns:\ndict: Dictionary of the stats.", "source": "codesearchnet"} {"code": "def display_upstream_structure(structure_dict):\n graph = _create_graph(structure_dict)\n plt = Image(graph.create_png())\n display(plt)", "docstring": "Displays pipeline structure in the jupyter notebook.\n\nArgs:\nstructure_dict (dict): dict returned by\n:func:`~steppy.base.Step.upstream_structure`.", "source": "codesearchnet"} {"code": "def Get(self, request, global_params=None):\n config = self.GetMethodConfig('Get')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Returns information about a `BuildTrigger`. This API is experimental.\n\nArgs:\nrequest: (CloudbuildProjectsTriggersGetRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(BuildTrigger) The response message.", "source": "github-repos"} {"code": "def transform_to_mods_periodical(marc_xml, uuid, url):\n \n marc_xml = _read_content_or_path(marc_xml)\n\n transformed = xslt_transformation(\n marc_xml,\n _absolute_template_path(\"MARC21toPeriodicalTitle.xsl\")\n )\n\n return _apply_postprocessing(\n marc_xml=marc_xml,\n xml=transformed,\n func=mods_postprocessor.postprocess_periodical,\n uuid=uuid,\n url=url,\n )", "docstring": "Convert `marc_xml` to periodical MODS data format.\n\nArgs:\nmarc_xml (str): Filename or XML string. 
Don't use ``\\\\n`` in case of\nfilename.\nuuid (str): UUID string giving the package ID.\nurl (str): URL of the publication (public or not).\n\nReturns:\nlist: Collection of transformed xml strings.", "source": "juraj-google-style"} {"code": "def len_header(filename):\n \n with open(filename, 'rb') as f:\n header_sub_count = 0\n eoh_found = False\n while not eoh_found:\n header_sub = f.read(512)\n header_sub_count += 1\n if b'HEADER_END' in header_sub:\n idx_end = header_sub.index(b'HEADER_END') + len(b'HEADER_END')\n eoh_found = True\n break\n\n idx_end = (header_sub_count -1) * 512 + idx_end\n return idx_end", "docstring": "Return the length of the blimpy header, in bytes\n\nArgs:\nfilename (str): name of file to open\n\nReturns:\nidx_end (int): length of header, in bytes", "source": "juraj-google-style"} {"code": "def check_directory(path, human_readable_name):\n \n if not os.path.exists(path):\n LOGGER.error(\"%s directory does not exist: %s\",\n human_readable_name,\n path)\n return False\n\n if not os.path.isdir(path):\n LOGGER.error(\"%s directory is not a directory: %s\",\n human_readable_name,\n path)\n return False\n\n errors = True\n if not os.access(path, os.R_OK):\n LOGGER.error(\"%s directory is not readable: %s\",\n human_readable_name,\n path)\n errors = False\n if not os.access(path, os.W_OK):\n LOGGER.error(\"%s directory is not writable: %s\",\n human_readable_name,\n path)\n errors = False\n return errors", "docstring": "Verify that the directory exists and is readable and writable.\n\nArgs:\npath (str): a directory which should exist and be writable\nhuman_readable_name (str): a human readable string for the directory\nwhich is used in logging statements\n\nReturns:\nbool: False if an error exists, True otherwise.", "source": "juraj-google-style"} {"code": "def setPulseInputRatio(self, line_in, new_cnst, password=\"00000000\"):\n \n result = False\n self.setContext(\"setPulseInputRatio\")\n\n try:\n if not self.requestA():\n self.writeCmdMsg(\"Bad read CRC on setting\")\n else:\n if not self.serialCmdPwdAuth(password):\n self.writeCmdMsg(\"Password failure\")\n else:\n req_const = binascii.hexlify(str(new_cnst).zfill(4))\n line_const = binascii.hexlify(str(line_in - 1))\n req_str = \"01573102303041\" + line_const + \"28\" + req_const + \"2903\"\n req_str += self.calc_crc16(req_str[2:].decode(\"hex\"))\n self.m_serial_port.write(req_str.decode(\"hex\"))\n if self.m_serial_port.getResponse(self.getContext()).encode(\"hex\") == \"06\":\n self.writeCmdMsg(\"Success: 06 returned.\")\n result = True\n\n self.serialPostEnd()\n except:\n ekm_log(traceback.format_exc(sys.exc_info()))\n\n self.setContext(\"\")\n return result", "docstring": "Serial call to set pulse input ratio on a line.\n\nArgs:\nline_in (int): Member of :class:`~ekmmeters.Pulse`\nnew_cnst (int): New pulse input ratio\npassword (str): Optional password\n\nReturns:", "source": "juraj-google-style"} {"code": "def open(self, path, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO) -> BinaryIO:\n return self._path_open(path, 'rb', mime_type, compression_type)", "docstring": "Returns a read channel for the given file path.\n\nArgs:\npath: string path of the file object to be written to the system\nmime_type: MIME type to specify the type of content in the file object\ncompression_type: Type of compression to be used for this object\n\nReturns: file handle with a close function for the user to use", "source": "github-repos"} {"code": "def __init__(self, parent=None, **kwargs):\n \n if not 
parent:\n raise ValueError('Missing parent value.')\n\n super(EWFPathSpec, self).__init__(parent=parent, **kwargs)", "docstring": "Initializes a path specification.\n\nNote that the EWF file path specification must have a parent.\n\nArgs:\nparent (Optional[PathSpec]): parent path specification.\n\nRaises:\nValueError: when parent is not set.", "source": "juraj-google-style"} {"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n if not self.sparse_y_true:\n y_true = ops.argmax(y_true, axis=self.axis)\n if not self.sparse_y_pred:\n y_pred = ops.argmax(y_pred, axis=self.axis)\n y_true = ops.convert_to_tensor(y_true, dtype=self.dtype)\n y_pred = ops.convert_to_tensor(y_pred, dtype=self.dtype)\n if len(y_pred.shape) > 1:\n y_pred = ops.reshape(y_pred, [-1])\n if len(y_true.shape) > 1:\n y_true = ops.reshape(y_true, [-1])\n if sample_weight is None:\n sample_weight = 1\n elif hasattr(sample_weight, 'dtype') and 'float' in str(sample_weight.dtype) and ('int' in str(self.dtype)):\n warnings.warn('You are passing weight as `float`, but dtype is `int`. This may result in an incorrect weight due to type casting Consider using integer weights.')\n sample_weight = ops.convert_to_tensor(sample_weight, dtype=self.dtype)\n if len(sample_weight.shape) > 1:\n sample_weight = ops.reshape(sample_weight, [-1])\n sample_weight = ops.broadcast_to(sample_weight, ops.shape(y_true))\n if self.ignore_class is not None:\n ignore_class = ops.convert_to_tensor(self.ignore_class, y_true.dtype)\n valid_mask = ops.not_equal(y_true, ignore_class)\n y_true = y_true * ops.cast(valid_mask, y_true.dtype)\n y_pred = y_pred * ops.cast(valid_mask, y_pred.dtype)\n if sample_weight is not None:\n sample_weight = sample_weight * ops.cast(valid_mask, sample_weight.dtype)\n y_pred = ops.cast(y_pred, dtype=self.dtype)\n y_true = ops.cast(y_true, dtype=self.dtype)\n sample_weight = ops.cast(sample_weight, dtype=self.dtype)\n current_cm = confusion_matrix(y_true, y_pred, self.num_classes, weights=sample_weight, dtype=self.dtype)\n return self.total_cm.assign(self.total_cm + current_cm)", "docstring": "Accumulates the confusion matrix statistics.\n\nArgs:\ny_true: The ground truth values.\ny_pred: The predicted values.\nsample_weight: Optional weighting of each example. Can\nbe a `Tensor` whose rank is either 0, or the same as `y_true`,\nand must be broadcastable to `y_true`. 
Defaults to `1`.\n\nReturns:\nUpdate op.", "source": "github-repos"} {"code": "def get_all_resources(datasets):\n \n \n resources = []\n for dataset in datasets:\n for resource in dataset.get_resources():\n resources.append(resource)\n return resources", "docstring": "Get all resources from a list of datasets (such as returned by search)\n\nArgs:\ndatasets (List[Dataset]): list of datasets\n\nReturns:\nList[hdx.data.resource.Resource]: list of resources within those datasets", "source": "juraj-google-style"} {"code": "def __make_id(receiver):\n if __is_bound_method(receiver):\n return (id(receiver.__func__), id(receiver.__self__))\n return id(receiver)", "docstring": "Generate an identifier for a callable signal receiver.\n\nThis is used when disconnecting receivers, where we need to correctly\nestablish equivalence between the input receiver and the receivers assigned\nto a signal.\n\nArgs:\nreceiver: A callable object.\n\nReturns:\nAn identifier for the receiver.", "source": "codesearchnet"} {"code": "def start_centroid_distance(item_a, item_b, max_value):\n start_a = item_a.center_of_mass(item_a.times[0])\n start_b = item_b.center_of_mass(item_b.times[0])\n start_distance = np.sqrt((((start_a[0] - start_b[0]) ** 2) + ((start_a[1] - start_b[1]) ** 2)))\n return (np.minimum(start_distance, max_value) / float(max_value))", "docstring": "Distance between the centroids of the first step in each object.\n\nArgs:\nitem_a: STObject from the first set in TrackMatcher\nitem_b: STObject from the second set in TrackMatcher\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "codesearchnet"} {"code": "def distort_color(image, thread_id=0, scope=None):\n with tf.name_scope(values=[image], name=scope, default_name='distort_color'):\n color_ordering = (thread_id % 2)\n if (color_ordering == 0):\n image = tf.image.random_brightness(image, max_delta=(32.0 / 255.0))\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n elif (color_ordering == 1):\n image = tf.image.random_brightness(image, max_delta=(32.0 / 255.0))\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n image = tf.clip_by_value(image, 0.0, 1.0)\n return image", "docstring": "Distort the color of the image.\n\nEach color distortion is non-commutative and thus ordering of the color ops\nmatters. 
Ideally we would randomly permute the ordering of the color ops.\nRather than adding that level of complication, we select a distinct ordering\nof color ops for each preprocessing thread.\n\nArgs:\nimage: Tensor containing single image.\nthread_id: preprocessing thread ID.\nscope: Optional scope for name_scope.\nReturns:\ncolor-distorted image", "source": "codesearchnet"} {"code": "def update_watermarks_for_transform_and_unblock_tasks(self, ptransform, watermark) -> List[Tuple[TransformExecutor, Timestamp]]:\n unblocked_tasks = []\n for side in self._transform_to_side_inputs[ptransform]:\n unblocked_tasks.extend(self._update_watermarks_for_side_input_and_unblock_tasks(side, watermark))\n return unblocked_tasks", "docstring": "Updates _SideInputsContainer after a watermark update and unblocks tasks.\n\nIt traverses the list of side inputs per PTransform and calls\n_update_watermarks_for_side_input_and_unblock_tasks to unblock tasks.\n\nArgs:\nptransform: Value of a PTransform.\nwatermark: Value of the watermark after an update for a PTransform.\n\nReturns:\nTasks that get unblocked as a result of the watermark advancing.", "source": "github-repos"} {"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n super(SignResponsePayload, self).read(input_stream, kmip_version=kmip_version)\n local_stream = utils.BytearrayStream(input_stream.read(self.length))\n if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):\n self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n self._unique_identifier.read(local_stream, kmip_version=kmip_version)\n else:\n raise ValueError('invalid payload missing the unique identifier attribute')\n if self.is_tag_next(enums.Tags.SIGNATURE_DATA, local_stream):\n self._signature_data = primitives.ByteString(tag=enums.Tags.SIGNATURE_DATA)\n self._signature_data.read(local_stream, kmip_version=kmip_version)\n else:\n raise ValueError('invalid payload missing the signature data attribute')", "docstring": "Read the data encoding the Sign response payload and decode it.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the unique_identifier or signature attributes\nare missing from the encoded payload.", "source": "codesearchnet"} {"code": "def get_device_topology(self, id_or_uri):\n \n uri = self._client.build_uri(id_or_uri) + \"/deviceTopology\"\n return self._client.get(uri)", "docstring": "Retrieves the topology information for the rack resource specified by ID or URI.\n\nArgs:\nid_or_uri: Can be either the resource ID or the resource URI.\n\nReturn:\ndict: Device topology.", "source": "juraj-google-style"} {"code": "def convert_config_value(self, value, label):\n \n if isinstance(value, six.string_types):\n value = value.lower()\n\n if value in self.TRUTHY_VALUES:\n return True\n elif value in self.FALSY_VALUES:\n return False\n else:\n raise YapconfValueError(\"Cowardly refusing to interpret \"\n \"config value as a boolean. 
Name: \"\n \"{0}, Value: {1}\"\n .format(self.name, value))", "docstring": "Converts all 'Truthy' values to True and 'Falsy' values to False.\n\nArgs:\nvalue: Value to convert\nlabel: Label of the config in which this item was found.\n\nReturns:\nbool: True for truthy values, False for falsy values.", "source": "juraj-google-style"} {"code": "def _assert(cond, ex_type, msg):\n if _is_tensor(cond):\n return [control_flow_assert.Assert(cond, [msg])]\n elif not cond:\n raise ex_type(msg)\n else:\n return []", "docstring": "A polymorphic assert, works with tensors and boolean expressions.\n\nIf `cond` is not a tensor, behave like an ordinary assert statement, except\nthat an empty list is returned. If `cond` is a tensor, return a list\ncontaining a single TensorFlow assert op.\n\nArgs:\ncond: Something that evaluates to a boolean value. May be a tensor.\nex_type: The exception class to use.\nmsg: The error message.\n\nReturns:\nA list, containing at most one assert op.", "source": "github-repos"} {"code": "def explain(self, entry):\n d = self.get_explanation_dict(entry)\n print(('The uncorrected value of the energy of %s is %f eV' % (entry.composition, d['uncorrected_energy'])))\n print(('The following corrections / screening are applied for %s:\\n' % d['compatibility']))\n for c in d['corrections']:\n print(('%s correction: %s\\n' % (c['name'], c['description'])))\n print(('For the entry, this correction has the value %f eV.' % c['value']))\n print(('-' * 30))\n print(('The final energy after corrections is %f' % d['corrected_energy']))", "docstring": "Prints an explanation of the corrections that are being applied for a\ngiven compatibility scheme. Inspired by the \"explain\" methods in many\ndatabase methodologies.\n\nArgs:\nentry: A ComputedEntry.", "source": "codesearchnet"} {"code": "def step(self, action):\n if self.done:\n raise ValueError('cannot step in a done environment! 
call `reset`')\n self.controllers[0][:] = action\n _LIB.Step(self._env)\n reward = self._get_reward()\n self.done = self._get_done()\n info = self._get_info()\n self._did_step(self.done)\n if (reward < self.reward_range[0]):\n reward = self.reward_range[0]\n elif (reward > self.reward_range[1]):\n reward = self.reward_range[1]\n return (self.screen, reward, self.done, info)", "docstring": "Run one frame of the NES and return the relevant observation data.\n\nArgs:\naction (byte): the bitmap determining which buttons to press\n\nReturns:\na tuple of:\n- state (np.ndarray): next frame as a result of the given action\n- reward (float) : amount of reward returned after given action\n- done (boolean): whether the episode has ended\n- info (dict): contains auxiliary diagnostic information", "source": "codesearchnet"} {"code": "def options(self):\n response = self.repo.api.http_request('OPTIONS', self.uri)\n return response.headers", "docstring": "Small method to return headers of an OPTIONS request to self.uri\n\nArgs:\nNone\n\nReturn:\n(dict) response headers from OPTIONS request", "source": "codesearchnet"} {"code": "def valid_checksum(file, hash_file):\n \n bits = 4096\n \n hash_md5 = hashlib.md5()\n with open(file, \"rb\") as f:\n for chunk in iter(lambda: f.read(bits), b\"\"):\n hash_md5.update(chunk)\n \n with open(hash_file) as c:\n for line in c.readlines():\n if line.strip():\n check_list = line.split()\n if file == check_list[1]:\n if check_list[0] == hash_md5.hexdigest():\n return True\n return False", "docstring": "Summary:\nValidate file checksum using md5 hash\nArgs:\nfile: file object to verify integrity\nhash_file: md5 reference checksum file\nReturns:\nValid (True) | False, TYPE: bool", "source": "juraj-google-style"} {"code": "def set_maintainer(self, maintainer):\n if (isinstance(maintainer, hdx.data.user.User) or isinstance(maintainer, dict)):\n if ('id' not in maintainer):\n maintainer = hdx.data.user.User.read_from_hdx(maintainer['name'], configuration=self.configuration)\n maintainer = maintainer['id']\n elif (not isinstance(maintainer, str)):\n raise HDXError(('Type %s cannot be added as a maintainer!' % type(maintainer).__name__))\n if (is_valid_uuid(maintainer) is False):\n raise HDXError(('%s is not a valid user id for a maintainer!' % maintainer))\n self.data['maintainer'] = maintainer", "docstring": "Set the dataset's maintainer.\n\nArgs:\nmaintainer (Union[User,Dict,str]): Either a user id or User metadata from a User object or dictionary.\nReturns:\nNone", "source": "codesearchnet"} {"code": "def convert_var_to_const_function_in_v1(func, lower_control_flow=True, aggressive_inlining=False):\n session = ops.get_default_session()\n if session is None:\n raise RuntimeError('The conversion must be carried out in a Session context.')\n converter_data = _FunctionConverterDataInGraph(func=func, lower_control_flow=lower_control_flow, aggressive_inlining=aggressive_inlining, session=session)\n output_graph_def, converted_input_indices = _replace_variables_by_constants(converter_data=converter_data)\n return _construct_concrete_function(func, output_graph_def, converted_input_indices)", "docstring": "Replaces all the variables in a graph with constants of the same values.\n\nThis function works as same as convert_variables_to_constants_v2, but it\nshould be used in Graph mode. 
It is a temporary solution when users want to\nintegrate their models written in TF2 with infra that requires TF1 mode.\n\nThe current implementation only works for graphs that do not contain any\ncontrol flow or embedding related ops.\n\nThe function must be called in a Session context.\n\nArgs:\nfunc: ConcreteFunction.\nlower_control_flow: Boolean indicating whether or not to lower control flow\nops such as If and While. (default True)\naggressive_inlining: Boolean indicating whether or not to do aggressive\nfunction inlining (might be unsafe if function has stateful ops, not\nproperly connected to control outputs). (default False)\n\nRaises:\nRuntimeError: If no Session context is present.\n\nReturns:\nConcreteFunction containing a simplified version of the original.", "source": "github-repos"} {"code": "def adapt_logger(logger):\n if isinstance(logger, logging.Logger):\n return logger\n if isinstance(logger, (SimpleLogger, NoOpLogger)):\n return logger.logger\n return logger", "docstring": "Adapt our custom logger.BaseLogger object into a standard logging.Logger object.\n\nAdaptations are:\n- NoOpLogger turns into a logger with a single NullHandler.\n- SimpleLogger turns into a logger with a StreamHandler and level.\n\nArgs:\nlogger: Possibly a logger.BaseLogger, or a standard python logging.Logger.\n\nReturns: a standard python logging.Logger.", "source": "codesearchnet"} {"code": "def match(self, set_a, set_b):\n \n track_step_matches = [[] * len(set_a)]\n\n costs = self.cost_matrix(set_a, set_b)\n valid_costs = np.all(costs < 1, axis=2)\n set_a_matches, set_b_matches = np.where(valid_costs)\n s = 0\n track_pairings = pd.DataFrame(index=np.arange(costs.shape[0]),\n columns=[\"Track\", \"Step\", \"Time\", \"Matched\", \"Pairings\"], dtype=object)\n set_b_info = []\n for trb, track_b in enumerate(set_b):\n for t, time in enumerate(track_b.times):\n set_b_info.append((trb, t))\n set_b_info_arr = np.array(set_b_info, dtype=int)\n for tr, track_a in enumerate(set_a):\n for t, time in enumerate(track_a.times):\n track_pairings.loc[s, [\"Track\", \"Step\", \"Time\"]] = [tr, t, time]\n track_pairings.loc[s, \"Matched\"] = 1 if np.count_nonzero(set_a_matches == s) > 0 else 0\n if track_pairings.loc[s, \"Matched\"] == 1:\n track_pairings.loc[s, \"Pairings\"] = set_b_info_arr[set_b_matches[set_a_matches == s]]\n else:\n track_pairings.loc[s, \"Pairings\"] = np.array([])\n s += 1\n return track_pairings", "docstring": "For each step in each track from set_a, identify all steps in all tracks from set_b that meet all\ncost function criteria\n\nArgs:\nset_a: List of STObjects\nset_b: List of STObjects\n\nReturns:\ntrack_pairings: pandas.DataFrame", "source": "juraj-google-style"} {"code": "def constant() -> Space:\n return Space()", "docstring": "Returns an constant candidate of Choices.\n\nExample::\n\nspec = pg.geno.constant()\n\nReturns:\na constant ``pg.geno.Space`` object.\n\nSee also:\n\n* :func:`pyglove.geno.space`\n* :func:`pyglove.geno.oneof`\n* :func:`pyglove.geno.manyof`\n* :func:`pyglove.geno.floatv`\n* :func:`pyglove.geno.custom`", "source": "github-repos"} {"code": "def write(self, value):\n \n if not isinstance(value, bool):\n raise TypeError(\"Invalid value type, should be bool.\")\n\n \n try:\n if value:\n os.write(self._fd, b\"1\\n\")\n else:\n os.write(self._fd, b\"0\\n\")\n except OSError as e:\n raise GPIOError(e.errno, \"Writing GPIO: \" + e.strerror)\n\n \n try:\n os.lseek(self._fd, 0, os.SEEK_SET)\n except OSError as e:\n raise GPIOError(e.errno, \"Rewinding GPIO: \" + 
e.strerror)", "docstring": "Set the state of the GPIO to `value`.\n\nArgs:\nvalue (bool): ``True`` for high state, ``False`` for low state.\n\nRaises:\nGPIOError: if an I/O or OS error occurs.\nTypeError: if `value` type is not bool.", "source": "juraj-google-style"} {"code": "def make_innermost_getter(getter):\n \n\n @functools.wraps(getter)\n def _new_getter(kernel_results, *args, **kwargs):\n \n results_stack = []\n while hasattr(kernel_results, 'inner_results'):\n results_stack.append(kernel_results)\n kernel_results = kernel_results.inner_results\n\n return getter(kernel_results, *args, **kwargs)\n\n return _new_getter", "docstring": "Wraps a getter so it applies to the inner-most results in `kernel_results`.\n\nThe wrapped getter unwraps `kernel_results` and returns the return value of\n`getter` called with the first results without an `inner_results` attribute.\n\nArgs:\ngetter: A callable that takes Kernel results and returns some value.\n\nReturns:\nnew_getter: A wrapped `getter`.", "source": "juraj-google-style"} {"code": "def instruments(self, accountID, **kwargs):\n request = Request('GET', '/v3/accounts/{accountID}/instruments')\n request.set_path_param('accountID', accountID)\n request.set_param('instruments', kwargs.get('instruments'))\n response = self.ctx.request(request)\n if (response.content_type is None):\n return response\n if (not response.content_type.startswith('application/json')):\n return response\n jbody = json.loads(response.raw_body)\n parsed_body = {}\n if (str(response.status) == '200'):\n if (jbody.get('instruments') is not None):\n parsed_body['instruments'] = [self.ctx.primitives.Instrument.from_dict(d, self.ctx) for d in jbody.get('instruments')]\n if (jbody.get('lastTransactionID') is not None):\n parsed_body['lastTransactionID'] = jbody.get('lastTransactionID')\n elif (str(response.status) == '400'):\n if (jbody.get('errorCode') is not None):\n parsed_body['errorCode'] = jbody.get('errorCode')\n if (jbody.get('errorMessage') is not None):\n parsed_body['errorMessage'] = jbody.get('errorMessage')\n elif (str(response.status) == '401'):\n if (jbody.get('errorCode') is not None):\n parsed_body['errorCode'] = jbody.get('errorCode')\n if (jbody.get('errorMessage') is not None):\n parsed_body['errorMessage'] = jbody.get('errorMessage')\n elif (str(response.status) == '405'):\n if (jbody.get('errorCode') is not None):\n parsed_body['errorCode'] = jbody.get('errorCode')\n if (jbody.get('errorMessage') is not None):\n parsed_body['errorMessage'] = jbody.get('errorMessage')\n else:\n parsed_body = jbody\n response.body = parsed_body\n return response", "docstring": "Get the list of tradeable instruments for the given Account. 
The list\nof tradeable instruments is dependent on the regulatory division that\nthe Account is located in, thus should be the same for all Accounts\nowned by a single user.\n\nArgs:\naccountID:\nAccount Identifier\ninstruments:\nList of instruments to query specifically.\n\nReturns:\nv20.response.Response containing the results from submitting the\nrequest", "source": "codesearchnet"} {"code": "def init_from_adversarial_batches_write_to_datastore(self, submissions, adv_batches):\n idx = 0\n for s_id in iterkeys(submissions.defenses):\n for adv_id in iterkeys(adv_batches.data):\n class_batch_id = CLASSIFICATION_BATCH_ID_PATTERN.format(idx)\n idx += 1\n self.data[class_batch_id] = {'adversarial_batch_id': adv_id, 'submission_id': s_id, 'result_path': os.path.join(self._round_name, CLASSIFICATION_BATCHES_SUBDIR, (((s_id + '_') + adv_id) + '.csv'))}\n client = self._datastore_client\n with client.no_transact_batch() as batch:\n for (key, value) in iteritems(self.data):\n entity = client.entity(client.key(KIND_CLASSIFICATION_BATCH, key))\n entity.update(value)\n batch.put(entity)", "docstring": "Populates data from adversarial batches and writes to datastore.\n\nArgs:\nsubmissions: instance of CompetitionSubmissions\nadv_batches: instance of AversarialBatches", "source": "codesearchnet"} {"code": "def export(self, last_checkpoint, output_dir):\n logging.info('Exporting prediction graph to %s', output_dir)\n with tf.Session(graph=tf.Graph()) as sess:\n (inputs, outputs) = self.build_prediction_graph()\n signature_def_map = {'serving_default': signature_def_utils.predict_signature_def(inputs, outputs)}\n init_op = tf.global_variables_initializer()\n sess.run(init_op)\n self.restore_from_checkpoint(sess, self.inception_checkpoint_file, last_checkpoint)\n init_op_serving = control_flow_ops.group(variables.local_variables_initializer(), tf.tables_initializer())\n builder = saved_model_builder.SavedModelBuilder(output_dir)\n builder.add_meta_graph_and_variables(sess, [tag_constants.SERVING], signature_def_map=signature_def_map, legacy_init_op=init_op_serving)\n builder.save(False)", "docstring": "Builds a prediction graph and xports the model.\n\nArgs:\nlast_checkpoint: Path to the latest checkpoint file from training.\noutput_dir: Path to the folder to be used to output the model.", "source": "codesearchnet"} {"code": "def read_file(*components, **kwargs):\n must_exist = kwargs.get('must_exist', True)\n if must_exist:\n path = fs.must_exist(*components)\n else:\n path = fs.path(*components)\n try:\n with open(path) as infile:\n return loads(infile.read())\n except ValueError as e:\n raise ValueError(\"malformed JSON file '{path}'. 
Message from parser: {err}\".format(path=fs.basename(path), err=str(e)))\n except IOError as e:\n if (not must_exist):\n return {}\n else:\n return e", "docstring": "Load a JSON data blob.\n\nArguments:\npath (str): Path to file.\nmust_exist (bool, otional): If False, return empty dict if file does\nnot exist.\n\nReturns:\narray or dict: JSON data.\n\nRaises:\nFile404: If path does not exist, and must_exist is True.\nInvalidFile: If JSON is malformed.", "source": "codesearchnet"} {"code": "def activate(self, experiment_key, user_id, attributes=None):\n if (not self.is_valid):\n self.logger.error(enums.Errors.INVALID_DATAFILE.format('activate'))\n return None\n if (not validator.is_non_empty_string(experiment_key)):\n self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))\n return None\n if (not isinstance(user_id, string_types)):\n self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))\n return None\n variation_key = self.get_variation(experiment_key, user_id, attributes)\n if (not variation_key):\n self.logger.info(('Not activating user \"%s\".' % user_id))\n return None\n experiment = self.config.get_experiment_from_key(experiment_key)\n variation = self.config.get_variation_from_key(experiment_key, variation_key)\n self.logger.info(('Activating user \"%s\" in experiment \"%s\".' % (user_id, experiment.key)))\n self._send_impression_event(experiment, variation, user_id, attributes)\n return variation.key", "docstring": "Buckets visitor and sends impression event to Optimizely.\n\nArgs:\nexperiment_key: Experiment which needs to be activated.\nuser_id: ID for user.\nattributes: Dict representing user attributes and values which need to be recorded.\n\nReturns:\nVariation key representing the variation the user will be bucketed in.\nNone if user is not in experiment or if experiment is not Running.", "source": "codesearchnet"} {"code": "def read(self, file_des, num_bytes):\n \n file_handle = self.filesystem.get_open_file(file_des)\n file_handle.raw_io = True\n return file_handle.read(num_bytes)", "docstring": "Read number of bytes from a file descriptor, returns bytes read.\n\nArgs:\nfile_des: An integer file descriptor for the file object requested.\nnum_bytes: Number of bytes to read from file.\n\nReturns:\nBytes read from file.\n\nRaises:\nOSError: bad file descriptor.\nTypeError: if file descriptor is not an integer.", "source": "juraj-google-style"} {"code": "def report_source_lineage(path):\n FileSystems.get_filesystem(path).report_lineage(path, Lineage.sources())", "docstring": "Report source :class:`~apache_beam.metrics.metric.Lineage`.\n\nArgs:\npath: string path to be reported.", "source": "github-repos"} {"code": "def _get_num_slurm_tasks():\n return int(_get_slurm_var('STEP_NUM_TASKS'))", "docstring": "Returns the number of SLURM tasks of the current job step.\n\nReturns:\nThe number of tasks as an int", "source": "github-repos"} {"code": "def box_predictor(self, image_feats: torch.FloatTensor, feature_map: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:\n pred_boxes = self.box_head(image_feats)\n if interpolate_pos_encoding:\n _, num_patches_height, num_patches_width, _ = feature_map.shape\n box_bias = self.compute_box_bias(num_patches_height, num_patches_width)\n else:\n box_bias = self.box_bias\n box_bias = box_bias.to(feature_map.device)\n pred_boxes += box_bias\n pred_boxes = self.sigmoid(pred_boxes)\n return pred_boxes", "docstring": "Args:\nimage_feats:\nFeatures extracted from the image, returned by the 
`image_text_embedder` method.\nfeature_map:\nA spatial re-arrangement of image_features, also returned by the `image_text_embedder` method.\ninterpolate_pos_encoding:\nWhether to interpolate the pre-trained position encodings.\nReturns:\npred_boxes:\nList of predicted boxes (cxcywh normalized to 0, 1) nested within a dictionary.", "source": "github-repos"} {"code": "def seek(self, offset, whence=os.SEEK_SET):\n \n self._check_open()\n\n self._buffer.reset()\n self._buffer_future = None\n\n if whence == os.SEEK_SET:\n self._offset = offset\n elif whence == os.SEEK_CUR:\n self._offset += offset\n elif whence == os.SEEK_END:\n self._offset = self._file_size + offset\n else:\n raise ValueError('Whence mode %s is invalid.' % str(whence))\n\n self._offset = min(self._offset, self._file_size)\n self._offset = max(self._offset, 0)\n if self._remaining():\n self._request_next_buffer()", "docstring": "Set the file's current offset.\n\nNote if the new offset is out of bound, it is adjusted to either 0 or EOF.\n\nArgs:\noffset: seek offset as number.\nwhence: seek mode. Supported modes are os.SEEK_SET (absolute seek),\nos.SEEK_CUR (seek relative to the current position), and os.SEEK_END\n(seek relative to the end, offset should be negative).\n\nRaises:\nIOError: When this buffer is closed.\nValueError: When whence is invalid.", "source": "juraj-google-style"} {"code": "def get_events_for_subscription(access_token, subscription_id, start_timestamp):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/microsoft.insights/eventtypes/management/values?api-version=', INSIGHTS_API, \"&$filter=eventTimestamp ge '\", start_timestamp, \"'\"])\n return do_get(endpoint, access_token)", "docstring": "Get the insights events for a subscription since the specified timestamp.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nstart_timestamp (str): timestamp to get events from. E.g. '2017-05-01T00:00:00.0000000Z'.\nReturns:\nHTTP response. JSON body of insights events.", "source": "codesearchnet"} {"code": "def device_placement(self):\n if self.framework == 'tf':\n with tf.device('/CPU:0' if self.device == -1 else f'/device:GPU:{self.device}'):\n yield\n elif self.device.type == 'cuda':\n with torch.cuda.device(self.device):\n yield\n elif self.device.type == 'mlu':\n with torch.mlu.device(self.device):\n yield\n elif self.device.type == 'musa':\n with torch.musa.device(self.device):\n yield\n elif self.device.type == 'xpu':\n with torch.xpu.device(self.device):\n yield\n else:\n yield", "docstring": "Context Manager allowing tensor allocation on the user-specified device in framework agnostic way.\n\nReturns:\nContext manager\n\nExamples:\n\n```python\n# Explicitly ask for tensor allocation on CUDA device :0\npipe = pipeline(..., device=0)\nwith pipe.device_placement():\n# Every framework specific tensor allocation will be done on the request device\noutput = pipe(...)\n```", "source": "github-repos"} {"code": "def AddLogFileOptions(self, argument_group):\n \n argument_group.add_argument(\n '--logfile', '--log_file', '--log-file', action='store',\n metavar='FILENAME', dest='log_file', type=str, default='', help=(\n 'Path of the file in which to store log messages, by default '\n 'this file will be named: \"{0:s}-YYYYMMDDThhmmss.log.gz\". 
Note '\n 'that the file will be gzip compressed if the extension is '\n '\".gz\".').format(self.NAME))", "docstring": "Adds the log file option to the argument group.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "juraj-google-style"} {"code": "def is_treshold_reached(self, scraped_request):\n for route in self.__routing_options.routes:\n if re.compile(route).match(scraped_request.url):\n count_key = (str(route) + scraped_request.method)\n if (count_key in self.__routing_count.keys()):\n return (self.__routing_count[count_key] >= self.__routing_options.minimum_threshold)\n return False", "docstring": "Check if similar requests to the given requests have already been crawled X times. Where X is the\nminimum treshold amount from the options.\n\nArgs:\nscraped_request (:class:`nyawc.http.Request`): The request that possibly reached the minimum treshold.\n\nReturns:\nbool: True if treshold reached, false otherwise.", "source": "codesearchnet"} {"code": "def get_summary_op():\n summary_op = ops.get_collection(ops.GraphKeys.SUMMARY_OP)\n if summary_op is not None:\n if summary_op:\n summary_op = summary_op[0]\n else:\n summary_op = None\n if summary_op is None:\n summary_op = merge_all_summaries()\n if summary_op is not None:\n ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)\n return summary_op", "docstring": "Returns a single Summary op that would run all summaries.\n\nEither existing one from `SUMMARY_OP` collection or merges all existing\nsummaries.\n\nReturns:\nIf no summaries were collected, returns None. Otherwise returns a scalar\n`Tensor` of type `string` containing the serialized `Summary` protocol\nbuffer resulting from the merging.", "source": "github-repos"} {"code": "def AddFilterOptions(self, argument_group):\n \n names = ['artifact_filters', 'date_filters', 'filter_file']\n helpers_manager.ArgumentHelperManager.AddCommandLineArguments(\n argument_group, names=names)\n\n argument_group.add_argument(\n '-x', '--extensions', dest='extensions_string', action='store',\n type=str, metavar='EXTENSIONS', help=(\n 'Filter on file name extensions. This option accepts multiple '\n 'multiple comma separated values e.g. \"csv,docx,pst\".'))\n\n argument_group.add_argument(\n '--names', dest='names_string', action='store',\n type=str, metavar='NAMES', help=(\n 'Filter on file names. This option accepts a comma separated '\n 'string denoting all file names, e.g. -x '\n '\"NTUSER.DAT,UsrClass.dat\".'))\n\n argument_group.add_argument(\n '--signatures', dest='signature_identifiers', action='store',\n type=str, metavar='IDENTIFIERS', help=(\n 'Filter on file format signature identifiers. This option '\n 'accepts multiple comma separated values e.g. \"esedb,lnk\". '\n 'Use \"list\" to show an overview of the supported file format '\n 'signatures.'))", "docstring": "Adds the filter options to the argument group.\n\nArgs:\nargument_group (argparse._ArgumentGroup): argparse argument group.", "source": "juraj-google-style"} {"code": "def start_day_cycle(self, day_length):\n \n if day_length <= 0:\n raise HolodeckException(\"The given day length should be between above 0!\")\n\n self._should_write_to_command_buffer = True\n command_to_send = DayCycleCommand(True)\n command_to_send.set_day_length(day_length)\n self._commands.add_command(command_to_send)", "docstring": "Queue up a day cycle command to start the day cycle. 
It will be applied when `tick` or `step` is called next.\nThe sky sphere will now update each tick with an updated sun angle as it moves about the sky. The length of a\nday will be roughly equivalent to the number of minutes given.\n\nArgs:\nday_length (int): The number of minutes each day will be.", "source": "juraj-google-style"} {"code": "def learn_q(self, predicted_q_arr, real_q_arr):\n \n \n\n loss = self.__computable_loss.compute_loss(predicted_q_arr, real_q_arr)\n delta_arr = self.__computable_loss.compute_delta(predicted_q_arr, real_q_arr)\n delta_arr = self.__cnn.back_propagation(delta_arr)\n self.__cnn.optimize(self.__learning_rate, 1)\n self.__loss_list.append(loss)", "docstring": "Learn Q-Values.\n\nArgs:\npredicted_q_arr: `np.ndarray` of predicted Q-Values.\nreal_q_arr: `np.ndarray` of real Q-Values.", "source": "juraj-google-style"} {"code": "def html_serialize(self, attributes, max_length=None):\n doc = ET.Element('span')\n for chunk in self:\n if (chunk.has_cjk() and (not (max_length and (len(chunk.word) > max_length)))):\n ele = ET.Element('span')\n ele.text = chunk.word\n for (key, val) in attributes.items():\n ele.attrib[key] = val\n doc.append(ele)\n elif doc.getchildren():\n if (doc.getchildren()[(- 1)].tail is None):\n doc.getchildren()[(- 1)].tail = chunk.word\n else:\n doc.getchildren()[(- 1)].tail += chunk.word\n elif (doc.text is None):\n doc.text = chunk.word\n else:\n doc.text += chunk.word\n result = ET.tostring(doc, encoding='utf-8').decode('utf-8')\n result = html5lib.serialize(html5lib.parseFragment(result), sanitize=True, quote_attr_values='always')\n return result", "docstring": "Returns concatenated HTML code with SPAN tag.\n\nArgs:\nattributes (dict): A map of name-value pairs for attributes of output\nSPAN tags.\nmax_length (:obj:`int`, optional): Maximum length of span enclosed chunk.\n\nReturns:\nThe organized HTML code. 
(str)", "source": "codesearchnet"} {"code": "def diff_dictionaries(old_dict, new_dict):\n \n\n old_set = set(old_dict)\n new_set = set(new_dict)\n\n added_set = new_set - old_set\n removed_set = old_set - new_set\n common_set = old_set & new_set\n\n changes = 0\n output = []\n for key in added_set:\n changes += 1\n output.append(DictValue(key, None, new_dict[key]))\n\n for key in removed_set:\n changes += 1\n output.append(DictValue(key, old_dict[key], None))\n\n for key in common_set:\n output.append(DictValue(key, old_dict[key], new_dict[key]))\n if str(old_dict[key]) != str(new_dict[key]):\n changes += 1\n\n output.sort(key=attrgetter(\"key\"))\n return [changes, output]", "docstring": "Diffs two single dimension dictionaries\n\nReturns the number of changes and an unordered list\nexpressing the common entries and changes.\n\nArgs:\nold_dict(dict): old dictionary\nnew_dict(dict): new dictionary\n\nReturns: list()\nint: number of changed records\nlist: [DictValue]", "source": "juraj-google-style"} {"code": "def get_ordered_names(self, features):\n idxs = np.where(np.in1d(self.data.columns.values, np.array(features)))[0]\n return list(self.data.columns[idxs].values)", "docstring": "Given a list of features, returns features in order that they\nappear in database.\n\nArgs:\nfeatures (list): A list or 1D numpy array of named features to\nreturn.\n\nReturns:\nA list of features in order they appear in database.", "source": "codesearchnet"} {"code": "def encode(self, builder: expressions.Builder, select_scalars_as_array: bool=True, use_resource_alias: bool=False) -> str:\n self._use_resource_alias = use_resource_alias\n result = self.visit(builder.node)\n if select_scalars_as_array or _fhir_path_data_types.returns_collection(builder.node.return_type):\n return f'(SELECT COLLECT_LIST({result.sql_alias})\\nFROM {result.to_subquery()}\\nWHERE {result.sql_alias} IS NOT NULL)'\n else:\n return f'{result.to_subquery()}'", "docstring": "Returns a Spark SQL encoding of a FHIRPath expression.\n\nIf select_scalars_as_array is True, the resulting Spark SQL encoding\nalways returns a top-level `COLLECT_LIST`, whose elements are non-`NULL`.\nOtherwise the resulting SQL will attempt to return a scalar when possible\nand only return a `COLLECT_LIST` for actual collections.\n\nArgs:\nbuilder: The FHIR Path builder to encode as a SQL string.\nselect_scalars_as_array: When True, always builds SQL selecting results in\nan array. When False, attempts to build SQL returning scalars where\npossible.\nuse_resource_alias: Determines whether it is necessary to call the\nresource table directly through an alias.\n\nReturns:\nA Spark SQL representation of the provided FHIRPath expression.", "source": "github-repos"} {"code": "def _checkResponseByteCount(payload):\n \n POSITION_FOR_GIVEN_NUMBER = 0\n NUMBER_OF_BYTES_TO_SKIP = 1\n\n _checkString(payload, minlength=1, description='payload')\n\n givenNumberOfDatabytes = ord(payload[POSITION_FOR_GIVEN_NUMBER])\n countedNumberOfDatabytes = len(payload) - NUMBER_OF_BYTES_TO_SKIP\n\n if givenNumberOfDatabytes != countedNumberOfDatabytes:\n errortemplate = 'Wrong given number of bytes in the response: {0}, but counted is {1} as data payload length is {2}.' 
+ \\\n ' The data payload is: {3!r}'\n errortext = errortemplate.format(givenNumberOfDatabytes, countedNumberOfDatabytes, len(payload), payload)\n raise ValueError(errortext)", "docstring": "Check that the number of bytes as given in the response is correct.\n\nThe first byte in the payload indicates the length of the payload (first byte not counted).\n\nArgs:\npayload (string): The payload\n\nRaises:\nTypeError, ValueError", "source": "juraj-google-style"} {"code": "def update(self, membershipId, isModerator=None, **request_parameters):\n check_type(membershipId, basestring, may_be_none=False)\n check_type(isModerator, bool)\n put_data = dict_from_items_with_values(request_parameters, isModerator=isModerator)\n json_data = self._session.put(((API_ENDPOINT + '/') + membershipId), json=put_data)\n return self._object_factory(OBJECT_TYPE, json_data)", "docstring": "Update a team membership, by ID.\n\nArgs:\nmembershipId(basestring): The team membership ID.\nisModerator(bool): Set to True to make the person a team moderator.\n**request_parameters: Additional request parameters (provides\nsupport for parameters that may be added in the future).\n\nReturns:\nTeamMembership: A TeamMembership object with the updated Webex\nTeams team-membership details.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "codesearchnet"} {"code": "def from_state(cls, state, alg):\n return cls(alg=alg, state=state)", "docstring": "Creates a generator from a state.\n\nSee `__init__` for description of `state` and `alg`.\n\nArgs:\nstate: the new state.\nalg: the RNG algorithm.\n\nReturns:\nThe new generator.", "source": "github-repos"} {"code": "def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):\n \n \n iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)\n if iso3 is not None:\n return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)\n return None", "docstring": "Get country name from ISO2 code\n\nArgs:\niso2 (str): ISO2 code for which to get country information\nuse_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\nexception (Optional[ExceptionUpperBound]): An exception to raise if country not found. 
Defaults to None.\n\nReturns:\nOptional[Dict[str]]: Country information", "source": "juraj-google-style"} {"code": "def start_centroid_distance(item_a, item_b, max_value):\n \n start_a = item_a.center_of_mass(item_a.times[0])\n start_b = item_b.center_of_mass(item_b.times[0])\n start_distance = np.sqrt((start_a[0] - start_b[0]) ** 2 + (start_a[1] - start_b[1]) ** 2)\n return np.minimum(start_distance, max_value) / float(max_value)", "docstring": "Distance between the centroids of the first step in each object.\n\nArgs:\nitem_a: STObject from the first set in TrackMatcher\nitem_b: STObject from the second set in TrackMatcher\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "juraj-google-style"} {"code": "def is_profile_supported(self, conformance_clause, authentication_suite):\n return (self.is_conformance_clause_supported(conformance_clause) and self.is_authentication_suite_supported(authentication_suite))", "docstring": "Check if a profile is supported by the client.\n\nArgs:\nconformance_clause (ConformanceClause):\nauthentication_suite (AuthenticationSuite):\n\nReturns:\nbool: True if the profile is supported, False otherwise.\n\nExample:\n>>> client.is_profile_supported(\n... ConformanceClause.DISCOVER_VERSIONS,\n... AuthenticationSuite.BASIC)\nTrue", "source": "codesearchnet"} {"code": "def execute(self, shell = True):\n\t\t\n\t\tprocess = Popen(self.command, stdout=PIPE, stderr=PIPE, shell=shell)\n\t\tself.output, self.errors = process.communicate()", "docstring": "Executes the command setted into class\n\nArgs:\nshell (boolean): Set True if command is a shell command. Default: True", "source": "juraj-google-style"} {"code": "def __init__(self, pya: pyaudio.PyAudio, substream_name: str='realtime', audio_format: AudioFormats=pyaudio.paInt16, channels: int=1, rate: int=24000):\n self._pya = pya\n self._format = audio_format\n self._channels = channels\n self._rate = rate\n self._substream_name = substream_name", "docstring": "Initializes the audio input processor.\n\nArgs:\npya: The pyaudio object to use for capturing audio.\nsubstream_name: The name of the substream that will contain all the audio\nparts captured from the mic.\naudio_format: The audio format to use for the audio.\nchannels: The number of channels in the audio.\nrate: The sample rate of the audio.", "source": "github-repos"} {"code": "def raise_option_error(parser, option, msg):\n \n msg = '{} error: {}'.format(option, msg)\n msg = textwrap.fill(' '.join(msg.split()))\n parser.error(msg)", "docstring": "Raise an option parsing error using parser.error().\n\nArgs:\nparser: an OptionParser instance.\noption: an Option instance.\nmsg: the error text.", "source": "juraj-google-style"} {"code": "def _avro_schema(read_session):\n json_schema = json.loads(read_session.avro_schema.schema)\n column_names = tuple((field['name'] for field in json_schema['fields']))\n return (fastavro.parse_schema(json_schema), column_names)", "docstring": "Extract and parse Avro schema from a read session.\n\nArgs:\nread_session ( \\\n~google.cloud.bigquery_storage_v1beta1.types.ReadSession \\\n):\nThe read session associated with this read rows stream. 
This\ncontains the schema, which is required to parse the data\nblocks.\n\nReturns:\nTuple[fastavro.schema, Tuple[str]]:\nA parsed Avro schema, using :func:`fastavro.schema.parse_schema`\nand the column names for a read session.", "source": "codesearchnet"} {"code": "def resized_crop(img, i, j, h, w, size, interpolation=Image.BILINEAR):\n assert _is_pil_image(img), 'img should be PIL Image'\n img = crop(img, i, j, h, w)\n img = resize(img, size, interpolation)\n return img", "docstring": "Crop the given PIL Image and resize it to desired size.\n\nNotably used in :class:`~torchvision.transforms.RandomResizedCrop`.\n\nArgs:\nimg (PIL Image): Image to be cropped.\ni (int): i in (i,j) i.e coordinates of the upper left corner\nj (int): j in (i,j) i.e coordinates of the upper left corner\nh (int): Height of the cropped image.\nw (int): Width of the cropped image.\nsize (sequence or int): Desired output size. Same semantics as ``resize``.\ninterpolation (int, optional): Desired interpolation. Default is\n``PIL.Image.BILINEAR``.\nReturns:\nPIL Image: Cropped image.", "source": "codesearchnet"} {"code": "def check_connection(host='localhost', port=27017, username=None, password=None, authdb=None, max_delay=1):\n if (username and password):\n uri = 'mongodb:\n log_uri = 'mongodb:\n else:\n log_uri = uri = ('mongodb:\n LOG.info('Test connection with uri: %s', log_uri)\n client = MongoClient(uri, serverSelectionTimeoutMS=max_delay)\n try:\n client.server_info()\n except (ServerSelectionTimeoutError, OperationFailure) as err:\n LOG.warning(err)\n return False\n return True", "docstring": "Check if a connection could be made to the mongo process specified\n\nArgs:\nhost(str)\nport(int)\nusername(str)\npassword(str)\nauthdb (str): database to to for authentication\nmax_delay(int): Number of milliseconds to wait for connection\n\nReturns:\nbool: If connection could be established", "source": "codesearchnet"} {"code": "def frozen_saver(root_trackable):\n named_saveable_objects, registered_savers = save_util_v1.frozen_saveables_and_savers(graph_view_lib.ObjectGraphView(root_trackable))\n return functional_saver.MultiDeviceSaver.from_saveables(named_saveable_objects, registered_savers)", "docstring": "Creates a static `tf.compat.v1.train.Saver` from a trackable object.\n\nThe returned `Saver` saves object-based checkpoints, but these checkpoints\nwill no longer reflect structural changes to the object graph, only changes to\nthe values of `Variable`s added as dependencies of the root object before\n`freeze` was called.\n\n`restore` works on the returned `Saver`, but requires that the object graph of\nthe checkpoint being loaded exactly matches the object graph when `freeze` was\ncalled. 
This is in contrast the object-based restore performed by\n`tf.train.Checkpoint` which attempts a fuzzy matching between a checkpoint's\nobject graph and the current Python object graph.\n\nArgs:\nroot_trackable: A trackable object to save.\n\nReturns:\nA saver which saves object-based checkpoints for the object graph frozen at\nthe time `frozen_saver` was called.", "source": "github-repos"} {"code": "def learn(self, grad_arr, fix_opt_flag=False):\n \n if grad_arr.ndim != 2:\n grad_arr = grad_arr.reshape((grad_arr.shape[0], -1))\n delta_arr = self.__nn.back_propagation(grad_arr)\n if fix_opt_flag is False:\n self.__nn.optimize(self.__learning_rate, 1)\n \n return delta_arr", "docstring": "Update this Discriminator by ascending its stochastic gradient.\n\nArgs:\ngrad_arr: `np.ndarray` of gradients.\nfix_opt_flag: If `False`, no optimization in this model will be done.\n\nReturns:\n`np.ndarray` of delta or gradients.", "source": "juraj-google-style"} {"code": "def parse_delta(__string: str) -> datetime.timedelta:\n \n if not __string:\n return datetime.timedelta(0)\n match = re.fullmatch(r, __string, re.VERBOSE)\n if not match:\n raise ValueError('Unable to parse delta {!r}'.format(__string))\n match_dict = {k: int(v) if v else 0 for k, v in match.groupdict().items()}\n return datetime.timedelta(**match_dict)", "docstring": "Parse ISO-8601 duration string.\n\nArgs:\n__string: Duration string to parse\nReturns:\nParsed delta object", "source": "juraj-google-style"} {"code": "def _add_np_doc(doc, np_fun_name, np_f, link):\n flag = get_np_doc_form()\n if flag == 'inlined':\n if _has_docstring(np_f):\n doc += 'Documentation for `numpy.%s`:\\n\\n' % np_fun_name\n doc += np_f.__doc__.replace('>>>', '>')\n elif isinstance(flag, str):\n if link is None:\n url = generate_link(flag, np_fun_name)\n elif isinstance(link, AliasOf):\n url = generate_link(flag, link.value)\n elif isinstance(link, Link):\n url = link.value\n else:\n url = None\n if url is not None:\n if is_check_link():\n import requests\n r = requests.head(url)\n if r.status_code != 200:\n raise ValueError(f'Check link failed at [{url}] with status code {r.status_code}. Argument `np_fun_name` is {np_fun_name}.')\n doc += 'See the NumPy documentation for [`numpy.%s`](%s).' % (np_fun_name, url)\n return doc", "docstring": "Appends the numpy docstring to `doc`, according to `set_np_doc_form`.\n\nSee `set_np_doc_form` for how it controls the form of the numpy docstring.\n\nArgs:\ndoc: the docstring to be appended to.\nnp_fun_name: the name of the numpy function.\nnp_f: (optional) the numpy function.\nlink: (optional) which link to use. 
See `np_doc` for details.\n\nReturns:\n`doc` with numpy docstring appended.", "source": "github-repos"} {"code": "def finish_operation(self, conn_or_internal_id, success, *args):\n \n\n data = {\n 'id': conn_or_internal_id,\n 'success': success,\n 'callback_args': args\n }\n\n action = ConnectionAction('finish_operation', data, sync=False)\n self._actions.put(action)", "docstring": "Finish an operation on a connection.\n\nArgs:\nconn_or_internal_id (string, int): Either an integer connection id or a string\ninternal_id\nsuccess (bool): Whether the operation was successful\nfailure_reason (string): Optional reason why the operation failed\nresult (dict): Optional dictionary containing the results of the operation", "source": "juraj-google-style"} {"code": "def clone(self, name=None):\n if (name is None):\n name = (self.module_name + '_clone')\n return type(self)(output_channels=self.output_channels, kernel_shape=self._kernel_shape, stride=self._stride, rate=self._rate, padding=self._padding, use_bias=self._use_bias, initializers=self._initializers, partitioners=self._partitioners, regularizers=self._regularizers, mask=self._mask, data_format=self._data_format, custom_getter=self._custom_getter, name=name)", "docstring": "Returns a cloned `_ConvND` module.\n\nArgs:\nname: Optional string assigning name of cloned module. The default name\nis constructed by appending \"_clone\" to `self.module_name`.\n\nReturns:\nA copy of the current class.", "source": "codesearchnet"} {"code": "def get_model_from_layers(model_layers, input_shape=None, input_dtype=None, name=None, input_ragged=None, input_sparse=None, model_type=None):\n if model_type is None:\n model_type = get_model_type()\n if model_type == 'subclass':\n inputs = None\n if input_ragged or input_sparse:\n inputs = layers.Input(shape=input_shape, dtype=input_dtype, ragged=input_ragged, sparse=input_sparse)\n return _SubclassModel(model_layers, name=name, input_tensor=inputs)\n if model_type == 'subclass_custom_build':\n layer_generating_func = lambda: model_layers\n return _SubclassModelCustomBuild(layer_generating_func, name=name)\n if model_type == 'sequential':\n model = models.Sequential(name=name)\n if input_shape:\n model.add(layers.InputLayer(input_shape=input_shape, dtype=input_dtype, ragged=input_ragged, sparse=input_sparse))\n for layer in model_layers:\n model.add(layer)\n return model\n if model_type == 'functional':\n if not input_shape:\n raise ValueError('Cannot create a functional model from layers with no input shape.')\n inputs = layers.Input(shape=input_shape, dtype=input_dtype, ragged=input_ragged, sparse=input_sparse)\n outputs = inputs\n for layer in model_layers:\n outputs = layer(outputs)\n return models.Model(inputs, outputs, name=name)\n raise ValueError('Unknown model type {}'.format(model_type))", "docstring": "Builds a model from a sequence of layers.\n\nArgs:\nmodel_layers: The layers used to build the network.\ninput_shape: Shape tuple of the input or 'TensorShape' instance.\ninput_dtype: Datatype of the input.\nname: Name for the model.\ninput_ragged: Boolean, whether the input data is a ragged tensor.\ninput_sparse: Boolean, whether the input data is a sparse tensor.\nmodel_type: One of \"subclass\", \"subclass_custom_build\", \"sequential\", or\n\"functional\". 
When None, defaults to `get_model_type`.\n\nReturns:\nA Keras model.", "source": "github-repos"} {"code": "def get_preparer(mixed: Union[SQLCompiler, Engine,\n Dialect]) -> IdentifierPreparer:\n \n dialect = get_dialect(mixed)\n \n return dialect.preparer(dialect)", "docstring": "Returns the SQLAlchemy :class:`IdentifierPreparer` in use for the dialect\nbeing used.\n\nArgs:\nmixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or\n:class:`Dialect` object\n\nReturns: an :class:`IdentifierPreparer`", "source": "juraj-google-style"} {"code": "def with_rank(self, rank):\n \n try:\n return self.merge_with(unknown_shape(ndims=rank))\n except ValueError:\n raise ValueError(\"Shape %s must have rank %d\" % (self, rank))", "docstring": "Returns a shape based on `self` with the given rank.\n\nThis method promotes a completely unknown shape to one with a\nknown rank.\n\nArgs:\nrank: An integer.\n\nReturns:\nA shape that is at least as specific as `self` with the given rank.\n\nRaises:\nValueError: If `self` does not represent a shape with the given `rank`.", "source": "juraj-google-style"} {"code": "def set_userdata(self, key: str, value: Any) -> None:\n self._userdata[key] = value", "docstring": "Sets user data.\n\nUser data can be used for storing state associated with the DNASpec, and\nis not persisted across processes or during serialization. Use `hints` to\ncarry persistent objects for the DNASpec.\n\nArgs:\nkey: Key of the user data.\nvalue: Value of the user data.", "source": "github-repos"} {"code": "def write_config_file(self, parsed_namespace, output_file_paths, exit_after=False):\n \n for output_file_path in output_file_paths:\n \n try:\n with open(output_file_path, \"w\") as output_file:\n pass\n except IOError as e:\n raise ValueError(\"Couldn't open %s for writing: %s\" % (\n output_file_path, e))\n if output_file_paths:\n \n config_items = self.get_items_for_config_file_output(\n self._source_to_settings, parsed_namespace)\n file_contents = self._config_file_parser.serialize(config_items)\n for output_file_path in output_file_paths:\n with open(output_file_path, \"w\") as output_file:\n output_file.write(file_contents)\n message = \"Wrote config file to \" + \", \".join(output_file_paths)\n if exit_after:\n self.exit(0, message)\n else:\n print(message)", "docstring": "Write the given settings to output files.\n\nArgs:\nparsed_namespace: namespace object created within parse_known_args()\noutput_file_paths: any number of file paths to write the config to\nexit_after: whether to exit the program after writing the config files", "source": "juraj-google-style"} {"code": "def mock(self, url=None, **kw):\n \n \n if kw.get('activate'):\n kw.pop('activate')\n self.activate()\n\n \n mock = Mock(url=url, **kw)\n \n mock._engine = self\n \n self.add_mock(mock)\n\n \n return mock", "docstring": "Creates and registers a new HTTP mock in the current engine.\n\nArguments:\nurl (str): request URL to mock.\nactivate (bool): force mock engine activation.\nDefaults to ``False``.\n**kw (mixed): variadic keyword arguments for ``Mock`` constructor.\n\nReturns:\npook.Mock: new mock instance.", "source": "juraj-google-style"} {"code": "def to_bqstorage(self):\n if (bigquery_storage_v1beta1 is None):\n raise ValueError(_NO_BQSTORAGE_ERROR)\n table_ref = bigquery_storage_v1beta1.types.TableReference()\n table_ref.project_id = self._project\n table_ref.dataset_id = self._dataset_id\n table_id = self._table_id\n if ('@' in table_id):\n table_id = table_id.split('@')[0]\n if ('$' in table_id):\n table_id = 
table_id.split('$')[0]\n table_ref.table_id = table_id\n return table_ref", "docstring": "Construct a BigQuery Storage API representation of this table.\n\nInstall the ``google-cloud-bigquery-storage`` package to use this\nfeature.\n\nIf the ``table_id`` contains a partition identifier (e.g.\n``my_table$201812``) or a snapshot identifier (e.g.\n``mytable@1234567890``), it is ignored. Use\n:class:`google.cloud.bigquery_storage_v1beta1.types.TableReadOptions`\nto filter rows by partition. Use\n:class:`google.cloud.bigquery_storage_v1beta1.types.TableModifiers`\nto select a specific snapshot to read from.\n\nReturns:\ngoogle.cloud.bigquery_storage_v1beta1.types.TableReference:\nA reference to this table in the BigQuery Storage API.\n\nRaises:\nValueError:\nIf the :mod:`google.cloud.bigquery_storage_v1beta1` module\ncannot be imported.", "source": "codesearchnet"} {"code": "def __init__(self, proginfo, directory='.'):\n \n self.info = proginfo\n self.dirname = directory", "docstring": "Create base object.\nArgs:\nproginfo A ProgramInfo object\ndirectory Directory to write output into", "source": "juraj-google-style"} {"code": "def _IsFlag(argument):\n return _IsSingleCharFlag(argument) or _IsMultiCharFlag(argument)", "docstring": "Determines if the argument is a flag argument.\n\nIf it starts with a hyphen and isn't a negative number, it's a flag.\n\nArgs:\nargument: A command line argument that may or may not be a flag.\nReturns:\nA boolean indicating whether the argument is a flag.", "source": "github-repos"} {"code": "def add_genstrings_comments_to_file(localization_file, genstrings_err):\n \n\n errors_to_log = [line for line in genstrings_err.splitlines() if \"used with multiple comments\" not in line]\n\n if len(errors_to_log) > 0:\n logging.warning(\"genstrings warnings:\\n%s\", \"\\n\".join(errors_to_log))\n\n loc_file = open_strings_file(localization_file, \"a\")\n\n regex_matches = re.findall(r'Warning: Key \"(.*?)\" used with multiple comments (\"[^\"]*\" (& \"[^\"]*\")+)',\n genstrings_err)\n\n logging.info(\"Adding multiple comments from genstrings output\")\n for regex_match in regex_matches:\n if len(regex_match) == 3:\n key = regex_match[0]\n comments = [comment.strip()[1:-1] for comment in regex_match[1].split(\"&\")]\n\n logging.info(\"Found key with %d comments: %s\", len(comments), key)\n\n loc_key = LocalizationEntry(comments, key, key)\n\n loc_file.write(unicode(loc_key))\n loc_file.write(u\"\\n\")\n\n loc_file.close()", "docstring": "Adds the comments produced by the genstrings script for duplicate keys.\n\nArgs:\nlocalization_file (str): The path to the strings file.", "source": "juraj-google-style"} {"code": "def insert_json(table=None,\n bulk_size=1000,\n concurrency=25,\n hosts=None,\n output_fmt=None):\n \n if not hosts:\n return print_only(table)\n\n queries = (to_insert(table, d) for d in dicts_from_stdin())\n bulk_queries = as_bulk_queries(queries, bulk_size)\n print('Executing inserts: bulk_size={} concurrency={}'.format(\n bulk_size, concurrency), file=sys.stderr)\n\n stats = Stats()\n with clients.client(hosts, concurrency=concurrency) as client:\n f = partial(aio.measure, stats, client.execute_many)\n try:\n aio.run_many(f, bulk_queries, concurrency)\n except clients.SqlException as e:\n raise SystemExit(str(e))\n try:\n print(format_stats(stats.get(), output_fmt))\n except KeyError:\n if not stats.sampler.values:\n raise SystemExit('No data received via stdin')\n raise", "docstring": "Insert JSON lines fed into stdin into a Crate cluster.\n\nIf no hosts are 
specified the statements will be printed.\n\nArgs:\ntable: Target table name.\nbulk_size: Bulk size of the insert statements.\nconcurrency: Number of operations to run concurrently.\nhosts: hostname:port pairs of the Crate nodes", "source": "juraj-google-style"} {"code": "def post_process_depth_estimation(self, outputs: 'DepthEstimatorOutput', target_sizes: Optional[Union[TensorType, List[Tuple[int, int]], None]]=None) -> List[Dict[str, TensorType]]:\n requires_backends(self, 'torch')\n predicted_depth = outputs.predicted_depth\n if target_sizes is not None and len(predicted_depth) != len(target_sizes):\n raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the predicted depth')\n results = []\n target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes\n for depth, target_size in zip(predicted_depth, target_sizes):\n if target_size is not None:\n depth = depth[None, None, ...]\n depth = torch.nn.functional.interpolate(depth, size=target_size, mode='bicubic', align_corners=False)\n depth = depth.squeeze()\n results.append({'predicted_depth': depth})\n return results", "docstring": "Converts the raw output of [`DepthEstimatorOutput`] into final depth predictions and depth PIL images.\nOnly supports PyTorch.\n\nArgs:\noutputs ([`DepthEstimatorOutput`]):\nRaw outputs of the model.\ntarget_sizes (`TensorType` or `List[Tuple[int, int]]`, *optional*):\nTensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size\n(height, width) of each image in the batch. If left to None, predictions will not be resized.\n\nReturns:\n`List[Dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth\npredictions.", "source": "github-repos"} {"code": "def _Insert(cursor, table, values):\n \n precondition.AssertIterableType(values, dict)\n\n if not values: \n return\n\n column_names = list(sorted(values[0]))\n for value_dict in values:\n if set(column_names) != set(value_dict):\n raise ValueError(\"Given value dictionaries must have identical keys. \"\n \"Expecting columns {!r}, but got value {!r}\".format(\n column_names, value_dict))\n\n query = \"INSERT IGNORE INTO %s {cols} VALUES {vals}\" % table\n query = query.format(\n cols=mysql_utils.Columns(column_names),\n vals=mysql_utils.Placeholders(num=len(column_names), values=len(values)))\n\n values_list = []\n for values_dict in values:\n values_list.extend(values_dict[column] for column in column_names)\n\n cursor.execute(query, values_list)", "docstring": "Inserts one or multiple rows into the given table.\n\nArgs:\ncursor: The MySQL cursor to perform the insertion.\ntable: The table name, where rows should be inserted.\nvalues: A list of dicts, associating column names to values.", "source": "juraj-google-style"} {"code": "def get_channel(self, **kwargs):\n if self.compatibility_mode:\n if hasattr(self.chef_module, 'get_channel'):\n config.LOGGER.info('Calling get_channel... ')\n channel = self.chef_module.get_channel(**kwargs)\n if hasattr(self.chef_module, 'create_channel'):\n config.LOGGER.info('Calling create_channel... 
')\n channel = self.chef_module.create_channel(**kwargs)\n else:\n channel = None\n return channel\n elif hasattr(self, 'channel_info'):\n channel = ChannelNode(source_domain=self.channel_info['CHANNEL_SOURCE_DOMAIN'], source_id=self.channel_info['CHANNEL_SOURCE_ID'], title=self.channel_info['CHANNEL_TITLE'], thumbnail=self.channel_info.get('CHANNEL_THUMBNAIL'), language=self.channel_info.get('CHANNEL_LANGUAGE'), description=self.channel_info.get('CHANNEL_DESCRIPTION'))\n return channel\n else:\n raise NotImplementedError('BaseChef must overrride the get_channel method')", "docstring": "Call chef script's get_channel method in compatibility mode\n...or...\nCreate a `ChannelNode` from the Chef's `channel_info` class attribute.\n\nArgs:\nkwargs (dict): additional keyword arguments that `uploadchannel` received\nReturns: channel created from get_channel method or None", "source": "codesearchnet"} {"code": "def load_sgf(self, sgf_data):\n if ('\\n' not in sgf_data):\n with open(sgf_data, 'r') as infile:\n sgf_data = infile.read()\n model = DeviceModel()\n parser = SensorGraphFileParser()\n parser.parse_file(data=sgf_data)\n parser.compile(model)\n opt = SensorGraphOptimizer()\n opt.optimize(parser.sensor_graph, model=model)\n sensor_graph = parser.sensor_graph\n self._logger.info('Loading sensor_graph with %d nodes, %d streamers and %d configs', len(sensor_graph.nodes), len(sensor_graph.streamers), len(sensor_graph.config_database))\n self.sensor_graph.persisted_nodes = sensor_graph.dump_nodes()\n self.sensor_graph.persisted_streamers = sensor_graph.dump_streamers()\n self.sensor_graph.persisted_constants = []\n for (stream, value) in sorted(sensor_graph.constant_database.items(), key=(lambda x: x[0].encode())):\n reading = IOTileReading(stream.encode(), 0, value)\n self.sensor_graph.persisted_constants.append((stream, reading))\n self.sensor_graph.persisted_exists = True\n self.config_database.clear()\n for slot in sorted(sensor_graph.config_database, key=(lambda x: x.encode())):\n for (conf_var, (conf_type, conf_val)) in sorted(sensor_graph.config_database[slot].items()):\n self.config_database.add_direct(slot, conf_var, conf_type, conf_val)\n app_tag = sensor_graph.metadata_database.get('app_tag')\n app_version = sensor_graph.metadata_database.get('app_version')\n if (app_tag is not None):\n if (app_version is None):\n app_version = '0.0'\n self.app_info = (app_tag, app_version)", "docstring": "Load, persist a sensor_graph file.\n\nThe data passed in `sgf_data` can either be a path or the already\nloaded sgf lines as a string. It is determined to be sgf lines if\nthere is a '\\n' character in the data, otherwise it is interpreted as\na path.\n\nNote that this scenario just loads the sensor_graph directly into the\npersisted sensor_graph inside the device. 
You will still need to\nreset the device for the sensor_graph to be enabled and run.\n\nArgs:\nsgf_data (str): Either the path to an sgf file or its contents\nas a string.", "source": "codesearchnet"} {"code": "def to_df(self, varnames=None, ranefs=False, transformed=False, chains=None):\n names = self._filter_names(varnames, ranefs, transformed)\n if (chains is None):\n chains = list(range(self.n_chains))\n chains = listify(chains)\n data = [self.data[:, i, :] for i in chains]\n data = np.concatenate(data, axis=0)\n df = sum([self.level_dict[x] for x in names], [])\n df = pd.DataFrame({x: data[:, self.levels.index(x)] for x in df})\n return df", "docstring": "Returns the MCMC samples in a nice, neat pandas DataFrame with all MCMC chains\nconcatenated.\n\nArgs:\nvarnames (list): List of variable names to include; if None\n(default), all eligible variables are included.\nranefs (bool): Whether or not to include random effects in the\nreturned DataFrame. Default is False.\ntransformed (bool): Whether or not to include internally\ntransformed variables in the result. Default is False.\nchains (int, list): Index, or list of indexes, of chains to\nconcatenate. E.g., [1, 3] would concatenate the first and\nthird chains, and ignore any others. If None (default),\nconcatenates all available chains.", "source": "codesearchnet"} {"code": "def flush_redis_unsafe(redis_client=None):\n if (redis_client is None):\n ray.worker.global_worker.check_connected()\n redis_client = ray.worker.global_worker.redis_client\n keys = redis_client.keys('LOGFILE:*')\n if (len(keys) > 0):\n num_deleted = redis_client.delete(*keys)\n else:\n num_deleted = 0\n print('Deleted {} log files from Redis.'.format(num_deleted))\n keys = redis_client.keys('event_log:*')\n if (len(keys) > 0):\n num_deleted = redis_client.delete(*keys)\n else:\n num_deleted = 0\n print('Deleted {} event logs from Redis.'.format(num_deleted))", "docstring": "This removes some non-critical state from the primary Redis shard.\n\nThis removes the log files as well as the event log from Redis. This can\nbe used to try to address out-of-memory errors caused by the accumulation\nof metadata in Redis. However, it will only partially address the issue as\nmuch of the data is in the task table (and object table), which are not\nflushed.\n\nArgs:\nredis_client: optional, if not provided then ray.init() must have been\ncalled.", "source": "codesearchnet"} {"code": "def service_headline(self, short_name):\n \n\n if short_name not in self.services:\n raise ArgumentError(\"Unknown service name\", short_name=short_name)\n\n return self.services[short_name]['state'].headline", "docstring": "Get the headline stored for a service.\n\nArgs:\nshort_name (string): The short name of the service to get messages for\n\nReturns:\nServiceMessage: the headline or None if there is no headline", "source": "juraj-google-style"} {"code": "def experimental_distribute_dataset(self, dataset, options=None):\n if options and options.experimental_replication_mode == distribute_lib.InputReplicationMode.PER_REPLICA:\n raise NotImplementedError('InputReplicationMode.PER_REPLICA is only supported in `experimental_distribute_datasets_from_function`.')\n return super(CentralStorageStrategy, self).experimental_distribute_dataset(dataset, options)", "docstring": "Distributes a tf.data.Dataset instance provided via dataset.\n\nThe returned dataset is a wrapped strategy dataset which creates a\nmultidevice iterator under the hood. 
It prefetches the input data to the\nspecified devices on the worker. The returned distributed dataset can be\niterated over similar to how regular datasets can.\n\nNOTE: Currently, the user cannot add any more transformations to a\ndistributed dataset.\n\nFor Example:\n```\nstrategy = tf.distribute.CentralStorageStrategy() # with 1 CPU and 1 GPU\ndataset = tf.data.Dataset.range(10).batch(2)\ndist_dataset = strategy.experimental_distribute_dataset(dataset)\nfor x in dist_dataset:\nprint(x) # Prints PerReplica values [0, 1], [2, 3],...\n\n```\nArgs:\ndataset: `tf.data.Dataset` to be prefetched to device.\noptions: `tf.distribute.InputOptions` used to control options on how this\ndataset is distributed.\n\nReturns:\nA \"distributed `Dataset`\" that the caller can iterate over.", "source": "github-repos"} {"code": "def add_message_to_extension(msg: message.Message, extension: message.Message) -> None:\n if not fhir_types.is_profile_of_extension(msg):\n raise ValueError(f'Message: {msg.DESCRIPTOR.full_name} is not a valid FHIR Extension profile.')\n if not fhir_types.is_extension(extension):\n raise ValueError(f'Extension: {extension.DESCRIPTOR.full_name} is not a valid FHIR Extension.')\n cast(Any, extension).url.value = annotation_utils.get_structure_definition_url(msg.DESCRIPTOR)\n if proto_utils.field_is_set(msg, 'id'):\n proto_utils.copy_common_field(msg, extension, 'id')\n value_fields = [field for field in msg.DESCRIPTOR.fields if field.name not in NON_VALUE_FIELDS]\n if not value_fields:\n raise ValueError(f'Extension has no value fields: {msg.DESCRIPTOR.name}.')\n if len(value_fields) == 1 and (not proto_utils.field_is_repeated(value_fields[0])):\n value_field = value_fields[0]\n _verify_field_is_proto_message_type(value_field)\n if proto_utils.field_is_set(msg, value_field):\n value = proto_utils.get_value_at_field(msg, value_field)\n _add_value_to_extension(value, extension, annotation_utils.is_choice_type_field(value_field))\n else:\n pass\n else:\n _add_fields_to_extension(msg, extension)", "docstring": "Adds the contents of msg to extension.\n\nArgs:\nmsg: A FHIR profile of Extension, whose contents should be added to the\ngeneric extension.\nextension: The generic Extension to populate.", "source": "github-repos"} {"code": "def num_accelerators(self, task_type=None, task_id=None, config_proto=None):\n if self._tpu == 'local':\n return {'TPU': len([d for d in framework_config.list_logical_devices() if d.device_type == 'TPU'])}\n retry_count = 1\n while True:\n try:\n device_details = TPUClusterResolver._get_device_dict_and_cores(cluster_resolver_lib.get_accelerator_devices(self.master(), config_proto=config_proto))\n break\n except errors.DeadlineExceededError:\n error_message = 'Failed to connect to master. The TPU might not be ready (e.g. still scheduling) or the master address is incorrect: got (%s)' % self.master()\n if retry_count <= _TPU_CONN_RETRIES:\n logging.warning(error_message)\n logging.warning('Retrying (%d/%d)...', retry_count, _TPU_CONN_RETRIES)\n retry_count += 1\n else:\n raise RuntimeError(error_message)\n if device_details.total_cores:\n return {'TPU': TPUClusterResolver._verify_and_return_same_core_count(device_details.device_map)}\n return {'TPU': 0}", "docstring": "Returns the number of TPU cores per worker.\n\nConnects to the master and list all the devices present in the master,\nand counts them up. 
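A rough analogue of the `local` branch of `num_accelerators` (above), which reduces to counting logical devices of a given type on the current host. This sketch assumes TensorFlow 2.x is installed; the printed value is only an example.

```python
import tensorflow as tf

counts = {}
for dev in tf.config.list_logical_devices():
    # LogicalDevice.device_type is e.g. "CPU", "GPU" or "TPU".
    counts[dev.device_type] = counts.get(dev.device_type, 0) + 1
print(counts)  # e.g. {'CPU': 1} on a host with no accelerators attached
```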
Also verifies that the device counts per host in the\ncluster is the same before returning the number of TPU cores per host.\n\nArgs:\ntask_type: Unused.\ntask_id: Unused.\nconfig_proto: Used to create a connection to a TPU master in order to\nretrieve the system metadata.\n\nRaises:\nRuntimeError: If we cannot talk to a TPU worker after retrying or if the\nnumber of TPU devices per host is different.", "source": "github-repos"} {"code": "def cli_cmd_to_string(args):\n if isinstance(args, str):\n return args\n return ' '.join([shlex.quote(arg) for arg in args])", "docstring": "Converts a cmd arg list to string.\n\nArgs:\nargs: list of strings, the arguments of a command.\n\nReturns:\nString representation of the command.", "source": "github-repos"} {"code": "def ParseFileObject(self, parser_mediator, file_object):\n \n try:\n self._ParseFileHeader(file_object)\n except errors.ParseError as exception:\n raise errors.ParseError(\n 'Unable to parse index file header with error: {0!s}'.format(\n exception))\n \n file_object.seek(112, os.SEEK_CUR)\n self._ParseIndexTable(file_object)", "docstring": "Parses a file-like object.\n\nArgs:\nparser_mediator (ParserMediator): a parser mediator.\nfile_object (dfvfs.FileIO): a file-like object to parse.\n\nRaises:\nParseError: when the file cannot be parsed.", "source": "juraj-google-style"} {"code": "def consult_robots_txt(self, request: HTTPRequest) -> bool:\n if (not self._robots_txt_checker):\n return True\n result = (yield from self._robots_txt_checker.can_fetch(request))\n return result", "docstring": "Consult by fetching robots.txt as needed.\n\nArgs:\nrequest: The request to be made\nto get the file.\n\nReturns:\nTrue if can fetch\n\nCoroutine", "source": "codesearchnet"} {"code": "def __init__(self, value):\n if not (isinstance(value, tensor.Tensor) and value.dtype.is_floating):\n raise ValueError('Regression output value must be a float32 Tensor; got {}'.format(value))\n self._value = value", "docstring": "Constructor for `RegressionOutput`.\n\nArgs:\nvalue: a float `Tensor` giving the predicted values. 
Required.\n\nRaises:\nValueError: if the value is not a `Tensor` with dtype tf.float32.", "source": "github-repos"} {"code": "def correct_entry(self, entry):\n entry.correction.update(self.get_correction(entry))\n return entry", "docstring": "Corrects a single entry.\n\nArgs:\nentry: A DefectEntry object.\n\nReturns:\nAn processed entry.\n\nRaises:\nCompatibilityError if entry is not compatible.", "source": "codesearchnet"} {"code": "def run(self):\n cwd = os.getcwd()\n with ScratchDir(self.scratch_dir, create_symbolic_link=True, copy_to_current_on_exit=True, copy_from_current_on_enter=True) as temp_dir:\n self.total_errors = 0\n start = datetime.datetime.now()\n logger.info('Run started at {} in {}.'.format(start, temp_dir))\n v = sys.version.replace('\\n', ' ')\n logger.info('Custodian running on Python version {}'.format(v))\n logger.info('Hostname: {}, Cluster: {}'.format(*get_execution_host_info()))\n try:\n for (job_n, job) in islice(enumerate(self.jobs, 1), self.restart, None):\n self._run_job(job_n, job)\n dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder, indent=4)\n if self.checkpoint:\n self.restart = job_n\n Custodian._save_checkpoint(cwd, job_n)\n except CustodianError as ex:\n logger.error(ex.message)\n if ex.raises:\n raise\n finally:\n logger.info('Logging to {}...'.format(Custodian.LOG_FILE))\n dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder, indent=4)\n end = datetime.datetime.now()\n logger.info('Run ended at {}.'.format(end))\n run_time = (end - start)\n logger.info('Run completed. Total time taken = {}.'.format(run_time))\n if self.gzipped_output:\n gzip_dir('.')\n Custodian._delete_checkpoints(cwd)\n return self.run_log", "docstring": "Runs all jobs.\n\nReturns:\nAll errors encountered as a list of list.\n[[error_dicts for job 1], [error_dicts for job 2], ....]\n\nRaises:\nValidationError: if a job fails validation\nReturnCodeError: if the process has a return code different from 0\nNonRecoverableError: if an unrecoverable occurs\nMaxCorrectionsPerJobError: if max_errors_per_job is reached\nMaxCorrectionsError: if max_errors is reached\nMaxCorrectionsPerHandlerError: if max_errors_per_handler is reached", "source": "codesearchnet"} {"code": "def _PrintAnalysisStatusHeader(self, processing_status):\n \n self._output_writer.Write(\n 'Storage file\\t\\t: {0:s}\\n'.format(self._storage_file_path))\n\n self._PrintProcessingTime(processing_status)\n\n if processing_status and processing_status.events_status:\n self._PrintEventsStatus(processing_status.events_status)\n\n self._output_writer.Write('\\n')", "docstring": "Prints the analysis status header.\n\nArgs:\nprocessing_status (ProcessingStatus): processing status.", "source": "juraj-google-style"} {"code": "def transform_parameter_value(parameter_name, value, parameter_config):\n if isinstance(value, list):\n return [transform_parameter_value(('%s[%d]' % (parameter_name, index)), element, parameter_config) for (index, element) in enumerate(value)]\n entry = _get_parameter_conversion_entry(parameter_config)\n if entry:\n (validation_func, conversion_func, type_name) = entry\n if validation_func:\n validation_func(parameter_name, value, parameter_config)\n if conversion_func:\n try:\n return conversion_func(value)\n except ValueError:\n raise errors.BasicTypeParameterError(parameter_name, value, type_name)\n return value", "docstring": "Validates and transforms parameters to the type expected by the API.\n\nIf the value is a list this will recursively call _transform_parameter_value\non the values in the 
list. Otherwise, it checks all parameter rules for the\nthe current value and converts its type from a string to whatever format\nthe API expects.\n\nIn the list case, '[index-of-value]' is appended to the parameter name for\nerror reporting purposes.\n\nArgs:\nparameter_name: A string containing the name of the parameter, which is\neither just a variable name or the name with the index appended, in the\nrecursive case. For example 'var' or 'var[2]'.\nvalue: A string or list of strings containing the value(s) passed in for\nthe parameter. These are the values from the request, to be validated,\ntransformed, and passed along to the backend.\nparameter_config: The dictionary containing information specific to the\nparameter in question. This is retrieved from request.parameters in the\nmethod config.\n\nReturns:\nThe converted parameter value(s). Not all types are converted, so this\nmay be the same string that's passed in.", "source": "codesearchnet"} {"code": "def listen(self, message_consumer):\n while (not self._rfile.closed):\n request_str = self._read_message()\n if (request_str is None):\n break\n try:\n message_consumer(json.loads(request_str.decode('utf-8')))\n except ValueError:\n log.exception('Failed to parse JSON message %s', request_str)\n continue", "docstring": "Blocking call to listen for messages on the rfile.\n\nArgs:\nmessage_consumer (fn): function that is passed each message as it is read off the socket.", "source": "codesearchnet"} {"code": "def __init__(self, channel):\n \n self.Recognize = channel.unary_unary(\n \"/google.cloud.speech.v1.Speech/Recognize\",\n request_serializer=google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.RecognizeRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.RecognizeResponse.FromString,\n )\n self.LongRunningRecognize = channel.unary_unary(\n \"/google.cloud.speech.v1.Speech/LongRunningRecognize\",\n request_serializer=google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.LongRunningRecognizeRequest.SerializeToString,\n response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,\n )\n self.StreamingRecognize = channel.stream_stream(\n \"/google.cloud.speech.v1.Speech/StreamingRecognize\",\n request_serializer=google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.StreamingRecognizeRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_speech__v1_dot_proto_dot_cloud__speech__pb2.StreamingRecognizeResponse.FromString,\n )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"} {"code": "def from_optimize_result(cls, result, n, m, index=None):\n coords = pd.DataFrame(result.x.reshape((m, n)), index=index)\n projection = cls(coords)\n projection.stress = result.fun\n return projection", "docstring": "Construct a Projection from the output of an optimization.\n\nArgs:\nresult (:py:class:`scipy.optimize.OptimizeResult`): Object\nreturned by :py:func:`scipy.optimize.minimize`.\nn (`int`): Number of dimensions.\nm (`int`): Number of samples.\nindex (`list-like`): Names of samples. 
(Optional).\n\nReturns:\n:py:class:`pymds.Projection`", "source": "codesearchnet"} {"code": "def is_applicable_python_file(rel_path: str) -> bool:\n return (rel_path.endswith('.py') and (not any((re.search(pat, rel_path) for pat in IGNORED_FILE_PATTERNS))))", "docstring": "Determines if a file should be included in incremental coverage analysis.\n\nArgs:\nrel_path: The repo-relative file path being considered.\nReturns:\nWhether to include the file.", "source": "codesearchnet"} {"code": "def best_case(self, matrix, m_list, indices_left):\n m_indices = []\n fraction_list = []\n for m in m_list:\n m_indices.extend(m[2])\n fraction_list.extend(([m[0]] * m[1]))\n indices = list(indices_left.intersection(m_indices))\n interaction_matrix = matrix[(indices, :)][(:, indices)]\n fractions = (np.zeros(len(interaction_matrix)) + 1)\n fractions[:len(fraction_list)] = fraction_list\n fractions = np.sort(fractions)\n sums = (2 * np.sum(matrix[indices], axis=1))\n sums = np.sort(sums)\n step1 = (np.sort(interaction_matrix) * (1 - fractions))\n step2 = np.sort(np.sum(step1, axis=1))\n step3 = (step2 * (1 - fractions))\n interaction_correction = np.sum(step3)\n if (self._algo == self.ALGO_TIME_LIMIT):\n elapsed_time = (datetime.utcnow() - self._start_time)\n speedup_parameter = (elapsed_time.total_seconds() / 1800)\n avg_int = np.sum(interaction_matrix, axis=None)\n avg_frac = np.average(np.outer((1 - fractions), (1 - fractions)))\n average_correction = (avg_int * avg_frac)\n interaction_correction = ((average_correction * speedup_parameter) + (interaction_correction * (1 - speedup_parameter)))\n best_case = ((np.sum(matrix) + np.inner(sums[::(- 1)], (fractions - 1))) + interaction_correction)\n return best_case", "docstring": "Computes a best case given a matrix and manipulation list.\n\nArgs:\nmatrix: the current matrix (with some permutations already\nperformed)\nm_list: [(multiplication fraction, number_of_indices, indices,\nspecies)] describing the manipulation\nindices: Set of indices which haven't had a permutation\nperformed on them.", "source": "codesearchnet"} {"code": "def _PrintExtractionStatusUpdateWindow(self, processing_status):\n \n if self._stdout_output_writer:\n self._ClearScreen()\n\n output_text = 'plaso - {0:s} version {1:s}\\n\\n'.format(\n self._tool_name, plaso.__version__)\n self._output_writer.Write(output_text)\n\n self.PrintExtractionStatusHeader(processing_status)\n\n table_view = views.CLITabularTableView(column_names=[\n 'Identifier', 'PID', 'Status', 'Memory', 'Sources', 'Events',\n 'File'], column_sizes=[15, 7, 15, 15, 15, 15, 0])\n\n self._AddExtractionProcessStatusTableRow(\n processing_status.foreman_status, table_view)\n\n for worker_status in processing_status.workers_status:\n self._AddExtractionProcessStatusTableRow(worker_status, table_view)\n\n table_view.Write(self._output_writer)\n self._output_writer.Write('\\n')\n\n if processing_status.aborted:\n self._output_writer.Write(\n 'Processing aborted - waiting for clean up.\\n\\n')\n\n \n \n \n if self._stdout_output_writer:\n \n sys.stdout.flush()", "docstring": "Prints an extraction status update in window mode.\n\nArgs:\nprocessing_status (ProcessingStatus): processing status.", "source": "juraj-google-style"} {"code": "def snapshot(self):\n self.is_coordinator = self.device.is_coordinator\n media_info = self.device.avTransport.GetMediaInfo([('InstanceID', 0)])\n self.media_uri = media_info['CurrentURI']\n if (self.media_uri.split(':')[0] == 'x-rincon-queue'):\n if (self.media_uri.split('\n self.is_playing_queue 
= True\n else:\n self.is_playing_cloud_queue = True\n self.volume = self.device.volume\n self.mute = self.device.mute\n self.bass = self.device.bass\n self.treble = self.device.treble\n self.loudness = self.device.loudness\n if self.is_playing_queue:\n self.play_mode = self.device.play_mode\n self.cross_fade = self.device.cross_fade\n track_info = self.device.get_current_track_info()\n if (track_info is not None):\n position = track_info['playlist_position']\n if (position != ''):\n self.playlist_position = int(position)\n self.track_position = track_info['position']\n else:\n self.media_metadata = media_info['CurrentURIMetaData']\n if self.is_coordinator:\n transport_info = self.device.get_current_transport_info()\n if (transport_info is not None):\n self.transport_state = transport_info['current_transport_state']\n self._save_queue()\n return self.is_coordinator", "docstring": "Record and store the current state of a device.\n\nReturns:\nbool: `True` if the device is a coordinator, `False` otherwise.\nUseful for determining whether playing an alert on a device\nwill ungroup it.", "source": "codesearchnet"} {"code": "def resize_annotation(annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float=0.5, resample: PILImageResampling=PILImageResampling.NEAREST):\n ratios = tuple((float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size)))\n ratio_height, ratio_width = ratios\n new_annotation = {}\n new_annotation['size'] = target_size\n for key, value in annotation.items():\n if key == 'boxes':\n boxes = value\n scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)\n new_annotation['boxes'] = scaled_boxes\n elif key == 'area':\n area = value\n scaled_area = area * (ratio_width * ratio_height)\n new_annotation['area'] = scaled_area\n elif key == 'masks':\n masks = value[:, None]\n masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])\n masks = masks.astype(np.float32)\n masks = masks[:, 0] > threshold\n new_annotation['masks'] = masks\n elif key == 'size':\n new_annotation['size'] = target_size\n else:\n new_annotation[key] = value\n return new_annotation", "docstring": "Resizes an annotation to a target size.\n\nArgs:\nannotation (`Dict[str, Any]`):\nThe annotation dictionary.\norig_size (`Tuple[int, int]`):\nThe original size of the input image.\ntarget_size (`Tuple[int, int]`):\nThe target size of the image, as returned by the preprocessing `resize` step.\nthreshold (`float`, *optional*, defaults to 0.5):\nThe threshold used to binarize the segmentation masks.\nresample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):\nThe resampling filter to use when resizing the masks.", "source": "github-repos"} {"code": "def decode(data):\n \n \n dialect = None\n try:\n dialect = csv.Sniffer().sniff(data)\n except Exception:\n pass\n\n \n handler = None\n try:\n data = data.splitlines() \n handler = csv.reader(data, dialect)\n except Exception, e:\n raise MetaParsingException(\"Can't parse your CSV data: %s\" % e.message)\n\n \n decoded = []\n for cnt, line in enumerate(handler):\n usable_data = filter(lambda x: x.strip(), line)\n\n if not usable_data:\n continue\n\n if len(usable_data) != 2:\n raise MetaParsingException(\n \"Bad number of elements - line %d:\\n\\t%s\\n\" % (cnt, data[cnt])\n )\n\n \n usable_data = map(lambda x: x.strip().decode(\"utf-8\"), usable_data)\n\n \n usable_data = map(lambda x: _remove_quotes(x), usable_data)\n\n 
decoded.append(usable_data)\n\n \n decoded = validator.check_structure(decoded)\n\n return decoded", "docstring": "Handles decoding of the CSV `data`.\n\nArgs:\ndata (str): Data which will be decoded.\n\nReturns:\ndict: Dictionary with decoded data.", "source": "juraj-google-style"} {"code": "def model_from_json(json_string, custom_objects=None):\n config = json_utils.decode(json_string)\n from tensorflow.python.keras.layers import deserialize\n return deserialize(config, custom_objects=custom_objects)", "docstring": "Parses a JSON model configuration string and returns a model instance.\n\nUsage:\n\n>>> model = tf.keras.Sequential([\n... tf.keras.layers.Dense(5, input_shape=(3,)),\n... tf.keras.layers.Softmax()])\n>>> config = model.to_json()\n>>> loaded_model = tf.keras.models.model_from_json(config)\n\nArgs:\njson_string: JSON string encoding a model configuration.\ncustom_objects: Optional dictionary mapping names\n(strings) to custom classes or functions to be\nconsidered during deserialization.\n\nReturns:\nA Keras model instance (uncompiled).", "source": "github-repos"} {"code": "def forward(self, pixel_values: torch.FloatTensor, audio_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor]=None, audio_mask: Optional[torch.FloatTensor]=None, mask_pixel: bool=False, mask_audio: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], TvltModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n pixel_embedding_output, pixel_mask = self.pixel_embeddings(pixel_values, pixel_mask)\n audio_embedding_output, audio_mask = self.audio_embeddings(audio_values, audio_mask)\n pixel_label_masks = None\n pixel_ids_restore = None\n if mask_pixel:\n pixel_mask_noise, pixel_len_keep = generate_pixel_mask_noise(pixel_embedding_output, pixel_mask=pixel_mask, mask_ratio=self.config.pixel_mask_ratio)\n pixel_embedding_output, pixel_mask, pixel_label_masks, pixel_ids_restore = random_masking(pixel_embedding_output, pixel_mask_noise, pixel_len_keep, attention_masks=pixel_mask)\n audio_label_masks = None\n audio_ids_restore = None\n if mask_audio:\n num_freq_patches = self.config.frequency_length \n audio_mask_noise, audio_len_keep = generate_audio_mask_noise(audio_embedding_output, audio_mask=audio_mask, mask_ratio=self.config.audio_mask_ratio, mask_type=self.config.audio_mask_type, freq_len=num_freq_patches)\n audio_embedding_output, audio_mask, audio_label_masks, audio_ids_restore = random_masking(audio_embedding_output, audio_mask_noise, audio_len_keep, attention_masks=audio_mask)\n batch_size = pixel_values.size(0)\n embedding_output = torch.cat([self.cls_embedding.repeat(batch_size, 1, 1), pixel_embedding_output, audio_embedding_output], 1)\n masked_pixel_len = pixel_embedding_output.size(1)\n attention_mask = None\n if pixel_mask is not None and audio_mask is not None:\n attention_mask = torch.cat([pixel_mask[:, :1], pixel_mask, audio_mask], 1)\n input_shape = embedding_output.size()\n extended_attention_mask = None\n if attention_mask is not None:\n extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)\n encoder_outputs = self.encoder(embedding_output, 
attention_mask=extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n sequence_output = encoder_outputs[0]\n if self.layernorm is not None:\n sequence_output = self.layernorm(sequence_output)\n pixel_sequence_output = sequence_output[:, 1:1 + masked_pixel_len]\n audio_sequence_output = sequence_output[:, 1 + masked_pixel_len:]\n if not return_dict:\n return (sequence_output, pixel_sequence_output, audio_sequence_output, pixel_label_masks, audio_label_masks, pixel_ids_restore, audio_ids_restore) + encoder_outputs[1:]\n return TvltModelOutput(last_hidden_state=sequence_output, last_pixel_hidden_state=pixel_sequence_output, last_audio_hidden_state=audio_sequence_output, pixel_label_masks=pixel_label_masks, audio_label_masks=audio_label_masks, pixel_ids_restore=pixel_ids_restore, audio_ids_restore=audio_ids_restore, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)", "docstring": "Returns:\n\nExamples:\n\n```python\n>>> from transformers import TvltProcessor, TvltModel\n>>> import numpy as np\n>>> import torch\n\n>>> num_frames = 8\n>>> images = list(np.random.randn(num_frames, 3, 224, 224))\n>>> audio = list(np.random.randn(10000))\n\n>>> processor = TvltProcessor.from_pretrained(\"ZinengTang/tvlt-base\")\n>>> model = TvltModel.from_pretrained(\"ZinengTang/tvlt-base\")\n\n>>> input_dict = processor(images, audio, sampling_rate=44100, return_tensors=\"pt\")\n\n>>> outputs = model(**input_dict)\n>>> loss = outputs.loss\n```", "source": "github-repos"} {"code": "def get_tag(self, key, *, case_sensitive=True):\n key = (key if case_sensitive else key.lower())\n for tag in self.resource.tags:\n if (not case_sensitive):\n if (tag.key.lower() == key):\n return tag\n elif (key == tag.key):\n return tag\n return None", "docstring": "Return a tag by key, if found\n\nArgs:\nkey (str): Name/key of the tag to locate\ncase_sensitive (bool): Should tag keys be treated case-sensitive (default: true)\n\nReturns:\n`Tag`,`None`", "source": "codesearchnet"} {"code": "def parse_code(self):\n code = open(self.path, encoding='utf-8').read()\n try:\n body = ast.parse(code).body\n except SyntaxError:\n try:\n code = code.encode('utf-8')\n body = ast.parse(code).body\n except SyntaxError:\n return []\n return self.get_imports(body)", "docstring": "Read the source code and return all the import statements.\n\nReturns:\nlist of dict: the import statements.", "source": "codesearchnet"} {"code": "def _checkMode(mode):\n if (not isinstance(mode, str)):\n raise TypeError('The {0} should be a string. Given: {1!r}'.format('mode', mode))\n if (mode not in [MODE_RTU, MODE_ASCII]):\n raise ValueError(\"Unreconized Modbus mode given. 
Must be 'rtu' or 'ascii' but {0!r} was given.\".format(mode))", "docstring": "Check that the Modbus mode is valie.\n\nArgs:\nmode (string): The Modbus mode (MODE_RTU or MODE_ASCII)\n\nRaises:\nTypeError, ValueError", "source": "codesearchnet"} {"code": "def findCaller(self, stack_info=False):\n f_to_skip = ABSLLogger._frames_to_skip\n frame = sys._getframe(2)\n while frame:\n code = frame.f_code\n if ((_LOGGING_FILE_PREFIX not in code.co_filename) and ((code.co_filename, code.co_name, code.co_firstlineno) not in f_to_skip) and ((code.co_filename, code.co_name) not in f_to_skip)):\n if (six.PY2 and (not stack_info)):\n return (code.co_filename, frame.f_lineno, code.co_name)\n else:\n sinfo = None\n if stack_info:\n out = io.StringIO()\n out.write(u'Stack (most recent call last):\\n')\n traceback.print_stack(frame, file=out)\n sinfo = out.getvalue().rstrip(u'\\n')\n return (code.co_filename, frame.f_lineno, code.co_name, sinfo)\n frame = frame.f_back", "docstring": "Finds the frame of the calling method on the stack.\n\nThis method skips any frames registered with the\nABSLLogger and any methods from this file, and whatever\nmethod is currently being used to generate the prefix for the log\nline. Then it returns the file name, line number, and method name\nof the calling method. An optional fourth item may be returned,\ncallers who only need things from the first three are advised to\nalways slice or index the result rather than using direct unpacking\nassignment.\n\nArgs:\nstack_info: bool, when True, include the stack trace as a fourth item\nreturned. On Python 3 there are always four items returned - the\nfourth will be None when this is False. On Python 2 the stdlib\nbase class API only returns three items. We do the same when this\nnew parameter is unspecified or False for compatibility.\n\nReturns:\n(filename, lineno, methodname[, sinfo]) of the calling method.", "source": "codesearchnet"} {"code": "def select_delim(self, delim):\n \n size = len(delim)\n if size > 20:\n raise RuntimeError('Delimeter too long')\n n1 = size/10\n n2 = size%10\n self.send('^SS'+chr(n1)+chr(n2))", "docstring": "Select desired delimeter\n\nArgs:\ndelim: The delimeter character you want.\nReturns:\nNone\nRaises:\nRuntimeError: Delimeter too long.", "source": "juraj-google-style"} {"code": "def publish(self, channel_id):\n \n payload = {\n \"channel_id\":channel_id,\n }\n response = config.SESSION.post(config.publish_channel_url(), data=json.dumps(payload))\n response.raise_for_status()", "docstring": "publish: publishes tree to Kolibri\nArgs:\nchannel_id (str): channel's id on Kolibri Studio\nReturns: None", "source": "juraj-google-style"} {"code": "def _get_elements(mol, label):\n \n elements = [int(mol.GetAtom(i).GetAtomicNum()) for i in label]\n return elements", "docstring": "The the elements of the atoms in the specified order\n\nArgs:\nmol: The molecule. OpenBabel OBMol object.\nlabel: The atom indices. List of integers.\n\nReturns:\nElements. List of integers.", "source": "juraj-google-style"} {"code": "def _GetKeyFlagsForModule(self, module):\n \n if not isinstance(module, str):\n module = module.__name__\n\n \n \n \n key_flags = self._GetFlagsDefinedByModule(module)\n\n \n for flag in self.KeyFlagsByModuleDict().get(module, []):\n if flag not in key_flags:\n key_flags.append(flag)\n return key_flags", "docstring": "Returns the list of key flags for a module.\n\nArgs:\nmodule: A module object or a module name (a string)\n\nReturns:\nA new list of Flag objects. 
Caller may update this list as he\nwishes: none of those changes will affect the internals of this\nFlagValue object.", "source": "juraj-google-style"} {"code": "def _ParseMRUListExEntryValue(\n self, parser_mediator, registry_key, entry_index, entry_number, **kwargs):\n \n value_string = ''\n\n value = registry_key.GetValueByName('{0:d}'.format(entry_number))\n if value is None:\n parser_mediator.ProduceExtractionWarning(\n 'missing MRUListEx value: {0:d} in key: {1:s}.'.format(\n entry_number, registry_key.path))\n\n elif value.DataIsString():\n value_string = value.GetDataAsObject()\n\n elif value.DataIsBinaryData():\n utf16le_string_map = self._GetDataTypeMap('utf16le_string')\n\n try:\n value_string = self._ReadStructureFromByteStream(\n value.data, 0, utf16le_string_map)\n except (ValueError, errors.ParseError) as exception:\n parser_mediator.ProduceExtractionWarning((\n 'unable to parse MRUListEx entry value: {0:d} with error: '\n '{1!s}').format(entry_number, exception))\n\n value_string = value_string.rstrip('\\x00')\n\n return value_string", "docstring": "Parses the MRUListEx entry value.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains\nthe MRUListEx value.\nentry_index (int): MRUListEx entry index.\nentry_number (int): entry number.\n\nReturns:\nstr: MRUList entry value.", "source": "juraj-google-style"} {"code": "def peek_all(self, model_class):\n \n if self._cache:\n return self._cache.get_records(model_class.__name__)\n else:\n return []", "docstring": "Return a list of models from the local cache.\n\nArgs:\nmodel_class (:class:`cinder_data.model.CinderModel`): A subclass of\n:class:`cinder_data.model.CinderModel` of your chosen model.\n\nReturns:\nlist: A list of instances of you model_class or and empty list.", "source": "juraj-google-style"} {"code": "def load(self) -> RepresentativeDatasetMapping:\n repr_dataset_map = {}\n for signature_def_key, dataset_file in self.dataset_file_map.items():\n if dataset_file.HasField('tfrecord_file_path'):\n repr_dataset_map[signature_def_key] = self._load_tf_record(dataset_file.tfrecord_file_path)\n else:\n raise ValueError('Unsupported Representative Dataset filetype')\n return repr_dataset_map", "docstring": "Loads the representative datasets.\n\nReturns:\nrepresentative dataset mapping: A signature def key -> representative\nmapping. The loader loads `RepresentativeDataset` for each path in\n`self.dataset_file_map` and associates the loaded dataset to the\ncorresponding signature def key.", "source": "github-repos"} {"code": "def _in_op_degree(op):\n count = 0\n for op in op.control_inputs + [in_tensor.op for in_tensor in op.inputs]:\n if not _is_loop_edge(op):\n count += 1\n return count", "docstring": "Returns the number of incoming edges to the given op.\n\nThe edge calculation skips the edges that come from 'NextIteration' ops.\nNextIteration creates a cycle in the graph. 
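A framework-free sketch of the counting rule that `_in_op_degree` (above) applies: tally a node's incoming edges while skipping the loop-forming ones. The node names are made up for illustration.

```python
def in_degree(node, incoming_edges, is_loop_edge):
    # Count predecessors, ignoring edges that would close a cycle.
    return sum(1 for src in incoming_edges[node] if not is_loop_edge(src))

incoming_edges = {"merge": ["enter", "next_iteration", "const"]}
print(in_degree("merge", incoming_edges,
                lambda name: name.startswith("next_iteration")))  # -> 2
```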
We break cycles by treating\nthis op as 'sink' and ignoring all outgoing edges from it.\nArgs:\nop: Tf.Operation\nReturns:\nthe number of incoming edges.", "source": "github-repos"} {"code": "def are_compatible(spec1, spec2):\n try:\n nest.assert_same_structure(spec1, spec2)\n except TypeError:\n return False\n except ValueError:\n return False\n for s1, s2 in zip(nest.flatten(spec1), nest.flatten(spec2)):\n if not s1.is_compatible_with(s2) or not s2.is_compatible_with(s1):\n return False\n return True", "docstring": "Indicates whether two type specifications are compatible.\n\nTwo type specifications are compatible if they have the same nested structure\nand the their individual components are pair-wise compatible.\n\nArgs:\nspec1: A `tf.TypeSpec` object to compare.\nspec2: A `tf.TypeSpec` object to compare.\n\nReturns:\n`True` if the two type specifications are compatible and `False` otherwise.", "source": "github-repos"} {"code": "def set_weights(self, new_weights):\n \n self._check_sess()\n assign_list = [\n self.assignment_nodes[name] for name in new_weights.keys()\n if name in self.assignment_nodes\n ]\n assert assign_list, (\"No variables in the input matched those in the \"\n \"network. Possible cause: Two networks were \"\n \"defined in the same TensorFlow graph. To fix \"\n \"this, place each network definition in its own \"\n \"tf.Graph.\")\n self.sess.run(\n assign_list,\n feed_dict={\n self.placeholders[name]: value\n for (name, value) in new_weights.items()\n if name in self.placeholders\n })", "docstring": "Sets the weights to new_weights.\n\nNote:\nCan set subsets of variables as well, by only passing in the\nvariables you want to be set.\n\nArgs:\nnew_weights (Dict): Dictionary mapping variable names to their\nweights.", "source": "juraj-google-style"} {"code": "def Glob2Regex(glob_pattern):\n if (not glob_pattern):\n raise ValueError('Missing glob pattern.')\n regex_pattern = []\n glob_pattern_index = 0\n glob_pattern_length = len(glob_pattern)\n while (glob_pattern_index < glob_pattern_length):\n character = glob_pattern[glob_pattern_index]\n glob_pattern_index += 1\n if (character == '*'):\n regex_pattern.append('.*')\n elif (character == '?'):\n regex_pattern.append('.')\n elif (character != '['):\n regex_character = re.escape(character)\n regex_pattern.append(regex_character)\n else:\n glob_group_index = glob_pattern_index\n if ((glob_group_index < glob_pattern_length) and (glob_pattern[glob_group_index] == '!')):\n glob_group_index += 1\n if ((glob_group_index < glob_pattern_length) and (glob_pattern[glob_group_index] == ']')):\n glob_group_index += 1\n while ((glob_group_index < glob_pattern_length) and (glob_pattern[glob_group_index] != ']')):\n glob_group_index += 1\n if (glob_group_index >= glob_pattern_length):\n regex_pattern.append('\\\\[')\n continue\n glob_group = glob_pattern[glob_pattern_index:glob_group_index]\n glob_pattern_index = (glob_group_index + 1)\n glob_group = glob_group.replace('\\\\', '\\\\\\\\')\n if py2to3.PY_3_7_AND_LATER:\n glob_group = glob_group.replace('|', '\\\\|')\n regex_pattern.append('[')\n if (glob_group[0] == '!'):\n regex_pattern.append('^')\n glob_group = glob_group[1:]\n elif (glob_group[0] == '^'):\n regex_pattern.append('\\\\')\n regex_pattern.append(glob_group)\n regex_pattern.append(']')\n return ''.join(regex_pattern)", "docstring": "Converts a glob pattern to a regular expression.\n\nThis function supports basic glob patterns that consist of:\n* matches everything\n? 
matches any single character\n[seq] matches any character in sequence\n[!seq] matches any character not in sequence\n\nArgs:\nglob_pattern (str): glob pattern.\n\nReturns:\nstr: regular expression pattern.\n\nRaises:\nValueError: if the glob pattern cannot be converted.", "source": "codesearchnet"} {"code": "def get_cosmosdb_account_keys(access_token, subscription_id, rgname, account_name):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.DocumentDB/databaseAccounts/', account_name, '/listKeys', '?api-version=', COSMOSDB_API])\n return do_post(endpoint, '', access_token)", "docstring": "Get the access keys for the specified Cosmos DB account.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\naccount_name (str): Name of the Cosmos DB account.\n\nReturns:\nHTTP response. JSON body of Cosmos DB account keys.", "source": "codesearchnet"} {"code": "def create(self, callback_url):\n resource = self.resource.create({'subscribed_to': 'address', 'callback_url': callback_url})\n subscription = self.wrap(resource)\n self.add(subscription)\n return subscription", "docstring": "Register a new Subscription on this collection's parent object.\n\nArgs:\ncallback_url (str): URI of an active endpoint which can receive\nnotifications.\n\nReturns:\nA round.Subscription object if successful.", "source": "codesearchnet"} {"code": "def create_with_claims(self, claims):\n \n new_kwargs = dict(self._kwargs)\n new_kwargs.update(claims)\n result = self.__class__(self._service_account_email,\n self._signer,\n scopes=self._scopes,\n private_key_id=self._private_key_id,\n client_id=self.client_id,\n user_agent=self._user_agent,\n **new_kwargs)\n result.token_uri = self.token_uri\n result.revoke_uri = self.revoke_uri\n result._private_key_pkcs8_pem = self._private_key_pkcs8_pem\n result._private_key_pkcs12 = self._private_key_pkcs12\n result._private_key_password = self._private_key_password\n return result", "docstring": "Create credentials that specify additional claims.\n\nArgs:\nclaims: dict, key-value pairs for claims.\n\nReturns:\nServiceAccountCredentials, a copy of the current service account\ncredentials with updated claims to use when obtaining access\ntokens.", "source": "juraj-google-style"} {"code": "def get_protocol(url):\n \n\n if url not in URLHelper.__cache:\n URLHelper.__cache[url] = urlparse(url)\n\n return URLHelper.__cache[url].scheme", "docstring": "Get the protocol (e.g. 
http, https or ftp) of the given URL.\n\nArgs:\nurl (str): The URL to get the protocol from.\n\nReturns:\nstr: The URL protocol", "source": "juraj-google-style"} {"code": "def calculate(cls, order_id, shipping=None, refund_line_items=None):\n data = {}\n if shipping:\n data['shipping'] = shipping\n data['refund_line_items'] = (refund_line_items or [])\n body = {'refund': data}\n resource = cls.post('calculate', order_id=order_id, body=json.dumps(body).encode())\n return cls(cls.format.decode(resource.body), prefix_options={'order_id': order_id})", "docstring": "Calculates refund transactions based on line items and shipping.\nWhen you want to create a refund, you should first use the calculate\nendpoint to generate accurate refund transactions.\n\nArgs:\norder_id: Order ID for which the Refund has to created.\nshipping: Specify how much shipping to refund.\nrefund_line_items: A list of line item IDs and quantities to refund.\nReturns:\nUnsaved refund record", "source": "codesearchnet"} {"code": "def bool(name, default=None, allow_none=False, fallback=None):\n value = read(name, default, allow_none, fallback=fallback)\n if isinstance(value, builtins.bool):\n return value\n elif isinstance(value, builtins.int):\n return (True if (value > 0) else False)\n elif ((value is None) and allow_none):\n return None\n else:\n value_str = builtins.str(value).lower().strip()\n return _strtobool(value_str)", "docstring": "Get a boolean based environment value or the default.\n\nArgs:\nname: The environment variable name\ndefault: The default value to use if no environment variable is found\nallow_none: If the return value can be `None` (i.e. optional)", "source": "codesearchnet"} {"code": "def _get_sync(self, url):\n \n response = self.session.get(url)\n if response.status_code == requests.codes.ok:\n return response.json()\n else:\n raise HTTPError", "docstring": "Internal method used for GET requests\n\nArgs:\nurl (str): URL to fetch\n\nReturns:\nIndividual URL request's response\n\nRaises:\nHTTPError: If HTTP request failed.", "source": "juraj-google-style"} {"code": "def orient_undirected_graph(self, data, graph):\n \n \n self.arguments['{VERBOSE}'] = str(self.verbose).upper()\n self.arguments['{SCORE}'] = self.scores[self.score]\n\n fe = DataFrame(nx.adj_matrix(graph, weight=None).todense())\n fg = DataFrame(1 - fe.values)\n\n results = self._run_gies(data, fixedGaps=fg, verbose=self.verbose)\n\n return nx.relabel_nodes(nx.DiGraph(results),\n {idx: i for idx, i in enumerate(data.columns)})", "docstring": "Run GIES on an undirected graph.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\ngraph (networkx.Graph): Skeleton of the graph to orient\n\nReturns:\nnetworkx.DiGraph: Solution given by the GIES algorithm.", "source": "juraj-google-style"} {"code": "def process_output(meta_file, outfile_name, code_links):\n doc_str = (('\n doc_str += 'Generated by [py2md](https:\n doc_str += (strftime('%Y-%m-%d %H:%M:%S ') + '\\n\\n')\n if (len(meta_file['modules']) > 1):\n doc_str += '\n chapter_num = 1\n for meta_doc in meta_file['modules']:\n chapter_name = meta_doc['summary_comment']\n chapter_link = chapter_name.lstrip().replace('.', '').replace(' ', '-').lower()\n doc_str += (((((str(chapter_num) + '. 
[') + chapter_name) + '](\n chapter_num += 1\n for meta_doc in meta_file['modules']:\n doc_str += (('\n doc_str += ((('[source file](' + meta_doc['source_file']) + ')') + '\\n')\n for function_info in meta_doc['functions']:\n doc_str += (('\n doc_str += (function_info['definition'] + '\\n\\n')\n if ('comments' in function_info):\n doc_str += (('```\\n' + function_info['comments']) + '\\n```\\n\\n')\n print(('Writing file: ' + outfile_name))\n out_file = open(outfile_name, 'w')\n out_file.write(doc_str)\n out_file.close()", "docstring": "Create a markdown format documentation file.\n\nArgs:\nmeta_file (dict): Dictionary with documentation metadata.\noutfile_name (str): Markdown file to write to.", "source": "codesearchnet"} {"code": "def bundle(self, bundle_name):\n \n if self.args.bundle or self.tcex_json.get('package', {}).get('bundle', False):\n if self.tcex_json.get('package', {}).get('bundle_packages') is not None:\n for bundle in self.tcex_json.get('package', {}).get('bundle_packages') or []:\n bundle_name = bundle.get('name')\n bundle_patterns = bundle.get('patterns')\n\n bundle_apps = []\n for app in self._app_packages:\n for app_pattern in bundle_patterns:\n p = re.compile(app_pattern, re.IGNORECASE)\n if p.match(app):\n bundle_apps.append(app)\n\n \n if bundle_apps:\n self.bundle_apps(bundle_name, bundle_apps)\n else:\n self.bundle_apps(bundle_name, self._app_packages)", "docstring": "Bundle multiple Job or Playbook Apps into a single zip file.\n\nArgs:\nbundle_name (str): The output name of the bundle zip file.", "source": "juraj-google-style"} {"code": "def check_target_integrity(key, values, meta=False, all=False, parent=None):\n \n\n \n if all:\n if not values:\n print(\"Warning: target 'all' is empty\")\n \n return True\n\n errmes = \"target '{}' is not allowed to be missing a help message\\n\"\n\n \n if meta:\n \n if \"help\" not in values:\n sys.stderr.write(errmes.format(key))\n return False\n \n if len(values.keys()) == 1:\n sys.stderr.write(\"Meta-target '{}' is empty\\n\".format(key))\n return False\n return True\n\n \n expected_fields = [\"dependencies\", \"help\", \"output\", \"formula\"]\n expected_fields = set(expected_fields)\n try:\n our_keys_set = set(values.keys())\n except:\n sys.stderr.write(\"Error processing target '{}'\\n\".format(key))\n sys.stderr.write(\"Are you sure '{}' is a meta-target?\\n\".format(\n parent))\n sys.stderr.write(\"If it's not, it's missing a formula\\n\")\n return False\n ignored_fields = set([field for field in our_keys_set\\\n if field.strip().startswith(\"(ignore)\")])\n difference = our_keys_set - expected_fields - ignored_fields\n if difference:\n print(\"The following fields were not recognized and will be ignored\")\n for item in difference:\n print(\" - \" + item)\n if \"help\" not in values:\n sys.stderr.write(errmes.format(key))\n return False\n \n if \"formula\" not in values:\n sys.stderr.write(\"Target '{}' is missing formula\\n\".format(key))\n return False\n return True", "docstring": "Checks the integrity of a specific target. 
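A small runnable sketch of the field validation performed by `check_target_integrity` (above), using a hypothetical target definition: unknown fields are found with a set difference and merely reported, while a `help` entry is mandatory.

```python
expected_fields = {"dependencies", "help", "output", "formula"}
target = {"help": "build the docs", "formula": "make docs", "colour": "blue"}

unrecognised = set(target) - expected_fields
print(unrecognised)       # {'colour'} -> warned about, then ignored
print("help" in target)   # True -> satisfies the mandatory help-message check
```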
Gets called\nmultiple times from check_integrity()\n\nArgs:\nThe target name\nThe dictionary values of that target\nA boolean representing whether it is a meta-target\nA boolean representing whether it is the \"all\" target\nA string representing name of parent (default None)\n\nReturns:\nTrue is the target is conformant\nFalse if not", "source": "juraj-google-style"} {"code": "def _check_input_partition_dims(self, tensor, dims):\n if dims is None:\n return\n dims = np.array(dims)\n if (dims < 1).any():\n raise ValueError('All input partition dims must be >= 1.')\n if dims.prod() == 1:\n return\n if dims.prod() != self._device_assignment.num_cores_per_replica:\n raise ValueError('The product of each input partition dim should equal to num_cores_per_replica. (dim = {}, num_cores_per_replica = {})'.format(dims, self._device_assignment.num_cores_per_replica))\n if dims.shape[0] != tensor.shape.ndims:\n raise ValueError('Input partition dims must have the same number of dimensions as the `Tensor` to be partitioned. (tensor shape = {}, input partition dims = {}).'.format(tensor.shape.as_list(), dims))\n tensor.shape.assert_is_fully_defined()", "docstring": "Checks that input partition dims are valid for the `Tensor`.\n\nArgs:\ntensor: Input tensor for partitioning.\ndims: A list of integer describes how to partition the input tensor.\n\nRaises:\nValueError: If the tensor can't be partitioned by dims or the\nnum_cores_per_replica doesn't match the number of\npartitions(dims.prod()).", "source": "github-repos"} {"code": "def _to_qasm_output(\n self,\n header: Optional[str] = None,\n precision: int = 10,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n ) -> QasmOutput:\n \n if header is None:\n header = 'Generated from Cirq v{}'.format(\n cirq._version.__version__)\n qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(\n self.all_qubits())\n return QasmOutput(operations=self.all_operations(),\n qubits=qubits,\n header=header,\n precision=precision,\n version='2.0')", "docstring": "Returns a QASM object equivalent to the circuit.\n\nArgs:\nheader: A multi-line string that is placed in a comment at the top\nof the QASM. Defaults to a cirq version specifier.\nprecision: Number of digits to use when representing numbers.\nqubit_order: Determines how qubits are ordered in the QASM\nregister.", "source": "juraj-google-style"} {"code": "def adaptive_enc_mask(x_len, chunk_start_idx, left_window=0, right_window=0):\n chunk_start_idx = torch.Tensor(chunk_start_idx).long()\n start_pad = torch.nn.functional.pad(chunk_start_idx, (1, 0))\n end_pad = torch.nn.functional.pad(chunk_start_idx, (0, 1), value=x_len)\n seq_range = torch.arange(0, x_len).unsqueeze(-1)\n idx = ((seq_range < end_pad) & (seq_range >= start_pad)).nonzero()[:, 1]\n seq_range_expand = torch.arange(0, x_len).unsqueeze(0).expand(x_len, -1)\n idx_left = idx - left_window\n idx_left[idx_left < 0] = 0\n boundary_left = start_pad[idx_left]\n mask_left = seq_range_expand >= boundary_left.unsqueeze(-1)\n idx_right = idx + right_window\n idx_right[idx_right > len(chunk_start_idx)] = len(chunk_start_idx)\n boundary_right = end_pad[idx_right]\n mask_right = seq_range_expand < boundary_right.unsqueeze(-1)\n return mask_left & mask_right", "docstring": "The function is very important for Transformer Transducer Streaming mode\nArgs:\nxs_len (int): sequence length\nchunk_start_idx (list): first idx of each chunk, such as [0,18,36,48]. 
It also supports adaptive chunk size [0,10,15,45]\nleft_window (int): how many left chunks can be seen\nright_window (int): how many right chunks can be seen. It is used for chunk overlap model.\nReturns:\nmask (torch.Tensor): a mask tensor for streaming model", "source": "github-repos"} {"code": "def get_uniquely_named_objects_by_name(object_list):\n if (not object_list):\n return dict()\n result = dict()\n for obj in object_list:\n name = obj.name.value\n if (name in result):\n raise GraphQLCompilationError(u'Found duplicate object key: {} {}'.format(name, object_list))\n result[name] = obj\n return result", "docstring": "Return dict of name -> object pairs from a list of objects with unique names.\n\nArgs:\nobject_list: list of objects, each X of which has a unique name accessible as X.name.value\n\nReturns:\ndict, { X.name.value: X for x in object_list }\nIf the list is empty or None, returns an empty dict.", "source": "codesearchnet"} {"code": "def status(self, order_id):\n self.logger.debug(('Get status of order ' + order_id))\n url = ('%(base_url)s/order/%(order_id)s' % {'base_url': self.base_url, 'order_id': order_id})\n r = self.gbdx_connection.get(url)\n r.raise_for_status()\n return r.json().get('acquisitions', {})", "docstring": "Checks imagery order status. There can be more than one image per\norder and this function returns the status of all images\nwithin the order.\n\nArgs:\norder_id (str): The id of the order placed.\n\nReturns:\nList of dictionaries, one per image. Each dictionary consists\nof the keys 'acquisition_id', 'location' and 'state'.", "source": "codesearchnet"} {"code": "def __init__(\n self, pair: str = '', exchange: str = 'IDEALPRO',\n symbol: str = '', currency: str = '', **kwargs):\n \n if pair:\n assert len(pair) == 6\n symbol = symbol or pair[:3]\n currency = currency or pair[3:]\n Contract.__init__(\n self, 'CASH', symbol=symbol,\n exchange=exchange, currency=currency, **kwargs)", "docstring": "Foreign exchange currency pair.\n\nArgs:\npair: Shortcut for specifying symbol and currency, like 'EURUSD'.\nexchange: Destination exchange.\nsymbol: Base currency.\ncurrency: Quote currency.", "source": "juraj-google-style"} {"code": "def parse(trigger_word_file):\n with open(trigger_word_file) as fd:\n triggers_dict = json.load(fd)\n sources = [Source(s) for s in triggers_dict['sources']]\n sinks = [Sink.from_json(trigger, data) for (trigger, data) in triggers_dict['sinks'].items()]\n return Definitions(sources, sinks)", "docstring": "Parse the file for source and sink definitions.\n\nReturns:\nA definitions tuple with sources and sinks.", "source": "codesearchnet"} {"code": "def __init__(self, data=None, **kwargs):\n \n if data not in _DATA_OPTIONS:\n raise ValueError(\"data must be one of %s\" % _DATA_OPTIONS)\n\n super(CycleGANConfig, self).__init__(**kwargs)\n self.data = data", "docstring": "Constructs a CycleGANConfig.\n\nArgs:\ndata: `str`, one of `_DATA_OPTIONS`.\n**kwargs: keyword arguments forwarded to super.", "source": "juraj-google-style"} {"code": "def _init_values_from_proto(self, values_def, import_scope=None):\n assert isinstance(values_def, control_flow_pb2.ValuesDef)\n self._values = set((ops.prepend_name_scope(value, import_scope) for value in values_def.values))\n g = ops.get_default_graph()\n self._external_values = {}\n for k, v in values_def.external_values.items():\n k = ops.prepend_name_scope(k, import_scope)\n self._external_values[k] = g.as_graph_element(ops.prepend_name_scope(v, import_scope))\n op_names = set([op.split(':')[0] for 
op in self._values - set(self._external_values.keys())])\n for op in op_names:\n g.as_graph_element(op)._set_control_flow_context(self)", "docstring": "Initializes values and external_values from `ValuesDef` protocol buffer.\n\nArgs:\nvalues_def: `ValuesDef` protocol buffer.\nimport_scope: Optional `string`. Name scope to add.", "source": "github-repos"} {"code": "def lattice_from_abivars(cls=None, *args, **kwargs):\n \n cls = Lattice if cls is None else cls\n kwargs.update(dict(*args))\n d = kwargs\n\n rprim = d.get(\"rprim\", None)\n angdeg = d.get(\"angdeg\", None)\n acell = d[\"acell\"]\n\n if rprim is not None:\n if angdeg is not None:\n raise ValueError(\"angdeg and rprimd are mutually exclusive\")\n rprim = np.reshape(rprim, (3,3))\n rprimd = [float(acell[i]) * rprim[i] for i in range(3)]\n \n return cls(ArrayWithUnit(rprimd, \"bohr\").to(\"ang\"))\n\n elif angdeg is not None:\n angdeg = np.reshape(angdeg, 3)\n\n if np.any(angdeg <= 0.):\n raise ValueError(\"Angles must be > 0 but got %s\" % str(angdeg))\n if angdeg.sum() >= 360.:\n raise ValueError(\"The sum of angdeg must be lower that 360, angdeg %s\" % str(angdeg))\n\n \n \n tol12 = 1e-12\n pi, sin, cos, sqrt = np.pi, np.sin, np.cos, np.sqrt\n rprim = np.zeros((3,3))\n if (abs(angdeg[0] -angdeg[1]) < tol12 and abs(angdeg[1] - angdeg[2]) < tol12 and\n abs(angdeg[0]-90.) + abs(angdeg[1]-90.) + abs(angdeg[2] -90) > tol12):\n \n \n cosang = cos(pi * angdeg[0]/180.0)\n a2 = 2.0/3.0*(1.0 - cosang)\n aa = sqrt(a2)\n cc = sqrt(1.0-a2)\n rprim[0,0] = aa ; rprim[0,1] = 0.0 ; rprim[0,2] = cc\n rprim[1,0] = -0.5*aa; rprim[1,1] = sqrt(3.0)*0.5*aa ; rprim[1,2] = cc\n rprim[2,0] = -0.5*aa; rprim[2,1] = -sqrt(3.0)*0.5*aa; rprim[2,2] = cc\n else:\n \n rprim[0,0] = 1.0\n rprim[1,0] = cos(pi*angdeg[2]/180.)\n rprim[1,1] = sin(pi*angdeg[2]/180.)\n rprim[2,0] = cos(pi*angdeg[1]/180.)\n rprim[2,1] = (cos(pi*angdeg[0]/180.0)-rprim[1,0]*rprim[2,0])/rprim[1,1]\n rprim[2,2] = sqrt(1.0-rprim[2,0]**2-rprim[2,1]**2)\n\n \n rprimd = [float(acell[i]) * rprim[i] for i in range(3)]\n return cls(ArrayWithUnit(rprimd, \"bohr\").to(\"ang\"))\n\n raise ValueError(\"Don't know how to construct a Lattice from dict:\\n%s\" % pformat(d))", "docstring": "Returns a `Lattice` object from a dictionary\nwith the Abinit variables `acell` and either `rprim` in Bohr or `angdeg`\nIf acell is not given, the Abinit default is used i.e. [1,1,1] Bohr\n\nArgs:\ncls: Lattice class to be instantiated. 
pymatgen.core.lattice.Lattice if `cls` is None\n\nExample:\n\nlattice_from_abivars(acell=3*[10], rprim=np.eye(3))", "source": "juraj-google-style"} {"code": "def _add_monomer(self, monomer, mon_vector, move_direction):\n \n translate_by = self.molecule.cart_coords[self.end] + \\\n self.link_distance * move_direction\n monomer.translate_sites(range(len(monomer)), translate_by)\n if not self.linear_chain:\n self._align_monomer(monomer, mon_vector, move_direction)\n \n does_cross = False\n for i, site in enumerate(monomer):\n try:\n self.molecule.append(site.specie, site.coords,\n properties=site.properties)\n except:\n does_cross = True\n polymer_length = len(self.molecule)\n self.molecule.remove_sites(\n range(polymer_length - i, polymer_length))\n break\n if not does_cross:\n self.length += 1\n self.end += len(self.monomer)", "docstring": "extend the polymer molecule by adding a monomer along mon_vector direction\n\nArgs:\nmonomer (Molecule): monomer molecule\nmon_vector (numpy.array): monomer vector that points from head to tail.\nmove_direction (numpy.array): direction along which the monomer\nwill be positioned", "source": "juraj-google-style"} {"code": "def get_primitive_wrapper_cls_for_primitive_cls(self, primitive_cls: Type[message.Message]) -> Type[_primitive_wrappers.PrimitiveWrapper]:", "docstring": "Returns the primitive wrapper for the corresponding primitive_cls.\n\nSubclasses should override and provide logic to return their own wrappers\nfor primitives specific to a particular version of FHIR. The base\nimplementation provides handling for all primitives included in FHIR version\n>= stu3.\n\nArgs:\nprimitive_cls: The type of primitive for which to return a wrapper.\n\nReturns:\nA PrimitiveWrapper for parsing/printing types of primitive_cls.", "source": "github-repos"} {"code": "def extract_github_repo_and_revision_from_source_url(url):\n \n _check_github_url_is_supported(url)\n\n parts = get_parts_of_url_path(url)\n repo_name = parts[1]\n try:\n revision = parts[3]\n except IndexError:\n raise ValueError('Revision cannot be extracted from url: {}'.format(url))\n\n end_index = url.index(repo_name) + len(repo_name)\n repo_url = url[:end_index]\n\n return _strip_trailing_dot_git(repo_url), revision", "docstring": "Given an URL, return the repo name and who owns it.\n\nArgs:\nurl (str): The URL to the GitHub repository\n\nRaises:\nValueError: on url that aren't from github or when the revision cannot be extracted\n\nReturns:\nstr, str: the owner of the repository, the repository name", "source": "juraj-google-style"} {"code": "def egg_info_writer(cmd, basename, filename):\n \n \n setupcfg = next((f for f in setuptools.findall()\n if os.path.basename(f) == 'setup.cfg'), None)\n if not setupcfg:\n return\n parser = six.moves.configparser.ConfigParser() \n parser.read(setupcfg)\n if not parser.has_section('rcli') or not parser.items('rcli'):\n return\n config = dict(parser.items('rcli')) \n for k, v in six.iteritems(config):\n if v.lower() in ('y', 'yes', 'true'):\n config[k] = True\n elif v.lower() in ('n', 'no', 'false'):\n config[k] = False\n else:\n try:\n config[k] = json.loads(v)\n except ValueError:\n pass\n cmd.write_file(basename, filename, json.dumps(config))", "docstring": "Read rcli configuration and write it out to the egg info.\n\nArgs:\ncmd: An egg info command instance to use for writing.\nbasename: The basename of the file to write.\nfilename: The full path of the file to write into the egg info.", "source": "juraj-google-style"} {"code": "def _create_m_objective(w, 
X):\n \n clusters, cells = w.shape\n genes = X.shape[0]\n w_sum = w.sum(1)\n def objective(m):\n m = m.reshape((X.shape[0], w.shape[0]))\n d = m.dot(w)+eps\n temp = X/d\n w2 = w.dot(temp.T)\n deriv = w_sum - w2.T\n return np.sum(d - X*np.log(d))/genes, deriv.flatten()/genes\n return objective", "docstring": "Creates an objective function and its derivative for M, given W and X\n\nArgs:\nw (array): clusters x cells\nX (array): genes x cells", "source": "juraj-google-style"} {"code": "def reaction_formula(reaction, compound_formula):\n \n\n def multiply_formula(compound_list):\n for compound, count in compound_list:\n yield count * compound_formula[compound.name]\n\n for compound, _ in reaction.compounds:\n if compound.name not in compound_formula:\n return None\n else:\n left_form = reduce(\n operator.or_, multiply_formula(reaction.left), Formula())\n right_form = reduce(\n operator.or_, multiply_formula(reaction.right), Formula())\n return left_form, right_form", "docstring": "Calculate formula compositions for both sides of the specified reaction.\n\nIf the compounds in the reaction all have formula, then calculate and\nreturn the chemical compositions for both sides, otherwise return `None`.\n\nArgs:\nreaction: :class:`psamm.reaction.Reaction`.\ncompound_formula: a map from compound id to formula.", "source": "juraj-google-style"} {"code": "def last_step_outputs(self):\n return self._last_step_outputs", "docstring": "A dictionary consisting of outputs to be captured on last step.\n\nKeys in the dictionary are names of tensors to be captured, as specified\nwhen `set_last_step_output` is called.\nValues in the dictionary are the tensors themselves. If\n`set_last_step_output` was called with a `reduce_op` for this output,\nthen the value is the reduced value.\n\nReturns:\nA dictionary with last step outputs.", "source": "github-repos"} {"code": "def _create_gcl_resource(self):\n return gcl_resource.Resource('gce_instance', {'project_id': self.project_id, 'instance_id': self.instance_id, 'zone': self.zone})", "docstring": "Create a configured Resource object.\n\nThe logging.resource.Resource object enables GCL to filter and\nbucket incoming logs according to which resource (host) they're\ncoming from.\n\nReturns:\n(obj): Instance of `google.cloud.logging.resource.Resource`", "source": "codesearchnet"} {"code": "def get_random_telephonenumber():\n phone = [RandomInputHelper.get_random_value(3, '123456789'), RandomInputHelper.get_random_value(3, '12345678'), ''.join(map(str, random.sample(range(10), 4)))]\n return '-'.join(phone)", "docstring": "Get a random 10 digit phone number that complies with most of the requirements.\n\nReturns:\nstr: The random telephone number.", "source": "codesearchnet"} {"code": "def HashBuffer(self, buf):\n for hasher in itervalues(self._hashers):\n hasher.update(buf)\n if self._progress:\n self._progress()\n self._bytes_read += len(buf)", "docstring": "Updates underlying hashers with a given buffer.\n\nArgs:\nbuf: A byte buffer (string object) that is going to be fed to the hashers.", "source": "codesearchnet"} {"code": "def ParseMessage(self, parser_mediator, key, date_time, tokens):\n \n if key != 'task_run':\n raise ValueError('Unknown grammar key: {0:s}'.format(key))\n\n event_data = CronTaskRunEventData()\n event_data.body = tokens.get('body', None)\n event_data.command = tokens.get('command', None)\n event_data.hostname = tokens.get('hostname', None)\n \n event_data.offset = 0\n event_data.pid = tokens.get('pid', None)\n event_data.reporter = 
tokens.get('reporter', None)\n event_data.severity = tokens.get('severity', None)\n event_data.username = tokens.get('username', None)\n\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_WRITTEN)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a syslog body that matched one of defined grammars.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nkey (str): name of the matching grammar.\ndate_time (dfdatetime.DateTimeValues): date and time values.\ntokens (dict[str, str]): tokens derived from a syslog message based on\nthe defined grammar.\n\nRaises:\nValueError: If an unknown key is provided.", "source": "juraj-google-style"} {"code": "def execute(cmd, shell=False, poll_period=1.0, catch_out=False):\n log = logging.getLogger(__name__)\n log.debug('Starting: %s', cmd)\n stdout = ''\n stderr = ''\n if ((not shell) and isinstance(cmd, string_types)):\n cmd = shlex.split(cmd)\n if catch_out:\n process = subprocess.Popen(cmd, shell=shell, stderr=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True)\n else:\n process = subprocess.Popen(cmd, shell=shell, close_fds=True)\n (stdout, stderr) = process.communicate()\n if stderr:\n log.error('There were errors:\\n%s', stderr)\n if stdout:\n log.debug('Process output:\\n%s', stdout)\n returncode = process.returncode\n log.debug('Process exit code: %s', returncode)\n return (returncode, stdout, stderr)", "docstring": "Execute UNIX command and wait for its completion\n\nArgs:\ncmd (str or list): command to execute\nshell (bool): invoke inside shell environment\ncatch_out (bool): collect process' output\n\nReturns:\nreturncode (int): process return code\nstdout (str): collected process stdout (only if catch_out set to true)\nstderr (str): collected process stderr (only if catch_out set to true)", "source": "codesearchnet"} {"code": "def cross_entropy(self, other, name='cross_entropy'):\n with self._name_scope(name):\n return self._cross_entropy(other)", "docstring": "Computes the (Shannon) cross entropy.\n\nDenote this distribution (`self`) by `P` and the `other` distribution by\n`Q`. Assuming `P, Q` are absolutely continuous with respect to\none another and permit densities `p(x) dr(x)` and `q(x) dr(x)`, (Shanon)\ncross entropy is defined as:\n\n```none\nH[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)\n```\n\nwhere `F` denotes the support of the random variable `X ~ P`.\n\nArgs:\nother: `tfp.distributions.Distribution` instance.\nname: Python `str` prepended to names of ops created by this function.\n\nReturns:\ncross_entropy: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`\nrepresenting `n` different calculations of (Shanon) cross entropy.", "source": "github-repos"} {"code": "def on_train_end(self, logs=None):", "docstring": "Called at the end of training.\n\nSubclasses should override for any actions to run.\n\nArgs:\nlogs: Dict. 
Currently the output of the last call to\n`on_epoch_end()` is passed to this argument for this method but\nthat may change in the future.", "source": "github-repos"} {"code": "def create_app(config):\n \n\n \n setup_logging.connect(partial(_initialize_logging, config), weak=False)\n task_postrun.connect(partial(_cleanup_workflow, config), weak=False)\n\n \n patch_celery()\n\n \n app = Celery('lightflow')\n app.conf.update(**config.celery)\n\n \n app.conf.update(\n task_serializer='pickle',\n accept_content=['pickle'],\n result_serializer='pickle',\n task_default_queue=DefaultJobQueueName.Task\n )\n\n if isinstance(app.conf.include, list):\n app.conf.include.extend(LIGHTFLOW_INCLUDE)\n else:\n if len(app.conf.include) > 0:\n raise ConfigOverwriteError(\n 'The content in the include config will be overwritten')\n app.conf.include = LIGHTFLOW_INCLUDE\n\n return app", "docstring": "Create a fully configured Celery application object.\n\nArgs:\nconfig (Config): A reference to a lightflow configuration object.\n\nReturns:\nCelery: A fully configured Celery application object.", "source": "juraj-google-style"} {"code": "def __setitem__(self, char, num):\n \n self.symbols[num] = char\n self.reversesymbols[char] = num", "docstring": "Sets a symbol\nArgs:\nchar (str): The symbol character\nnum (int): The symbol identifier\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def query_phenomizer(usr, pwd, *hpo_terms):\n \n base_string = 'http:\n questions = {'mobilequery':'true', 'terms':','.join(hpo_terms), 'username':usr, 'password':pwd}\n try:\n r = requests.get(base_string, params=questions, timeout=10)\n except requests.exceptions.Timeout:\n raise RuntimeError(\"The request timed out.\")\n \n if not r.status_code == requests.codes.ok:\n raise RuntimeError(\"Phenomizer returned a bad status code: %s\" % r.status_code)\n \n r.encoding = 'utf-8'\n \n return r", "docstring": "Query the phenomizer web tool\n\nArguments:\nusr (str): A username for phenomizer\npwd (str): A password for phenomizer\nhpo_terms (list): A list with hpo terms\n\nReturns:\nraw_answer : The raw result from phenomizer", "source": "juraj-google-style"} {"code": "def thickest(self, n=1, index=False):\n \n s = sorted(range(len(self)), key=lambda k: self[k].thickness)\n indices = s[-n:]\n if index:\n return indices\n else:\n if n == 1:\n \n i = indices[0]\n return self[i]\n else:\n return self[indices]", "docstring": "Returns the thickest interval(s) as a striplog.\n\nArgs:\nn (int): The number of thickest intervals to return. Default: 1.\nindex (bool): If True, only the indices of the intervals are\nreturned. You can use this to index into the striplog.\n\nReturns:\nInterval. The thickest interval. 
Or, if ``index`` was ``True``,\nthe index of the thickest interval.", "source": "juraj-google-style"} {"code": "def nac_p(msg):\n tc = typecode(msg)\n if (tc not in [29, 31]):\n raise RuntimeError(('%s: Not a target state and status message, or operation status message, expecting TC = 29 or 31' % msg))\n msgbin = common.hex2bin(msg)\n if (tc == 29):\n NACp = common.bin2int(msgbin[71:75])\n elif (tc == 31):\n NACp = common.bin2int(msgbin[76:80])\n try:\n EPU = uncertainty.NACp[NACp]['EPU']\n VEPU = uncertainty.NACp[NACp]['VEPU']\n except KeyError:\n (EPU, VEPU) = (uncertainty.NA, uncertainty.NA)\n return (EPU, VEPU)", "docstring": "Calculate NACp, Navigation Accuracy Category - Position\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string, TC = 29 or 31\n\nReturns:\nint or string: 95% horizontal accuracy bounds, Estimated Position Uncertainty\nint or string: 95% vertical accuracy bounds, Vertical Estimated Position Uncertainty", "source": "codesearchnet"} {"code": "def quickhull(sample):\n link = (lambda a, b: np.concatenate((a, b[1:])))\n edge = (lambda a, b: np.concatenate(([a], [b])))\n\n def dome(sample, base):\n (h, t) = base\n dists = np.dot((sample - h), np.dot(((0, (- 1)), (1, 0)), (t - h)))\n outer = np.repeat(sample, (dists > 0), axis=0)\n if len(outer):\n pivot = sample[np.argmax(dists)]\n return link(dome(outer, edge(h, pivot)), dome(outer, edge(pivot, t)))\n else:\n return base\n if (len(sample) > 2):\n axis = sample[:, 0]\n base = np.take(sample, [np.argmin(axis), np.argmax(axis)], axis=0)\n return link(dome(sample, base), dome(sample, base[::(- 1)]))\n else:\n return sample", "docstring": "Find data points on the convex hull of a supplied data set\n\nArgs:\nsample: data points as column vectors n x d\nn - number samples\nd - data dimension (should be two)\n\nReturns:\na k x d matrix containing the convex hull data points", "source": "codesearchnet"} {"code": "def __init__(self, parent):\n \n logger.debug(\"Initialising status bar\")\n\n super(StatusBar, self).__init__(parent)\n\n self.status = tk.StringVar()\n \n self.statusbar = ttk.Label(self, textvariable=self.status, padding=2, anchor=\"center\")\n self.statusbar.grid(column=0, row=0, sticky=\"W E\")\n\n \n self.columnconfigure(0, weight=1)\n\n \n self.set_status(False)", "docstring": "Create a new status bar.\n\nArgs:\nparent: A tk or ttk object", "source": "juraj-google-style"} {"code": "def reduce(reducer, data, chunk_size=DEFAULT_CHUNK_SIZE):\n if (not chunk_size):\n return finalize(reducer, fold(reducer, data))\n chunks = generate_chunks(data, chunk_size)\n intermediate = fold(reducer, next(chunks))\n for chunk in chunks:\n intermediate = merge(reducer, intermediate, fold(reducer, chunk))\n return finalize(reducer, intermediate)", "docstring": "Repeatedly call fold and merge on data and then finalize.\n\nArguments:\ndata: Input for the fold function.\nreducer: The IReducer to use.\nchunk_size: How many items should be passed to fold at a time?\n\nReturns:\nReturn value of finalize.", "source": "codesearchnet"} {"code": "def code_verifier(n_bytes=64):\n verifier = base64.urlsafe_b64encode(os.urandom(n_bytes)).rstrip(b'=')\n if (len(verifier) < 43):\n raise ValueError('Verifier too short. n_bytes must be > 30.')\n elif (len(verifier) > 128):\n raise ValueError('Verifier too long. 
n_bytes must be < 97.')\n else:\n return verifier", "docstring": "Generates a 'code_verifier' as described in section 4.1 of RFC 7636.\n\nThis is a 'high-entropy cryptographic random string' that will be\nimpractical for an attacker to guess.\n\nArgs:\nn_bytes: integer between 31 and 96, inclusive. default: 64\nnumber of bytes of entropy to include in verifier.\n\nReturns:\nBytestring, representing urlsafe base64-encoded random data.", "source": "codesearchnet"} {"code": "def is_reading_in_require_or_assert(self, variable):\n \n variables_read = [n.variables_read for n in self.nodes if n.contains_require_or_assert()]\n variables_read = [item for sublist in variables_read for item in sublist]\n return variable in variables_read", "docstring": "Check if the function reads the variable in an require or assert\nArgs:\nvariable (Variable):\nReturns:\nbool: True if the variable is read", "source": "juraj-google-style"} {"code": "def __init__(self, rom_path):\n \n \n if not isinstance(rom_path, str):\n raise TypeError('rom_path must be of type: str.')\n \n if not os.path.exists(rom_path):\n msg = 'rom_path points to non-existent file: {}.'.format(rom_path)\n raise ValueError(msg)\n \n self.raw_data = np.fromfile(rom_path, dtype='uint8')\n \n if not np.array_equal(self._magic, self._MAGIC):\n raise ValueError('ROM missing magic number in header.')\n if self._zero_fill != 0:\n raise ValueError(\"ROM header zero fill bytes are not zero.\")", "docstring": "Initialize a new ROM.\n\nArgs:\nrom_path (str): the path to the ROM file\n\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def histogram(x, bins=10, range=None):\n if not isinstance(bins, int):\n raise TypeError(f'Argument `bins` must be of type `int`. Received: bins={bins}')\n if bins < 0:\n raise ValueError(f'Argument `bins` should be a non-negative integer. Received: bins={bins}')\n if range:\n if len(range) < 2 or not isinstance(range, tuple):\n raise ValueError(f'Argument `range` must be a tuple of two elements. Received: range={range}')\n if range[1] < range[0]:\n raise ValueError(f'The second element of `range` must be greater than the first. Received: range={range}')\n if any_symbolic_tensors((x,)):\n return Histogram(bins=bins, range=range).symbolic_call(x)\n x = backend.convert_to_tensor(x)\n if len(x.shape) > 1:\n raise ValueError(f'Input tensor must be 1-dimensional. 
Received: input.shape={x.shape}')\n return backend.numpy.histogram(x, bins=bins, range=range)", "docstring": "Computes a histogram of the data tensor `x`.\n\nArgs:\nx: Input tensor.\nbins: An integer representing the number of histogram bins.\nDefaults to 10.\nrange: A tuple representing the lower and upper range of the bins.\nIf not specified, it will use the min and max of `x`.\n\nReturns:\nA tuple containing:\n- A tensor representing the counts of elements in each bin.\n- A tensor representing the bin edges.\n\nExample:\n\n```\n>>> input_tensor = np.random.rand(8)\n>>> keras.ops.histogram(input_tensor)\n(array([1, 1, 1, 0, 0, 1, 2, 1, 0, 1], dtype=int32),\narray([0.0189519 , 0.10294958, 0.18694726, 0.27094494, 0.35494262,\n0.43894029, 0.52293797, 0.60693565, 0.69093333, 0.77493101,\n0.85892869]))\n```", "source": "github-repos"} {"code": "def get_message(self, metadata=False, asctime=True):\n msg = (self.msg if is_string(self.msg) else str(self.msg))\n if self.args:\n try:\n msg = (msg % self.args)\n except:\n msg += str(self.args)\n if asctime:\n msg = ((('[' + self.asctime) + '] ') + msg)\n if metadata:\n msg += ('\\nCalled by %s at %s:%s\\n' % (self.func_name, self.pathname, self.lineno))\n return msg", "docstring": "Return the message after merging any user-supplied arguments with the message.\n\nArgs:\nmetadata: True if function and module name should be added.\nasctime: True if time string should be added.", "source": "codesearchnet"} {"code": "def _object_table(self, object_id):\n if (not isinstance(object_id, ray.ObjectID)):\n object_id = ray.ObjectID(hex_to_binary(object_id))\n message = self._execute_command(object_id, 'RAY.TABLE_LOOKUP', ray.gcs_utils.TablePrefix.OBJECT, '', object_id.binary())\n if (message is None):\n return {}\n gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(message, 0)\n assert (gcs_entry.EntriesLength() > 0)\n entry = ray.gcs_utils.ObjectTableData.GetRootAsObjectTableData(gcs_entry.Entries(0), 0)\n object_info = {'DataSize': entry.ObjectSize(), 'Manager': entry.Manager()}\n return object_info", "docstring": "Fetch and parse the object table information for a single object ID.\n\nArgs:\nobject_id: An object ID to get information about.\n\nReturns:\nA dictionary with information about the object ID in question.", "source": "codesearchnet"} {"code": "def get_nonmonotonic_neurites(neuron, tol=1e-06):\n return [n for n in neuron.neurites if (not is_monotonic(n, tol))]", "docstring": "Get neurites that are not monotonic\n\nArgs:\nneurite(Neurite): neurite to operate on\ntol(float): the tolerance or the ratio\n\nReturns:\nlist of neurites that do not satisfy monotonicity test", "source": "codesearchnet"} {"code": "def uniprot_reviewed_checker(uniprot_id):\n \n\n query_string = 'id:' + uniprot_id\n\n uni_rev_raw = StringIO(bsup.search(query_string, columns='id,reviewed', frmt='tab'))\n uni_rev_df = pd.read_table(uni_rev_raw, sep='\\t', index_col=0)\n uni_rev_df = uni_rev_df.fillna(False)\n uni_rev_df = uni_rev_df[pd.notnull(uni_rev_df.Status)]\n\n uni_rev_df = uni_rev_df.replace(to_replace=\"reviewed\", value=True)\n uni_rev_df = uni_rev_df.replace(to_replace=\"unreviewed\", value=False)\n uni_rev_dict_adder = uni_rev_df.to_dict()['Status']\n\n return uni_rev_dict_adder[uniprot_id]", "docstring": "Check if a single UniProt ID is reviewed or not.\n\nArgs:\nuniprot_id:\n\nReturns:\nbool: If the entry is reviewed", "source": "juraj-google-style"} {"code": "def _map_subgraph_network(inputs, outputs):\n if not 
ops.executing_eagerly_outside_functions():\n base_layer_utils.create_keras_history(outputs)\n _, nodes_by_depth, layers, _ = _map_graph_network(inputs, outputs)\n return (nest.flatten([nodes for nodes in nodes_by_depth.values()]), layers)", "docstring": "Returns the nodes and layers in the topology from `inputs` to `outputs`.\n\nArgs:\ninputs: List of input tensors.\noutputs: List of output tensors.\n\nReturns:\nA tuple of List{Node] and List[Layer].", "source": "github-repos"} {"code": "def read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True):\n \n fname = os.path.join(\n data_dir, GOLD_STANDARD_BLOCKS_DIRNAME, fileroot + GOLD_STANDARD_BLOCKS_EXT)\n with io.open(fname, mode='r') as f:\n data = f.read()\n if split_blocks:\n return filter(None, data[:-1].split('\\n'))\n return filter(None, data)", "docstring": "Read the gold standard blocks file corresponding to identifier ``fileroot``\nin the gold standard blocks directory below the root ``data_dir``.\n\nArgs:\ndata_dir (str)\nfileroot (str)\nsplit_blocks (bool): If True, split the file's content into blocks.\n\nReturns:\nstr or List[str]", "source": "juraj-google-style"} {"code": "def __init__(self, initial_ltol=0.2, initial_stol=0.3, initial_angle_tol=5):\n \n self.initial_ltol = initial_ltol\n self.initial_stol = initial_stol\n self.initial_angle_tol = initial_angle_tol", "docstring": "Tolerances as defined in StructureMatcher. Tolerances will be\ngradually decreased until only a single match is found (if possible).\n\nArgs:\ninitial_ltol: fractional length tolerance\ninitial_stol: site tolerance\ninitial_angle_tol: angle tolerance", "source": "juraj-google-style"} {"code": "def get(self, identifier, default=None):\n \n split_label = (tuple(identifier.split('.'))\n if isinstance(identifier, str) else tuple(identifier))\n if len(split_label) == 1:\n identifier = split_label[0]\n return self.__dict__.get(identifier, default)\n path_item = self\n for identifier in split_label:\n if path_item == default or path_item is None:\n return default\n path_item = path_item.get(identifier, default)\n return path_item", "docstring": "Get a node of the AttrTree using its path string.\n\nArgs:\nidentifier: Path string of the node to return\ndefault: Value to return if no node is found\n\nReturns:\nThe indexed node of the AttrTree", "source": "juraj-google-style"} {"code": "def GetFileEntryByPathSpec(self, path_spec):\n \n row_index = getattr(path_spec, 'row_index', None)\n row_condition = getattr(path_spec, 'row_condition', None)\n\n \n if row_index is None and row_condition is None:\n return sqlite_blob_file_entry.SQLiteBlobFileEntry(\n self._resolver_context, self, path_spec, is_root=True,\n is_virtual=True)\n\n return sqlite_blob_file_entry.SQLiteBlobFileEntry(\n self._resolver_context, self, path_spec)", "docstring": "Retrieves a file entry for a path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\nFileEntry: a file entry or None.", "source": "juraj-google-style"} {"code": "def update(self, current, values=None, finalize=None):\n if finalize is None:\n if self.target is None:\n finalize = False\n else:\n finalize = current >= self.target\n values = values or []\n for k, v in values:\n if k not in self._values_order:\n self._values_order.append(k)\n if k not in self.stateful_metrics:\n value_base = max(current - self._seen_so_far, 1)\n if k not in self._values:\n self._values[k] = [v * value_base, value_base]\n else:\n self._values[k][0] += v * value_base\n self._values[k][1] += value_base\n else:\n 
self._values[k] = [v, 1]\n self._seen_so_far = current\n now = time.time()\n info = ' - %.0fs' % (now - self._start)\n if self.verbose == 1:\n if now - self._last_update < self.interval and (not finalize):\n return\n prev_total_width = self._total_width\n if self._dynamic_display:\n sys.stdout.write('\\x08' * prev_total_width)\n sys.stdout.write('\\r')\n else:\n sys.stdout.write('\\n')\n if self.target is not None:\n numdigits = int(np.log10(self.target)) + 1\n bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target)\n prog = float(current) / self.target\n prog_width = int(self.width * prog)\n if prog_width > 0:\n bar += '=' * (prog_width - 1)\n if current < self.target:\n bar += '>'\n else:\n bar += '='\n bar += '.' * (self.width - prog_width)\n bar += ']'\n else:\n bar = '%7d/Unknown' % current\n self._total_width = len(bar)\n sys.stdout.write(bar)\n time_per_unit = self._estimate_step_duration(current, now)\n if self.target is None or finalize:\n if time_per_unit >= 1 or time_per_unit == 0:\n info += ' %.0fs/%s' % (time_per_unit, self.unit_name)\n elif time_per_unit >= 0.001:\n info += ' %.0fms/%s' % (time_per_unit * 1000.0, self.unit_name)\n else:\n info += ' %.0fus/%s' % (time_per_unit * 1000000.0, self.unit_name)\n else:\n eta = time_per_unit * (self.target - current)\n if eta > 3600:\n eta_format = '%d:%02d:%02d' % (eta // 3600, (eta % 3600) // 60, eta % 60)\n elif eta > 60:\n eta_format = '%d:%02d' % (eta // 60, eta % 60)\n else:\n eta_format = '%ds' % eta\n info = ' - ETA: %s' % eta_format\n for k in self._values_order:\n info += ' - %s:' % k\n if isinstance(self._values[k], list):\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if abs(avg) > 0.001:\n info += ' %.4f' % avg\n else:\n info += ' %.4e' % avg\n else:\n info += ' %s' % self._values[k]\n self._total_width += len(info)\n if prev_total_width > self._total_width:\n info += ' ' * (prev_total_width - self._total_width)\n if finalize:\n info += '\\n'\n sys.stdout.write(info)\n sys.stdout.flush()\n elif self.verbose == 2:\n if finalize:\n numdigits = int(np.log10(self.target)) + 1\n count = ('%' + str(numdigits) + 'd/%d') % (current, self.target)\n info = count + info\n for k in self._values_order:\n info += ' - %s:' % k\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if avg > 0.001:\n info += ' %.4f' % avg\n else:\n info += ' %.4e' % avg\n info += '\\n'\n sys.stdout.write(info)\n sys.stdout.flush()\n self._last_update = now", "docstring": "Updates the progress bar.\n\nArgs:\ncurrent: Index of current step.\nvalues: List of tuples: `(name, value_for_last_step)`. If `name` is in\n`stateful_metrics`, `value_for_last_step` will be displayed as-is.\nElse, an average of the metric over time will be displayed.\nfinalize: Whether this is the last update for the progress bar. 
If\n`None`, defaults to `current >= self.target`.", "source": "github-repos"} {"code": "def get_forecast_errors(y_hat,\n y_true,\n window_size=5,\n batch_size=30,\n smoothing_percent=0.05,\n smoothed=True):\n \n errors = [abs(y_h - y_t) for y_h, y_t in zip(y_hat, y_true)]\n\n if not smoothed:\n return errors\n\n historical_error_window = int(window_size * batch_size * smoothing_percent)\n moving_avg = []\n for i in range(len(errors)):\n left_window = i - historical_error_window\n right_window = i + historical_error_window + 1\n if left_window < 0:\n left_window = 0\n\n if right_window > len(errors):\n right_window = len(errors)\n\n moving_avg.append(np.mean(errors[left_window:right_window]))\n\n return moving_avg", "docstring": "Calculates the forecasting error for two arrays of data. If smoothed errors desired,\nruns EWMA.\nArgs:\ny_hat (list): forecasted values. len(y_hat)==len(y_true).\ny_true (list): true values. len(y_hat)==len(y_true).\nwindow_size (int):\nbatch_size (int):\nsmoothing_percent (float):\nsmoothed (bool): whether the returned errors should be smoothed with EWMA.\nReturns:\n(list): error residuals. Smoothed if specified by user.", "source": "juraj-google-style"} {"code": "def rep1sep(parser: Union[Parser, Sequence[Input]], separator: Union[Parser, Sequence[Input]]) \\\n -> RepeatedOnceSeparatedParser:\n \n if isinstance(parser, str):\n parser = lit(parser)\n if isinstance(separator, str):\n separator = lit(separator)\n return RepeatedOnceSeparatedParser(parser, separator)", "docstring": "Match a parser one or more times separated by another parser.\n\nThis matches repeated sequences of ``parser`` separated by ``separator``.\nIf there is at least one match, a list containing the values of the\n``parser`` matches is returned. The values from ``separator`` are discarded.\nIf it does not match ``parser`` at all, it fails.\n\nArgs:\nparser: Parser or literal\nseparator: Parser or literal", "source": "juraj-google-style"} {"code": "def _checkResponseRegisterAddress(payload, registeraddress):\n _checkString(payload, minlength=2, description='payload')\n _checkRegisteraddress(registeraddress)\n BYTERANGE_FOR_STARTADDRESS = slice(0, 2)\n bytesForStartAddress = payload[BYTERANGE_FOR_STARTADDRESS]\n receivedStartAddress = _twoByteStringToNum(bytesForStartAddress)\n if (receivedStartAddress != registeraddress):\n raise ValueError('Wrong given write start adress: {0}, but commanded is {1}. The data payload is: {2!r}'.format(receivedStartAddress, registeraddress, payload))", "docstring": "Check that the start adress as given in the response is correct.\n\nThe first two bytes in the payload holds the address value.\n\nArgs:\n* payload (string): The payload\n* registeraddress (int): The register address (use decimal numbers, not hex).\n\nRaises:\nTypeError, ValueError", "source": "codesearchnet"} {"code": "def migration_exchange(self, *, users: List[str], **kwargs) -> SlackResponse:\n \n kwargs.update({\"users\": users})\n return self.api_call(\"migration.exchange\", http_verb=\"GET\", params=kwargs)", "docstring": "For Enterprise Grid workspaces, map local user IDs to global user IDs\n\nArgs:\nusers (list): A list of user ids, up to 400 per request.\ne.g. 
['W1234567890', 'U2345678901', 'U3456789012']", "source": "juraj-google-style"} {"code": "def kill_log_monitor(self, check_alive=True):\n \n self._kill_process_type(\n ray_constants.PROCESS_TYPE_LOG_MONITOR, check_alive=check_alive)", "docstring": "Kill the log monitor.\n\nArgs:\ncheck_alive (bool): Raise an exception if the process was already\ndead.", "source": "juraj-google-style"} {"code": "def data(self, namespace):\n \n assert namespace\n\n if namespace in self._data:\n return self._data[namespace]\n\n new_data = {}\n self._data[namespace] = new_data\n return new_data", "docstring": "Gets the thread.local data (dict) for a given namespace.\n\nArgs:\nnamespace (string): The namespace, or key, of the data dict.\n\nReturns:\n(dict)", "source": "juraj-google-style"} {"code": "def Get(self, request, global_params=None):\n config = self.GetMethodConfig('Get')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Returns the dataset specified by datasetID.\n\nArgs:\nrequest: (BigqueryDatasetsGetRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Dataset) The response message.", "source": "github-repos"} {"code": "def copy_coding(source: message.Message, target: message.Message) -> None:\n if not fhir_types.is_type_or_profile_of_coding(source.DESCRIPTOR):\n raise fhir_errors.InvalidFhirError(f'Source: {source.DESCRIPTOR.full_name} is not a type or profile of Coding.')\n if not fhir_types.is_type_or_profile_of_coding(target.DESCRIPTOR):\n raise fhir_errors.InvalidFhirError(f'Target: {target.DESCRIPTOR.full_name} is not a type or profile of Coding.')\n if proto_utils.are_same_message_type(source.DESCRIPTOR, target.DESCRIPTOR):\n target.CopyFrom(source)\n return\n proto_utils.copy_common_field(source, target, 'id')\n proto_utils.copy_common_field(source, target, 'extension')\n proto_utils.copy_common_field(source, target, 'version')\n proto_utils.copy_common_field(source, target, 'display')\n proto_utils.copy_common_field(source, target, 'user_selected')\n source_code = proto_utils.get_value_at_field(source, 'code')\n copy_code(source_code, proto_utils.set_in_parent_or_add(target, 'code'))\n target_system_field = target.DESCRIPTOR.fields_by_name.get('system')\n if target_system_field is not None:\n source_system_str = get_system_for_code(source_code)\n target_system_uri = proto_utils.set_in_parent_or_add(target, target_system_field)\n proto_utils.set_value_at_field(target_system_uri, 'value', source_system_str)", "docstring": "Copies all fields from source to target \"Coding\" messages.\n\nArgs:\nsource: The FHIR coding instance to copy from.\ntarget: The FHIR coding instance to copy to.\n\nRaises:\nInvalidFhirError: In the event that source or target is not a type/profile\nof Coding.", "source": "github-repos"} {"code": "def valid_as_v2_0(voevent):\n \n _return_to_standard_xml(voevent)\n valid_bool = voevent_v2_0_schema.validate(voevent)\n _remove_root_tag_prefix(voevent)\n return valid_bool", "docstring": "Tests if a voevent conforms to the schema.\n\nArgs:\nvoevent(:class:`Voevent`): Root node of a VOEvent etree.\nReturns:\nbool: Whether VOEvent is valid", "source": "juraj-google-style"} {"code": "def fix_pbc(structure, matrix=None):\n \n\n spec = []\n coords = []\n if matrix is None:\n latte = Lattice(structure.lattice.matrix)\n else:\n latte = Lattice(matrix)\n\n for site in structure:\n spec.append(site.specie)\n coord = np.array(site.frac_coords)\n for i in range(3):\n coord[i] -= floor(coord[i])\n 
if np.allclose(coord[i], 1):\n coord[i] = 0\n elif np.allclose(coord[i], 0):\n coord[i] = 0\n else:\n coord[i] = round(coord[i], 7)\n coords.append(coord)\n\n return Structure(latte, spec, coords, site_properties=structure.site_properties)", "docstring": "Set all frac_coords of the input structure within [0,1].\n\nArgs:\nstructure (pymatgen structure object):\ninput structure\nmatrix (lattice matrix, 3 by 3 array/matrix)\nnew structure's lattice matrix, if none, use\ninput structure's matrix\n\nReturn:\nnew structure with fixed frac_coords and lattice matrix", "source": "juraj-google-style"} {"code": "def _component_specs(self):\n raise NotImplementedError('%s._component_specs()' % type(self).__name__)", "docstring": "A nested structure of TypeSpecs for this type's components.\n\nReturns:\nA nested structure describing the component encodings that are returned\nby this TypeSpec's `_to_components` method. In particular, for a\nTypeSpec `spec` and a compatible value `value`:\n\n```\nnest.map_structure(lambda t, c: assert t.is_compatible_with(c),\nspec._component_specs, spec._to_components(value))\n```", "source": "github-repos"} {"code": "def unique_name(self, name, mark_as_used=True):\n \n scope_name = tf.get_variable_scope().name\n if scope_name:\n name = scope_name + \"/\" + name\n\n \n \n name_key = name.lower()\n i = self._names_in_use.get(name_key, 0)\n if mark_as_used:\n self._names_in_use[name_key] = i + 1\n if i > 0:\n base_name_key = name_key\n while name_key in self._names_in_use:\n name_key = \"%s_%d\" % (base_name_key, i)\n i += 1\n if mark_as_used:\n self._names_in_use[name_key] = 1\n name = \"%s_%d\" % (name, i-1)\n\n return name", "docstring": "Like tf.Graph.unique_name, returns a unique operation name for `name`.\n\nArgs:\nname: The name for an operation.\nmark_as_used: whether to mark this name as being used.\n\nReturns:\nA string to use as the name for the operation.", "source": "juraj-google-style"} {"code": "def wrap_and_format(self, width=None, include_params=False, include_return=False, excluded_params=None):\n if (excluded_params is None):\n excluded_params = []\n out = StringIO()\n if (width is None):\n (width, _height) = get_terminal_size()\n for line in self.maindoc:\n if isinstance(line, Line):\n out.write(fill(line.contents, width=width))\n out.write('\\n')\n elif isinstance(line, BlankLine):\n out.write('\\n')\n elif isinstance(line, ListItem):\n out.write(fill(line.contents, initial_indent=(' %s ' % line.marker[0]), subsequent_indent=' ', width=width))\n out.write('\\n')\n if include_params:\n included_params = (set(self.param_info) - set(excluded_params))\n if (len(included_params) > 0):\n out.write('\\nParameters:\\n')\n for param in included_params:\n info = self.param_info[param]\n out.write((' - %s (%s):\\n' % (param, info.type_name)))\n out.write(fill(info.desc, initial_indent=' ', subsequent_indent=' ', width=width))\n out.write('\\n')\n if include_return:\n print('Returns:')\n print((' ' + self.return_info.type_name))\n return out.getvalue()", "docstring": "Wrap, format and print this docstring for a specific width.\n\nArgs:\nwidth (int): The number of characters per line. 
If set to None\nthis will be inferred from the terminal width and default\nto 80 if not passed or if passed as None and the terminal\nwidth cannot be determined.\ninclude_return (bool): Include the return information section\nin the output.\ninclude_params (bool): Include a parameter information section\nin the output.\nexcluded_params (list): An optional list of parameter names to exclude.\nOptions for excluding things are, for example, 'self' or 'cls'.", "source": "codesearchnet"} {"code": "def locate(self, maximum_items=None, storage_status_mask=None, object_group_member=None, attributes=None):\n if (maximum_items is not None):\n if (not isinstance(maximum_items, six.integer_types)):\n raise TypeError('maximum_items must be an integer')\n if (storage_status_mask is not None):\n if (not isinstance(storage_status_mask, six.integer_types)):\n raise TypeError('storage_status_mask must be an integer')\n if (object_group_member is not None):\n if (not isinstance(object_group_member, enums.ObjectGroupMember)):\n raise TypeError('object_group_member must be a ObjectGroupMemberenumeration')\n if (attributes is not None):\n if ((not isinstance(attributes, list)) or (all((isinstance(item, cobjects.Attribute) for item in attributes)) is False)):\n raise TypeError('attributes must be a list of attributes')\n result = self.proxy.locate(maximum_items, storage_status_mask, object_group_member, attributes)\n status = result.result_status.value\n if (status == enums.ResultStatus.SUCCESS):\n return result.uuids\n else:\n reason = result.result_reason.value\n message = result.result_message.value\n raise exceptions.KmipOperationFailure(status, reason, message)", "docstring": "Search for managed objects, depending on the attributes specified in\nthe request.\n\nArgs:\nmaximum_items (integer): Maximum number of object identifiers the\nserver MAY return.\nstorage_status_mask (integer): A bit mask that indicates whether\non-line or archived objects are to be searched.\nobject_group_member (ObjectGroupMember): An enumeration that\nindicates the object group member type.\nattributes (list): Attributes the are REQUIRED to match those in a\ncandidate object.\n\nReturns:\nlist: The Unique Identifiers of the located objects\n\nRaises:\nClientConnectionNotOpen: if the client connection is unusable\nKmipOperationFailure: if the operation result is a failure\nTypeError: if the input arguments are invalid", "source": "codesearchnet"} {"code": "def _create_simple_tf1_gather_model(self, input_type: dtypes.DType, use_variable_for_filter=False) -> Tuple[core.Tensor, core.Tensor]:\n in_placeholder = array_ops.placeholder(input_type, shape=6)\n filters = np.random.randn(128, 32).astype(np.float32)\n if use_variable_for_filter:\n filters = variables.Variable(filters)\n output_tensor = array_ops.gather_v2(filters, in_placeholder)\n return (in_placeholder, output_tensor)", "docstring": "Creates a basic gather model.\n\nThis is intended to be used for TF1 (graph mode) tests.\n\nArgs:\ninput_type: type of the input index tensor for gather operation.\nuse_variable_for_filter: Setting this to `True` makes the filter for the\ngather operation a `tf.Variable`.\n\nReturns:\nin_placeholder: Input tensor placeholder.\noutput_tensor: The resulting tensor of the gather operation.", "source": "github-repos"} {"code": "def do_ams_sto_put(endpoint, body, content_length):\n \n headers = {\"Accept\": json_acceptformat,\n \"Accept-Charset\" : charset,\n \"x-ms-blob-type\" : \"BlockBlob\",\n \"x-ms-meta-m1\": \"v1\",\n \"x-ms-meta-m2\": \"v2\",\n 
\"x-ms-version\" : \"2015-02-21\",\n \"Content-Length\" : str(content_length)}\n return requests.put(endpoint, data=body, headers=headers)", "docstring": "Do a PUT request to the Azure Storage API and return JSON.\nArgs:\nendpoint (str): Azure Media Services Initial Endpoint.\nbody (str): Azure Media Services Content Body.\ncontent_length (str): Content_length.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"} {"code": "def is_active(self, node):\n if (isinstance(node.value, gast.Call) and (anno.getanno(node.value, 'func', False) == utils.pop)):\n return True\n for succ in gast.walk(node.value):\n if (isinstance(succ, gast.Name) and isinstance(succ.ctx, gast.Load) and (succ.id in self.active_variables)):\n return True\n return False", "docstring": "Checks whether a statement is active.\n\nAn assignment is active when its right hand side contains active\nvariables.\n\nArgs:\nnode: an instance of gast.Assign\n\nReturns:\nWhether the statement is active.", "source": "codesearchnet"} {"code": "def randwindow(self, window_shape):\n \n row = random.randrange(window_shape[0], self.shape[1])\n col = random.randrange(window_shape[1], self.shape[2])\n return self[:, row-window_shape[0]:row, col-window_shape[1]:col]", "docstring": "Get a random window of a given shape from within an image\n\nArgs:\nwindow_shape (tuple): The desired shape of the returned image as (height, width) in pixels.\n\nReturns:\nimage: a new image object of the specified shape and same type", "source": "juraj-google-style"} {"code": "def construct_optional_traversal_tree(complex_optional_roots, location_to_optional_roots):\n tree = OptionalTraversalTree(complex_optional_roots)\n for optional_root_locations_stack in six.itervalues(location_to_optional_roots):\n tree.insert(list(optional_root_locations_stack))\n return tree", "docstring": "Return a tree of complex optional root locations.\n\nArgs:\ncomplex_optional_roots: list of @optional locations (location immmediately preceding\nan @optional Traverse) that expand vertex fields\nlocation_to_optional_roots: dict mapping from location -> optional_roots where location is\nwithin some number of @optionals and optional_roots is a list\nof optional root locations preceding the successive @optional\nscopes within which the location resides\n\nReturns:\nOptionalTraversalTree object representing the tree of complex optional roots", "source": "codesearchnet"} {"code": "def _PrintAnalysisReportsDetails(self, storage_reader):\n \n if not storage_reader.HasAnalysisReports():\n self._output_writer.Write('No analysis reports stored.\\n\\n')\n return\n\n for index, analysis_report in enumerate(\n storage_reader.GetAnalysisReports()):\n title = 'Analysis report: {0:d}'.format(index)\n table_view = views.ViewsFactory.GetTableView(\n self._views_format_type, title=title)\n\n table_view.AddRow(['String', analysis_report.GetString()])\n\n table_view.Write(self._output_writer)", "docstring": "Prints the details of the analysis reports.\n\nArgs:\nstorage_reader (StorageReader): storage reader.", "source": "juraj-google-style"} {"code": "def listen(self, grpc_port):\n \n if self._grpc_port:\n raise ValueError(\n 'This InteractiveDebuggerPlugin instance is already listening at '\n 'gRPC port %d' % self._grpc_port)\n self._grpc_port = grpc_port\n\n sys.stderr.write('Creating InteractiveDebuggerPlugin at port %d\\n' %\n self._grpc_port)\n sys.stderr.flush()\n self._debugger_data_server = (\n interactive_debugger_server_lib.InteractiveDebuggerDataServer(\n self._grpc_port))\n\n 
self._server_thread = threading.Thread(\n target=self._debugger_data_server.run_server)\n self._server_thread.start()\n\n signal.signal(signal.SIGINT, self.signal_handler)", "docstring": "Start listening on the given gRPC port.\n\nThis method of an instance of InteractiveDebuggerPlugin can be invoked at\nmost once. This method is not thread safe.\n\nArgs:\ngrpc_port: port number to listen at.\n\nRaises:\nValueError: If this instance is already listening at a gRPC port.", "source": "juraj-google-style"} {"code": "def __init__(self, conf=None, conf_file=\".onshapepy\"):\n \n\n default_conf = {\n 'stack': 'https:\n 'logging': False,\n 'creds': None\n }\n\n try:\n user_conf = yaml.load(Path.home().joinpath(conf_file))\n default_conf.update(user_conf)\n except:\n pass\n\n if conf:\n default_conf.update(conf)\n\n self.conf = default_conf\n\n self._stack = default_conf['stack']\n self._api = Onshape(default_conf['stack'], default_conf['creds'], default_conf['logging'])", "docstring": "Instantiates a new Onshape client.\n\nAttributes:\n- conf: the configuration that generated this client. This is read-only and for testing purposes.\n\nArgs:\n- configuration (dict, optional): a dictionary of configuration options. Default behavior is to load this from a YAML file that\nis located in user's home directory and name '.onshapepy'. For options that can be set, look\nat the documentation section on 'configuration'.", "source": "juraj-google-style"} {"code": "def date_to_datetime(self, time_input, tz=None):\n dt = None\n try:\n dt = parser.parse(time_input)\n if ((tz is not None) and (tz != dt.tzname())):\n if (dt.tzinfo is None):\n dt = self._replace_timezone(dt)\n dt = dt.astimezone(timezone(tz))\n except IndexError:\n pass\n except TypeError:\n pass\n except ValueError:\n pass\n return dt", "docstring": "Convert ISO 8601 and other date strings to datetime.datetime type.\n\nArgs:\ntime_input (string): The time input string (see formats above).\ntz (string): The time zone for the returned data.\n\nReturns:\n(datetime.datetime): Python datetime.datetime object.", "source": "codesearchnet"} {"code": "def _readable_flags(transport):\n \n if 'flags' not in transport:\n return None\n _flag_list = []\n flags = transport['flags']\n if flags & dpkt.tcp.TH_SYN:\n if flags & dpkt.tcp.TH_ACK:\n _flag_list.append('syn_ack')\n else:\n _flag_list.append('syn')\n elif flags & dpkt.tcp.TH_FIN:\n if flags & dpkt.tcp.TH_ACK:\n _flag_list.append('fin_ack')\n else:\n _flag_list.append('fin')\n elif flags & dpkt.tcp.TH_RST:\n _flag_list.append('rst')\n elif flags & dpkt.tcp.TH_PUSH:\n _flag_list.append('psh')\n return _flag_list", "docstring": "Method that turns bit flags into a human readable list\n\nArgs:\ntransport (dict): transport info, specifically needs a 'flags' key with bit_flags\nReturns:\nlist: a list of human readable flags (e.g. 
['syn_ack', 'fin', 'rst', ...]", "source": "juraj-google-style"} {"code": "def setExtention(self, ext):\n \n import warnings\n msg = \"the setExtention method is deprecated, please use setExtension\"\n warnings.warn(msg)\n self.setExtension(ext)", "docstring": "Deprecated: use :meth:`setExtension`.\n\nArgs:\next (str):", "source": "juraj-google-style"} {"code": "def build_from_generator(cls, generator, target_size, max_subtoken_length=None, reserved_tokens=None):\n token_counts = collections.defaultdict(int)\n for item in generator:\n for tok in tokenizer.encode(native_to_unicode(item)):\n token_counts[tok] += 1\n encoder = cls.build_to_target_size(target_size, token_counts, 1, 1000.0, max_subtoken_length=max_subtoken_length, reserved_tokens=reserved_tokens)\n return encoder", "docstring": "Builds a SubwordTextEncoder from the generated text.\n\nArgs:\ngenerator: yields text.\ntarget_size: int, approximate vocabulary size to create.\nmax_subtoken_length: Maximum length of a subtoken. If this is not set,\nthen the runtime and memory use of creating the vocab is quadratic in\nthe length of the longest token. If this is set, then it is instead\nO(max_subtoken_length * length of longest token).\nreserved_tokens: List of reserved tokens. The global variable\n`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this\nargument is `None`, it will use `RESERVED_TOKENS`.\n\nReturns:\nSubwordTextEncoder with `vocab_size` approximately `target_size`.", "source": "codesearchnet"} {"code": "def _MatmulExtractingThreeDiagonals(x, y_tr):\n diag = math_ops.reduce_sum(x * y_tr, axis=-1)\n if y_tr.shape.is_fully_defined():\n zeros = array_ops.zeros(list(x.shape[:-2]) + [1, x.shape[-1]], dtype=x.dtype)\n superdiag = math_ops.reduce_sum(x * array_ops.concat((y_tr[..., 1:, :], zeros), axis=-2), axis=-1)\n subdiag = math_ops.reduce_sum(x * array_ops.concat((zeros, y_tr[..., :-1, :]), axis=-2), axis=-1)\n else:\n rank = array_ops.rank(y_tr)\n zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)\n superdiag_pad = array_ops.concat((zeros, array_ops.constant([[0, 1], [0, 0]])), axis=0)\n superdiag = math_ops.reduce_sum(x * array_ops.pad(y_tr[..., 1:, :], superdiag_pad), axis=-1)\n subdiag_pad = array_ops.concat((zeros, array_ops.constant([[1, 0], [0, 0]])), axis=0)\n subdiag = math_ops.reduce_sum(x * array_ops.pad(y_tr[..., :-1, :], subdiag_pad), axis=-1)\n return array_ops_stack.stack([superdiag, diag, subdiag], axis=-2)", "docstring": "Multiplies matrices and extracts three diagonals from the product.\n\nWith sizes M x K and K x M, this function takes O(MK) time and O(M) space,\nwhile using math_ops.matmul, and then extracting the diagonals would take\nO(M^2 K) time and O(M^2) space.\n\nArgs:\nx: first matrix\ny_tr: second matrix transposed\n\nReturns:\nDiagonals of the product in compact format (see\nlinalg_ops.tridiagonal_solve)", "source": "github-repos"} {"code": "def _SerializeAttributeContainer(self, attribute_container):\n \n if self._serializers_profiler:\n self._serializers_profiler.StartTiming(\n attribute_container.CONTAINER_TYPE)\n\n try:\n attribute_container_data = self._serializer.WriteSerialized(\n attribute_container)\n if not attribute_container_data:\n raise IOError(\n 'Unable to serialize attribute container: {0:s}.'.format(\n attribute_container.CONTAINER_TYPE))\n\n attribute_container_data = attribute_container_data.encode('utf-8')\n\n finally:\n if self._serializers_profiler:\n self._serializers_profiler.StopTiming(\n attribute_container.CONTAINER_TYPE)\n\n return 
attribute_container_data", "docstring": "Serializes an attribute container.\n\nArgs:\nattribute_container (AttributeContainer): attribute container.\n\nReturns:\nbytes: serialized attribute container.\n\nRaises:\nIOError: if the attribute container cannot be serialized.\nOSError: if the attribute container cannot be serialized.", "source": "juraj-google-style"} {"code": "def get_bucket(self, key, rate=None, capacity=None, **kwargs):\n \n return buckets.Bucket(\n key=key,\n rate=rate or self.rate,\n capacity=capacity or self.capacity,\n storate=self.storate,\n **kwargs)", "docstring": "Fetch a Bucket for the given key.\n\nrate and capacity might be overridden from the Throttler defaults.\n\nArgs:\nrate (float): Units regenerated by second, or None to keep\nThrottler defaults\ncapacity (int): Maximum units available, or None to keep Throttler\ndefaults", "source": "juraj-google-style"} {"code": "def liquid_precipitation_quantity(self, value=99.0):\n \n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError(\n 'value {} need to be of type float '\n 'for field `liquid_precipitation_quantity`'.format(value))\n\n self._liquid_precipitation_quantity = value", "docstring": "Corresponds to IDD Field `liquid_precipitation_quantity`\n\nArgs:\nvalue (float): value for IDD Field `liquid_precipitation_quantity`\nUnit: hr\nMissing value: 99.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"} {"code": "def pageNames(matching=False,workbooks=True,graphs=True):\n \n \n pages=[]\n if workbooks:\n pages.extend(PyOrigin.WorksheetPages())\n if graphs:\n pages.extend(PyOrigin.GraphPages())\n\n \n pages = [x.GetName() for x in pages]\n\n \n if matching:\n pages=[x for x in pages if matching in x]\n return pages", "docstring": "Returns the names of everything (books, notes, graphs, etc.) in the project.\n\nArgs:\nmatching (str, optional): if given, only return names with this string in it\nworkbooks (bool): if True, return workbooks\ngraphs (bool): if True, return workbooks\n\nReturns:\nA list of the names of what you requested", "source": "juraj-google-style"} {"code": "class ClapProcessor(ProcessorMixin):\n feature_extractor_class = 'ClapFeatureExtractor'\n tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')\n\n def __init__(self, feature_extractor, tokenizer):\n super().__init__(feature_extractor, tokenizer)\n\n def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):\n \n sampling_rate = kwargs.pop('sampling_rate', None)\n if text is None and audios is None:\n raise ValueError('You have to specify either text or audios. 
Both cannot be none.')\n if text is not None:\n encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)\n if audios is not None:\n audio_features = self.feature_extractor(audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs)\n if text is not None and audios is not None:\n encoding.update(audio_features)\n return encoding\n elif text is not None:\n return encoding\n else:\n return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)\n\n def batch_decode(self, *args, **kwargs):\n \n return self.tokenizer.batch_decode(*args, **kwargs)\n\n def decode(self, *args, **kwargs):\n \n return self.tokenizer.decode(*args, **kwargs)\n\n @property\n def model_input_names(self):\n tokenizer_input_names = self.tokenizer.model_input_names\n feature_extractor_input_names = self.feature_extractor.model_input_names\n return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))", "docstring": "Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBerta tokenizer into a single processor.\n\n[`ClapProcessor`] offers all the functionalities of [`ClapFeatureExtractor`] and [`RobertaTokenizerFast`]. See the\n[`~ClapProcessor.__call__`] and [`~ClapProcessor.decode`] for more information.\n\nArgs:\nfeature_extractor ([`ClapFeatureExtractor`]):\nThe audio processor is a required input.\ntokenizer ([`RobertaTokenizerFast`]):\nThe tokenizer is a required input.", "source": "github-repos"} {"code": "def add_block_parser(subparsers, parent_parser):\n \n parser = subparsers.add_parser(\n 'block',\n description='Provides subcommands to display information about the '\n 'blocks in the current blockchain.',\n help='Displays information on blocks in the current blockchain')\n\n grand_parsers = parser.add_subparsers(\n title='subcommands',\n dest='subcommand')\n\n grand_parsers.required = True\n\n description = (\n 'Displays information for all blocks on the current '\n 'blockchain, including the block id and number, public keys all '\n 'of allsigners, and number of transactions and batches.')\n\n list_parser = grand_parsers.add_parser(\n 'list',\n help='Displays information for all blocks on the current blockchain',\n description=description,\n parents=[base_http_parser(), base_list_parser()],\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n list_parser.add_argument(\n '-n',\n '--count',\n default=100,\n type=int,\n help='the number of blocks to list',\n )\n\n description = (\n 'Displays information about the specified block on '\n 'the current blockchain')\n\n show_parser = grand_parsers.add_parser(\n 'show',\n help=description,\n description=description + '.',\n parents=[base_http_parser(), base_show_parser()],\n formatter_class=argparse.RawDescriptionHelpFormatter)\n show_parser.add_argument(\n 'block_id',\n type=str,\n help='id (header_signature) of the block')", "docstring": "Adds arguments parsers for the block list and block show commands\n\nArgs:\nsubparsers: Add parsers to this subparser object\nparent_parser: The parent argparse.ArgumentParser object", "source": "juraj-google-style"} {"code": "def add_device(self, path):\n\t\t\n\n\t\thdevice = self._libinput.libinput_path_add_device(\n\t\t\tself._li, path.encode())\n\t\tif hdevice:\n\t\t\treturn Device(hdevice, self._libinput)\n\t\treturn None", "docstring": "Add a device to a libinput context.\n\nIf successful, the device will be added to the internal list and\nre-opened on :meth:`~libinput.LibInput.resume`. 
The device can be\nremoved with :meth:`remove_device`.\nIf the device was successfully initialized, it is returned.\n\nArgs:\npath (str): Path to an input device.\nReturns:\n~libinput.define.Device: A device object or :obj:`None`.", "source": "juraj-google-style"} {"code": "def process_obj(self, obj: Union[(URIRef, Literal, str)]) -> Union[(URIRef, Literal)]:\n if (isinstance(obj, dict) or isinstance(obj, list)):\n exit((str(obj) + ': should be str or intended to be a URIRef or Literal.'))\n if (isinstance(obj, Literal) or isinstance(obj, URIRef)):\n prefix = self.find_prefix(obj)\n if prefix:\n self.process_prefix(prefix)\n return obj\n if (len(obj) > 8):\n if (('http' == obj[:4]) and (':\n prefix = self.find_prefix(obj)\n if prefix:\n self.process_prefix(prefix)\n return URIRef(obj)\n if (':' in str(obj)):\n (presumed_prefix, info) = obj.split(':', 1)\n namespace: Union[(Namespace, None)] = self.process_prefix(presumed_prefix)\n if namespace:\n return namespace[info]\n return Literal(obj)", "docstring": "Gives component the proper node type\n\nArgs:\nobj: Entity object to be converted to its appropriate node type\n\nReturns:\nURIRef or Literal type of the object provided.\n\nRaises:\nSystemExit: If object is a dict or list it becomes str with broken data. Needs to\ncome in one object at a time.", "source": "codesearchnet"} {"code": "def FoldByteStream(self, mapped_value, **unused_kwargs):\n \n try:\n value = self.FoldValue(mapped_value)\n return self._operation.WriteTo(tuple([value]))\n\n except Exception as exception:\n error_string = (\n 'Unable to write: {0:s} to byte stream with error: {1!s}').format(\n self._data_type_definition.name, exception)\n raise errors.FoldingError(error_string)", "docstring": "Folds the data type into a byte stream.\n\nArgs:\nmapped_value (object): mapped value.\n\nReturns:\nbytes: byte stream.\n\nRaises:\nFoldingError: if the data type definition cannot be folded into\nthe byte stream.", "source": "juraj-google-style"} {"code": "def get_storage_usage(access_token, subscription_id, location):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Storage/locations/', location, '/usages', '?api-version=', STORAGE_API])\n return do_get(endpoint, access_token)", "docstring": "Returns storage usage and quota information for the specified subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body of storage account usage.", "source": "codesearchnet"} {"code": "def Load(cls, file_input, client=None):\n if (client is None):\n client = AdWordsClient.LoadFromStorage()\n try:\n data = yaml.safe_load(file_input)\n except yaml.YAMLError as e:\n raise googleads.errors.GoogleAdsError(('Error loading IncrementalUploadHelper from file: %s' % str(e)))\n try:\n request_builder = BatchJobHelper.GetRequestBuilder(client, version=data['version'], server=data['server'])\n return cls(request_builder, data['upload_url'], current_content_length=data['current_content_length'], is_last=data['is_last'])\n except KeyError as e:\n raise googleads.errors.GoogleAdsValueError(('Can\\'t parse IncrementalUploadHelper from file. Required field \"%s\" is missing.' % e.message))", "docstring": "Loads an IncrementalUploadHelper from the given file-like object.\n\nArgs:\nfile_input: a file-like object containing a serialized\nIncrementalUploadHelper.\nclient: an AdWordsClient instance. 
If not specified, an AdWordsClient will\nbe instantiated using the default configuration file.\n\nReturns:\nAn IncrementalUploadHelper instance initialized using the contents of the\nserialized input file.\n\nRaises:\nGoogleAdsError: If there is an error reading the input file containing the\nserialized IncrementalUploadHelper.\nGoogleAdsValueError: If the contents of the input file can't be parsed to\nproduce an IncrementalUploadHelper.", "source": "codesearchnet"} {"code": "def QueueQueryAndOwn(self, queue, lease_seconds, limit, timestamp):\n \n \n try:\n lock = DB.LockRetryWrapper(queue, lease_time=lease_seconds)\n return self._QueueQueryAndOwn(\n lock.subject,\n lease_seconds=lease_seconds,\n limit=limit,\n timestamp=timestamp)\n except DBSubjectLockError:\n \n \n \n return []\n except Error as e:\n logging.warning(\"Datastore exception: %s\", e)\n return []", "docstring": "Returns a list of Tasks leased for a certain time.\n\nArgs:\nqueue: The queue to query from.\nlease_seconds: The tasks will be leased for this long.\nlimit: Number of values to fetch.\ntimestamp: Range of times for consideration.\n\nReturns:\nA list of GrrMessage() objects leased.", "source": "juraj-google-style"} {"code": "def _apply_dense(self, grad, var):\n raise NotImplementedError()", "docstring": "Add ops to apply dense gradients to `var`.\n\nArgs:\ngrad: A `Tensor`.\nvar: A `Variable` object.\n\nReturns:\nAn `Operation`.", "source": "github-repos"} {"code": "def update_server_def(self, server_def, keep_alive_secs=_KEEP_ALIVE_SECS):\n if not server_def:\n raise ValueError('server_def is None.')\n self._server_def = server_def\n if self._context_handle:\n server_def_str = server_def.SerializeToString()\n pywrap_tfe.TFE_ContextUpdateServerDef(self._context_handle, keep_alive_secs, server_def_str)\n self._initialize_logical_devices()\n self._clear_caches()", "docstring": "Update a server_def on the context.\n\nArgs:\nserver_def: A tensorflow::ServerDef proto. Enables execution on remote\ndevices.\nkeep_alive_secs: Num. seconds after which the remote end will hang up. As\nlong as the client is still alive, the server state for the context will\nbe kept alive. If the client is killed (or there is some failure), the\nserver will clean up its context keep_alive_secs after the final RPC it\nreceives.\n\nRaises:\nValueError: if server_def is None.", "source": "github-repos"} {"code": "def _set_batch_size(self, batch_size):\n if not self._has_valid_tensors():\n raise ValueError('The batch size cannot be set for this model. Please use input_shapes parameter.')\n for tensor in self._input_tensors:\n shape = tensor.shape.as_list()\n if shape[0] is None:\n shape[0] = batch_size\n tensor.set_shape(shape)", "docstring": "Sets the first dimension of the input tensor to `batch_size`.\n\nArgs:\nbatch_size: Batch size for the model. Replaces the first dimension of an\ninput size array if undefined. (default 1)\n\nRaises:\nValueError: input_tensor is not defined.", "source": "github-repos"} {"code": "def easeInOutQuint(n):\n \n _checkRange(n)\n n = 2 * n\n if n < 1:\n return 0.5 * n**5\n else:\n n = n - 2\n return 0.5 * (n**5 + 2)", "docstring": "A quintic tween function that accelerates, reaches the midpoint, and then decelerates.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. 
Suitable for passing to getPointOnLine().", "source": "juraj-google-style"} {"code": "def get_new_python_files(diff_with_last_commit=False) -> List[str]:\n repo = Repo(PATH_TO_REPO)\n try:\n main = repo.refs.main\n except AttributeError:\n main = repo.remotes.origin.refs.main\n if not diff_with_last_commit:\n print(f'main is at {main.commit}')\n print(f'Current head is at {repo.head.commit}')\n commits = repo.merge_base(main, repo.head)\n for commit in commits:\n print(f'Branching commit: {commit}')\n else:\n print(f'main is at {main.commit}')\n commits = main.commit.parents\n for commit in commits:\n print(f'Parent commit: {commit}')\n return get_new_python_files_between_commits(repo.head.commit, commits)", "docstring": "Return a list of python files that have been added between the current head and the main branch.\n\nReturns:\n`List[str]`: The list of python files added.", "source": "github-repos"} {"code": "def invoke_process(self, windowed_value, restriction=None, watermark_estimator_state=None, additional_args=None, additional_kwargs=None):\n raise NotImplementedError", "docstring": "Invokes the DoFn.process() function.\n\nArgs:\nwindowed_value: a WindowedValue object that gives the element for which\nprocess() method should be invoked along with the window\nthe element belongs to.\nrestriction: The restriction to use when executing this splittable DoFn.\nShould only be specified for splittable DoFns.\nwatermark_estimator_state: The watermark estimator state to use when\nexecuting this splittable DoFn. Should only\nbe specified for splittable DoFns.\nadditional_args: additional arguments to be passed to the current\n`DoFn.process()` invocation, usually as side inputs.\nadditional_kwargs: additional keyword arguments to be passed to the\ncurrent `DoFn.process()` invocation.", "source": "github-repos"} {"code": "def _get_namedrange(book, rangename, sheetname=None):\n\n def cond(namedef):\n if (namedef.type.upper() == 'RANGE'):\n if (namedef.name.upper() == rangename.upper()):\n if (sheetname is None):\n if (not namedef.localSheetId):\n return True\n else:\n sheet_id = [sht.upper() for sht in book.sheetnames].index(sheetname.upper())\n if (namedef.localSheetId == sheet_id):\n return True\n return False\n\n def get_destinations(name_def):\n 'Workaround for the bug in DefinedName.destinations'\n from openpyxl.formula import Tokenizer\n from openpyxl.utils.cell import SHEETRANGE_RE\n if (name_def.type == 'RANGE'):\n tok = Tokenizer(('=' + name_def.value))\n for part in tok.items:\n if (part.subtype == 'RANGE'):\n m = SHEETRANGE_RE.match(part.value)\n if m.group('quoted'):\n sheet_name = m.group('quoted')\n else:\n sheet_name = m.group('notquoted')\n (yield (sheet_name, m.group('cells')))\n namedef = next((item for item in book.defined_names.definedName if cond(item)), None)\n if (namedef is None):\n return None\n dests = get_destinations(namedef)\n xlranges = []\n sheetnames_upper = [name.upper() for name in book.sheetnames]\n for (sht, addr) in dests:\n if sheetname:\n sht = sheetname\n index = sheetnames_upper.index(sht.upper())\n xlranges.append(book.worksheets[index][addr])\n if (len(xlranges) == 1):\n return xlranges[0]\n else:\n return xlranges", "docstring": "Get range from a workbook.\n\nA workbook can contain multiple definitions for a single name,\nas a name can be defined for the entire book or for\na particular sheet.\n\nIf sheet is None, the book-wide def is searched,\notherwise sheet-local def is looked up.\n\nArgs:\nbook: An openpyxl workbook object.\nrangename (str): Range 
expression, such as \"A1\", \"$G4:$K10\",\nnamed range \"NamedRange1\".\nsheetname (str, optional): None for book-wide name def,\nsheet name for sheet-local named range.\n\nReturns:\nRange object specified by the name.", "source": "codesearchnet"} {"code": "def add(self, files):\n \n if files.__class__.__name__ == 'str':\n self._files.append(files)\n else:\n self._files.extend(files)", "docstring": "Adds files to check.\n\nArgs:\nfiles: List of files to check.", "source": "juraj-google-style"} {"code": "def search(self, q):\n results = self._api.search(q=q)\n return results", "docstring": "Search tweets by keyword.\n\nArgs:\nq: keyword\n\nReturns:\nlist: tweet list", "source": "codesearchnet"} {"code": "def usufyToPngExport(d, fPath):\n newGraph = _generateGraphData(d)\n import matplotlib.pyplot as plt\n nx.draw(newGraph)\n plt.savefig(fPath)", "docstring": "Workaround to export to a png file.\n\nArgs:\n-----\nd: Data to export.\nfPath: File path for the output file.", "source": "codesearchnet"} {"code": "def __init__(self, downloader, read_buffer_size=io.DEFAULT_BUFFER_SIZE, mode='rb'):\n self._downloader = downloader\n self.mode = mode\n self._position = 0\n self._reader_buffer_size = read_buffer_size", "docstring": "Initializes the stream.\n\nArgs:\ndownloader: (Downloader) Filesystem dependent implementation.\nread_buffer_size: (int) Buffer size to use during read operations.\nmode: (string) Python mode attribute for this stream.", "source": "github-repos"} {"code": "def _aspect_preserving_resize(image, resize_min):\n \n mlperf_log.resnet_print(key=mlperf_log.INPUT_RESIZE_ASPECT_PRESERVING,\n value={\"min\": resize_min})\n\n shape = tf.shape(image)\n height, width = shape[0], shape[1]\n\n new_height, new_width = _smallest_size_at_least(height, width, resize_min)\n\n return _resize_image(image, new_height, new_width)", "docstring": "Resize images preserving the original aspect ratio.\n\nArgs:\nimage: A 3-D image `Tensor`.\nresize_min: A python integer or scalar `Tensor` indicating the size of\nthe smallest side after resize.\n\nReturns:\nresized_image: A 3-D tensor containing the resized image.", "source": "juraj-google-style"} {"code": "def state_estimation_ensemble(data, k, n_runs=10, M_list=[], **se_params):\n \n if len(M_list)==0:\n M_list = []\n for i in range(n_runs):\n M, W, ll = poisson_estimate_state(data, k, **se_params)\n M_list.append(M)\n M_stacked = np.hstack(M_list)\n M_new, W_new, ll = poisson_estimate_state(M_stacked, k, **se_params)\n W_new = np.dot(data.T, M_new)\n W_new = W_new/W_new.sum(0)\n return M_new, W_new, ll", "docstring": "Runs an ensemble method on the list of M results...\n\nArgs:\ndata: genes x cells array\nk: number of classes\nn_runs (optional): number of random initializations of state estimation\nM_list (optional): list of M arrays from state estimation\nse_params (optional): optional poisson_estimate_state params\n\nReturns:\nM_new\nW_new\nll", "source": "juraj-google-style"} {"code": "def _inf_or_operator_handler_factory(c_start, is_delegate=True):\n\n @coroutine\n def inf_or_operator_handler(c, ctx):\n next_ctx = None\n if (not is_delegate):\n ctx.value.append(c_start)\n (c, self) = (yield)\n else:\n assert (ctx.value[0] == c_start)\n assert (c not in _DIGITS)\n ctx.queue.unread(c)\n next_ctx = ctx\n (_, self) = (yield)\n assert (c == _)\n maybe_inf = True\n ctx.set_ion_type(IonType.FLOAT)\n match_index = 0\n trans = ctx.immediate_transition(self)\n while True:\n if maybe_inf:\n if (match_index < len(_INF_SUFFIX)):\n maybe_inf = (c == 
_INF_SUFFIX[match_index])\n elif (_ends_value(c) or ((ctx.container.ion_type is IonType.SEXP) and (c in _OPERATORS))):\n (yield ctx.event_transition(IonEvent, IonEventType.SCALAR, IonType.FLOAT, (((c_start == _MINUS) and _NEG_INF) or _POS_INF)))\n else:\n maybe_inf = False\n if maybe_inf:\n match_index += 1\n else:\n ctx.set_unicode()\n if (match_index > 0):\n next_ctx = ctx.derive_child_context(ctx.whence)\n for ch in _INF_SUFFIX[0:match_index]:\n next_ctx.value.append(ch)\n break\n (c, self) = (yield trans)\n if (ctx.container is not _C_SEXP):\n _illegal_character(c, (((next_ctx is None) and ctx) or next_ctx), ('Illegal character following %s.' % (_chr(c_start),)))\n if (match_index == 0):\n if (c in _OPERATORS):\n (yield ctx.immediate_transition(_operator_symbol_handler(c, ctx)))\n (yield ctx.event_transition(IonEvent, IonEventType.SCALAR, IonType.SYMBOL, ctx.value.as_symbol()))\n (yield _CompositeTransition(ctx.event_transition(IonEvent, IonEventType.SCALAR, IonType.SYMBOL, ctx.value.as_symbol()), ctx, partial(_unquoted_symbol_handler, c), next_ctx))\n return inf_or_operator_handler", "docstring": "Generates handler co-routines for values that may be `+inf` or `-inf`.\n\nArgs:\nc_start (int): The ordinal of the character that starts this token (either `+` or `-`).\nis_delegate (bool): True if a different handler began processing this token; otherwise, False. This will only\nbe true for `-inf`, because it is not the only value that can start with `-`; `+inf` is the only value\n(outside of a s-expression) that can start with `+`.", "source": "codesearchnet"} {"code": "def _read_tcp_options(self, size):\n counter = 0\n optkind = list()\n options = dict()\n while (counter < size):\n kind = self._read_unpack(1)\n opts = TCP_OPT.get(kind)\n enum = OPT_TYPE.get(kind)\n if (opts is None):\n len_ = (size - counter)\n counter = size\n optkind.append(enum)\n options[enum.name] = self._read_fileng(len_)\n break\n dscp = opts[1]\n if opts[0]:\n len_ = self._read_unpack(1)\n byte = opts[2](len_)\n if byte:\n data = process_opt[opts[3]](self, byte, kind)\n else:\n data = dict(kind=kind, length=2, flag=True)\n else:\n len_ = 1\n data = dict(kind=kind, length=1)\n counter += len_\n if (enum in optkind):\n if isinstance(options[dscp], tuple):\n options[dscp] += (Info(data),)\n else:\n options[dscp] = (Info(options[dscp]), Info(data))\n else:\n optkind.append(enum)\n options[dscp] = data\n if (not kind):\n break\n if (counter < size):\n len_ = (size - counter)\n options['padding'] = self._read_fileng(len_)\n return (tuple(optkind), options)", "docstring": "Read TCP option list.\n\nPositional arguments:\n* size -- int, length of option list\n\nReturns:\n* tuple -- TCP option list\n* dict -- extracted TCP option", "source": "codesearchnet"} {"code": "def write(self, x, access_logits):\n \n gamma = tf.layers.dense(x, 1, activation=tf.sigmoid, name=\"gamma\")\n write_logits = access_logits - gamma * tf.expand_dims(self.mean_logits, 1)\n candidate_value = tf.layers.dense(x, self.val_depth,\n activation=tf.nn.relu,\n name=\"candidate_value\")\n erase_gates = tf.layers.dense(x, self.memory_size,\n activation=tf.nn.sigmoid,\n name=\"erase\")\n write_weights = tf.nn.softmax(write_logits)\n erase_weights = tf.expand_dims(1 - erase_gates * write_weights, 3)\n erase = tf.multiply(erase_weights,\n tf.expand_dims(self.mem_vals, 1))\n addition = tf.multiply(\n tf.expand_dims(write_weights, 3),\n tf.expand_dims(candidate_value, 2))\n update_value_op = self.mem_vals.assign(\n tf.reduce_mean(erase + addition, axis=1))\n 
with tf.control_dependencies([update_value_op]):\n write_op = self.mean_logits.assign(\n self.mean_logits * 0.1 + tf.reduce_mean(write_logits * 0.9, axis=1))\n return write_op", "docstring": "Write to the memory based on a combination of similarity and least used.\n\nBased on arXiv:1607.00036v2 [cs.LG].\n\nArgs:\nx: a tensor in the shape of [batch_size, length, depth].\naccess_logits: the logits for accessing the memory.\nReturns:\nthe update op.", "source": "juraj-google-style"} {"code": "def np2str(value):\n \n if hasattr(value, 'dtype') and \\\n issubclass(value.dtype.type, (np.string_, np.object_)) and value.size == 1:\n value = np.asscalar(value)\n if not isinstance(value, str):\n \n \n value = value.decode()\n return value\n else:\n raise ValueError(\"Array is not a string type or is larger than 1\")", "docstring": "Convert an `numpy.string_` to str.\n\nArgs:\nvalue (ndarray): scalar or 1-element numpy array to convert\n\nRaises:\nValueError: if value is array larger than 1-element or it is not of\ntype `numpy.string_` or it is not a numpy array", "source": "juraj-google-style"} {"code": "def compute_covariance(L_aug, Y, k, p):\n (n, d) = L_aug.shape\n assert (Y.shape[0] == n)\n mu = compute_mu(L_aug, Y, k, p)\n return (((L_aug.T @ L_aug) / n) - ((mu @ np.diag(p)) @ mu.T))", "docstring": "Given label matrix L_aug and labels Y, compute the covariance.\n\nArgs:\nL: (np.array {0,1}) [n, d] The augmented (indicator) label matrix\nY: (np.array int) [n] The true labels in {1,...,k}\nk: (int) Cardinality\np: (np.array float) [k] The class balance", "source": "codesearchnet"} {"code": "def divide(x1, x2):\n if any_symbolic_tensors((x1, x2)):\n return Divide().symbolic_call(x1, x2)\n return backend.numpy.divide(x1, x2)", "docstring": "Divide arguments element-wise.\n\n`keras.ops.true_divide` is an alias for this function.\n\nArgs:\nx1: First input tensor.\nx2: Second input tensor.\n\nReturns:\nOutput tensor, the quotient `x1/x2`, element-wise.", "source": "github-repos"} {"code": "def user_id(self):\n if self._user_id is None:\n self._user_id = self._adb.current_user_id\n return self._user_id", "docstring": "The user id to use for this snippet client.\n\nAll the operations of the snippet client should be used for a particular\nuser. For more details, see the Android documentation of testing\nmultiple users.\n\nThus this value is cached and, once set, does not change through the\nlifecycles of this snippet client object. 
This caching also reduces the\nnumber of adb calls needed.\n\nAlthough for now self._user_id won't be modified once set, we use\n`property` to avoid issuing adb commands in the constructor.\n\nReturns:\nAn integer of the user id.", "source": "github-repos"} {"code": "def create_table(bq_legacy_client: BigQueryLegacyClient, table_metadata: TableMetadata, schema: List[SchemaField]) -> Table:\n table_def = Table(table_metadata.full_table_id, schema=schema)\n table = bq_legacy_client.create_table(table_def)\n return table", "docstring": "Create a table in BigQuery given the ID and schema.\n\nNote: Does NOT support nested columns.\n\nArgs:\n* bq_legacy_client: BigQuery Legacy API client\n* table_metadata: TableMetadata object\n* schema: valid BigQuery table schema\n\nReturns:\n* Created Table object", "source": "github-repos"} {"code": "def fileLoad(self, filePath=None, updatePath=True):\n if (not filePath):\n filePath = self.filePath\n if (not os.path.isfile(filePath)):\n raise FileNotFoundError((\"Data file '%s' does not exist.\" % filePath))\n else:\n print((\"Importing existing data file '%s' ... \" % filePath), end='', flush=True)\n with open(filePath, 'r') as q:\n data_raw = q.read()\n print('Imported!')\n self.data = json.loads(data_raw)\n if updatePath:\n self.filePath = filePath", "docstring": "Load a JSON data file into the internal JSON data dictionary.\n\nCurrent internal data will be overwritten.\nIf no file path is provided, the stored data file path will be used.\n\nArgs:\nfilePath (Optional[str]): A relative or absolute path to a\n'.json' file. Defaults to None.\nupdatePath (Optional[bool]): Specifies whether or not to update\nthe stored data file path. Defaults to True.", "source": "codesearchnet"} {"code": "def _get_sorted_inputs(filename):\n \n with tf.gfile.Open(filename) as f:\n records = f.read().split(\"\\n\")\n inputs = [record.strip() for record in records]\n if not inputs[-1]:\n inputs.pop()\n\n input_lens = [(i, len(line.split())) for i, line in enumerate(inputs)]\n sorted_input_lens = sorted(input_lens, key=lambda x: x[1], reverse=True)\n\n sorted_inputs = []\n sorted_keys = {}\n for i, (index, _) in enumerate(sorted_input_lens):\n sorted_inputs.append(inputs[index])\n sorted_keys[index] = i\n return sorted_inputs, sorted_keys", "docstring": "Read and sort lines from the file sorted by decreasing length.\n\nArgs:\nfilename: String name of file to read inputs from.\nReturns:\nSorted list of inputs, and dictionary mapping original index->sorted index\nof each element.", "source": "juraj-google-style"} {"code": "def retrieve(self, txid, headers=None):\n \n path = self.path + txid\n return self.transport.forward_request(\n method='GET', path=path, headers=None)", "docstring": "Retrieves the transaction with the given id.\n\nArgs:\ntxid (str): Id of the transaction to retrieve.\nheaders (dict): Optional headers to pass to the request.\n\nReturns:\ndict: The transaction with the given id.", "source": "juraj-google-style"} {"code": "def run():\n if not _SMCLI_INPUTS.value and (not _SMCLI_INPUT_EXPRS.value) and (not _SMCLI_INPUT_EXAMPLES.value):\n raise AttributeError('At least one of --inputs, --input_exprs or --input_examples must be required')\n tensor_key_feed_dict = load_inputs_from_input_arg_string(_SMCLI_INPUTS.value, _SMCLI_INPUT_EXPRS.value, _SMCLI_INPUT_EXAMPLES.value)\n run_saved_model_with_feed_dict(_SMCLI_DIR.value, _SMCLI_TAG_SET.value, _SMCLI_SIGNATURE_DEF.value, tensor_key_feed_dict, _SMCLI_OUTDIR.value, _SMCLI_OVERWRITE.value, worker=_SMCLI_WORKER.value, 
init_tpu=_SMCLI_INIT_TPU.value, use_tfrt=_SMCLI_USE_TFRT.value, tf_debug=_SMCLI_TF_DEBUG.value)", "docstring": "Function triggered by run command.\n\nRaises:\nAttributeError: An error when neither --inputs nor --input_exprs is passed\nto run command.", "source": "github-repos"} {"code": "def __call__(self, table: Union['pd.DataFrame', List['pd.DataFrame']]=None, query: Optional[Union[TextInput, List[TextInput]]]=None, answer: Optional[Union[str, List[str]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n if table is not None:\n return self.source_call_func(table=table, query=query, answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)\n elif answer is not None:\n return self.target_call_func(answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)\n else:\n raise ValueError('You need to provide either a `table` or an `answer`.')", "docstring": "Main method to tokenize and prepare for the model one or several table-sequence pair(s).\n\nArgs:\ntable (`pd.DataFrame`, `List[pd.DataFrame]`):\nTable(s) containing tabular data.\nquery (`str` or `List[str]`, *optional*):\nSentence or batch of sentences related to one or more table(s) to be encoded. 
Note that the number of\nsentences must match the number of tables.\nanswer (`str` or `List[str]`, *optional*):\nOptionally, the corresponding answer to the questions as supervision.", "source": "github-repos"} {"code": "def _get_output_columns(nodes, context):\n \n columns = []\n for node in nodes:\n for sql_output in sql_context_helpers.get_outputs(node, context):\n field_name = sql_output.field_name\n column = sql_context_helpers.get_column(field_name, node, context)\n column = column.label(sql_output.output_name)\n columns.append(column)\n return columns", "docstring": "Get the output columns for a list of SqlNodes.\n\nArgs:\nnodes: List[SqlNode], the nodes to get output columns from.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nList[Column], list of SqlAlchemy Columns to output for this query.", "source": "juraj-google-style"} {"code": "def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:\n series = table[self.name]\n self._check_series_name(series)\n col = self.name\n data = series.copy()\n for recoder in self.recoders.values():\n try:\n data = recoder(data)\n except BaseException as err:\n raise RecodingError(col, recoder, err)\n if validate:\n failed_rows = find_failed_rows(self.validate(data.to_frame()))\n if (failed_rows.shape[0] > 0):\n raise ValidationError(f)\n return data.to_frame()", "docstring": "Pass the provided series obj through each recoder function sequentially and return the final result.\n\nArgs:\ntable (pd.DataFrame): A dataframe on which to apply recoding logic.\nvalidate (bool): If ``True``, recoded table must pass validation tests.", "source": "codesearchnet"} {"code": "def _FormatOpaqueToken(self, token_data):\n data = ''.join(['{0:02x}'.format(byte) for byte in token_data.data])\n return {'data': data}", "docstring": "Formats an opaque token as a dictionary of values.\n\nArgs:\ntoken_data (bsm_token_data_opaque): AUT_OPAQUE token data.\n\nReturns:\ndict[str, str]: token values.", "source": "codesearchnet"} {"code": "def mapped_repr_stripping_underscores(\n obj: Any, attrnames: List[str],\n with_addr: bool = False, joiner: str = COMMA_SPACE) -> str:\n \n attributes = []\n for attr_name in attrnames:\n if attr_name.startswith('_'):\n init_param_name = attr_name[1:]\n else:\n init_param_name = attr_name\n attributes.append((attr_name, init_param_name))\n return mapped_repr(obj, attributes, with_addr=with_addr, joiner=joiner)", "docstring": "Convenience function for :func:`__repr__`.\nHere, you pass a list of internal attributes, and it assumes that the\n:func:`__init__` parameter names have the leading underscore dropped.\n\nArgs:\nobj: object to display\nattrnames: list of attribute names\nwith_addr: include the memory address of ``obj``\njoiner: string with which to join the elements\n\nReturns:\nstring: :func:`repr`-style representation", "source": "juraj-google-style"} {"code": "def stop_condition(self, condition):\n for cond_format in self._known_conditions:\n try:\n cond = cond_format.FromString(condition)\n self.stop_conditions.append(cond)\n return\n except ArgumentError:\n continue\n raise ArgumentError('Stop condition could not be processed by any known StopCondition type', condition=condition, suggestion='It may be mistyped or otherwise invalid.')", "docstring": "Add a stop condition to this simulation.\n\nStop conditions are specified as strings and parsed into\nthe appropriate internal structures.\n\nArgs:\ncondition (str): a string description of the stop condition", "source": "codesearchnet"} 
{"code": "def update_vm(access_token, subscription_id, resource_group, vm_name, body):\n \n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/resourceGroups/', resource_group,\n '/providers/Microsoft.Compute/virtualMachines/', vm_name,\n '?api-version=', COMP_API])\n return do_put(endpoint, body, access_token)", "docstring": "Update a virtual machine with a new JSON body. E.g. do a GET, change something, call this.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nvm_name (str): Name of the virtual machine.\nbody (dict): JSON body of the VM.\n\nReturns:\nHTTP response.", "source": "juraj-google-style"} {"code": "def save(self, path_info, checksum):\n \n assert path_info[\"scheme\"] == \"local\"\n assert checksum is not None\n\n path = path_info[\"path\"]\n assert os.path.exists(path)\n\n actual_mtime, actual_size = get_mtime_and_size(path)\n actual_inode = get_inode(path)\n\n existing_record = self.get_state_record_for_inode(actual_inode)\n if not existing_record:\n self._insert_new_state_record(\n path, actual_inode, actual_mtime, actual_size, checksum\n )\n return\n\n self._update_state_for_path_changed(\n path, actual_inode, actual_mtime, actual_size, checksum\n )", "docstring": "Save checksum for the specified path info.\n\nArgs:\npath_info (dict): path_info to save checksum for.\nchecksum (str): checksum to save.", "source": "juraj-google-style"} {"code": "def cdna_transformation(prev_image, cdna_input, num_masks, color_channels,\n dna_kernel_size, relu_shift):\n \n batch_size = tf.shape(cdna_input)[0]\n height = int(prev_image.get_shape()[1])\n width = int(prev_image.get_shape()[2])\n\n \n cdna_kerns = tfl.dense(\n cdna_input, dna_kernel_size * dna_kernel_size * num_masks,\n name=\"cdna_params\",\n activation=None)\n\n \n cdna_kerns = tf.reshape(\n cdna_kerns, [batch_size, dna_kernel_size, dna_kernel_size, 1, num_masks])\n cdna_kerns = (tf.nn.relu(cdna_kerns - relu_shift) + relu_shift)\n norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True)\n cdna_kerns /= norm_factor\n\n \n \n \n \n cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])\n cdna_kerns = tf.reshape(\n cdna_kerns, [dna_kernel_size, dna_kernel_size, batch_size, num_masks])\n \n prev_image = tf.transpose(prev_image, [3, 1, 2, 0])\n\n \n transformed = tf.nn.depthwise_conv2d(\n prev_image, cdna_kerns, [1, 1, 1, 1], \"SAME\")\n\n \n transformed = tf.reshape(\n transformed, [color_channels, height, width, batch_size, num_masks])\n transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])\n transformed = tf.unstack(transformed, axis=-1)\n return transformed", "docstring": "Apply convolutional dynamic neural advection to previous image.\n\nArgs:\nprev_image: previous image to be transformed.\ncdna_input: hidden lyaer to be used for computing CDNA kernels.\nnum_masks: number of masks and hence the number of CDNA transformations.\ncolor_channels: the number of color channels in the images.\ndna_kernel_size: dna kernel size.\nrelu_shift: shift for ReLU function.\nReturns:\nList of images transformed by the predicted CDNA kernels.", "source": "juraj-google-style"} {"code": "def _on_fail(self, record):\n self.on_fail(record)", "docstring": "Proxy function to guarantee the base implementation of on_fail is\ncalled.\n\nArgs:\nrecord: records.TestResultRecord, a copy of the test record for\nthis test, containing all information of the test execution\nincluding exception objects.", 
"source": "github-repos"} {"code": "def register_handler(self, callable_obj, entrypoint, methods=('GET',)):\n \n\n router_obj = Route.wrap_callable(\n uri=entrypoint,\n methods=methods,\n callable_obj=callable_obj\n )\n\n if router_obj.is_valid:\n self._routes.add(router_obj)\n return self\n \n raise RouteError( \n \"Missing params: methods: {} - entrypoint: {}\".format(\n methods, entrypoint\n )\n )", "docstring": "Register a handler callable to a specific route.\n\nArgs:\nentrypoint (str): The uri relative path.\nmethods (tuple): A tuple of valid method strings.\ncallable_obj (callable): The callable object.\n\nReturns:\nThe Router instance (for chaining purposes).\n\nRaises:\nRouteError, for missing routing params or invalid callable\nobject type.", "source": "juraj-google-style"} {"code": "def render(self, mode=None, vertices=-1, *, first=0, instances=1) -> None:\n \n\n if mode is None:\n mode = TRIANGLES\n\n self.mglo.render(mode, vertices, first, instances)", "docstring": "The render primitive (mode) must be the same as\nthe input primitive of the GeometryShader.\n\nArgs:\nmode (int): By default :py:data:`TRIANGLES` will be used.\nvertices (int): The number of vertices to transform.\n\nKeyword Args:\nfirst (int): The index of the first vertex to start with.\ninstances (int): The number of instances.", "source": "juraj-google-style"} {"code": "def run(self, resources):\n \n\n hwman = resources['connection']\n\n updater = hwman.hwman.app(name='device_updater')\n updater.run_script(self._script, no_reboot=self._no_reboot)", "docstring": "Actually send the trub script.\n\nArgs:\nresources (dict): A dictionary containing the required resources that\nwe needed access to in order to perform this step.", "source": "juraj-google-style"} {"code": "class AriaProjector(nn.Module):\n\n def __init__(self, config: AriaConfig):\n super().__init__()\n self.patch_to_query_dict = config.projector_patch_to_query_dict\n self.in_features = config.vision_config.hidden_size\n self.num_heads = config.vision_config.num_attention_heads\n self.kv_dim = config.vision_config.hidden_size\n self.hidden_features = config.text_config.hidden_size\n self.output_dim = config.text_config.hidden_size\n self.query = nn.Parameter(torch.zeros(config.max_value_projector_patch_to_query_dict, self.in_features))\n self.cross_attn = AriaCrossAttention(config)\n self.layer_norm = nn.LayerNorm(self.in_features)\n self.feed_forward = AriaProjectorMLP(self.in_features, self.hidden_features, self.output_dim)\n\n def forward(self, key_value_states: torch.Tensor, attn_mask: Optional[torch.Tensor]=None):\n \n batch_size, num_patches = (key_value_states.shape[0], key_value_states.shape[1])\n if num_patches not in self.patch_to_query_dict.keys():\n raise KeyError(f'Number of patches {num_patches} not found in patch_to_query_dict amongst possible values {self.patch_to_query_dict.keys()}.')\n query_num = self.patch_to_query_dict[num_patches]\n queries = self.query[:query_num].unsqueeze(0).repeat(batch_size, 1, 1)\n if attn_mask is not None:\n attn_mask = attn_mask.repeat_interleave(self.num_heads, 0)\n attn_mask = attn_mask.unsqueeze(1).expand(-1, queries.size(1), -1)\n attention_out = self.cross_attn(key_value_states, queries, attn_mask=attn_mask)\n out = self.feed_forward(self.layer_norm(attention_out))\n return out", "docstring": "Aria Projector module.\n\nThis module projects vision features into the language model's embedding space, enabling interaction between vision and language components.\n\nArgs:\nconfig 
(`AriaConfig`):\nConfiguration object for the model.", "source": "github-repos"} {"code": "def ParseNetworkDataUsage(self, parser_mediator, cache=None, database=None, table=None, **unused_kwargs):\n self._ParseGUIDTable(parser_mediator, cache, database, table, self._NETWORK_DATA_USAGE_VALUES_MAP, SRUMNetworkDataUsageEventData)", "docstring": "Parses the network data usage monitor table.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ncache (Optional[ESEDBCache]): cache, which contains information about\nthe identifiers stored in the SruDbIdMapTable table.\ndatabase (Optional[pyesedb.file]): ESE database.\ntable (Optional[pyesedb.table]): table.", "source": "codesearchnet"} {"code": "def corpus_token_counts(\n text_filepattern, corpus_max_lines, split_on_newlines=True):\n \n counts = collections.Counter()\n for doc in _read_filepattern(\n text_filepattern,\n max_lines=corpus_max_lines,\n split_on_newlines=split_on_newlines):\n counts.update(encode(_native_to_unicode(doc)))\n\n mlperf_log.transformer_print(\n key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(counts))\n return counts", "docstring": "Read the corpus and compute a dictionary of token counts.\n\nArgs:\ntext_filepattern: A pattern matching one or more files.\ncorpus_max_lines: An integer; maximum total lines to read.\nsplit_on_newlines: A boolean. If true, then split files by lines and strip\nleading and trailing whitespace from each line. Otherwise, treat each\nfile as a single string.\n\nReturns:\na dictionary mapping token to count.", "source": "juraj-google-style"} {"code": "def set_image_data_format(data_format):\n global _IMAGE_DATA_FORMAT\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n _IMAGE_DATA_FORMAT = str(data_format)", "docstring": "Sets the value of the image data format convention.\n\nArgs:\ndata_format: string. 
`'channels_first'` or `'channels_last'`.\n\nExample:\n>>> tf.keras.backend.image_data_format()\n'channels_last'\n>>> tf.keras.backend.set_image_data_format('channels_first')\n>>> tf.keras.backend.image_data_format()\n'channels_first'\n>>> tf.keras.backend.set_image_data_format('channels_last')\n\nRaises:\nValueError: In case of invalid `data_format` value.", "source": "github-repos"} {"code": "def _get_target(self, target):\n \n depth = target.count('.') + 1\n parts = target.split('.', 1)\n for m in self.modules:\n if parts[0] == m.name:\n if depth < 3:\n return m\n for p in self.packages:\n if parts[0] == p.name:\n if depth == 1:\n return p\n \n target = p._get_target(parts[1])\n if target:\n return target\n \n \n \n \n \n \n if depth < 3:\n return p\n return None", "docstring": "Get the Package or Module related to given target.\n\nArgs:\ntarget (str): target to find.\n\nReturns:\nPackage/Module: package containing target or corresponding module.", "source": "juraj-google-style"} {"code": "def enable_extana_streaming(self, include_imu=False, enabled_sensors=SENSOR_ALL):\n if (not self.dongle._enable_extana_streaming(self, include_imu, enabled_sensors)):\n logger.warn('Failed to enable SK8-ExtAna streaming!')\n return False\n if include_imu:\n self.enabled_imus = [0]\n return True", "docstring": "Configures and enables sensor data streaming from the SK8-ExtAna device.\n\nBy default this will cause the SK8 to only stream data from the analog\nsensors on the SK8-ExtAna, but if `include_imu` is set to True, it will\nalso send data from the internal IMU in the SK8.\n\nNOTE: only one streaming mode can be active at any time, so e.g. if you\nwant to stream IMU data normally, you must disable SK8-ExtAna streaming first.\n\nArgs:\ninclude_imu (bool): If False, only SK8-ExtAna packets will be streamed.\nIf True, the device will also stream data from the SK8's internal IMU.\nenabled_sensors (int): If `include_imu` is True, this can be used to\nselect which IMU sensors will be active.\n\nReturns:\nbool. True if successful, False if an error occurred.", "source": "codesearchnet"} {"code": "def from_json(cls, data):\n \n user = cls()\n\n user.user_id = data['userId']\n user.username = data['username']\n user.auth_system = data['authSystem']\n user.roles = data['roles']\n\n return user", "docstring": "Return object based on JSON / dict input\n\nArgs:\ndata (dict): Dictionary containing a serialized User object\n\nReturns:\n:obj:`User`: User object representing the data", "source": "juraj-google-style"} {"code": "def requires_submit(func):\n\n @functools.wraps(func)\n def _wrapper(self, *args, **kwargs):\n if (self._future is None):\n raise JobError('Job not submitted yet!. You have to .submit() first!')\n return func(self, *args, **kwargs)\n return _wrapper", "docstring": "Decorator to ensure that a submit has been performed before\ncalling the method.\n\nArgs:\nfunc (callable): test function to be decorated.\n\nReturns:\ncallable: the decorated function.", "source": "codesearchnet"} {"code": "def __init__(self, app, sender_email=None, sender_name=None):\n \n\n super(SendmailEmailAdapter, self).__init__(app)\n\n \n try:\n from flask_sendmail import Mail\n except ImportError:\n raise ConfigError(\n \"The Flask-Sendmail package is missing. 
Install Flask-Sendmail with 'pip install Flask-Sendmail'.\")\n self.mail = Mail(app)", "docstring": "Check config settings and setup Flask-Sendemail.\n\nArgs:\napp(Flask): The Flask application instance.", "source": "juraj-google-style"} {"code": "def listen(self, orb, listeners):\n \n\n if isinstance(listeners, Listener):\n listeners = [listeners]\n\n orb.event = None\n results = []\n for listener in listeners:\n if listener.check(orb):\n results.append(self._bisect(listener.prev, orb, listener))\n\n \n listener.prev = orb\n return sorted(results, key=lambda x: x.date)", "docstring": "This method allows to loop over the listeners and trigger the :py:meth:`_bisect` method\nin case a watched parameter has its state changed.\n\nArgs:\norb (Orbit): The current state of the orbit\nlisteners (iterable): List of Listener objects\nReturn:\nlist of Orbit: Orbits corresponding to events, sorted by dates", "source": "juraj-google-style"} {"code": "def predict(self, data, graph=None, nruns=6, njobs=None, gpus=0, verbose=None, plot=False, plot_generated_pair=False, return_list_results=False):\n (verbose, njobs) = SETTINGS.get_default(('verbose', verbose), ('nb_jobs', njobs))\n if (njobs != 1):\n list_out = Parallel(n_jobs=njobs)((delayed(run_SAM)(data, skeleton=graph, lr_gen=self.lr, lr_disc=self.dlr, regul_param=self.l1, nh=self.nh, dnh=self.dnh, gpu=bool(gpus), train_epochs=self.train, test_epochs=self.test, batch_size=self.batchsize, plot=plot, verbose=verbose, gpu_no=(idx % max(gpus, 1))) for idx in range(nruns)))\n else:\n list_out = [run_SAM(data, skeleton=graph, lr_gen=self.lr, lr_disc=self.dlr, regul_param=self.l1, nh=self.nh, dnh=self.dnh, gpu=bool(gpus), train_epochs=self.train, test_epochs=self.test, batch_size=self.batchsize, plot=plot, verbose=verbose, gpu_no=0) for idx in range(nruns)]\n if return_list_results:\n return list_out\n else:\n W = list_out[0]\n for w in list_out[1:]:\n W += w\n W /= nruns\n return nx.relabel_nodes(nx.DiGraph(W), {idx: i for (idx, i) in enumerate(data.columns)})", "docstring": "Execute SAM on a dataset given a skeleton or not.\n\nArgs:\ndata (pandas.DataFrame): Observational data for estimation of causal relationships by SAM\nskeleton (numpy.ndarray): A priori knowledge about the causal relationships as an adjacency matrix.\nCan be fed either directed or undirected links.\nnruns (int): Number of runs to be made for causal estimation.\nRecommended: >=12 for optimal performance.\nnjobs (int): Numbers of jobs to be run in Parallel.\nRecommended: 1 if no GPU available, 2*number of GPUs else.\ngpus (int): Number of available GPUs for the algorithm.\nverbose (bool): verbose mode\nplot (bool): Plot losses interactively. Not recommended if nruns>1\nplot_generated_pair (bool): plots a generated pair interactively. 
Not recommended if nruns>1\nReturns:\nnetworkx.DiGraph: Graph estimated by SAM, where A[i,j] is the term\nof the ith variable for the jth generator.", "source": "codesearchnet"} {"code": "class _StatelessThresholdDoFn(_BaseThresholdDoFn):\n\n def __init__(self, threshold_fn_spec: Spec):\n assert isinstance(threshold_fn_spec.config, dict)\n threshold_fn_spec.config['_run_init'] = True\n self._threshold_fn = Specifiable.from_spec(threshold_fn_spec)\n assert isinstance(self._threshold_fn, ThresholdFn)\n assert not self._threshold_fn.is_stateful, 'This DoFn can only take stateless function as threshold_fn'\n\n def process(self, element: NestedKeyedOutputT, **kwargs) -> Iterable[NestedKeyedOutputT]:\n \n k1, (k2, result) = element\n yield (k1, (k2, self._apply_threshold_to_predictions(result)))", "docstring": "Applies a stateless ThresholdFn to anomaly detection results.\n\nThis DoFn is designed for stateless `ThresholdFn` implementations. It\ninitializes the `ThresholdFn` once during setup and applies it to each\nincoming element without maintaining any state across elements.\n\nArgs:\nthreshold_fn_spec (Spec): Specification defining the `ThresholdFn` to be\nused.\n\nRaises:\nAssertionError: If the provided `threshold_fn_spec` leads to the\ncreation of a stateful `ThresholdFn`.", "source": "github-repos"} {"code": "def residual_block_v2(x, filters, kernel_size=3, stride=1, conv_shortcut=False, name=None):\n if backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n preact = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_preact_bn')(x)\n preact = layers.Activation('relu', name=name + '_preact_relu')(preact)\n if conv_shortcut:\n shortcut = layers.Conv2D(4 * filters, 1, strides=stride, name=name + '_0_conv')(preact)\n else:\n shortcut = layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x\n x = layers.Conv2D(filters, 1, strides=1, use_bias=False, name=name + '_1_conv')(preact)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_1_bn')(x)\n x = layers.Activation('relu', name=name + '_1_relu')(x)\n x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)\n x = layers.Conv2D(filters, kernel_size, strides=stride, use_bias=False, name=name + '_2_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_2_bn')(x)\n x = layers.Activation('relu', name=name + '_2_relu')(x)\n x = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)\n x = layers.Add(name=name + '_out')([shortcut, x])\n return x", "docstring": "A residual block for ResNet*_v2.\n\nArgs:\nx: Input tensor.\nfilters: No of filters in the bottleneck layer.\nkernel_size: Kernel size of the bottleneck layer. Defaults to `3`.\nstride: Stride of the first layer. Defaults to `1`.\nconv_shortcut: Use convolution shortcut if `True`, otherwise\nuse identity shortcut. Defaults to `True`\nname(optional): Name of the block\n\nReturns:\nOutput tensor for the residual block.", "source": "github-repos"} {"code": "def ip_address(address):\n \n try:\n return IPv4Address(address)\n except (AddressValueError, NetmaskValueError):\n pass\n\n try:\n return IPv6Address(address)\n except (AddressValueError, NetmaskValueError):\n pass\n\n raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %\n address)", "docstring": "Take an IP string/int and return an object of the correct type.\n\nArgs:\naddress: A string or integer, the IP address. 
Either IPv4 or\nIPv6 addresses may be supplied; integers less than 2**32 will\nbe considered to be IPv4 by default.\n\nReturns:\nAn IPv4Address or IPv6Address object.\n\nRaises:\nValueError: if the *address* passed isn't either a v4 or a v6\naddress", "source": "juraj-google-style"} {"code": "def get_encoding(path):\n \n def check_ascii(_data):\n \n try:\n _data.decode('ASCII')\n log.debug('Found ASCII')\n except UnicodeDecodeError:\n return False\n else:\n return True\n\n def check_bom(_data):\n \n \n \n boms = [\n ('UTF-32-BE', salt.utils.stringutils.to_bytes(codecs.BOM_UTF32_BE)),\n ('UTF-32-LE', salt.utils.stringutils.to_bytes(codecs.BOM_UTF32_LE)),\n ('UTF-16-BE', salt.utils.stringutils.to_bytes(codecs.BOM_UTF16_BE)),\n ('UTF-16-LE', salt.utils.stringutils.to_bytes(codecs.BOM_UTF16_LE)),\n ('UTF-8', salt.utils.stringutils.to_bytes(codecs.BOM_UTF8)),\n ('UTF-7', salt.utils.stringutils.to_bytes('\\x2b\\x2f\\x76\\x38\\x2D')),\n ('UTF-7', salt.utils.stringutils.to_bytes('\\x2b\\x2f\\x76\\x38')),\n ('UTF-7', salt.utils.stringutils.to_bytes('\\x2b\\x2f\\x76\\x39')),\n ('UTF-7', salt.utils.stringutils.to_bytes('\\x2b\\x2f\\x76\\x2b')),\n ('UTF-7', salt.utils.stringutils.to_bytes('\\x2b\\x2f\\x76\\x2f')),\n ]\n for _encoding, bom in boms:\n if _data.startswith(bom):\n log.debug('Found BOM for %s', _encoding)\n return _encoding\n return False\n\n def check_utf8_markers(_data):\n try:\n decoded = _data.decode('UTF-8')\n except UnicodeDecodeError:\n return False\n else:\n \n if six.PY2:\n for char in decoded:\n if 0xD800 <= ord(char) <= 0xDFFF:\n return False\n return True\n\n def check_system_encoding(_data):\n try:\n _data.decode(__salt_system_encoding__)\n except UnicodeDecodeError:\n return False\n else:\n return True\n\n if not os.path.isfile(path):\n raise CommandExecutionError('Not a file')\n try:\n with fopen(path, 'rb') as fp_:\n data = fp_.read(2048)\n except os.error:\n raise CommandExecutionError('Failed to open file')\n\n \n encoding = check_bom(data)\n if encoding:\n return encoding\n\n \n if check_utf8_markers(data):\n return 'UTF-8'\n\n \n if check_system_encoding(data):\n return __salt_system_encoding__\n\n \n if check_ascii(data):\n return 'ASCII'\n\n raise CommandExecutionError('Could not detect file encoding')", "docstring": "Detect a file's encoding using the following:\n- Check for Byte Order Marks (BOM)\n- Check for UTF-8 Markers\n- Check System Encoding\n- Check for ascii\n\nArgs:\n\npath (str): The path to the file to check\n\nReturns:\nstr: The encoding of the file\n\nRaises:\nCommandExecutionError: If the encoding cannot be detected", "source": "juraj-google-style"} {"code": "def _paginate_expand_value_set_request(request_func: Callable[[int], requests.Response], value_set_url: str, value_set_version: Optional[str]) -> value_set_pb2.ValueSet:\n offset = 0\n codes: List[value_set_pb2.ValueSet.Expansion.Contains] = []\n while True:\n resp = request_func(offset)\n if resp.status_code >= 400:\n logging.error('Error from terminology service: %s', resp.text)\n resp.raise_for_status()\n resp_json = resp.json()\n response_value_set = json_format.json_fhir_object_to_proto(resp_json, value_set_pb2.ValueSet, validate=False)\n codes.extend(response_value_set.expansion.contains)\n offset += len(resp_json['expansion'].get('contains', ()))\n if 'total' not in resp_json['expansion'] or offset >= resp_json['expansion']['total']:\n del response_value_set.expansion.contains[:]\n response_value_set.expansion.contains.extend(codes)\n response_value_set.url.value = value_set_url\n if 
value_set_version is not None:\n response_value_set.version.value = value_set_version\n return response_value_set", "docstring": "Performs a request to the terminology service, including pagination.\n\nGiven a function which performs a request against a terminology service, use\nthe function to make requests until the full response has been paginated\nthrough.\n\nArgs:\nrequest_func: The function to call to perform a request to the terminology\nservice. The function must accept an integer representing the pagination\noffset value to include in the request and return a requests Response\nobject.\nvalue_set_url: The URL of the value set being expanded.\nvalue_set_version: The version of the value set being expanded.\n\nReturns:\nThe current definition of the value set from the server with its expanded\ncodes present.", "source": "github-repos"} {"code": "def _find_image_files(data_dir, labels_file):\n print(('Determining list of input files and labels from %s.' % data_dir))\n unique_labels = [l.strip() for l in tf.gfile.FastGFile(labels_file, 'r').readlines()]\n labels = []\n filenames = []\n texts = []\n label_index = 1\n for text in unique_labels:\n jpeg_file_path = ('%s/%s/*' % (data_dir, text))\n matching_files = tf.gfile.Glob(jpeg_file_path)\n labels.extend(([label_index] * len(matching_files)))\n texts.extend(([text] * len(matching_files)))\n filenames.extend(matching_files)\n if (not (label_index % 100)):\n print(('Finished finding files in %d of %d classes.' % (label_index, len(labels))))\n label_index += 1\n shuffled_index = list(range(len(filenames)))\n random.seed(12345)\n random.shuffle(shuffled_index)\n filenames = [filenames[i] for i in shuffled_index]\n texts = [texts[i] for i in shuffled_index]\n labels = [labels[i] for i in shuffled_index]\n print(('Found %d JPEG files across %d labels inside %s.' % (len(filenames), len(unique_labels), data_dir)))\n return (filenames, texts, labels)", "docstring": "Build a list of all images files and labels in the data set.\n\nArgs:\ndata_dir: string, path to the root directory of images.\n\nAssumes that the image data set resides in JPEG files located in\nthe following directory structure.\n\ndata_dir/dog/another-image.JPEG\ndata_dir/dog/my-image.jpg\n\nwhere 'dog' is the label associated with these images.\n\nlabels_file: string, path to the labels file.\n\nThe list of valid labels are held in this file. Assumes that the file\ncontains entries as such:\ndog\ncat\nflower\nwhere each line corresponds to a label. We map each label contained in\nthe file to an integer starting with the integer 0 corresponding to the\nlabel contained in the first line.\n\nReturns:\nfilenames: list of strings; each string is a path to an image file.\ntexts: list of strings; each string is the class, e.g. 'dog'\nlabels: list of integer; each integer identifies the ground truth.", "source": "codesearchnet"} {"code": "def __init__(self, channel):\n \n self.CreateClientEvent = channel.unary_unary(\n \"/google.cloud.talent.v4beta1.EventService/CreateClientEvent\",\n request_serializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_event__service__pb2.CreateClientEventRequest.SerializeToString,\n response_deserializer=google_dot_cloud_dot_talent__v4beta1_dot_proto_dot_event__pb2.ClientEvent.FromString,\n )", "docstring": "Constructor.\n\nArgs:\nchannel: A grpc.Channel.", "source": "juraj-google-style"} {"code": "def write_file(self, filename, *, overwrite=False):\n if (exists(filename) and (not overwrite)):\n raise OSError((filename + ' already present. 
Specify \"overwrite=True\" to overwrite, or rename.'))\n with open(filename, 'w') as yaml_file:\n yaml.dump(self._properties, yaml_file)", "docstring": "Write new ChemKED YAML file based on object.\n\nArguments:\nfilename (`str`): Filename for target YAML file\noverwrite (`bool`, optional): Whether to overwrite file with given name if present.\nMust be supplied as a keyword-argument.\n\nRaises:\n`NameError`: If ``filename`` is already present, and ``overwrite`` is not ``True``.\n\nExample:\n>>> dataset = ChemKED(yaml_file)\n>>> dataset.write_file(new_yaml_file)", "source": "codesearchnet"} {"code": "def delete(self, url=None, post_data={}, parse_data=False, key=None, parameters=None):\n return self._fetch('DELETE', url, post_data=post_data, parse_data=parse_data, key=key, parameters=parameters, full_return=True)", "docstring": "Issue a PUT request.\n\nKwargs:\nurl (str): Destination URL\npost_data (dict): Dictionary of parameter and values\nparse_data (bool): If true, parse response data\nkey (string): If parse_data==True, look for this key when parsing data\nparameters (dict): Additional GET parameters to append to the URL\n\nReturns:\ndict. Response (a dict with keys: success, data, info, body)\n\nRaises:\nAuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception", "source": "codesearchnet"} {"code": "def get_log_level(args):\n \n \n index = -1\n log_level = None\n if '' in args and args['']:\n index = sys.argv.index(args[''])\n if args.get('--debug'):\n log_level = 'DEBUG'\n if '--debug' in sys.argv and sys.argv.index('--debug') < index:\n sys.argv.remove('--debug')\n elif '-d' in sys.argv and sys.argv.index('-d') < index:\n sys.argv.remove('-d')\n elif args.get('--verbose'):\n log_level = 'INFO'\n if '--verbose' in sys.argv and sys.argv.index('--verbose') < index:\n sys.argv.remove('--verbose')\n elif '-v' in sys.argv and sys.argv.index('-v') < index:\n sys.argv.remove('-v')\n elif args.get('--log-level'):\n log_level = args['--log-level']\n sys.argv.remove('--log-level')\n sys.argv.remove(log_level)\n if log_level not in (None, 'DEBUG', 'INFO', 'WARN', 'ERROR'):\n raise exceptions.InvalidLogLevelError(log_level)\n return getattr(logging, log_level) if log_level else None", "docstring": "Get the log level from the CLI arguments.\n\nRemoves logging arguments from sys.argv.\n\nArgs:\nargs: The parsed docopt arguments to be used to determine the logging\nlevel.\n\nReturns:\nThe correct log level based on the three CLI arguments given.\n\nRaises:\nValueError: Raised if the given log level is not in the acceptable\nlist of values.", "source": "juraj-google-style"} {"code": "def VerifyRow(self, parser_mediator, row):\n \n if len(row) < self.MIN_COLUMNS:\n return False\n\n \n \n try:\n timestamp = self._ConvertToTimestamp(row['date'], row['time'])\n except ValueError:\n return False\n\n if timestamp is None:\n return False\n\n try:\n block_mode = int(row['block_mode'], 10)\n except (ValueError, TypeError):\n return False\n\n if block_mode not in formatter.BLOCK_MODES:\n return False\n return True", "docstring": "Verifies if a line of the file is in the expected format.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrow (dict[str, str]): fields of a single row, as specified in COLUMNS.\n\nReturns:\nbool: True if this is the correct parser, False otherwise.", "source": "juraj-google-style"} {"code": "def play_match(black_model, white_model, games, sgf_dir):\n \n with 
utils.logged_timer(\"Loading weights\"):\n black_net = dual_net.DualNetwork(black_model)\n white_net = dual_net.DualNetwork(white_model)\n\n readouts = FLAGS.num_readouts\n\n black = MCTSPlayer(black_net, two_player_mode=True)\n white = MCTSPlayer(white_net, two_player_mode=True)\n\n black_name = os.path.basename(black_net.save_file)\n white_name = os.path.basename(white_net.save_file)\n\n for i in range(games):\n num_move = 0 \n\n for player in [black, white]:\n player.initialize_game()\n first_node = player.root.select_leaf()\n prob, val = player.network.run(first_node.position)\n first_node.incorporate_results(prob, val, first_node)\n\n while True:\n start = time.time()\n active = white if num_move % 2 else black\n inactive = black if num_move % 2 else white\n\n current_readouts = active.root.N\n while active.root.N < current_readouts + readouts:\n active.tree_search()\n\n \n if FLAGS.verbose >= 3:\n print(active.root.position)\n\n \n if active.should_resign(): \n active.set_result(-1 *\n active.root.position.to_play, was_resign=True)\n inactive.set_result(\n active.root.position.to_play, was_resign=True)\n\n if active.is_done():\n fname = \"{:d}-{:s}-vs-{:s}-{:d}.sgf\".format(int(time.time()),\n white_name, black_name, i)\n active.set_result(active.root.position.result(), was_resign=False)\n with gfile.GFile(os.path.join(sgf_dir, fname), 'w') as _file:\n sgfstr = sgf_wrapper.make_sgf(active.position.recent,\n active.result_string, black_name=black_name,\n white_name=white_name)\n _file.write(sgfstr)\n print(\"Finished game\", i, active.result_string)\n break\n\n move = active.pick_move()\n active.play_move(move)\n inactive.play_move(move)\n\n dur = time.time() - start\n num_move += 1\n\n if (FLAGS.verbose > 1) or (FLAGS.verbose == 1 and num_move % 10 == 9):\n timeper = (dur / readouts) * 100.0\n print(active.root.position)\n print(\"%d: %d readouts, %.3f s/100. 
(%.2f sec)\" % (num_move,\n readouts,\n timeper,\n dur))", "docstring": "Plays matches between two neural nets.\n\nArgs:\nblack_model: Path to the model for black player\nwhite_model: Path to the model for white player", "source": "juraj-google-style"} {"code": "def read_counts(node):\n \n cfg.forward(node, cfg.ReachingDefinitions())\n\n rc = ReadCounts()\n rc.visit(node)\n return rc.n_read", "docstring": "Check how many times a variable definition was used.\n\nArgs:\nnode: An AST to analyze.\n\nReturns:\nA dictionary from assignment nodes to the number of times the assigned to\nvariable was used.", "source": "juraj-google-style"} {"code": "def get_build_output(self, process):\n while True:\n output = process.stdout.readline()\n if ((output == b'') and (process.poll() is not None)):\n if (process.returncode > 0):\n raise Exception(('Compilation ended with an error.\\nSTDERR\\n%s\\nSTDOUT\\n%s' % (process.stderr.read(), process.stdout.read())))\n return\n if output:\n matches = re.search('\\\\[\\\\s*(\\\\d+?)/(\\\\d+)\\\\].*', output.strip().decode('utf-8'))\n if (matches is not None):\n (yield [int(matches.group(1)), int(matches.group(2))])", "docstring": "Parse the output of the ns-3 build process to extract the information\nthat is needed to draw the progress bar.\n\nArgs:\nprocess: the subprocess instance to listen to.", "source": "codesearchnet"} {"code": "def setup(self, timezone=None):\n self._timezone = timezone\n self._output_path = tempfile.mkdtemp()", "docstring": "Sets up the _timezone attribute.\n\nArgs:\ntimezone: Timezone name (optional)", "source": "codesearchnet"} {"code": "def expand_docstring(**kwargs):\n\n def _fn_wrapped(fn):\n 'Original function with modified `__doc__` attribute.'\n doc = inspect.cleandoc(fn.__doc__)\n for (k, v) in six.iteritems(kwargs):\n pattern = (('\\\\$\\\\{' + str(k)) + '\\\\}')\n doc = re.sub(pattern, (lambda match: v), doc)\n fn.__doc__ = doc\n return fn\n return _fn_wrapped", "docstring": "Decorator to programmatically expand the docstring.\n\nArgs:\n**kwargs: Keyword arguments to set. For each key-value pair `k` and `v`,\nthe key is found as `${k}` in the docstring and replaced with `v`.\n\nReturns:\nDecorated function.", "source": "codesearchnet"} {"code": "def _get_weighted_mean_squared_error(self, quant_min, quant_max) -> tuple[float, float, float]:\n dequantized_hist_mids = self._get_dequantized_hist_mids_after_quantize(quant_min, quant_max)\n squared_error = (self._hist_mids - dequantized_hist_mids) ** 2\n weighted_error = np.sum(squared_error * self._hist_freq)\n return (weighted_error, quant_min, quant_max)", "docstring": "Gets mean squared error between hist_mids and dequantized hist_mids.\n\nQuantization converts the range of numbers from [quant_min, quant_max] to\n[0, 2^num_bits - 1]. 
Values less than quant_min are converted to 0, and\nvalues greater than quant_max are converted to 2^num_bits - 1.\n\nArgs:\nquant_min: The minimum real value that can be represented by a quantized\nvalue.\nquant_max: The maximum real value that can be represented by a quantized\nvalue.\n\nReturns:\n(error, quant_min, quant_max): Tuple of weighted mean squared error.\nerror = (hist_mids - dequantized_hist_mids)**2 * hist_freq", "source": "github-repos"} {"code": "def __init__(self, shape, mean=0.0, log_stddev=0.0, scope='gaussian', summary_labels=()):\n \n self.shape = shape\n action_size = util.prod(self.shape)\n\n self.mean = Linear(size=action_size, bias=mean, scope='mean', summary_labels=summary_labels)\n self.log_stddev = Linear(size=action_size, bias=log_stddev, scope='log-stddev', summary_labels=summary_labels)\n\n super(Gaussian, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)", "docstring": "Gaussian distribution.\n\nArgs:\nshape: Action shape.\nmean: Optional distribution bias for the mean.\nlog_stddev: Optional distribution bias for the standard deviation.", "source": "juraj-google-style"} {"code": "def parse_coach_go(infile):\n go_list = []\n with open(infile) as go_file:\n for line in go_file.readlines():\n go_dict = {}\n go_split = line.split()\n go_dict['go_id'] = go_split[0]\n go_dict['c_score'] = go_split[1]\n go_dict['go_term'] = ' '.join(go_split[2:])\n go_list.append(go_dict)\n return go_list", "docstring": "Parse a GO output file from COACH and return a rank-ordered list of GO term predictions\n\nThe columns in all files are: GO terms, Confidence score, Name of GO terms. The files are:\n\n- GO_MF.dat - GO terms in 'molecular function'\n- GO_BP.dat - GO terms in 'biological process'\n- GO_CC.dat - GO terms in 'cellular component'\n\nArgs:\ninfile (str): Path to any COACH GO prediction file\n\nReturns:\nlist: Rank-ordered list of GO prediction dicts, keys defined below\n\n- ``go_id``: GO term ID\n- ``go_term``: GO term text\n- ``c_score``: confidence score of the GO prediction", "source": "codesearchnet"} {"code": "def GetOrderKey(self):\n context_attributes = ['_type']\n context_attributes.extend(ExceptionWithContext.CONTEXT_PARTS)\n context_attributes.extend(self._GetExtraOrderAttributes())\n tokens = []\n for context_attribute in context_attributes:\n tokens.append(getattr(self, context_attribute, None))\n return tokens", "docstring": "Return a tuple that can be used to sort problems into a consistent order.\n\nReturns:\nA list of values.", "source": "codesearchnet"} {"code": "def integrate(self, x1, x2, name=None):\n name = name or self._name + '_integrate'\n with tf.name_scope(name):\n x1 = tf.convert_to_tensor(x1, dtype=self.dtype(), name='x1')\n x2 = tf.convert_to_tensor(x2, dtype=self.dtype(), name='x2')\n batch_shape = tf.shape(self._jump_locations)[:-1]\n x1 = _try_broadcast_to(x1, batch_shape)\n x2 = _try_broadcast_to(x2, batch_shape)\n return _piecewise_constant_integrate(x1, x2, self._jump_locations, self._values, self._batch_rank)", "docstring": "Integrates the piecewise constant function between end points.\n\nReturns a value of the integral on the interval `[x1, x2]` of a piecewise\nconstant function with jump locations and values given by the initializer.\n\nArgs:\nx1: A real `Tensor` of shape `batch_shape + [num_points]`. Left end points\nat which the function has to be integrated.\nx2: A `Tensor` of the same shape and `dtype` as `x1`. 
Right end points at\nwhich the function has to be integrated.\nname: Python `str` name prefixed to ops created by this method.\nDefault value: `None` which is mapped to the default name\n`self.name() + `_integrate``.\n\nReturns:\nA `Tensor` of the same `dtype` as `x` and shape\n`batch_shape + [num_points] + event_shape` containing values of the\nintegral of the piecewise constant function between `[x1, x2]`.", "source": "github-repos"} {"code": "def to_file(self, path):\n with open(os.path.expanduser(path), 'w') as ofile:\n ofile.write(self.__repr__())", "docstring": "Write object XML to path.\n\nArgs:\npath: String file path to the file you wish to (over)write.\nPath will have ~ expanded prior to opening.", "source": "codesearchnet"} {"code": "def launch_task(self, task_id, executable, *args, **kwargs):\n self.tasks[task_id]['time_submitted'] = datetime.datetime.now()\n (hit, memo_fu) = self.memoizer.check_memo(task_id, self.tasks[task_id])\n if hit:\n logger.info('Reusing cached result for task {}'.format(task_id))\n return memo_fu\n executor_label = self.tasks[task_id]['executor']\n try:\n executor = self.executors[executor_label]\n except Exception:\n logger.exception('Task {} requested invalid executor {}: config is\\n{}'.format(task_id, executor_label, self._config))\n if ((self.monitoring is not None) and self.monitoring.resource_monitoring_enabled):\n executable = self.monitoring.monitor_wrapper(executable, task_id, self.monitoring.monitoring_hub_url, self.run_id, self.monitoring.resource_monitoring_interval)\n with self.submitter_lock:\n exec_fu = executor.submit(executable, *args, **kwargs)\n self.tasks[task_id]['status'] = States.launched\n if (self.monitoring is not None):\n task_log_info = self._create_task_log_info(task_id, 'lazy')\n self.monitoring.send(MessageType.TASK_INFO, task_log_info)\n exec_fu.retries_left = (self._config.retries - self.tasks[task_id]['fail_count'])\n logger.info('Task {} launched on executor {}'.format(task_id, executor.label))\n return exec_fu", "docstring": "Handle the actual submission of the task to the executor layer.\n\nIf the app task has the executors attributes not set (default=='all')\nthe task is launched on a randomly selected executor from the\nlist of executors. This behavior could later be updated to support\nbinding to executors based on user specified criteria.\n\nIf the app task specifies a particular set of executors, it will be\ntargeted at those specific executors.\n\nArgs:\ntask_id (uuid string) : A uuid string that uniquely identifies the task\nexecutable (callable) : A callable object\nargs (list of positional args)\nkwargs (arbitrary keyword arguments)\n\n\nReturns:\nFuture that tracks the execution of the submitted executable", "source": "codesearchnet"} {"code": "class SessionRunArgs(collections.namedtuple('SessionRunArgs', ['fetches', 'feed_dict', 'options'])):\n\n def __new__(cls, fetches, feed_dict=None, options=None):\n return super(SessionRunArgs, cls).__new__(cls, fetches, feed_dict, options)", "docstring": "Represents arguments to be added to a `Session.run()` call.\n\nArgs:\nfetches: Exactly like the 'fetches' argument to Session.Run().\nCan be a single tensor or op, a list of 'fetches' or a dictionary\nof fetches. 
For example:\nfetches = global_step_tensor\nfetches = [train_op, summary_op, global_step_tensor]\nfetches = {'step': global_step_tensor, 'summ': summary_op}\nNote that this can recurse as expected:\nfetches = {'step': global_step_tensor,\n'ops': [train_op, check_nan_op]}\nfeed_dict: Exactly like the `feed_dict` argument to `Session.Run()`\noptions: Exactly like the `options` argument to `Session.run()`, i.e., a\nconfig_pb2.RunOptions proto.", "source": "github-repos"} {"code": "def _gather(params, indices, axis, batch_dims):\n params_is_ragged = ragged_tensor.is_ragged(params)\n indices_is_ragged = ragged_tensor.is_ragged(indices)\n if not (params_is_ragged or indices_is_ragged):\n return array_ops.gather(params, indices, axis=axis, batch_dims=batch_dims)\n if batch_dims > 0:\n return _batch_gather(params, indices, axis, batch_dims)\n if axis > 0:\n return _axis_gather(params, indices, axis)\n if indices_is_ragged:\n return indices.with_values(_gather(params, indices.values, 0, 0))\n if indices.shape.ndims is None:\n raise ValueError('rank(indices) must be known statically')\n out_ragged_rank = indices.shape.ndims + len(params.nested_row_splits) - 1\n result = gen_ragged_array_ops.ragged_gather(indices=indices, params_dense_values=params.flat_values, params_nested_splits=params.nested_row_splits, OUTPUT_RAGGED_RANK=out_ragged_rank)\n result = ragged_tensor.RaggedTensor.from_nested_row_splits(result.output_dense_values, result.output_nested_splits, validate=False)\n if indices.shape.ndims > 1:\n target = result\n indices_shape = array_ops.shape(indices, out_type=params.row_splits.dtype)\n shape_cumprod = math_ops.cumprod(indices_shape)\n for dim in range(indices.shape.ndims - 1):\n target._cached_nrows = shape_cumprod[dim]\n target._uniform_row_length = indices_shape[dim + 1]\n target = target.values\n return result", "docstring": "Helper that implements the body for ragged gather().\n\nAssumes that `params` and `indices` have been converted to tensors or\nragged tensors, and that `axis` and `batch_dims` have been normalized to\nbe positive. (So these conversions & normalizations can be skipped in\nrecursive calls to _gather).\n\nArgs:\nparams: The tensor from which to gather values.\nindices: The indices of values to gather.\naxis: The axis in `params` to gather `indices` from.\nbatch_dims: The number of batch dimensions.\n\nReturns:\nA potentially ragged tensor.", "source": "github-repos"} {"code": "def parse_frequency(variant, info_key):\n raw_annotation = variant.INFO.get(info_key)\n raw_annotation = (None if (raw_annotation == '.') else raw_annotation)\n frequency = (float(raw_annotation) if raw_annotation else None)\n return frequency", "docstring": "Parse any frequency from the info dict\n\nArgs:\nvariant(cyvcf2.Variant)\ninfo_key(str)\n\nReturns:\nfrequency(float): or None if frequency does not exist", "source": "codesearchnet"} {"code": "def update_from_group(self, data=None, timeout=-1):\n \n uri = \"{}/updateFromGroup\".format(self.data[\"uri\"])\n return self._helper.update(data, uri, timeout=timeout)", "docstring": "Use this action to make a logical enclosure consistent with the enclosure group when the logical enclosure is\nin the Inconsistent state.\n\nArgs:\ntimeout: Timeout in seconds. Wait for task completion by default. 
The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Logical enclosure.", "source": "juraj-google-style"} {"code": "def error_wrapper(fn, error_class):\n\n def wrapper(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except Exception as e:\n six.reraise(error_class, error_class(e), sys.exc_info()[2])\n return wrapper", "docstring": "Wraps function fn in a try catch block that re-raises error_class.\n\nArgs:\nfn (function): function to be wrapped\nerror_class (Exception): Error class to be re-raised\n\nReturns:\n(object): fn wrapped in a try catch.", "source": "codesearchnet"} {"code": "def parse_xhtml_species_notes(entry):\n properties = {}\n if (entry.xml_notes is not None):\n cobra_notes = dict(parse_xhtml_notes(entry))\n for key in ('pubchem_id', 'chebi_id'):\n if (key in cobra_notes):\n properties[key] = cobra_notes[key]\n if ('formula' in cobra_notes):\n properties['formula'] = cobra_notes['formula']\n if ('kegg_id' in cobra_notes):\n properties['kegg'] = cobra_notes['kegg_id']\n if ('charge' in cobra_notes):\n try:\n value = int(cobra_notes['charge'])\n except ValueError:\n logger.warning('Unable to parse charge for {} as an integer: {}'.format(entry.id, cobra_notes['charge']))\n value = cobra_notes['charge']\n properties['charge'] = value\n return properties", "docstring": "Return species properties defined in the XHTML notes.\n\nOlder SBML models often define additional properties in the XHTML notes\nsection because structured methods for defining properties had not been\ndeveloped. This will try to parse the following properties: ``PUBCHEM ID``,\n``CHEBI ID``, ``FORMULA``, ``KEGG ID``, ``CHARGE``.\n\nArgs:\nentry: :class:`SBMLSpeciesEntry`.", "source": "codesearchnet"} {"code": "def min_max_variable_partitioner(max_partitions=1, axis=0, min_slice_size=256 << 10, bytes_per_string_element=16):\n\n def _partitioner(shape, dtype):\n \n if axis >= len(shape):\n raise ValueError(f'Cannot partition variable along axis {axis} when shape is only {shape}')\n dtype = dtypes.as_dtype(dtype)\n if dtype.base_dtype == dtypes.string:\n bytes_per_element = bytes_per_string_element\n else:\n bytes_per_element = dtype.size\n total_size_bytes = shape.num_elements() * bytes_per_element\n partitions = total_size_bytes / min_slice_size\n partitions_list = [1] * len(shape)\n partitions_list[axis] = max(1, min(shape.dims[axis].value, max_partitions, int(math.ceil(partitions))))\n return partitions_list\n return _partitioner", "docstring": "Partitioner to allocate minimum size per slice.\n\nReturns a partitioner that partitions the variable of given shape and dtype\nsuch that each partition has a minimum of `min_slice_size` slice of the\nvariable. The maximum number of such partitions (upper bound) is given by\n`max_partitions`.\n\nArgs:\nmax_partitions: Upper bound on the number of partitions. Defaults to 1.\naxis: Axis along which to partition the variable. Defaults to 0.\nmin_slice_size: Minimum size of the variable slice per partition. 
Defaults\nto 256K.\nbytes_per_string_element: If the `Variable` is of type string, this provides\nan estimate of how large each scalar in the `Variable` is.\n\nReturns:\nA partition function usable as the `partitioner` argument to\n`variable_scope` and `get_variable`.", "source": "github-repos"} {"code": "def get_scores(self, avg=False, ignore_empty=False):\n \n hyp_path, ref_path = self.hyp_path, self.ref_path\n\n with io.open(hyp_path, encoding=\"utf-8\", mode=\"r\") as hyp_file:\n hyps = [line[:-1] for line in hyp_file]\n with io.open(ref_path, encoding=\"utf-8\", mode=\"r\") as ref_file:\n refs = [line[:-1] for line in ref_file]\n\n return self.rouge.get_scores(hyps, refs, avg=avg,\n ignore_empty=ignore_empty)", "docstring": "Calculate ROUGE scores between each pair of\nlines (hyp_file[i], ref_file[i]).\nArgs:\n* hyp_path: hypothesis file path\n* ref_path: references file path\n* avg (False): whether to get an average scores or a list", "source": "juraj-google-style"} {"code": "def _GetEnableOsLoginValue(self, metadata_dict):\n \n instance_data, project_data = self._GetInstanceAndProjectAttributes(\n metadata_dict)\n instance_value = instance_data.get('enable-oslogin')\n project_value = project_data.get('enable-oslogin')\n value = instance_value or project_value or ''\n\n return value.lower() == 'true'", "docstring": "Get the value of the enable-oslogin metadata key.\n\nArgs:\nmetadata_dict: json, the deserialized contents of the metadata server.\n\nReturns:\nbool, True if OS Login is enabled for VM access.", "source": "juraj-google-style"} {"code": "def _exec_command(self, cmd):\n (_, out, err) = self._client.exec_command(cmd, timeout=self._timeout)\n return (out.read().strip() if (not err.read().strip()) else None)", "docstring": "Run a command on the remote SSH server.\n\nReturns:\nbytes: the output of the command, if it didn't fail\nNone: if the error pipe of the command was not empty", "source": "codesearchnet"} {"code": "def get_role(self, name):\n \n\n address = _create_role_address(name)\n role_list_bytes = None\n\n try:\n role_list_bytes = self._state_view.get(address=address)\n except KeyError:\n return None\n\n if role_list_bytes is not None:\n role_list = _create_from_bytes(role_list_bytes,\n identity_pb2.RoleList)\n for role in role_list.roles:\n if role.name == name:\n return role\n return None", "docstring": "Get a single Role by name.\n\nArgs:\nname (str): The name of the Role.\n\nReturns:\n(:obj:`Role`): The Role that matches the name or None.", "source": "juraj-google-style"} {"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:\n residual = hidden_states\n hidden_states = self.layer_norm1(hidden_states)\n hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)\n hidden_states = residual + hidden_states\n residual = hidden_states\n hidden_states = self.layer_norm2(hidden_states)\n hidden_states = self.mlp(hidden_states)\n hidden_states = residual + hidden_states\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (attn_weights,)\n return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`):\nInput to the layer of shape `(batch, seq_len, embed_dim)`.\nattention_mask (`torch.FloatTensor`):\nAttention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.\noutput_attentions (`bool`, *optional*, 
defaults to `False`):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"} {"code": "def _CTCLossV2Grad(op, grad_loss, _):\n return _CTCLossGradImpl(op, grad_loss, _)", "docstring": "The derivative provided by CTC Loss V2.\n\nArgs:\nop: the CTCLossV2 op.\ngrad_loss: The backprop for cost.\n\nReturns:\nThe CTC Loss V2 gradient.", "source": "github-repos"} {"code": "def scan_backends(self, backends):\n engines = OrderedDict()\n filenames = OrderedDict()\n extensions = OrderedDict()\n for item in backends:\n engines[item._kind_name] = item\n filenames[item._default_filename] = item._kind_name\n extensions[item._file_extension] = item._kind_name\n return (engines, filenames, extensions)", "docstring": "From given backends create and return engine, filename and extension\nindexes.\n\nArguments:\nbackends (list): List of backend engines to scan. Order does matter\nsince resulted indexes are stored in an ``OrderedDict``. So\ndiscovering will stop its job if it meets the first item.\n\nReturns:\ntuple: Engine, filename and extension indexes where:\n\n* Engines are indexed on their kind name with their backend object\nas value;\n* Filenames are indexed on their filename with engine kind name as\nvalue;\n* Extensions are indexed on their extension with engine kind name\nas value;", "source": "codesearchnet"} {"code": "def _sync_directories(from_directory, to_directory):\n if (not os.path.exists(to_directory)):\n os.mkdir(to_directory)\n for (root, dirs, files) in os.walk(from_directory):\n to_root = root.replace(from_directory, to_directory)\n for directory in dirs:\n to_child_dir = os.path.join(to_root, directory)\n if (not os.path.exists(to_child_dir)):\n os.mkdir(to_child_dir)\n for fname in files:\n from_file = os.path.join(root, fname)\n to_file = os.path.join(to_root, fname)\n with open(from_file, 'rb') as a, open(to_file, 'wb') as b:\n b.write(a.read())", "docstring": "Sync to_directory with from_directory by copying each file in\nto_directory with new contents. Files in to_directory will be\noverwritten by files of the same name in from_directory. We need to\nkeep two copies of the log directory because otherwise TensorBoard\npicks up temp files from `aws s3 sync` and then stops reading the\ncorrect tfevent files. 
We walk the directory and copy each file\nindividually because the directory that TensorBoard watches needs to\nalways exist.\n\nArgs:\nfrom_directory (str): The directory with updated files.\nto_directory (str): The directory to be synced.", "source": "codesearchnet"} {"code": "def sg_sugar_func(func):\n r\n @wraps(func)\n def wrapper(tensor, **kwargs):\n \n out = func(tensor, tf.sg_opt(kwargs))\n \n out._sugar = tf.sg_opt(func=func, arg=tf.sg_opt(kwargs)+sg_get_context(), prev=tensor)\n \n out.sg_reuse = types.MethodType(sg_reuse, out)\n return out\n\n return wrapper", "docstring": "r\"\"\" Decorates a function `func` so that it can be a sugar function.\nSugar function can be used in a chainable manner.\n\nArgs:\nfunc: function to decorate\n\nReturns:\nA sugar function.", "source": "juraj-google-style"} {"code": "def get_version_by_value(context, value):\n \n versions = get_versions(context)\n for version in versions:\n if version.value == value:\n return version\n fail(\"Didn't find a matching version for: \"\n \"{}:{} in env/service: {}/{}\".format(\n context.key, value,\n context.env, context.service_name))", "docstring": "Get the latest version that matches the provided ami-id\nArgs:\ncontext: a populated EFVersionContext object\nvalue: the value of the version to look for", "source": "juraj-google-style"} {"code": "def _SkipFieldValue(tokenizer):\n \n \n \n if tokenizer.TryConsumeByteString():\n while tokenizer.TryConsumeByteString():\n pass\n return\n\n if (not tokenizer.TryConsumeIdentifier() and\n not _TryConsumeInt64(tokenizer) and not _TryConsumeUint64(tokenizer) and\n not tokenizer.TryConsumeFloat()):\n raise ParseError('Invalid field value: ' + tokenizer.token)", "docstring": "Skips over a field value.\n\nArgs:\ntokenizer: A tokenizer to parse the field name and values.\n\nRaises:\nParseError: In case an invalid field value is found.", "source": "juraj-google-style"} {"code": "def random_array(shape, mean=128.0, std=20.0):\n x = np.random.random(shape)\n x = ((x - np.mean(x)) / (np.std(x) + K.epsilon()))\n x = ((x * std) + mean)\n return x", "docstring": "Creates a uniformly distributed random array with the given `mean` and `std`.\n\nArgs:\nshape: The desired shape\nmean: The desired mean (Default value = 128)\nstd: The desired std (Default value = 20)\n\nReturns: Random numpy array of given `shape` uniformly distributed with desired `mean` and `std`.", "source": "codesearchnet"} {"code": "def _get_image_depth(self):\n max_column_widths = []\n for layer in self.ops:\n current_max = 0\n for op in layer:\n arg_str_len = 0\n for arg in op.op.params:\n arg_str = re.sub('[-+]?\\\\d*\\\\.\\\\d{2,}|\\\\d{2,}', _truncate_float, str(arg))\n arg_str_len += len(arg_str)\n current_max = max(arg_str_len, current_max)\n max_column_widths.append(current_max)\n columns = 2\n columns += len(self.ops)\n sum_column_widths = sum(((1 + (v / 3)) for v in max_column_widths))\n return (columns, (math.ceil(sum_column_widths) + 4))", "docstring": "Get depth information for the circuit.\n\nReturns:\nint: number of columns in the circuit\nint: total size of columns in the circuit", "source": "codesearchnet"} {"code": "def to_basis(self, basis=None,\n start=None,\n stop=None,\n step=None,\n undefined=None):\n \n if basis is None:\n if start is None:\n new_start = self.start\n else:\n new_start = start\n new_step = step or self.step\n new_stop = stop or self.stop\n \n \n steps = 1 + (new_stop - new_start) / new_step\n basis = np.linspace(new_start, new_stop, int(steps), endpoint=True)\n else:\n new_start 
= basis[0]\n new_step = basis[1] - basis[0]\n\n if undefined is None:\n undefined = np.nan\n else:\n undefined = undefined\n\n interp = interp1d(self.basis, self,\n bounds_error=False,\n fill_value=undefined)\n\n data = interp(basis)\n\n params = self.__dict__.copy()\n params['step'] = float(new_step)\n params['start'] = float(new_start)\n\n return Curve(data, params=params)", "docstring": "Make a new curve in a new basis, given a basis, or a new start, step,\nand/or stop. You only need to set the parameters you want to change.\nIf the new extents go beyond the current extents, the curve is padded\nwith the ``undefined`` parameter.\n\nArgs:\nbasis (ndarray)\nstart (float)\nstop (float)\nstep (float)\nundefined (float)\n\nReturns:\nCurve. The current instance in the new basis.", "source": "juraj-google-style"} {"code": "def __decode_dictionary(self, message_type, dictionary):\n message = message_type()\n for (key, value) in six.iteritems(dictionary):\n if (value is None):\n try:\n message.reset(key)\n except AttributeError:\n pass\n continue\n try:\n field = message.field_by_name(key)\n except KeyError:\n variant = self.__find_variant(value)\n if variant:\n message.set_unrecognized_field(key, value, variant)\n continue\n if field.repeated:\n if (not isinstance(value, list)):\n value = [value]\n valid_value = [self.decode_field(field, item) for item in value]\n setattr(message, field.name, valid_value)\n continue\n if (value == []):\n continue\n try:\n setattr(message, field.name, self.decode_field(field, value))\n except messages.DecodeError:\n if (not isinstance(field, messages.EnumField)):\n raise\n variant = self.__find_variant(value)\n if variant:\n message.set_unrecognized_field(key, value, variant)\n return message", "docstring": "Merge dictionary in to message.\n\nArgs:\nmessage: Message to merge dictionary in to.\ndictionary: Dictionary to extract information from. Dictionary\nis as parsed from JSON. Nested objects will also be dictionaries.", "source": "codesearchnet"} {"code": "def _verify_pair(prev, curr):\n if (prev._dimension != 2):\n raise ValueError('Curve not in R^2', prev)\n end = prev._nodes[(:, (- 1))]\n start = curr._nodes[(:, 0)]\n if (not _helpers.vector_close(end, start)):\n raise ValueError('Not sufficiently close', 'Consecutive sides do not have common endpoint', prev, curr)", "docstring": "Verify a pair of sides share an endpoint.\n\n.. 
note::\n\nThis currently checks that edge endpoints match **exactly**\nbut allowing some roundoff may be desired.\n\nArgs:\nprev (.Curve): \"Previous\" curve at piecewise junction.\ncurr (.Curve): \"Next\" curve at piecewise junction.\n\nRaises:\nValueError: If the previous side is not in 2D.\nValueError: If consecutive sides don't share an endpoint.", "source": "codesearchnet"} {"code": "def _predict_one(self, document, encoding=None, return_blocks=False):\n \n \n blocks = self.blockifier.blockify(document, encoding=encoding)\n \n try:\n features = self.features.transform(blocks)\n except ValueError: \n preds = np.zeros((len(blocks)))\n \n else:\n if self.prob_threshold is None:\n preds = self.model.predict(features)\n else:\n self._positive_idx = (\n self._positive_idx or list(self.model.classes_).index(1))\n preds = self.model.predict_proba(features) > self.prob_threshold\n preds = preds[:, self._positive_idx].astype(int)\n\n if return_blocks:\n return preds, blocks\n else:\n return preds", "docstring": "Predict class (content=1 or not-content=0) of each block in an HTML\ndocument.\n\nArgs:\ndocuments (str): HTML document\n\nReturns:\n``np.ndarray``: array of binary predictions for content (1) or\nnot-content (0).", "source": "juraj-google-style"} {"code": "def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.return_dict\n if attention_mask is None:\n attention_mask = jnp.ones_like(input_ids)\n rngs = {}\n if dropout_rng is not None:\n rngs['dropout'] = dropout_rng\n\n def _encoder_forward(module, input_ids, attention_mask, **kwargs):\n encode_module = module._get_encoder_module()\n return encode_module(input_ids, attention_mask, **kwargs)\n return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward)", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google-t5/t5-base\")\n>>> model = FlaxLongT5ForConditionalGeneration.from_pretrained(\"google/long-t5-local-base\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, return_tensors=\"np\")\n>>> encoder_outputs = model.encode(**inputs)\n```", "source": "github-repos"} {"code": "def substitute(self, index, func_grp, bond_order=1):\n all_non_terminal_nn = []\n for (nn, dist) in self.get_neighbors(self[index], 3):\n for (inn, dist2) in self.get_neighbors(nn, 3):\n if ((inn != self[index]) and (dist2 < (1.2 * get_bond_length(nn.specie, inn.specie)))):\n all_non_terminal_nn.append((nn, dist))\n break\n if (len(all_non_terminal_nn) == 0):\n raise RuntimeError(\"Can't find a non-terminal neighbor to attach functional group to.\")\n non_terminal_nn = min(all_non_terminal_nn, key=(lambda d: d[1]))[0]\n 
origin = non_terminal_nn.coords\n if isinstance(func_grp, Molecule):\n func_grp = func_grp\n elif (func_grp not in FunctionalGroups):\n raise RuntimeError(\"Can't find functional group in list. Provide explicit coordinate instead\")\n else:\n func_grp = FunctionalGroups[func_grp]\n try:\n bl = get_bond_length(non_terminal_nn.specie, func_grp[1].specie, bond_order=bond_order)\n except TypeError:\n bl = None\n if (bl is not None):\n func_grp = func_grp.copy()\n vec = (func_grp[0].coords - func_grp[1].coords)\n vec /= np.linalg.norm(vec)\n func_grp[0] = ('X', (func_grp[1].coords + (float(bl) * vec)))\n x = func_grp[0]\n func_grp.translate_sites(list(range(len(func_grp))), (origin - x.coords))\n v1 = (func_grp[1].coords - origin)\n v2 = (self[index].coords - origin)\n angle = get_angle(v1, v2)\n if (1 < abs((angle % 180)) < 179):\n axis = np.cross(v1, v2)\n op = SymmOp.from_origin_axis_angle(origin, axis, angle)\n func_grp.apply_operation(op)\n elif (abs((abs(angle) - 180)) < 1):\n for i in range(len(func_grp)):\n func_grp[i] = (func_grp[i].species, (origin - (func_grp[i].coords - origin)))\n del self[index]\n for site in func_grp[1:]:\n s_new = PeriodicSite(site.species, site.coords, self.lattice, coords_are_cartesian=True)\n self._sites.append(s_new)", "docstring": "Substitute atom at index with a functional group.\n\nArgs:\nindex (int): Index of atom to substitute.\nfunc_grp: Substituent molecule. There are two options:\n\n1. Providing an actual Molecule as the input. The first atom\nmust be a DummySpecie X, indicating the position of\nnearest neighbor. The second atom must be the next\nnearest atom. For example, for a methyl group\nsubstitution, func_grp should be X-CH3, where X is the\nfirst site and C is the second site. What the code will\ndo is to remove the index site, and connect the nearest\nneighbor to the C atom in CH3. The X-C bond indicates the\ndirectionality to connect the atoms.\n2. A string name. The molecule will be obtained from the\nrelevant template in func_groups.json.\nbond_order (int): A specified bond order to calculate the bond\nlength between the attached functional group and the nearest\nneighbor site. Defaults to 1.", "source": "codesearchnet"} {"code": "def GetVolumeByIndex(self, volume_index):\n if (not self._is_parsed):\n self._Parse()\n self._is_parsed = True\n if ((volume_index < 0) or (volume_index >= len(self._volume_identifiers))):\n return None\n volume_identifier = self._volume_identifiers[volume_index]\n return self._volumes[volume_identifier]", "docstring": "Retrieves a specific volume based on the index.\n\nArgs:\nvolume_index (int): index of the volume.\n\nReturns:\nVolume: a volume or None if not available.", "source": "codesearchnet"} {"code": "def first_seen(self, first_seen):\n \n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n\n first_seen = self._utils.format_datetime(first_seen, date_format='%Y-%m-%dT%H:%M:%SZ')\n self._data['firstSeen'] = first_seen\n request = {'firstSeen': first_seen}\n return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)", "docstring": "Updates the campaign with the new first_seen date.\n\nArgs:\nfirst_seen: The first_seen date. 
Converted to %Y-%m-%dT%H:%M:%SZ date format\n\nReturns:", "source": "juraj-google-style"} {"code": "def GetMessages(self, formatter_mediator, event):\n \n if self.DATA_TYPE != event.data_type:\n raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(\n event.data_type))\n\n event_values = event.CopyToDict()\n\n attribute_type = event_values.get('attribute_type', 0)\n event_values['attribute_name'] = self._ATTRIBUTE_NAMES.get(\n attribute_type, 'UNKNOWN')\n\n file_reference = event_values.get('file_reference', None)\n if file_reference:\n event_values['file_reference'] = '{0:d}-{1:d}'.format(\n file_reference & 0xffffffffffff, file_reference >> 48)\n\n parent_file_reference = event_values.get('parent_file_reference', None)\n if parent_file_reference:\n event_values['parent_file_reference'] = '{0:d}-{1:d}'.format(\n parent_file_reference & 0xffffffffffff, parent_file_reference >> 48)\n\n if not event_values.get('is_allocated', False):\n event_values['unallocated'] = 'unallocated'\n\n return self._ConditionalFormatMessages(event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.\n\nRaises:\nWrongFormatter: if the event object cannot be formatted by the formatter.", "source": "juraj-google-style"} {"code": "def __init__(self, split_type=\"neutral\", **kwargs):\n \n super(AbstractReasoningConfig, self).__init__(**kwargs)\n self.split_type = split_type", "docstring": "BuilderConfig for AbstractReasoning.\n\nArgs:\nsplit_type: String with split_type to use. Should be one of [\"neutral\",\n\"interpolation\", \"extrapolation\", \"attr.rel.pairs\", \"attr.rels\",\n\"attrs.pairs\", \"attrs.shape.color\", \"attrs.line.type\",].\n**kwargs: keyword arguments forwarded to super.", "source": "juraj-google-style"} {"code": "def watch(self, tensor):\n for t in _extract_tensors_and_variables(tensor):\n if not backprop_util.IsTrainable(t):\n logging.log_first_n(logging.WARN, 'The dtype of the watched tensor must be floating (e.g. 
tf.float32), got %r', 5, t.dtype)\n if hasattr(t, 'handle'):\n tape.watch_variable(self._tape, t)\n else:\n tape.watch(self._tape, t)", "docstring": "Ensures that `tensor` is being traced by this tape.\n\nArgs:\ntensor: a Tensor/Variable or list of Tensors/Variables.\n\nRaises:\nValueError: if it encounters something that is not a tensor.", "source": "github-repos"} {"code": "def merge_two_dictionaries(a, b, merge_lists=False):\n \n \n key = None\n \n \n try:\n if a is None or isinstance(a, (six.string_types, six.text_type, six.integer_types, float)):\n \n a = b\n elif isinstance(a, list):\n \n if isinstance(b, list):\n if merge_lists:\n \n a.extend(b)\n else:\n \n a = b\n else:\n \n a.append(b)\n elif isinstance(a, (dict, UserDict)):\n \n if isinstance(b, (dict, UserDict)):\n for key in b:\n if key in a:\n a[key] = merge_two_dictionaries(a[key], b[key], merge_lists=merge_lists)\n else:\n a[key] = b[key]\n else:\n raise ValueError('Cannot merge non-dict \"%s\" into dict \"%s\"' % (b, a))\n else:\n raise ValueError('NOT IMPLEMENTED \"%s\" into \"%s\"' % (b, a))\n except TypeError as e:\n raise ValueError('TypeError \"%s\" in key \"%s\" when merging \"%s\" into \"%s\"' % (e, key, b, a))\n return a", "docstring": "Merges b into a and returns merged result\n\nNOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen\n\nArgs:\na (DictUpperBound): dictionary to merge into\nb (DictUpperBound): dictionary to merge from\nmerge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False.\n\nReturns:\nDictUpperBound: Merged dictionary", "source": "juraj-google-style"} {"code": "def get_contents(self, path):\n \n try:\n if not os.path.exists(path):\n raise ConfigurationError('specified path does not exist %s' % path)\n\n with open(path) as f:\n data = f.read()\n\n return data\n\n except (IOError, OSError) as exc:\n raise ConfigurationError('error trying to load file contents: %s' % exc)", "docstring": "Loads the contents of the file specified by path\n\nArgs:\npath (string): The relative or absolute path to the file to\nbe loaded. If the path is relative, then it is combined\nwith the base_path to generate a full path string\n\nReturns:\nstring: The contents of the file as a string\n\nRaises:\nConfigurationError: If the file cannot be loaded", "source": "juraj-google-style"} {"code": "def multi_get(self, urls, query_params=None, to_json=True):\n return self._multi_request(MultiRequest._VERB_GET, urls, query_params, data=None, to_json=to_json)", "docstring": "Issue multiple GET requests.\n\nArgs:\nurls - A string URL or list of string URLs\nquery_params - None, a dict, or a list of dicts representing the query params\nto_json - A boolean, should the responses be returned as JSON blobs\nReturns:\na list of dicts if to_json is set of requests.response otherwise.\nRaises:\nInvalidRequestError - Can not decide how many requests to issue.", "source": "codesearchnet"} {"code": "def mkdir_uchroot(dirpath, root=\".\"):\n \n from benchbuild.utils.uchroot import no_args, uretry\n\n uchroot = no_args()\n uchroot = uchroot[\"-E\", \"-A\", \"-C\", \"-w\", \"/\", \"-r\"]\n uchroot = uchroot[os.path.abspath(root)]\n uretry(uchroot[\"--\", \"/bin/mkdir\", \"-p\", dirpath])", "docstring": "Create a file inside a uchroot env.\n\nYou will want to use this when you need to create a file with apropriate\nrights inside a uchroot container with subuid/subgid handling enabled.\n\nArgs:\ndirpath:\nThe dirpath that should be created. 
Absolute inside the\nuchroot container.\nroot:\nThe root PATH of the container filesystem as seen outside of\nthe container.", "source": "juraj-google-style"} {"code": "def __init__(self, path):\n \n self._path = os.path.normpath(decode(path))", "docstring": "Create an instance of *PyFileSearcher* bound to specific directory.\n\nArgs:\npath (str): path to local directory", "source": "juraj-google-style"} {"code": "def to_html_str(value: Any, *, name: Optional[str]=None, root_path: Optional[utils.KeyPath]=None, view_id: str='html-tree-view', content_only: bool=False, **kwargs) -> str:\n return to_html(value, name=name, root_path=root_path, view_id=view_id, **kwargs).to_str(content_only=content_only)", "docstring": "Returns a HTML str for a value.\n\nArgs:\nvalue: The value to render.\nname: The name of the value.\nroot_path: The root path of the value.\nview_id: The ID of the view to render the value.\nSee `pg.views.HtmlView.dir()` for all available HTML view IDs.\ncontent_only: If True, only the content will be returned.\n**kwargs: Additional keyword arguments passed from `pg.to_html`, which\nwill be passed to the `HtmlView.render_xxx()` (thus\n`Extension._html_xxx()`) methods.\n\nReturns:\nThe rendered HTML str.", "source": "github-repos"} {"code": "def ReadPreprocessingInformation(self, knowledge_base):\n \n generator = self._GetAttributeContainers(\n self._CONTAINER_TYPE_SYSTEM_CONFIGURATION)\n for stream_number, system_configuration in enumerate(generator):\n \n knowledge_base.ReadSystemConfigurationArtifact(\n system_configuration, session_identifier=stream_number)", "docstring": "Reads preprocessing information.\n\nThe preprocessing information contains the system configuration which\ncontains information about various system specific configuration data,\nfor example the user accounts.\n\nArgs:\nknowledge_base (KnowledgeBase): is used to store the preprocessing\ninformation.", "source": "juraj-google-style"} {"code": "def events_from_logdir(logdir):\n assert gfile.Exists(logdir)\n files = gfile.ListDirectory(logdir)\n assert len(files) == 1, 'Found not exactly one file in logdir: %s' % files\n return events_from_file(os.path.join(logdir, files[0]))", "docstring": "Returns all events in the single eventfile in logdir.\n\nArgs:\nlogdir: The directory in which the single event file is sought.\n\nReturns:\nA list of all tf.compat.v1.Event protos from the single event file.\n\nRaises:\nAssertionError: If logdir does not contain exactly one file.", "source": "github-repos"} {"code": "def __init__(self, instance_id: Optional[str] = None):\n \n self.instance_id = instance_id\n if instance_id:\n self.middleware_id += \"", "docstring": "Initialize the middleware.\nInherited initializer must call the \"super init\" method\nat the beginning.\n\nArgs:\ninstance_id: Instance ID of the middleware.", "source": "juraj-google-style"} {"code": "def placeholder_symbol_table(name, version, max_id):\n \n if version <= 0:\n raise ValueError('Version must be greater than or equal to 1: %s' % version)\n if max_id < 0:\n raise ValueError('Max ID must be zero or positive: %s' % max_id)\n\n return SymbolTable(\n table_type=SHARED_TABLE_TYPE,\n symbols=repeat(None, max_id),\n name=name,\n version=version,\n is_substitute=True\n )", "docstring": "Constructs a shared symbol table that consists of symbols that all have no known text.\n\nThis is generally used for cases where a shared symbol table is not available by the\napplication.\n\nArgs:\nname (unicode): The name of the shared symbol table.\nversion (int): The 
version of the shared symbol table.\nmax_id (int): The maximum ID allocated by this symbol table, must be ``>= 0``\n\nReturns:\nSymbolTable: The synthesized table.", "source": "juraj-google-style"} {"code": "def cosh(x):\n if any_symbolic_tensors((x,)):\n return Cosh().symbolic_call(x)\n return backend.numpy.cosh(x)", "docstring": "Hyperbolic cosine, element-wise.\n\nArguments:\nx: Input tensor.\n\nReturns:\nOutput tensor of same shape as `x`.", "source": "github-repos"} {"code": "def get_course_details(self, course_id):\n \n try:\n return self.client.course(course_id).get()\n except (SlumberBaseException, ConnectionError, Timeout) as exc:\n LOGGER.exception(\n 'Failed to retrieve course enrollment details for course [%s] due to: [%s]',\n course_id, str(exc)\n )\n return {}", "docstring": "Query the Enrollment API for the course details of the given course_id.\n\nArgs:\ncourse_id (str): The string value of the course's unique identifier\n\nReturns:\ndict: A dictionary containing details about the course, in an enrollment context (allowed modes, etc.)", "source": "juraj-google-style"} {"code": "def start(self, request: Request) -> Response:\n if (self._session_state != SessionState.ready):\n raise RuntimeError('Session already started')\n assert (not self._request)\n self._request = request\n _logger.debug(__('Client fetch request {0}.', request))\n connection = (yield from self._acquire_request_connection(request))\n full_url = (connection.proxied and (not connection.tunneled))\n self._stream = stream = self._stream_factory(connection)\n (yield from self._stream.reconnect())\n request.address = connection.address\n self.event_dispatcher.notify(self.Event.begin_request, request)\n write_callback = functools.partial(self.event_dispatcher.notify, self.Event.request_data)\n stream.data_event_dispatcher.add_write_listener(write_callback)\n (yield from stream.write_request(request, full_url=full_url))\n if request.body:\n assert ('Content-Length' in request.fields)\n length = int(request.fields['Content-Length'])\n (yield from stream.write_body(request.body, length=length))\n stream.data_event_dispatcher.remove_write_listener(write_callback)\n self.event_dispatcher.notify(self.Event.end_request, request)\n read_callback = functools.partial(self.event_dispatcher.notify, self.Event.response_data)\n stream.data_event_dispatcher.add_read_listener(read_callback)\n self._response = response = (yield from stream.read_response())\n response.request = request\n self.event_dispatcher.notify(self.Event.begin_response, response)\n self._session_state = SessionState.request_sent\n return response", "docstring": "Begin a HTTP request\n\nArgs:\nrequest: Request information.\n\nReturns:\nA response populated with the HTTP headers.\n\nOnce the headers are received, call :meth:`download`.\n\nCoroutine.", "source": "codesearchnet"} {"code": "def _parse_schema_resource(info):\n \n if \"fields\" not in info:\n return ()\n\n schema = []\n for r_field in info[\"fields\"]:\n name = r_field[\"name\"]\n field_type = r_field[\"type\"]\n mode = r_field.get(\"mode\", \"NULLABLE\")\n description = r_field.get(\"description\")\n sub_fields = _parse_schema_resource(r_field)\n schema.append(SchemaField(name, field_type, mode, description, sub_fields))\n return schema", "docstring": "Parse a resource fragment into a schema field.\n\nArgs:\ninfo: (Mapping[str->dict]): should contain a \"fields\" key to be parsed\n\nReturns:\n(Union[Sequence[:class:`google.cloud.bigquery.schema.SchemaField`],None])\na list of parsed fields, or 
``None`` if no \"fields\" key found.", "source": "juraj-google-style"} {"code": "def __floordiv__(self, other):\n \n return self.__class__(self.x, self.y.__floordiv__(other), *self._args,\n **self._kwargs)", "docstring": "Floor division of y\nArgs:\nother: The divisor\n\nReturns:\nSpectrum object with y values divided", "source": "juraj-google-style"} {"code": "def mktar_from_dockerfile(fileobject: BinaryIO) -> IO:\n \n\n f = tempfile.NamedTemporaryFile()\n t = tarfile.open(mode=\"w:gz\", fileobj=f)\n\n if isinstance(fileobject, BytesIO):\n dfinfo = tarfile.TarInfo(\"Dockerfile\")\n dfinfo.size = len(fileobject.getvalue())\n fileobject.seek(0)\n else:\n dfinfo = t.gettarinfo(fileobj=fileobject, arcname=\"Dockerfile\")\n\n t.addfile(dfinfo, fileobject)\n t.close()\n f.seek(0)\n return f", "docstring": "Create a zipped tar archive from a Dockerfile\n**Remember to close the file object**\nArgs:\nfileobj: a Dockerfile\nReturns:\na NamedTemporaryFile() object", "source": "juraj-google-style"} {"code": "def track_origin(enabled: bool=True) -> ContextManager[None]:\n return thread_local.thread_local_value_scope(_TLS_ENABLE_ORIGIN_TRACKING, enabled, False)", "docstring": "Returns a context manager to enable or disable origin tracking.\n\n`track_origin` is thread-safe and can be nested. For example::\n\na = pg.Dict(x=1)\nwith pg.track_origin(False):\nwith pg.track_origin(True):\n# b's origin will be tracked, which can be accessed by `b.sym_origin`.\nb = a.clone()\n# c's origin will not be tracked, `c.sym_origin` returns None.\nc = a.clone()\n\nArgs:\nenabled: If True, the origin of symbolic values will be tracked during\nobject cloning and returning from functors under current scope.\n\nReturns:\nA context manager to enable or disable origin tracking.", "source": "github-repos"} {"code": "def log_every_n_seconds(level, msg, n_seconds, *args):\n \n should_log = _seconds_have_elapsed(get_absl_logger().findCaller(), n_seconds)\n log_if(level, msg, should_log, *args)", "docstring": "Logs 'msg % args' at level 'level' iff 'n_seconds' elapsed since last call.\n\nLogs the first call, logs subsequent calls if 'n' seconds have elapsed since\nthe last logging call from the same call site (file + line). 
Not thread-safe.\n\nArgs:\nlevel: int, the absl logging level at which to log.\nmsg: str, the message to be logged.\nn_seconds: float or int, seconds which should elapse before logging again.\n*args: The args to be substituted into the msg.", "source": "juraj-google-style"} {"code": "def get_train_op(self,\n loss,\n learning_rate,\n optimizer=None,\n clip_norm=None,\n learnable_scopes=None,\n optimizer_scope_name=None,\n **kwargs):\n \n if optimizer_scope_name is None:\n opt_scope = tf.variable_scope('Optimizer')\n else:\n opt_scope = tf.variable_scope(optimizer_scope_name)\n with opt_scope:\n if learnable_scopes is None:\n variables_to_train = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n else:\n variables_to_train = []\n for scope_name in learnable_scopes:\n variables_to_train.extend(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope_name))\n\n if optimizer is None:\n optimizer = tf.train.AdamOptimizer\n\n \n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n\n def clip_if_not_none(grad):\n if grad is not None:\n return tf.clip_by_norm(grad, clip_norm)\n\n opt = optimizer(learning_rate, **kwargs)\n grads_and_vars = opt.compute_gradients(loss, var_list=variables_to_train)\n if clip_norm is not None:\n grads_and_vars = [(clip_if_not_none(grad), var)\n for grad, var in grads_and_vars]\n train_op = opt.apply_gradients(grads_and_vars)\n return train_op", "docstring": "Get train operation for given loss\n\nArgs:\nloss: loss, tf tensor or scalar\nlearning_rate: scalar or placeholder.\nclip_norm: clip gradients norm by clip_norm.\nlearnable_scopes: which scopes are trainable (None for all).\noptimizer: instance of tf.train.Optimizer, default Adam.\n**kwargs: parameters passed to tf.train.Optimizer object\n(scalars or placeholders).\n\nReturns:\ntrain_op", "source": "juraj-google-style"} {"code": "def _ProcessMetadataFile(self, mediator, file_entry):\n self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING\n self._event_extractor.ParseFileEntryMetadata(mediator, file_entry)\n for data_stream in file_entry.data_streams:\n if self._abort:\n break\n self.last_activity_timestamp = time.time()\n self._event_extractor.ParseMetadataFile(mediator, file_entry, data_stream.name)", "docstring": "Processes a metadata file.\n\nArgs:\nmediator (ParserMediator): mediates the interactions between\nparsers and other components, such as storage and abort signals.\nfile_entry (dfvfs.FileEntry): file entry of the metadata file.", "source": "codesearchnet"} {"code": "def coactivation(dataset, seed, threshold=0.0, output_dir='.', prefix='', r=6):\n if isinstance(seed, string_types):\n ids = dataset.get_studies(mask=seed, activation_threshold=threshold)\n else:\n ids = dataset.get_studies(peaks=seed, r=r, activation_threshold=threshold)\n ma = meta.MetaAnalysis(dataset, ids)\n ma.save_results(output_dir, prefix)", "docstring": "Compute and save coactivation map given input image as seed.\n\nThis is essentially just a wrapper for a meta-analysis defined\nby the contrast between those studies that activate within the seed\nand those that don't.\n\nArgs:\ndataset: a Dataset instance containing study and activation data.\nseed: either a Nifti or Analyze image defining the boundaries of the\nseed, or a list of triples (x/y/z) defining the seed(s). 
Note that\nvoxels do not need to be contiguous to define a seed--all supra-\nthreshold voxels will be lumped together.\nthreshold: optional float indicating the threshold above which voxels\nare considered to be part of the seed ROI (default = 0)\nr: optional integer indicating radius (in mm) of spheres to grow\n(only used if seed is a list of coordinates).\noutput_dir: output directory to write to. Defaults to current.\nIf none, defaults to using the first part of the seed filename.\nprefix: optional string to prepend to all coactivation images.\n\nOutput:\nA set of meta-analysis images identical to that generated by\nmeta.MetaAnalysis.", "source": "codesearchnet"} {"code": "def to_instruction(self):\n from qiskit.circuit.instruction import Instruction\n n_qubits = int(np.log2(self._input_dim))\n if ((self._input_dim != self._output_dim) or ((2 ** n_qubits) != self._input_dim)):\n raise QiskitError('Cannot convert QuantumChannel to Instruction: channel is not an N-qubit channel.')\n if (not self.is_cptp()):\n raise QiskitError('Cannot convert QuantumChannel to Instruction: channel is not CPTP.')\n (kraus, _) = _to_kraus(self.rep, self._data, *self.dim)\n if (len(kraus) == 1):\n return Operator(kraus[0]).to_instruction()\n return Instruction('kraus', n_qubits, 0, kraus)", "docstring": "Convert to a Kraus or UnitaryGate circuit instruction.\n\nIf the channel is unitary it will be added as a unitary gate,\notherwise it will be added as a kraus simulator instruction.\n\nReturns:\nInstruction: A kraus instruction for the channel.\n\nRaises:\nQiskitError: if input data is not an N-qubit CPTP quantum channel.", "source": "codesearchnet"} {"code": "def _parse_normalizations(self, normalizations):\n parsed_normalizations = []\n if isinstance(normalizations, list):\n for item in normalizations:\n normalization = self._parse_normalization(item)\n if normalization:\n parsed_normalizations.append(normalization)\n else:\n raise ConfigError(('List expected. Found %s' % type(normalizations)))\n return parsed_normalizations", "docstring": "Returns a list of parsed normalizations.\n\nIterates over a list of normalizations, removing those\nnot correctly defined. It also transform complex items\nto have a common format (list of tuples and strings).\n\nArgs:\nnormalizations: List of normalizations to parse.\n\nReturns:\nA list of normalizations after being parsed and curated.", "source": "codesearchnet"} {"code": "def update_state(self, y_true, y_pred, sample_weight=None):\n metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, top_k=self.top_k, class_id=self.class_id, sample_weight=sample_weight)", "docstring": "Accumulates true positive and false positive statistics.\n\nArgs:\ny_true: The ground truth values, with the same dimensions as\n`y_pred`. Will be cast to `bool`.\ny_pred: The predicted values. Each element must be in the range\n`[0, 1]`.\nsample_weight: Optional weighting of each example. 
Defaults to `1`.\nCan be a tensor whose rank is either 0, or the same rank as\n`y_true`, and must be broadcastable to `y_true`.", "source": "github-repos"} {"code": "def load_module(self, fullmodname):\n \n submodname, is_package, fullpath, source = self._get_source(fullmodname)\n code = compile(source, fullpath, 'exec')\n mod = sys.modules.get(fullmodname)\n try:\n if mod is None:\n mod = sys.modules[fullmodname] = types.ModuleType(fullmodname)\n mod.__loader__ = self\n mod.__file__ = fullpath\n mod.__name__ = fullmodname\n if is_package:\n mod.__path__ = [os.path.dirname(mod.__file__)]\n exec(code, mod.__dict__)\n except:\n if fullmodname in sys.modules:\n del sys.modules[fullmodname]\n raise\n return mod", "docstring": "PEP-302-compliant load_module() method.\n\nArgs:\nfullmodname: The dot-separated full module name, e.g. 'django.core.mail'.\n\nReturns:\nThe module object constructed from the source code.\n\nRaises:\nSyntaxError if the module's source code is syntactically incorrect.\nImportError if there was a problem accessing the source code.\nWhatever else can be raised by executing the module's source code.", "source": "juraj-google-style"} {"code": "def _clean_required_args(self, url, redirect_uri, client_type):\n for url_to_validate in (url, redirect_uri):\n try:\n URLValidator()(url_to_validate)\n except ValidationError:\n raise CommandError('URLs provided are invalid. Please provide valid application and redirect URLs.')\n client_type = client_type.lower()\n client_type = {'confidential': CONFIDENTIAL, 'public': PUBLIC}.get(client_type)\n if (client_type is None):\n raise CommandError(\"Client type provided is invalid. Please use one of 'confidential' or 'public'.\")\n self.fields = {'url': url, 'redirect_uri': redirect_uri, 'client_type': client_type}", "docstring": "Validate and clean the command's arguments.\n\nArguments:\nurl (str): Client's application URL.\nredirect_uri (str): Client application's OAuth2 callback URI.\nclient_type (str): Client's type, indicating whether the Client application\nis capable of maintaining the confidentiality of its credentials (e.g., running on a\nsecure server) or is incapable of doing so (e.g., running in a browser).\n\nRaises:\nCommandError, if the URLs provided are invalid, or if the client type provided is invalid.", "source": "codesearchnet"} {"code": "def add_callback(self, callback):\n self.callback_handler.add_callback(callback)", "docstring": "Add a callback to the current list of [`~transformers.TrainerCallback`].\n\nArgs:\ncallback (`type` or [`~transformers.TrainerCallback]`):\nA [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. 
In the\nfirst case, will instantiate a member of that class.", "source": "github-repos"} {"code": "def add_profile_variants(self, profile_variants):\n\n \n\n results = self.db.profile_variant.insert_many(profile_variants)\n\n return results", "docstring": "Add several variants to the profile_variant collection in the\ndatabase\n\nArgs:\n\nprofile_variants(list(models.ProfileVariant))", "source": "juraj-google-style"} {"code": "def __init__(self, max_batch_size: int=5000, project: str=None, retry: Retry=None, timeout: float=120, metadata: Sequence[Tuple[str, str]]=(), catalog_name: str='default_catalog'):\n self.max_batch_size = max_batch_size\n self.project = project\n self.retry = retry\n self.timeout = timeout\n self.metadata = metadata\n self.catalog_name = catalog_name", "docstring": "Initializes a :class:`ImportCatalogItems` transform\n\nArgs:\nbatch_size (int): Required. Maximum number of catalogitems per\nrequest.\nproject (str): Optional. GCP project name in which the catalog\ndata will be imported.\nretry: Optional. Designation of what\nerrors, if any, should be retried.\ntimeout (float): Optional. The amount of time, in seconds, to wait\nfor the request to complete.\nmetadata: Optional. Strings which\nshould be sent along with the request as metadata.\ncatalog_name (str): Optional. Name of the catalog.\nDefault: 'default_catalog'", "source": "github-repos"} {"code": "def __getitem__(self, key):\n \n if not isinstance(key, int):\n raise TypeError()\n return self.items[key]", "docstring": "Allows caller to use array indices to get a :class:`PageItem`\n\nArgs:\ni (int): 0-based index of the element to retrieve\n\nReturns:\nPageItem: if valid item exists at index i\nNone if the index is too small or too large", "source": "juraj-google-style"} {"code": "def normalize_name(decl):\n \n if decl.cache.normalized_name is None:\n decl.cache.normalized_name = normalize(decl.name)\n return decl.cache.normalized_name", "docstring": "Cached variant of normalize\n\nArgs:\ndecl (declaration.declaration_t): the declaration\n\nReturns:\nstr: normalized name", "source": "juraj-google-style"} {"code": "def InitFromDataPoints(self, start_stats, complete_stats):\n self.start_points = self._ConvertToResultList(start_stats)\n self.complete_points = self._ConvertToResultList(complete_stats)\n return self", "docstring": "Check that this approval applies to the given token.\n\nArgs:\nstart_stats: A list of lists, each containing two values (a timestamp and\nthe number of clients started at this time).\ncomplete_stats: A list of lists, each containing two values (a timestamp\nand the number of clients completed at this time).\n\nReturns:\nA reference to the current instance to allow method chaining.", "source": "codesearchnet"} {"code": "def inference(cluster_info, feed_timeout=600, qname='input'):\n \n def _inference(iter):\n \n mgr = _get_manager(cluster_info, util.get_ip_address(), util.read_executor_id())\n try:\n queue_in = mgr.get_queue(qname)\n equeue = mgr.get_queue('error')\n except (AttributeError, KeyError):\n msg = \"Queue '{}' not found on this node, check for exceptions on other nodes.\".format(qname)\n raise Exception(msg)\n\n logging.info(\"Feeding partition {0} into {1} queue {2}\".format(iter, qname, queue_in))\n count = 0\n for item in iter:\n count += 1\n queue_in.put(item, block=True)\n\n \n queue_in.put(marker.EndPartition())\n\n \n if count == 0:\n return []\n\n \n joinThr = Thread(target=queue_in.join)\n joinThr.start()\n timeout = feed_timeout\n while (joinThr.isAlive()):\n if (not 
equeue.empty()):\n e_str = equeue.get()\n equeue.task_done()\n raise Exception(\"exception in worker:\\n\" + e_str)\n time.sleep(1)\n timeout -= 1\n if timeout <= 0:\n raise Exception(\"Timeout while feeding partition\")\n\n logging.info(\"Processed {0} items in partition\".format(count))\n\n \n results = []\n queue_out = mgr.get_queue('output')\n while count > 0:\n result = queue_out.get(block=True)\n results.append(result)\n count -= 1\n queue_out.task_done()\n\n logging.info(\"Finished processing partition\")\n return results\n\n return _inference", "docstring": "Feeds Spark partitions into the shared multiprocessing.Queue and returns inference results.\n\nArgs:\n:cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc)\n:feed_timeout: number of seconds after which data feeding times out (600 sec default)\n:qname: *INTERNAL_USE*\n\nReturns:\nA dataRDD.mapPartitions() function", "source": "juraj-google-style"} {"code": "def remove_indirect_links(g, alg=\"aracne\", **kwargs):\n \n alg = {\"aracne\": aracne,\n \"nd\": network_deconvolution,\n \"clr\": clr}[alg]\n mat = np.array(nx.adjacency_matrix(g).todense())\n return nx.relabel_nodes(nx.DiGraph(alg(mat, **kwargs)),\n {idx: i for idx, i in enumerate(list(g.nodes()))})", "docstring": "Apply deconvolution to a networkx graph.\n\nArgs:\ng (networkx.Graph): Graph to apply deconvolution to\nalg (str): Algorithm to use ('aracne', 'clr', 'nd')\nkwargs (dict): extra options for algorithms\n\nReturns:\nnetworkx.Graph: graph with undirected links removed.", "source": "juraj-google-style"} {"code": "def create_table_from_orm_class(engine: Engine,\n ormclass: DeclarativeMeta,\n without_constraints: bool = False) -> None:\n \n table = ormclass.__table__ \n log.info(\"Creating table {} on engine {}{}\",\n table.name,\n get_safe_url_from_engine(engine),\n \" (omitting constraints)\" if without_constraints else \"\")\n \n if without_constraints:\n include_foreign_key_constraints = []\n else:\n include_foreign_key_constraints = None \n creator = CreateTable(\n table,\n include_foreign_key_constraints=include_foreign_key_constraints\n )\n creator.execute(bind=engine)", "docstring": "From an SQLAlchemy ORM class, creates the database table via the specified\nengine, using a ``CREATE TABLE`` SQL (DDL) statement.\n\nArgs:\nengine: SQLAlchemy :class:`Engine` object\normclass: SQLAlchemy ORM class\nwithout_constraints: don't add foreign key constraints", "source": "juraj-google-style"} {"code": "def _boundaries_to_sizes(a, boundaries, axis):\n if axis >= len(a.shape):\n raise ValueError('axis %s is out of bound for shape %s' % (axis, a.shape))\n total_size = a.shape[axis]\n sizes = []\n sizes_sum = 0\n prev = 0\n for i, b in enumerate(boundaries):\n size = b - prev\n if size < 0:\n raise ValueError('The %s-th boundary %s is smaller than the previous boundary %s' % (i, b, prev))\n size = builtins.min(size, builtins.max(0, total_size - sizes_sum))\n sizes.append(size)\n sizes_sum += size\n prev = b\n sizes.append(builtins.max(0, total_size - sizes_sum))\n return sizes", "docstring": "Converting boundaries of splits to sizes of splits.\n\nArgs:\na: the array to be split.\nboundaries: the boundaries, as in np.split.\naxis: the axis along which to split.\n\nReturns:\nA list of sizes of the splits, as in tf.split.", "source": "github-repos"} {"code": "def sampling_query(sql, fields=None, count=5, sampling=None):\n \n if sampling is None:\n sampling = Sampling.default(count=count, fields=fields)\n return sampling(sql)", 
"docstring": "Returns a sampling query for the SQL object.\n\nArgs:\nsql: the SQL object to sample\nfields: an optional list of field names to retrieve.\ncount: an optional count of rows to retrieve which is used if a specific\nsampling is not specified.\nsampling: an optional sampling strategy to apply to the table.\nReturns:\nA SQL query string for sampling the input sql.", "source": "juraj-google-style"} {"code": "def get_meshes_fld(step, var):\n fld = step.fields[var]\n if step.geom.twod_xz:\n (xmesh, ymesh) = (step.geom.x_mesh[(:, 0, :)], step.geom.z_mesh[(:, 0, :)])\n fld = fld[(:, 0, :, 0)]\n elif (step.geom.cartesian and step.geom.twod_yz):\n (xmesh, ymesh) = (step.geom.y_mesh[(0, :, :)], step.geom.z_mesh[(0, :, :)])\n fld = fld[(0, :, :, 0)]\n else:\n (xmesh, ymesh) = (step.geom.x_mesh[(0, :, :)], step.geom.y_mesh[(0, :, :)])\n fld = fld[(0, :, :, 0)]\n return (xmesh, ymesh, fld)", "docstring": "Return scalar field along with coordinates meshes.\n\nOnly works properly in 2D geometry.\n\nArgs:\nstep (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData\ninstance.\nvar (str): scalar field name.\nReturns:\ntuple of :class:`numpy.array`: xmesh, ymesh, fld\n2D arrays containing respectively the x position, y position, and\nthe value of the requested field.", "source": "codesearchnet"} {"code": "def cherry_pick(self, branch, **kwargs):\n \n path = '%s/%s/cherry_pick' % (self.manager.path, self.get_id())\n post_data = {'branch': branch}\n self.manager.gitlab.http_post(path, post_data=post_data, **kwargs)", "docstring": "Cherry-pick a commit into a branch.\n\nArgs:\nbranch (str): Name of target branch\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabCherryPickError: If the cherry-pick could not be performed", "source": "juraj-google-style"} {"code": "def Decode(self, encoded_data):\n \n try:\n decoded_data = base64.b32decode(encoded_data, casefold=False)\n except (TypeError, binascii.Error) as exception:\n raise errors.BackEndError(\n 'Unable to decode base32 stream with error: {0!s}.'.format(\n exception))\n\n return decoded_data, b''", "docstring": "Decode the encoded data.\n\nArgs:\nencoded_data (byte): encoded data.\n\nReturns:\ntuple(bytes, bytes): decoded data and remaining encoded data.\n\nRaises:\nBackEndError: if the base32 stream cannot be decoded.", "source": "juraj-google-style"} {"code": "def InsertMessage(self, message, timeout=None):\n \n if not isinstance(message, common_pb2.Message):\n raise InvalidArgument(\"Attempt to send unexpected message type: %s\" %\n message.__class__.__name__)\n\n if not message.HasField(\"source\"):\n message.source.service_name = self._service_name\n\n \n \n if not message.message_id:\n message.message_id = os.urandom(32)\n\n return self._RetryLoop(\n lambda t: self._stub.InsertMessage(message, timeout=t))", "docstring": "Inserts a message into the Fleetspeak server.\n\nSets message.source, if unset.\n\nArgs:\nmessage: common_pb2.Message\nThe message to send.\n\ntimeout: How many seconds to try for.\n\nRaises:\ngrpc.RpcError: if the RPC fails.\nInvalidArgument: if message is not a common_pb2.Message.", "source": "juraj-google-style"} {"code": "def serialize_function(func):\n if isinstance(func, types.LambdaType):\n return (generic_utils.func_dump(func), 'lambda')\n return (func.__name__, 'function')", "docstring": "Serializes function for Keras.\n\n(De)serializing Python functions from/to bytecode is unsafe. 
Therefore we\nreturn the function's type as an anonymous function ('lambda') or named\nfunction in the Python environment ('function'). In the latter case, this lets\nus use the Python scope to obtain the function rather than reload it from\nbytecode. (Note that both cases are brittle!)\n\nThis serialization mimicks the implementation in `tf.keras.layers.Lambda`.\n\nArgs:\nfunc: Python function to serialize.\n\nReturns:\n(serial, function_type): Serialized object, which is a tuple of its\nbytecode (if function is anonymous) or name (if function is named), and its\nfunction type.", "source": "codesearchnet"} {"code": "def store_and_register(self, object_id, value, depth=100):\n counter = 0\n while True:\n if (counter == depth):\n raise Exception('Ray exceeded the maximum number of classes that it will recursively serialize when attempting to serialize an object of type {}.'.format(type(value)))\n counter += 1\n try:\n if isinstance(value, bytes):\n self.plasma_client.put_raw_buffer(value, object_id=pyarrow.plasma.ObjectID(object_id.binary()), metadata=ray_constants.RAW_BUFFER_METADATA, memcopy_threads=self.memcopy_threads)\n else:\n self.plasma_client.put(value, object_id=pyarrow.plasma.ObjectID(object_id.binary()), memcopy_threads=self.memcopy_threads, serialization_context=self.get_serialization_context(self.task_driver_id))\n break\n except pyarrow.SerializationCallbackError as e:\n try:\n register_custom_serializer(type(e.example_object), use_dict=True)\n warning_message = 'WARNING: Serializing objects of type {} by expanding them as dictionaries of their fields. This behavior may be incorrect in some cases.'.format(type(e.example_object))\n logger.debug(warning_message)\n except (serialization.RayNotDictionarySerializable, serialization.CloudPickleError, pickle.pickle.PicklingError, Exception):\n try:\n register_custom_serializer(type(e.example_object), use_pickle=True)\n warning_message = 'WARNING: Falling back to serializing objects of type {} by using pickle. This may be inefficient.'.format(type(e.example_object))\n logger.warning(warning_message)\n except serialization.CloudPickleError:\n register_custom_serializer(type(e.example_object), use_pickle=True, local=True)\n warning_message = 'WARNING: Pickling the class {} failed, so we are using pickle and only registering the class locally.'.format(type(e.example_object))\n logger.warning(warning_message)", "docstring": "Store an object and attempt to register its class if needed.\n\nArgs:\nobject_id: The ID of the object to store.\nvalue: The value to put in the object store.\ndepth: The maximum number of classes to recursively register.\n\nRaises:\nException: An exception is raised if the attempt to store the\nobject fails. 
This can happen if there is already an object\nwith the same ID in the object store or if the object store is\nfull.", "source": "codesearchnet"} {"code": "def poisson_source(rate, iterable, target):\n \n if rate <= 0.0:\n raise ValueError(\"poisson_source rate {} is not positive\".format(rate))\n\n it = iter(iterable)\n for item in it:\n duration = random.expovariate(rate)\n sleep(duration)\n try:\n target.send(item)\n except StopIteration:\n return prepend(item, it)\n return empty_iter()", "docstring": "Send events at random times with uniform probability.\n\nArgs:\nrate: The average number of events to send per second.\niterable: A series of items which will be sent to the target one by one.\ntarget: The target coroutine or sink.\n\nReturns:\nAn iterator over any remaining items.", "source": "juraj-google-style"} {"code": "def delete_row_range(self, format_str, start_game, end_game):\n row_keys = make_single_array(self.tf_table.keys_by_range_dataset(format_str.format(start_game), format_str.format(end_game)))\n row_keys = list(row_keys)\n if (not row_keys):\n utils.dbg(('No rows left for games %d..%d' % (start_game, end_game)))\n return\n utils.dbg(('Deleting %d rows: %s..%s' % (len(row_keys), row_keys[0], row_keys[(- 1)])))\n row_keys.reverse()\n total_keys = len(row_keys)\n utils.dbg(('Deleting total of %d keys' % total_keys))\n concurrency = min(MAX_BT_CONCURRENCY, (multiprocessing.cpu_count() * 2))\n with multiprocessing.Pool(processes=concurrency) as pool:\n batches = []\n with tqdm(desc='Keys', unit_scale=2, total=total_keys) as pbar:\n for b in utils.iter_chunks(bigtable.row.MAX_MUTATIONS, row_keys):\n pbar.update(len(b))\n batches.append((self.btspec, b))\n if (len(batches) >= concurrency):\n pool.map(_delete_rows, batches)\n batches = []\n pool.map(_delete_rows, batches)\n batches = []", "docstring": "Delete rows related to the given game range.\n\nArgs:\nformat_str: a string to `.format()` by the game numbers\nin order to create the row prefixes.\nstart_game: the starting game number of the deletion.\nend_game: the ending game number of the deletion.", "source": "codesearchnet"} {"code": "def AddNewSpecification(self, identifier):\n if (identifier in self._format_specifications):\n raise KeyError('Format specification {0:s} is already defined in store.'.format(identifier))\n self._format_specifications[identifier] = FormatSpecification(identifier)\n return self._format_specifications[identifier]", "docstring": "Adds a new format specification.\n\nArgs:\nidentifier (str): format identifier, which should be unique for the store.\n\nReturns:\nFormatSpecification: format specification.\n\nRaises:\nKeyError: if the store already contains a specification with\nthe same identifier.", "source": "codesearchnet"} {"code": "def calculate_timeout(http_date):\n \n try:\n return int(http_date)\n except ValueError:\n date_after = parse(http_date)\n utc_now = datetime.now(tz=timezone.utc)\n return int((date_after - utc_now).total_seconds())", "docstring": "Extract request timeout from e.g. ``Retry-After`` header.\n\nNotes:\nPer :rfc:`2616#section-14.37`, the ``Retry-After`` header can\nbe either an integer number of seconds or an HTTP date. 
This\nfunction can handle either.\n\nArguments:\nhttp_date (:py:class:`str`): The date to parse.\n\nReturns:\n:py:class:`int`: The timeout, in seconds.", "source": "juraj-google-style"} {"code": "def all_tokens(self, delimiter=' ', label_list_ids=None):\n tokens = set()\n for label_list in self.label_lists.values():\n if ((label_list_ids is None) or (label_list.idx in label_list_ids)):\n tokens = tokens.union(label_list.all_tokens(delimiter=delimiter))\n return tokens", "docstring": "Return a list of all tokens occurring in\none of the labels in the label-lists.\n\nArgs:\ndelimiter (str): The delimiter used to split labels into tokens\n(see :meth:`audiomate.annotations.Label.tokenized`).\nlabel_list_ids (list): If not None, only labels from label-lists with\nan idx contained in this list are considered.\n\nReturns:\n:class:`set`: A set of distinct tokens.", "source": "codesearchnet"} {"code": "def send_event(self, action, properties, event_severity=EVENT_SEVERITY):\n \n \n event_properties = dict() if (properties is None) else properties\n if type(event_properties) is not dict:\n raise TypeError('properties is not dict')\n\n \n event_bunch = Bunch(\n Product=self.product_name,\n Version=self.product_version,\n Server=self.server_name,\n Platform=self.platform,\n Action=action,\n Properties=event_properties)\n event_description = self._get_description_prefix() + \\\n json.dumps(event_bunch)\n\n use_custom_event = True\n if CSS_PRODUCT_EVENT in dir(self.xcli.cmd):\n try:\n \n log.debug(\"sending css_product_event \"\n \"description=%s severity=%s\",\n event_description, event_severity)\n self.xcli.cmd.css_product_event(severity=event_severity,\n product=self.product_name,\n version=self.product_version,\n server=self.server_name,\n platform=self.platform,\n action=action,\n properties=event_properties)\n use_custom_event = False\n except (UnrecognizedCommandError,\n OperationForbiddenForUserCategoryError):\n log.warning(\"failed css_product_event \"\n \"description=%s severity=%s\",\n event_description, event_severity)\n if use_custom_event:\n \n log.debug(\"sending custom_event description=%s severity=%s\",\n event_description, event_severity)\n self.xcli.cmd.custom_event(\n description=event_description, severity=event_severity)", "docstring": "send css_event and if fails send custom_event instead\nArgs:\naction (ACTIONS): the action causing the event\nproperties (dict): the action additional properties\nevent_severity (string): the event severity\nRaises:\nXCLIError: if the xcli.cmd.custom_event failed\nKeyError: if action wasn't predefined\nTypeError: if properties is not None or dict", "source": "juraj-google-style"} {"code": "def serialize_ndarray_readable(o):\n \n return dict(\n _type='np.ndarray',\n dtype=o.dtype,\n value=hint_tuples(o.tolist()))", "docstring": "Serializes a :obj:`numpy.ndarray` in a human-readable format.\n\nArgs:\no (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.\n\nReturns:\nA dictionary that can be passed to :obj:`json.dumps`.", "source": "juraj-google-style"} {"code": "def _GetMountpointBlacklist(xdev):\n if (xdev == rdf_file_finder.FileFinderArgs.XDev.NEVER):\n return _GetMountpoints(only_physical=False)\n if (xdev == rdf_file_finder.FileFinderArgs.XDev.LOCAL):\n physical = _GetMountpoints(only_physical=True)\n return (_GetMountpoints(only_physical=False) - physical)\n if (xdev == rdf_file_finder.FileFinderArgs.XDev.ALWAYS):\n return set()\n raise ValueError(('Incorrect `xdev` value: %s' % xdev))", "docstring": "Builds a list of mountpoints to ignore 
during recursive searches.\n\nArgs:\nxdev: A `XDev` value that determines policy for crossing device boundaries.\n\nReturns:\nA set of mountpoints to ignore.\n\nRaises:\nValueError: If `xdev` value is invalid.", "source": "codesearchnet"} {"code": "def reflect(self, name):\n result = None\n for scope in reversed(self.scopes):\n try:\n if isinstance(scope, type):\n result = structured.reflect_static_member(scope, name)\n else:\n result = structured.reflect_runtime_member(scope, name)\n if (result is not None):\n return result\n except (NotImplementedError, KeyError, AttributeError):\n continue\n return protocol.AnyType", "docstring": "Reflect 'name' starting with local scope all the way up to global.\n\nThis method will attempt both static and runtime reflection. This is the\nrecommended way of using reflection.\n\nReturns:\nType of 'name', or protocol.AnyType.\n\nCaveat:\nThe type of 'name' does not necessarily have to be an instance of\nPython's type - it depends on what the host application returns\nthrough the reflection API. For example, Rekall uses objects\ngenerated at runtime to simulate a native (C/C++) type system.", "source": "codesearchnet"} {"code": "def __init__(self, parent=None, **kwargs):\n \n if not parent:\n raise ValueError('Missing parent value.')\n\n super(GzipPathSpec, self).__init__(parent=parent, **kwargs)", "docstring": "Initializes a path specification.\n\nNote that the gzip file path specification must have a parent.\n\nArgs:\nparent (Optional[PathSpec]): parent path specification.\n\nRaises:\nValueError: when parent is not set.", "source": "juraj-google-style"} {"code": "def send(self,message,message_type,topic=''):\n \n if message_type == RAW:\n self._sock.send(message)\n elif message_type == PYOBJ:\n self._sock.send_pyobj(message)\n elif message_type == JSON:\n self._sock.send_json(message)\n elif message_type == MULTIPART:\n self._sock.send_multipart([topic, message])\n elif message_type == STRING:\n self._sock.send_string(message)\n elif message_type == UNICODE:\n self._sock.send_unicode(message)\n else:\n raise Exception(\"Unknown message type %s\"%(message_type,))", "docstring": "Send the message on the socket.\n\nArgs:\n- message: the message to publish\n- message_type: the type of message being sent\n- topic: the topic on which to send the message. Defaults to ''.", "source": "juraj-google-style"} {"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)\n if token_ids_1 is not None:\n output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)\n return output", "docstring": "Creates a mask from the two sequences passed to be used in a sequence-pair classification task. 
An ALBERT\nsequence pair mask has the following format:\n\n```\n0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n| first sequence | second sequence |\n```\n\nif token_ids_1 is None, only returns the first portion of the mask (0s).\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of ids.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).", "source": "github-repos"} {"code": "def enable(self):\n core_napps_manager = CoreNAppsManager(base_path=self._enabled)\n core_napps_manager.enable(self.user, self.napp)", "docstring": "Enable a NApp if not already enabled.\n\nRaises:\nFileNotFoundError: If NApp is not installed.\nPermissionError: No filesystem permission to enable NApp.", "source": "codesearchnet"} {"code": "def QA_fetch_get_sz_margin(date):\n if (date in trade_date_sse):\n return pd.read_excel(_sz_url.format(date)).assign(date=date).assign(sse='sz')", "docstring": "return shenzhen margin data\n\nArguments:\ndate {str YYYY-MM-DD} -- date format\n\nReturns:\npandas.DataFrame -- res for margin data", "source": "codesearchnet"} {"code": "def print_versions(file: typing.TextIO = None) -> None:\n \n\n print('** QuantumFlow dependencies (> python -m quantumflow.meta) **')\n print('quantumflow \\t', qf.__version__, file=file)\n print('python \\t', sys.version[0:5], file=file)\n print('numpy \\t', np.__version__, file=file)\n print('networkx \\t', nx.__version__, file=file)\n print('cvxpy \\t', cvx.__version__, file=file)\n print('pyquil \\t', pyquil.__version__, file=file)\n\n print(bk.name, ' \\t', bk.version, '(BACKEND)', file=file)", "docstring": "Print version strings of currently installed dependencies\n\n``> python -m quantumflow.meta``\n\n\nArgs:\nfile: Output stream. 
Defaults to stdout.", "source": "juraj-google-style"} {"code": "def click(self, x, y):\n \n print 'click at', x, y\n self._input_left_mouse(x, y)", "docstring": "Simulate click within window screen.\n\nArgs:\nx, y: int, pixel distance from window (left, top) as origin\n\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def get_constraint_expressions(self) -> Tuple[column_expression_builder.ColumnExpressionBuilder, ...]:\n return self._constraints", "docstring": "Returns the constraints used to define the view.\n\nReturns:\nA homogeneous tuple of FHIRPath expressions used to constrain the view.", "source": "github-repos"} {"code": "def relpath(self):\n here = os.path.abspath(os.path.curdir)\n relpath = os.path.relpath(self.fpath, here)\n return relpath", "docstring": "Determine the relative path to this repository\n\nReturns:\nstr: relative path to this repository", "source": "codesearchnet"} {"code": "def destroy(self, prefix_names=None):\n \n if prefix_names is None:\n self.destroy(prefix_names=self.prefixes.keys())\n return\n\n for prefix_name in prefix_names:\n if prefix_name == 'current' and self.current in prefix_names:\n continue\n\n elif prefix_name == 'current':\n prefix_name = self.current\n\n self.get_prefix(prefix_name).destroy()\n self.prefixes.pop(prefix_name)\n if self.prefixes:\n self._update_current()\n\n if not self.prefixes:\n shutil.rmtree(self.path)", "docstring": "Destroy all the given prefixes and remove any left files if no more\nprefixes are left\n\nArgs:\nprefix_names(list of str): list of prefix names to destroy, if None\npassed (default) will destroy all of them", "source": "juraj-google-style"} {"code": "def get_dim_label(js_dict, dim, input=\"dataset\"):\n \n\n if input == 'dataset':\n input = js_dict['dimension'][dim]\n label_col = 'label'\n elif input == 'dimension':\n label_col = js_dict['label']\n input = js_dict\n else:\n raise ValueError\n\n try:\n dim_label = input['category']['label']\n\n except KeyError:\n dim_index = get_dim_index(js_dict, dim)\n dim_label = pd.concat([dim_index['id'],\n dim_index['id']],\n axis=1)\n dim_label.columns = ['id', 'label']\n else:\n dim_label = pd.DataFrame(list(zip(dim_label.keys(),\n dim_label.values())),\n index=dim_label.keys(),\n columns=['id', label_col])\n \n try:\n dim_index = input['category']['index']\n except KeyError:\n dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),\n index=[0],\n columns=['id', 'index'])\n else:\n if type(dim_index) is list:\n dim_index = pd.DataFrame(list(zip(dim_index,\n range(0, len(dim_index)))),\n index=dim_index, columns=['id', 'index'])\n else:\n dim_index = pd.DataFrame(list(zip(dim_index.keys(),\n dim_index.values())),\n index=dim_index.keys(),\n columns=['id', 'index'])\n dim_label = pd.merge(dim_label, dim_index, on='id').sort_index(by='index')\n return dim_label", "docstring": "Get label from a given dimension.\n\nArgs:\njs_dict (dict): dictionary containing dataset data and metadata.\ndim (string): dimension name obtained from JSON file.\n\nReturns:\ndim_label(pandas.DataFrame): DataFrame with label-based dimension data.", "source": "juraj-google-style"} {"code": "def get_product_trades(self, product_id, before='', after='', limit=None, result=None):\n return self._send_paginated_message('/products/{}/trades'.format(product_id))", "docstring": "List the latest trades for a product.\n\nThis method returns a generator which may make multiple HTTP requests\nwhile iterating through it.\n\nArgs:\nproduct_id (str): Product\nbefore (Optional[str]): start time 
in ISO 8601\nafter (Optional[str]): end time in ISO 8601\nlimit (Optional[int]): the desired number of trades (can be more than 100,\nautomatically paginated)\nresults (Optional[list]): list of results that is used for the pagination\nReturns:\nlist: Latest trades. Example::\n[{\n\"time\": \"2014-11-07T22:19:28.578544Z\",\n\"trade_id\": 74,\n\"price\": \"10.00000000\",\n\"size\": \"0.01000000\",\n\"side\": \"buy\"\n}, {\n\"time\": \"2014-11-07T01:08:43.642366Z\",\n\"trade_id\": 73,\n\"price\": \"100.00000000\",\n\"size\": \"0.01000000\",\n\"side\": \"sell\"\n}]", "source": "codesearchnet"} {"code": "def is_valid(self, value):\n \n try:\n if validation_on():\n self.validate(value, False)\n except ValueError:\n return False\n else:\n return True", "docstring": "Whether the value passes validation\n\nArgs:\nvalue (obj) : the value to validate against this property type\n\nReturns:\nTrue if valid, False otherwise", "source": "juraj-google-style"} {"code": "def jobs_get(self, job_id, project_id=None):\n if (project_id is None):\n project_id = self._project_id\n url = (Api._ENDPOINT + (Api._JOBS_PATH % (project_id, job_id)))\n return datalab.utils.Http.request(url, credentials=self._credentials)", "docstring": "Issues a request to retrieve information about a job.\n\nArgs:\njob_id: the id of the job\nproject_id: the project id to use to fetch the results; use None for the default project.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"} {"code": "def get_webconfiguration_settings(name, settings, location=''):\n ret = {}\n ps_cmd = []\n ps_cmd_validate = []\n if (not settings):\n log.warning('No settings provided')\n return ret\n settings = _prepare_settings(name, settings)\n ps_cmd.append('$Settings = New-Object System.Collections.ArrayList;')\n for setting in settings:\n ps_cmd_validate.extend(['Get-WebConfigurationProperty', '-PSPath', \"'{0}'\".format(name), '-Filter', \"'{0}'\".format(setting['filter']), '-Name', \"'{0}'\".format(setting['name']), '-Location', \"'{0}'\".format(location), '-ErrorAction', 'Stop', '|', 'Out-Null;'])\n ps_cmd.append(\"$Property = Get-WebConfigurationProperty -PSPath '{0}'\".format(name))\n ps_cmd.append(\"-Name '{0}' -Filter '{1}' -Location '{2}' -ErrorAction Stop;\".format(setting['name'], setting['filter'], location))\n if (setting['name'].split('.')[(- 1)] == 'Collection'):\n if ('value' in setting):\n ps_cmd.append('$Property = $Property | select -Property {0} ;'.format(','.join(list(setting['value'][0].keys()))))\n ps_cmd.append(\"$Settings.add(@{{filter='{0}';name='{1}';location='{2}';value=[System.Collections.ArrayList] @($Property)}})| Out-Null;\".format(setting['filter'], setting['name'], location))\n else:\n ps_cmd.append('if (([String]::IsNullOrEmpty($Property) -eq $False) -and')\n ps_cmd.append(\"($Property.GetType()).Name -eq 'ConfigurationAttribute') {\")\n ps_cmd.append('$Property = $Property | Select-Object')\n ps_cmd.append('-ExpandProperty Value };')\n ps_cmd.append(\"$Settings.add(@{{filter='{0}';name='{1}';location='{2}';value=[String] $Property}})| Out-Null;\".format(setting['filter'], setting['name'], location))\n ps_cmd.append('$Property = $Null;')\n cmd_ret = _srvmgr(cmd=ps_cmd_validate, return_json=True)\n if (cmd_ret['retcode'] != 0):\n message = 'One or more invalid property names were specified for the provided container.'\n raise SaltInvocationError(message)\n ps_cmd.append('$Settings')\n cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)\n try:\n ret = 
salt.utils.json.loads(cmd_ret['stdout'], strict=False)\n except ValueError:\n raise CommandExecutionError('Unable to parse return data as Json.')\n return ret", "docstring": "r'''\nGet the webconfiguration settings for the IIS PSPath.\n\nArgs:\nname (str): The PSPath of the IIS webconfiguration settings.\nsettings (list): A list of dictionaries containing setting name and filter.\nlocation (str): The location of the settings (optional)\n\nReturns:\ndict: A list of dictionaries containing setting name, filter and value.\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.get_webconfiguration_settings name='IIS:\\' settings=\"[{'name': 'enabled', 'filter': 'system.webServer/security/authentication/anonymousAuthentication'}]\"", "source": "codesearchnet"} {"code": "def load_imgs(filenames, masker, nan_to_num=True):\n \n if isinstance(filenames, string_types):\n filenames = [filenames]\n data = np.zeros((masker.n_vox_in_mask, len(filenames)))\n for i, f in enumerate(filenames):\n data[:, i] = masker.mask(f, nan_to_num)\n return data", "docstring": "Load multiple images from file into an ndarray.\n\nArgs:\nfilenames: A single filename or list of filenames pointing to valid\nimages.\nmasker: A Masker instance.\nnan_to_num: Optional boolean indicating whether to convert NaNs to zero.\n\nReturns:\nAn m x n 2D numpy array, where m = number of voxels in mask and\nn = number of images passed.", "source": "juraj-google-style"} {"code": "def queue_scan_command(self, server_info: ServerConnectivityInfo, scan_command: PluginScanCommand) -> None:\n self._check_and_create_process(server_info.hostname)\n self._queued_tasks_nb += 1\n if scan_command.is_aggressive:\n self._hostname_queues_dict[server_info.hostname].put((server_info, scan_command))\n else:\n self._task_queue.put((server_info, scan_command))", "docstring": "Queue a scan command targeting a specific server.\n\nArgs:\nserver_info: The server's connectivity information. The test_connectivity_to_server() method must have been\ncalled first to ensure that the server is online and accessible.\nscan_command: The scan command to run against this server.", "source": "codesearchnet"} {"code": "def tsv_pairs_to_dict(line: str, key_lower: bool = True) -> Dict[str, str]:\n r\n items = line.split(\"\\t\")\n d = {} \n for chunk in chunks(items, 2):\n if len(chunk) < 2:\n log.warning(\"Bad chunk, not of length 2: {!r}\", chunk)\n continue\n key = chunk[0]\n value = unescape_tabs_newlines(chunk[1])\n if key_lower:\n key = key.lower()\n d[key] = value\n return d", "docstring": "r\"\"\"\nConverts a TSV line into sequential key/value pairs as a dictionary.\n\nFor example,\n\n.. code-block:: none\n\nfield1\\tvalue1\\tfield2\\tvalue2\n\nbecomes\n\n.. code-block:: none\n\n{\"field1\": \"value1\", \"field2\": \"value2\"}\n\nArgs:\nline: the line\nkey_lower: should the keys be forced to lower case?", "source": "juraj-google-style"} {"code": "def get_value(self, tau):\n \n tau = np.asarray(tau)\n (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,\n beta_complex_real, beta_complex_imag) = self.coefficients\n k = get_kernel_value(\n alpha_real, beta_real,\n alpha_complex_real, alpha_complex_imag,\n beta_complex_real, beta_complex_imag,\n tau.flatten(),\n )\n return np.asarray(k).reshape(tau.shape)", "docstring": "Compute the value of the term for an array of lags\n\nArgs:\ntau (array[...]): An array of lags where the term should be\nevaluated.\n\nReturns:\nThe value of the term for each ``tau``. 
This will have the same\nshape as ``tau``.", "source": "juraj-google-style"} {"code": "async def populate_projects(self, force=False):\n \n if force or not self.projects:\n with tempfile.TemporaryDirectory() as tmpdirname:\n self.projects = await load_json_or_yaml_from_url(\n self, self.config['project_configuration_url'],\n os.path.join(tmpdirname, 'projects.yml')\n )", "docstring": "Download the ``projects.yml`` file and populate ``self.projects``.\n\nThis only sets it once, unless ``force`` is set.\n\nArgs:\nforce (bool, optional): Re-run the download, even if ``self.projects``\nis already defined. Defaults to False.", "source": "juraj-google-style"} {"code": "def notify_changes(self, changes):\n ret = []\n child_changes = {}\n for change in changes:\n self._add_child_change(change, child_changes)\n if self.update_requests:\n serialized = serialize_object(self.data)\n for request in self.update_requests:\n ret.append(request.update_response(serialized))\n if self.delta_requests:\n for change in changes:\n change[(- 1)] = serialize_object(change[(- 1)])\n for request in self.delta_requests:\n ret.append(request.delta_response(changes))\n for (name, child_changes) in child_changes.items():\n ret += self.children[name].notify_changes(child_changes)\n return ret", "docstring": "Set our data and notify anyone listening\n\nArgs:\nchanges (list): [[path, optional data]] where path is the path to\nwhat has changed, and data is the unserialized object that has\nchanged\n\nReturns:\nlist: [(callback, Response)] that need to be called", "source": "codesearchnet"} {"code": "def _format_neighbors(self, neighbor_type, non_ctrls, ctrls):\n lines = []\n font_attr_segs = {}\n lines.append('')\n lines.append(' %d %s(s) + %d control %s(s):' % (len(non_ctrls), neighbor_type, len(ctrls), neighbor_type))\n lines.append(' %d %s(s):' % (len(non_ctrls), neighbor_type))\n for non_ctrl in non_ctrls:\n line = ' [%s] %s' % (self._debug_dump.node_op_type(non_ctrl), non_ctrl)\n lines.append(line)\n font_attr_segs[len(lines) - 1] = [(len(line) - len(non_ctrl), len(line), debugger_cli_common.MenuItem(None, 'ni -a -d -t %s' % non_ctrl))]\n if ctrls:\n lines.append('')\n lines.append(' %d control %s(s):' % (len(ctrls), neighbor_type))\n for ctrl in ctrls:\n line = ' [%s] %s' % (self._debug_dump.node_op_type(ctrl), ctrl)\n lines.append(line)\n font_attr_segs[len(lines) - 1] = [(len(line) - len(ctrl), len(line), debugger_cli_common.MenuItem(None, 'ni -a -d -t %s' % ctrl))]\n return debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs)", "docstring": "List neighbors (inputs or recipients) of a node.\n\nArgs:\nneighbor_type: (\"input\" | \"recipient\")\nnon_ctrls: Non-control neighbor node names, as a list of str.\nctrls: Control neighbor node names, as a list of str.\n\nReturns:\nA RichTextLines object.", "source": "github-repos"} {"code": "def _get_type_name(type_):\n name = repr(type_)\n if name.startswith('<'):\n name = getattr(type_, '__qualname__', getattr(type_, '__name__', ''))\n return (name.rsplit('.', 1)[(- 1)] or repr(type_))", "docstring": "Return a displayable name for the type.\n\nArgs:\ntype_: A class object.\n\nReturns:\nA string value describing the class name that can be used in a natural\nlanguage sentence.", "source": "codesearchnet"} {"code": "def walk(self, action, user_data=None):\n action(self.index_file, self.__root, 0, user_data)\n self.__do_walk(self.__root, 1, action, user_data)", "docstring": "Walk the hierarchy, applying action to each filename.\n\nArgs:\naction: callable, the 
callable to invoke for each filename,\nwill be invoked with the filename, the subfiles, and\nthe level in the sitemap.", "source": "codesearchnet"} {"code": "def open(in_file, in_fmt=None):\n \n fmt = in_file.split('.')[-1]\n if in_fmt:\n fmt = in_fmt\n fmt = fmt.lower()\n\n if fmt in ['png', 'jpg', 'tiff', 'tif', 'jpeg']:\n return Image.open(in_file)\n else:\n raise NotImplementedError(\"Cannot open file of type {fmt}\".format(fmt))", "docstring": "Reads in a file from disk.\n\nArguments:\nin_file: The name of the file to read in\nin_fmt: The format of in_file, if you want to be explicit\n\nReturns:\nnumpy.ndarray", "source": "juraj-google-style"} {"code": "def DEFINE_string(flag_name, default_value, docstring, required=False): \n \n _define_helper(flag_name, default_value, docstring, str, required)", "docstring": "Defines a flag of type 'string'.\nArgs:\nflag_name: The name of the flag as a string.\ndefault_value: The default value the flag should take as a string.\ndocstring: A helpful message explaining the use of the flag.", "source": "juraj-google-style"} {"code": "def lower_bound(fm, nr_subs=None, nr_imgs=None, scale_factor=1):\n nr_subs_total = len(np.unique(fm.SUBJECTINDEX))\n if (nr_subs is None):\n nr_subs = (nr_subs_total - 1)\n assert (nr_subs < nr_subs_total)\n sb_scores = []\n for measure in range(len(measures.scores)):\n res_dict = {}\n result_vectors = [(np.empty(nr_subs_total) + np.nan) for _ in np.unique(fm.category)]\n res_dict.update(list(zip(np.unique(fm.category), result_vectors)))\n sb_scores.append(res_dict)\n for fm_cat in fm.by_field('category'):\n cat = fm_cat.category[0]\n nr_imgs_cat = len(np.unique(fm_cat.filenumber))\n if (not nr_imgs):\n nr_imgs_current = (nr_imgs_cat - 1)\n else:\n nr_imgs_current = nr_imgs\n assert (nr_imgs_current < nr_imgs_cat)\n for (sub_counter, sub) in enumerate(np.unique(fm.SUBJECTINDEX)):\n image_scores = []\n for fm_single in fm_cat.by_field('filenumber'):\n fn = fm_single.filenumber[0]\n predicting_subs = np.setdiff1d(np.unique(fm_cat.SUBJECTINDEX), [sub])\n np.random.shuffle(predicting_subs)\n predicting_subs = predicting_subs[0:nr_subs]\n predicting_fns = np.setdiff1d(np.unique(fm_cat.filenumber), [fn])\n np.random.shuffle(predicting_fns)\n predicting_fns = predicting_fns[0:nr_imgs_current]\n predicting_fm = fm_cat[(ismember(fm_cat.SUBJECTINDEX, predicting_subs) & ismember(fm_cat.filenumber, predicting_fns))]\n predicted_fm = fm_single[(fm_single.SUBJECTINDEX == sub)]\n try:\n predicting_fdm = compute_fdm(predicting_fm, scale_factor=scale_factor)\n except RuntimeError:\n predicting_fdm = None\n image_scores.append(measures.prediction_scores(predicting_fdm, predicted_fm))\n for (measure, score) in enumerate(nanmean(image_scores, 0)):\n sb_scores[measure][cat][sub_counter] = score\n return sb_scores", "docstring": "Compute the spatial bias lower bound for a fixmat.\n\nInput:\nfm : a fixmat instance\nnr_subs : the number of subjects used for the prediction. Defaults\nto the total number of subjects in the fixmat minus 1\nnr_imgs : the number of images used for prediction. If given, the\nsame number will be used for every category. If not given,\nleave-one-out will be used in all categories.\nscale_factor : the scale factor of the FDMs. Default is 1.\nReturns:\nA list of spatial bias scores; the list contains one dictionary for each\nmeasure. 
Each dictionary contains one key for each category and\ncorresponding values is an array with scores for each subject.", "source": "codesearchnet"} {"code": "def reload(self):\n utt_ids = sorted(self.utt_ids)\n if self.shuffle:\n self.rand.shuffle(utt_ids)\n partitions = []\n current_partition = PartitionInfo()\n for utt_id in utt_ids:\n utt_size = self.utt_sizes[utt_id]\n utt_lengths = self.utt_lengths[utt_id]\n if ((current_partition.size + utt_size) > self.partition_size):\n partitions.append(current_partition)\n current_partition = PartitionInfo()\n current_partition.utt_ids.append(utt_id)\n current_partition.utt_lengths.append(utt_lengths)\n current_partition.size += utt_size\n if (current_partition.size > 0):\n partitions.append(current_partition)\n self.partitions = partitions\n return self.partitions", "docstring": "Create a new partition scheme. A scheme defines which utterances are in which partition.\nThe scheme only changes after every call if ``self.shuffle == True``.\n\nReturns:\nlist: List of PartitionInfo objects, defining the new partitions (same as ``self.partitions``)", "source": "codesearchnet"} {"code": "def to_api_repr(self):\n resource = {self.entity_type: self.entity_id}\n if (self.role is not None):\n resource['role'] = self.role\n return resource", "docstring": "Construct the API resource representation of this access entry\n\nReturns:\nDict[str, object]: Access entry represented as an API resource", "source": "codesearchnet"} {"code": "def value_from_message(self, message):\n if (not isinstance(message, self.message_type)):\n raise DecodeError(('Expected type %s, got %s: %r' % (self.message_type.__name__, type(message).__name__, message)))\n return message", "docstring": "Convert a message to a value instance.\n\nUsed by deserializers to convert from underlying messages to\nvalue of expected user type.\n\nArgs:\nmessage: A message instance of type self.message_type.\n\nReturns:\nValue of self.message_type.", "source": "codesearchnet"} {"code": "def speech_recognition_bottom(x, model_hparams, vocab_size):\n del vocab_size\n inputs = x\n p = model_hparams\n num_mel_bins = p.audio_num_mel_bins\n num_channels = (3 if p.audio_add_delta_deltas else 1)\n with tf.variable_scope('speech_recognition_modality'):\n if p.audio_preproc_in_bottom:\n with tf.variable_scope('fbanks'):\n waveforms = tf.squeeze(inputs, [2, 3])\n mel_fbanks = common_audio.compute_mel_filterbank_features(waveforms, sample_rate=p.audio_sample_rate, dither=p.audio_dither, preemphasis=p.audio_preemphasis, frame_length=p.audio_frame_length, frame_step=p.audio_frame_step, lower_edge_hertz=p.audio_lower_edge_hertz, upper_edge_hertz=p.audio_upper_edge_hertz, num_mel_bins=p.audio_num_mel_bins, apply_mask=True)\n if p.audio_add_delta_deltas:\n mel_fbanks = common_audio.add_delta_deltas(mel_fbanks)\n x = tf.reshape(mel_fbanks, (common_layers.shape_list(mel_fbanks)[:2] + [num_mel_bins, num_channels]))\n nonpadding_mask = (1.0 - common_attention.embedding_to_padding(x))\n num_of_nonpadding_elements = ((tf.reduce_sum(nonpadding_mask) * num_mel_bins) * num_channels)\n var_epsilon = 1e-09\n mean = (tf.reduce_sum(x, axis=[1], keepdims=True) / num_of_nonpadding_elements)\n variance = ((((num_of_nonpadding_elements * (mean ** 2.0)) - ((2.0 * mean) * tf.reduce_sum(x, axis=[1], keepdims=True))) + tf.reduce_sum((x ** 2), axis=[1], keepdims=True)) / num_of_nonpadding_elements)\n x = (((x - mean) * tf.rsqrt((variance + var_epsilon))) * tf.expand_dims(nonpadding_mask, (- 1)))\n else:\n x = inputs\n x.set_shape([None, None, 
num_mel_bins, num_channels])\n x = tf.pad(x, [[0, 0], [0, 8], [0, 0], [0, 0]])\n for _ in range(2):\n x = tf.layers.conv2d(x, 128, (3, 3), (2, 2), use_bias=False)\n x = common_layers.layer_norm(x)\n x = tf.nn.relu(x)\n xshape = common_layers.shape_list(x)\n x = tf.pad(x, [[0, 0], [0, 2], [0, 0], [0, 0]])\n x = tf.layers.conv2d(x, p.hidden_size, (3, xshape[2]), use_bias=False)\n assert (common_layers.shape_list(x)[2] == 1)\n x = common_layers.layer_norm(x)\n x = tf.nn.relu(x)\n return x", "docstring": "Use batchnorm instead of CMVN and shorten the stft with strided convs.\n\nArgs:\nx: float32 tensor with shape [batch_size, len, 1, freqs * channels]\nmodel_hparams: HParams, model hyperparmeters.\nvocab_size: int, vocabulary size.\n\nReturns:\nfloat32 tensor with shape [batch_size, shorter_len, 1, hidden_size]", "source": "codesearchnet"} {"code": "def parse_query(self, query, index, stop_current, shuffle):\n if ((index is not None) and (len(self.queue) > 0)):\n if ((index < 0) or (index >= len(self.queue))):\n if (len(self.queue) == 1):\n self.statuslog.error('Play index must be 1 (1 song in queue)')\n return\n else:\n self.statuslog.error('Play index must be between 1 and {}'.format(len(self.queue)))\n return\n try:\n yt_videos = api_music.parse_query(query, self.statuslog)\n if shuffle:\n random.shuffle(yt_videos)\n if (len(yt_videos) == 0):\n self.statuslog.error('No results for: {}'.format(query))\n return\n if (index is None):\n self.queue = (self.queue + yt_videos)\n elif (len(self.queue) > 0):\n self.queue = ((self.queue[:index] + yt_videos) + self.queue[index:])\n else:\n self.queue = yt_videos\n self.update_queue()\n if stop_current:\n if self.streamer:\n self.streamer.stop()\n except Exception as e:\n logger.exception(e)", "docstring": "Parses a query and adds it to the queue\n\nArgs:\nquery (str): Either a search term or a link\nindex (int): The index to enqueue at (None for end)\nstop_current (bool): Whether to stop the current song after the songs are queued\nshuffle (bool): Whether to shuffle the added songs", "source": "codesearchnet"} {"code": "def roulette(weights, n):\n if (n > len(weights)):\n raise Exception(\"Can't choose {} samples from {} items\".format(n, len(weights)))\n if any(map((lambda w: (w <= 0)), weights.values())):\n raise Exception(\"The weight can't be a non-positive number.\")\n items = weights.items()\n chosen = set()\n for i in range(n):\n total = sum(list(zip(*items))[1])\n dice = (random.random() * total)\n running_weight = 0\n chosen_item = None\n for (item, weight) in items:\n if (dice < (running_weight + weight)):\n chosen_item = item\n break\n running_weight += weight\n chosen.add(chosen_item)\n items = [(i, w) for (i, w) in items if (i != chosen_item)]\n return list(chosen)", "docstring": "Choose randomly the given number of items. The probability the item is\nchosen is proportionate to its weight.\n\n.. testsetup::\n\nimport random\nfrom proso.rand import roulette\n\nrandom.seed(1)\n\n.. testcode::\n\nprint(roulette({'cat': 2, 'dog': 1000}, 1))\n\n.. 
testoutput::\n\n['dog']\n\nArgs:\nweights (dict): item -> weight mapping, non-positive weights are forbidden\nn (int): number of chosen items\n\nReturns:\nlist: randomly chosen items", "source": "codesearchnet"} {"code": "def saml_metadata(self, client_id):\n \n\n return self.get(url='https:\n client_id))", "docstring": "Get SAML2.0 Metadata.\n\nArgs:\nclient_id (str): Client Id of the application to get the SAML metadata for.", "source": "juraj-google-style"} {"code": "def receiveds_parsing(receiveds):\n parsed = []\n receiveds = [re.sub(JUNK_PATTERN, ' ', i).strip() for i in receiveds]\n n = len(receiveds)\n log.debug('Nr. of receiveds. {}'.format(n))\n for (idx, received) in enumerate(receiveds):\n log.debug('Parsing received {}/{}'.format((idx + 1), n))\n log.debug('Try to parse {!r}'.format(received))\n try:\n values_by_clause = parse_received(received)\n except MailParserReceivedParsingError:\n parsed.append({'raw': received})\n else:\n parsed.append(values_by_clause)\n log.debug(('len(receiveds) %s, len(parsed) %s' % (len(receiveds), len(parsed))))\n if (len(receiveds) != len(parsed)):\n log.error(('len(receiveds): %s, len(parsed): %s, receiveds: %s, parsed: %s' % (len(receiveds), len(parsed), receiveds, parsed)))\n return receiveds_not_parsed(receiveds)\n else:\n return receiveds_format(parsed)", "docstring": "This function parses the receiveds headers.\n\nArgs:\nreceiveds (list): list of raw receiveds headers\n\nReturns:\na list of parsed receiveds headers with first hop in first position", "source": "codesearchnet"} {"code": "def _testReduceJoin(self, input_array, truth, truth_shape, axis, keep_dims=False, separator=''):\n with self.cached_session():\n output = string_ops.reduce_join(inputs=input_array, axis=axis, keep_dims=keep_dims, separator=separator)\n output_array = self.evaluate(output)\n self.assertAllEqualUnicode(truth, output_array)\n self.assertAllEqual(truth_shape, output.get_shape())", "docstring": "Compares the output of reduce_join to an expected result.\n\nArgs:\ninput_array: The string input to be joined.\ntruth: An array or np.array of the expected result.\ntruth_shape: An array or np.array of the expected shape.\naxis: The indices to reduce over.\nkeep_dims: Whether or not to retain reduced dimensions.\nseparator: The separator to use for joining.", "source": "github-repos"} {"code": "def plotpsd(data, dt, ndivide=1, window=hanning, overlap_half=False, ax=None, **kwargs):\n \n if ax is None:\n ax = plt.gca()\n vk, psddata = psd(data, dt, ndivide, window, overlap_half)\n ax.loglog(vk, psddata, **kwargs)\n ax.set_xlabel('Frequency [Hz]')\n ax.set_ylabel('PSD')\n ax.legend()", "docstring": "Plot PSD (Power Spectral Density).\n\nArgs:\ndata (np.ndarray): Input data.\ndt (float): Time between each data.\nndivide (int): Do averaging (split data into ndivide, get psd of each, and average them).\noverlap_half (bool): Split data to half-overlapped regions.\nax (matplotlib.axes): Axis the figure is plotted on.\nkwargs (optional): Plot options passed to ax.plot().", "source": "juraj-google-style"} {"code": "def convert_formula_to_atomic_fractions(formula):\n \n mole_fractions = {}\n total_mole_fraction = 0.0\n\n for match in CHEMICAL_FORMULA_PATTERN.finditer(formula):\n symbol, mole_fraction = match.groups()\n\n z = pyxray.element_atomic_number(symbol.strip())\n\n if mole_fraction == '':\n mole_fraction = 1.0\n mole_fraction = float(mole_fraction)\n\n mole_fraction = float(mole_fraction)\n mole_fractions[z] = mole_fraction\n total_mole_fraction += mole_fraction\n\n \n 
atomic_fractions = {}\n for z, mole_fraction in mole_fractions.items():\n atomic_fractions[z] = mole_fraction / total_mole_fraction\n\n return atomic_fractions", "docstring": "Converts a chemical formula to an atomic fraction :class:`dict`.\n\nArgs:\nformula (str): chemical formula, like Al2O3. No wildcard are accepted.", "source": "juraj-google-style"} {"code": "def _instantiate_data_type(self, data_type_class, data_type_args, loc):\n assert issubclass(data_type_class, DataType), ('Expected stone.data_type.DataType, got %r' % data_type_class)\n argspec = inspect.getargspec(data_type_class.__init__)\n argspec.args.remove('self')\n num_args = len(argspec.args)\n num_defaults = len((argspec.defaults or ()))\n (pos_args, kw_args) = data_type_args\n if ((num_args - num_defaults) > len(pos_args)):\n raise InvalidSpec(('Missing positional argument %s for %s type' % (quote(argspec.args[len(pos_args)]), quote(data_type_class.__name__))), *loc)\n elif ((num_args - num_defaults) < len(pos_args)):\n raise InvalidSpec(('Too many positional arguments for %s type' % quote(data_type_class.__name__)), *loc)\n args = {}\n for (i, key) in enumerate(argspec.args):\n args[key] = (i >= (num_args - num_defaults))\n for key in kw_args:\n if (key not in args):\n raise InvalidSpec(('Unknown argument %s to %s type.' % (quote(key), quote(data_type_class.__name__))), *loc)\n if (not args[key]):\n raise InvalidSpec(('Positional argument %s cannot be specified as a keyword argument.' % quote(key)), *loc)\n del args[key]\n try:\n return data_type_class(*pos_args, **kw_args)\n except ParameterError as e:\n raise InvalidSpec(('Bad argument to %s type: %s' % (quote(data_type_class.__name__), e.args[0])), *loc)", "docstring": "Responsible for instantiating a data type with additional attributes.\nThis method ensures that the specified attributes are valid.\n\nArgs:\ndata_type_class (DataType): The class to instantiate.\ndata_type_attrs (dict): A map from str -> values of attributes.\nThese will be passed into the constructor of data_type_class\nas keyword arguments.\n\nReturns:\nstone.data_type.DataType: A parameterized instance.", "source": "codesearchnet"} {"code": "def _ensure_node_in_anf(self, parent, field, node):\n if node is None:\n return node\n if _is_trivial(node):\n return node\n if isinstance(node, list):\n return [self._ensure_node_in_anf(parent, field, n) for n in node]\n if isinstance(node, gast.keyword):\n node.value = self._ensure_node_in_anf(parent, field, node.value)\n return node\n if isinstance(node, (gast.Starred, gast.withitem, gast.slice)):\n return self._ensure_fields_in_anf(node, parent, field)\n if self._should_transform(parent, field, node):\n return self._do_transform_node(node)\n else:\n return node", "docstring": "Puts `node` in A-normal form, by replacing it with a variable if needed.\n\nThe exact definition of A-normal form is given by the configuration. The\nparent and the incoming field name are only needed because the configuration\nmay be context-dependent.\n\nArgs:\nparent: An AST node, the parent of `node`.\nfield: The field name under which `node` is the child of `parent`.\nnode: An AST node, potentially to be replaced with a variable reference.\n\nReturns:\nnode: An AST node; the argument if transformation was not necessary,\nor the new variable reference if it was.", "source": "github-repos"} {"code": "def register(self, alias, service_class, configs=None, start_service=True):\n \n if not inspect.isclass(service_class):\n raise Error(self._device, '\"%s\" is not a class!' 
% service_class)\n if not issubclass(service_class, base_service.BaseService):\n raise Error(\n self._device,\n 'Class %s is not a subclass of BaseService!' % service_class)\n if alias in self._service_objects:\n raise Error(\n self._device,\n 'A service is already registered with alias \"%s\".' % alias)\n service_obj = service_class(self._device, configs)\n if start_service:\n service_obj.start()\n self._service_objects[alias] = service_obj", "docstring": "Registers a service.\n\nThis will create a service instance, starts the service, and adds the\ninstance to the mananger.\n\nArgs:\nalias: string, the alias for this instance.\nservice_class: class, the service class to instantiate.\nconfigs: (optional) config object to pass to the service class's\nconstructor.\nstart_service: bool, whether to start the service instance or not.\nDefault is True.", "source": "juraj-google-style"} {"code": "def _dump_file_name_to_datum(self, dir_name, file_name):\n debug_dump_rel_path = os.path.join(os.path.relpath(dir_name, self._dump_root), file_name)\n return DebugTensorDatum(self._dump_root, debug_dump_rel_path)", "docstring": "Obtain a DebugTensorDatum from the directory and file name.\n\nArgs:\ndir_name: (`str`) Name of the directory in which the dump file resides.\nfile_name: (`str`) Base name of the dump file.\n\nReturns:\n(`DebugTensorDatum`) The `DebugTensorDatum` loaded from the dump file.", "source": "github-repos"} {"code": "def _ParseAnalysisPluginOptions(self, options):\n analysis_plugin_info = self._analysis_manager.GetAllPluginInformation()\n analysis_plugin_names = {name.lower() for (name, _, _) in analysis_plugin_info}\n analysis_plugins = self.ParseStringOption(options, 'analysis_plugins')\n if (not analysis_plugins):\n return\n requested_plugin_names = {name.strip().lower() for name in analysis_plugins.split(',')}\n difference = requested_plugin_names.difference(analysis_plugin_names)\n if difference:\n raise errors.BadConfigOption('Non-existent analysis plugins specified: {0:s}'.format(' '.join(difference)))\n self._analysis_plugins = self._GetAnalysisPlugins(analysis_plugins)\n for analysis_plugin in self._analysis_plugins:\n helpers_manager.ArgumentHelperManager.ParseOptions(options, analysis_plugin)", "docstring": "Parses the analysis plugin options.\n\nArgs:\noptions (argparse.Namespace): command line arguments.", "source": "codesearchnet"} {"code": "def normalize(self, mode=\"max\", value=1):\n \n if mode.lower() == \"sum\":\n factor = np.sum(self.y, axis=0)\n elif mode.lower() == \"max\":\n factor = np.max(self.y, axis=0)\n else:\n raise ValueError(\"Unsupported normalization mode %s!\" % mode)\n\n self.y /= factor / value", "docstring": "Normalize the spectrum with respect to the sum of intensity\n\nArgs:\nmode (str): Normalization mode. Supported modes are \"max\" (set the\nmax y value to value, e.g., in XRD patterns), \"sum\" (set the\nsum of y to a value, i.e., like a probability density).\nvalue (float): Value to normalize to. 
Defaults to 1.", "source": "juraj-google-style"} {"code": "def _launch_flow(self, client, name, args):\n flow = self._check_approval_wrapper(client, client.CreateFlow, name=name, args=args)\n flow_id = flow.flow_id\n print('{0:s}: Scheduled'.format(flow_id))\n if self.keepalive:\n keepalive_flow = client.CreateFlow(name='KeepAlive', args=flows_pb2.KeepAliveArgs())\n print('KeepAlive Flow:{0:s} scheduled'.format(keepalive_flow.flow_id))\n return flow_id", "docstring": "Create specified flow, setting KeepAlive if requested.\n\nArgs:\nclient: GRR Client object on which to launch the flow.\nname: string containing flow name.\nargs: proto (*FlowArgs) for type of flow, as defined in GRR flow proto.\n\nReturns:\nstring containing ID of launched flow", "source": "codesearchnet"} {"code": "def line_iter(xo: int, yo: int, xd: int, yd: int) -> Iterator[Tuple[(int, int)]]:\n data = ffi.new('TCOD_bresenham_data_t *')\n lib.TCOD_line_init_mt(xo, yo, xd, yd, data)\n x = ffi.new('int *')\n y = ffi.new('int *')\n (yield (xo, yo))\n while (not lib.TCOD_line_step_mt(x, y, data)):\n (yield (x[0], y[0]))", "docstring": "returns an Iterable\n\nThis Iterable does not include the origin point.\n\nArgs:\nxo (int): X starting point.\nyo (int): Y starting point.\nxd (int): X destination point.\nyd (int): Y destination point.\n\nReturns:\nIterable[Tuple[int,int]]: An Iterable of (x,y) points.", "source": "codesearchnet"} {"code": "def load_map_info(self, map_file):\n if (self.ensemble_name.upper() == 'SSEF'):\n (proj_dict, grid_dict) = read_arps_map_file(map_file)\n self.dx = int(grid_dict['dx'])\n mapping_data = make_proj_grids(proj_dict, grid_dict)\n for (m, v) in mapping_data.items():\n setattr(self, m, v)\n (self.i, self.j) = np.indices(self.lon.shape)\n self.proj = get_proj_obj(proj_dict)\n elif (self.ensemble_name.upper() in ['NCAR', 'NCARSTORM', 'HRRR', 'VSE', 'HREFV2']):\n (proj_dict, grid_dict) = read_ncar_map_file(map_file)\n if (self.member_name[0:7] == '1km_pbl'):\n grid_dict['dx'] = 1000\n grid_dict['dy'] = 1000\n grid_dict['sw_lon'] = 258.697\n grid_dict['sw_lat'] = 23.999\n grid_dict['ne_lon'] = 282.868269206236\n grid_dict['ne_lat'] = 36.4822338520542\n self.dx = int(grid_dict['dx'])\n mapping_data = make_proj_grids(proj_dict, grid_dict)\n for (m, v) in mapping_data.items():\n setattr(self, m, v)\n (self.i, self.j) = np.indices(self.lon.shape)\n self.proj = get_proj_obj(proj_dict)", "docstring": "Load map projection information and create latitude, longitude, x, y, i, and j grids for the projection.\n\nArgs:\nmap_file: File specifying the projection information.", "source": "codesearchnet"} {"code": "def create_position_ids_from_inputs_embeds(self, inputs_embeds):\n input_shape = shape_list(inputs_embeds)[:-1]\n sequence_length = input_shape[1]\n position_ids = tf.range(start=self.padding_idx + 1, limit=sequence_length + self.padding_idx + 1, dtype=tf.int64)\n return tf.broadcast_to(tf.expand_dims(position_ids, 0), input_shape)", "docstring": "We are provided embeddings directly. 
We cannot infer which are padded so just generate sequential position ids.\n\nArgs:\ninputs_embeds: tf.Tensor\n\nReturns: tf.Tensor", "source": "github-repos"} {"code": "def get_mask2former_resize_output_image_size(image: np.ndarray, size: Union[int, Tuple[int, int], List[int], Tuple[int]], max_size: Optional[int]=None, size_divisor: int=0, default_to_square: bool=True, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]:\n output_size = get_resize_output_image_size(input_image=image, size=size, default_to_square=default_to_square, max_size=max_size, input_data_format=input_data_format)\n if size_divisor > 0:\n height, width = output_size\n height = int(math.ceil(height / size_divisor) * size_divisor)\n width = int(math.ceil(width / size_divisor) * size_divisor)\n output_size = (height, width)\n return output_size", "docstring": "Computes the output size given the desired size.\n\nArgs:\nimage (`np.ndarray`):\nThe input image.\nsize (`int` or `Tuple[int, int]` or `List[int]` or `Tuple[int]`):\nThe size of the output image.\nmax_size (`int`, *optional*):\nThe maximum size of the output image.\nsize_divisor (`int`, *optional*, defaults to 0):\nIf `size_divisor` is given, the output image size will be divisible by the number.\ndefault_to_square (`bool`, *optional*, defaults to `True`):\nWhether to default to square if no size is provided.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. If unset, will use the inferred format from the input.\n\nReturns:\n`Tuple[int, int]`: The output size.", "source": "github-repos"} {"code": "def keywords_special_characters(keywords):\n invalid_chars = '!\"\n if any(((char in invalid_chars) for char in keywords)):\n raise ValidationError(MESSAGE_KEYWORD_SPECIAL_CHARS)", "docstring": "Confirms that the keywords don't contain special characters\n\nArgs:\nkeywords (str)\n\nRaises:\ndjango.forms.ValidationError", "source": "codesearchnet"} {"code": "def enable_type_check(enabled: bool=True) -> ContextManager[None]:\n return thread_local.thread_local_value_scope(_TLS_ENABLE_TYPE_CHECK, enabled, True)", "docstring": "Returns a context manager to enable or disable runtime type check.\n\n`enable_type_check` is thread-safe and can be nested. 
For example,\nin the following code, runtime type check with be `a` but not on `b`::\n\nwith pg.enable_type_check(False):\nwith pg.enable_type_check(True):\na = pg.Dict(x=1, value_spec=pg.typing.Dict([('x', pg.typing.Int())]))\nb = pg.Dict(y=1, value_spec=pg.typing.Dict([('x', pg.typing.Int())]))\n\n\nArgs:\nenabled: If True, enable runtime type check in current scope.\nOtherwise, disable runtime type check.\n\nReturns:\nA context manager for allowing/disallowing runtime type check.", "source": "github-repos"} {"code": "def set_column_count(self, count):\n current_row_count = self.row_count()\n current_column_count = self.column_count()\n if (count > current_column_count):\n cl = (TableEditableItem if self._editable else TableItem)\n for r_key in self.children.keys():\n row = self.children[r_key]\n for i in range(current_column_count, count):\n row.append(cl(), str(i))\n if self._editable:\n row.children[str(i)].onchange.connect(self.on_item_changed, int(r_key), int(i))\n self._update_first_row()\n elif (count < current_column_count):\n for row in self.children.values():\n for i in range(count, current_column_count):\n row.remove_child(row.children[str(i)])\n self._column_count = count", "docstring": "Sets the table column count.\n\nArgs:\ncount (int): column of rows", "source": "codesearchnet"} {"code": "def cache_connect(database=None):\n \n if database is None:\n database = cache_file()\n\n if os.path.isfile(database):\n \n conn = sqlite3.connect(database)\n else:\n \n conn = sqlite3.connect(database)\n conn.executescript(schema)\n\n with conn as cur:\n \n cur.execute(\"PRAGMA foreign_keys = ON;\")\n\n conn.row_factory = sqlite3.Row\n\n return conn", "docstring": "Returns a connection object to a sqlite database.\n\nArgs:\ndatabase (str, optional): The path to the database the user wishes\nto connect to. If not specified, a default is chosen using\n:func:`.cache_file`. 
If the special database name ':memory:'\nis given, then a temporary database is created in memory.\n\nReturns:\n:class:`sqlite3.Connection`", "source": "juraj-google-style"} {"code": "def parse_rdf_payload(self, data, headers):\n\n\t\t\n\n\t\t\n\t\tif headers['Content-Type'].startswith('text/plain'):\n\t\t\tlogger.debug('text/plain Content-Type detected, using application/n-triples for parser')\n\t\t\tparse_format = 'application/n-triples'\n\t\telse:\n\t\t\tparse_format = headers['Content-Type']\n\n\t\t\n\t\tif ';charset' in parse_format:\n\t\t\tparse_format = parse_format.split(';')[0]\n\n\t\t\n\t\tgraph = rdflib.Graph().parse(\n\t\t\tdata=data.decode('utf-8'),\n\t\t\tformat=parse_format)\n\n\t\t\n\t\treturn graph", "docstring": "small function to parse RDF payloads from various repository endpoints\n\nArgs:\ndata (response.data): data from requests response\nheaders (response.headers): headers from requests response\n\nReturns:\n(rdflib.Graph): parsed graph", "source": "juraj-google-style"} {"code": "def vc2tex(script, tex_name='TEMP3D_texture.png', tex_width=1024, tex_height=1024, overwrite_tex=False, assign_tex=False, fill_tex=True):\n filter_xml = ''.join([' \\n', ' \\n', ' \\n', ' \\n', ' \\n', ' \\n', ' \\n', ' \\n'])\n util.write_filter(script, filter_xml)\n return None", "docstring": "Transfer vertex colors to texture colors\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\ntex_name (str): The texture file to be created\ntex_width (int): The texture width\ntex_height (int): The texture height\noverwrite_tex (bool): If current mesh has a texture will be overwritten (with provided texture dimension)\nassign_tex (bool): Assign the newly created texture\nfill_tex (bool): If enabled the unmapped texture space is colored using a pull push filling algorithm, if false is set to black", "source": "codesearchnet"} {"code": "def create_and_register_access_db(filename: str,\n dsn: str,\n description: str) -> bool:\n \n fullfilename = os.path.abspath(filename)\n create_string = fullfilename + \" General\"\n \n return (create_user_dsn(access_driver, CREATE_DB=create_string) and\n register_access_db(filename, dsn, description))", "docstring": "(Windows only.)\nCreates a Microsoft Access database and registers it with ODBC.\n\nArgs:\nfilename: filename of the database to create\ndsn: ODBC data source name to create\ndescription: description of the database\n\nReturns:\nbool: was the DSN created?", "source": "juraj-google-style"} {"code": "def add_alias(self, alias, index):\n \n if index >= len(self._datasets):\n raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))\n self._aliases[alias] = index", "docstring": "Add an alias pointing to the specified index.\n\nArgs:\nalias (str): The alias that should point to the given index.\nindex (int): The index of the dataset for which an alias should be added.\n\nRaises:\nDataInvalidIndex: If the index does not represent a valid dataset.", "source": "juraj-google-style"} {"code": "def channel_interpolate(layer1, n_channel1, layer2, n_channel2):\n \n def inner(T):\n batch_n = T(layer1).get_shape().as_list()[0]\n arr1 = T(layer1)[..., n_channel1]\n arr2 = T(layer2)[..., n_channel2]\n weights = (np.arange(batch_n)/float(batch_n-1))\n S = 0\n for n in range(batch_n):\n S += (1-weights[n]) * tf.reduce_mean(arr1[n])\n S += weights[n] * tf.reduce_mean(arr2[n])\n return S\n return inner", "docstring": "Interpolate between layer1, n_channel1 and layer2, n_channel2.\n\nOptimize for a convex combination of 
layer1, n_channel1 and\nlayer2, n_channel2, transitioning across the batch.\n\nArgs:\nlayer1: layer to optimize 100% at batch=0.\nn_channel1: neuron index to optimize 100% at batch=0.\nlayer2: layer to optimize 100% at batch=N.\nn_channel2: neuron index to optimize 100% at batch=N.\n\nReturns:\nObjective", "source": "juraj-google-style"} {"code": "def __init__(self, config, verbose=True):\n \n self.best_model_found = None\n self.best_iteration = None\n self.best_score = None\n self.verbose = verbose\n\n self.checkpoint_best = config[\"checkpoint_best\"]\n self.checkpoint_every = config[\"checkpoint_every\"]\n self.checkpoint_metric = config[\"checkpoint_metric\"]\n self.checkpoint_metric_mode = config[\"checkpoint_metric_mode\"]\n self.checkpoint_dir = config[\"checkpoint_dir\"]\n self.checkpoint_runway = config[\"checkpoint_runway\"]\n\n \n if \"/\" not in self.checkpoint_metric:\n self.checkpoint_metric = \"valid/\" + self.checkpoint_metric\n\n \n if not os.path.exists(self.checkpoint_dir):\n os.makedirs(self.checkpoint_dir)\n\n \n if self.checkpoint_runway and verbose:\n print(\n f\"No checkpoints will be saved in the first \"\n f\"checkpoint_runway={self.checkpoint_runway} iterations.\"\n )", "docstring": "Saves checkpoints as applicable based on a reported metric.\n\nArgs:\ncheckpoint_runway (int): don't save any checkpoints for the first\nthis many iterations\ncheckpoint_dir (str): the directory for saving checkpoints", "source": "juraj-google-style"} {"code": "def _ComputeUniquifier(self, debuggee):\n \n uniquifier = hashlib.sha1()\n\n \n \n if ('minorversion' not in debuggee.get('labels', []) and\n 'sourceContexts' not in debuggee):\n uniquifier_computer.ComputeApplicationUniquifier(uniquifier)\n\n return uniquifier.hexdigest()", "docstring": "Computes debuggee uniquifier.\n\nThe debuggee uniquifier has to be identical on all instances. 
Therefore the\nuniquifier should not include any random numbers and should only be based\non inputs that are guaranteed to be the same on all instances.\n\nArgs:\ndebuggee: complete debuggee message without the uniquifier\n\nReturns:\nHex string of SHA1 hash of project information, debuggee labels and\ndebuglet version.", "source": "juraj-google-style"} {"code": "def _rpc(self, method, *args):\n with self._lock:\n apiid = next(self._counter)\n data = {'id': apiid, 'method': method, 'params': args}\n request = json.dumps(data)\n self._client_send(request)\n response = self._client_receive()\n if not response:\n raise ProtocolError(self._ad, ProtocolError.NO_RESPONSE_FROM_SERVER)\n result = json.loads(str(response, encoding='utf8'))\n if result['error']:\n raise ApiError(self._ad, result['error'])\n if result['id'] != apiid:\n raise ProtocolError(self._ad, ProtocolError.MISMATCHED_API_ID)\n if result.get('callback') is not None:\n if self._event_client is None:\n self._event_client = self._start_event_client()\n return callback_handler.CallbackHandler(callback_id=result['callback'], event_client=self._event_client, ret_value=result['result'], method_name=method, ad=self._ad)\n return result['result']", "docstring": "Sends an rpc to the app.\n\nArgs:\nmethod: str, The name of the method to execute.\nargs: any, The args of the method.\n\nReturns:\nThe result of the rpc.\n\nRaises:\nProtocolError: Something went wrong with the protocol.\nApiError: The rpc went through, however executed with errors.", "source": "github-repos"} {"code": "def cumany(series):\n \n\n anys = series.expanding().apply(np.any).astype(bool)\n return anys", "docstring": "Calculates cumulative any of values. Equivalent to\n`series.expanding().apply(np.any).astype(bool)`.\n\nArgs:\nseries: column to compute cumulative any for.", "source": "juraj-google-style"} {"code": "def get_description(self, description_type=DescriptionTypeEnum.FULL):\n try:\n if (self._parsed is False):\n parser = ExpressionParser(self._expression, self._options)\n self._expression_parts = parser.parse()\n self._parsed = True\n choices = {DescriptionTypeEnum.FULL: self.get_full_description, DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description, DescriptionTypeEnum.HOURS: self.get_hours_description, DescriptionTypeEnum.MINUTES: self.get_minutes_description, DescriptionTypeEnum.SECONDS: self.get_seconds_description, DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description, DescriptionTypeEnum.MONTH: self.get_month_description, DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description, DescriptionTypeEnum.YEAR: self.get_year_description}\n description = choices.get(description_type, self.get_seconds_description)()\n except Exception as ex:\n if self._options.throw_exception_on_parse_error:\n raise\n else:\n description = str(ex)\n return description", "docstring": "Generates a human readable string for the Cron Expression\n\nArgs:\ndescription_type: Which part(s) of the expression to describe\nReturns:\nThe cron expression description\nRaises:\nException: if throw_exception_on_parse_error is True", "source": "codesearchnet"} {"code": "def batch_norm(x, is_training, momentum, epsilon=1e-9,\n init_zero=False, name=None):\n \n with tf.variable_scope(name, default_name=\"batch_norm\", values=[x]):\n if init_zero:\n gamma_initializer = tf.zeros_initializer()\n else:\n gamma_initializer = tf.ones_initializer()\n\n norm_dim = x.shape.dims[0:3]\n reduced_shape = x.shape - norm_dim\n\n scale = mtf.get_variable(\n x.mesh,\n 
\"batch_norm_scale\",\n reduced_shape,\n initializer=gamma_initializer,\n activation_dtype=x.dtype)\n bias = mtf.get_variable(\n x.mesh,\n \"batch_norm_bias\",\n reduced_shape,\n initializer=tf.zeros_initializer(),\n activation_dtype=x.dtype)\n\n moving_mean = mtf.get_variable(\n x.mesh, \"moving_mean\", reduced_shape,\n initializer=tf.random_normal_initializer(stddev=1.0),\n activation_dtype=x.dtype,\n trainable=False)\n moving_variance = mtf.get_variable(\n x.mesh, \"moving_variance\",\n reduced_shape, initializer=tf.ones_initializer(),\n activation_dtype=x.dtype,\n trainable=False)\n\n \n \n if is_training:\n mean = mtf.reduce_mean(x, output_shape=reduced_shape)\n variance = mtf.reduce_mean(\n mtf.square(x - mean), output_shape=reduced_shape)\n\n norm_x = (x - mean) * mtf.rsqrt(variance + epsilon)\n\n \n moving_mean = mtf.assign(\n moving_mean, momentum * moving_mean + (1-momentum) * mean)\n moving_variance = mtf.assign(\n moving_variance,\n momentum * moving_variance + (1 - momentum) * variance)\n else:\n \n norm_x = (x - moving_mean) * mtf.rsqrt(moving_variance + epsilon)\n\n return (norm_x * scale) + bias", "docstring": "Batch normalization.\n\nArgs:\nx: a mtf.Tensor whose shape contains [batch_dim, ..., dim]\nis_training: a boolean, whether mode is training.\nmomentum: a floating point number, specifying batch norm decay value.\nepsilon: a floating point number.\ninit_zero: a boolean, whether to initialize scale with 0's or 1's.\nname: a string. variable scope.\n\nReturns:\na mtf.Tensor with same shape as x.", "source": "juraj-google-style"} {"code": "def split(self):\n if (self.package_request.conflict or (len(self.variant_slice) == 1)):\n return None\n else:\n r = self.variant_slice.split()\n if (r is None):\n return None\n else:\n (slice, next_slice) = r\n scope = self._copy(slice)\n next_scope = self._copy(next_slice)\n return (scope, next_scope)", "docstring": "Split the scope.\n\nReturns:\nA (_PackageScope, _PackageScope) tuple, where the first scope is\nguaranteed to have a common dependency. 
Or None, if splitting is\nnot applicable to this scope.", "source": "codesearchnet"} {"code": "def _compute_new_attention_mask(hidden_states: torch.Tensor, seq_lens: torch.Tensor):\n batch_size, mask_seq_len = hidden_states.shape[:2]\n indices = torch.arange(mask_seq_len, device=seq_lens.device).expand(batch_size, -1)\n bool_mask = indices >= seq_lens.unsqueeze(1).expand(-1, mask_seq_len)\n mask = hidden_states.new_ones((batch_size, mask_seq_len))\n mask = mask.masked_fill(bool_mask, 0)\n return mask", "docstring": "Computes an attention mask of the form `(batch, seq_len)` with an attention for each element in the batch that\nstops at the corresponding element in `seq_lens`.\n\nArgs:\nhidden_states (`torch.FloatTensor` of shape `(batch, seq_len, *)`):\nThe sequences to mask, where `*` is any number of sequence-specific dimensions including none.\nseq_lens (`torch.Tensor` of shape `(batch)`:\nEach element represents the length of the sequence at the same index in `hidden_states`\n\nReturns:\n`torch.FloatTensor`: The float attention mask of shape `(batch, seq_len)`", "source": "github-repos"} {"code": "def _parse_value(self, value):\n if isinstance(value, str):\n if value.upper() == 'TRUE':\n return True\n elif value.upper() == 'FALSE':\n return False\n else:\n int_value = self._convert_int(value)\n if int_value:\n return int_value\n else:\n float_value = self._convert_float(value)\n if float_value:\n return float_value\n else:\n date_value = self._convert_date(value)\n if date_value:\n return date_value\n else:\n return value\n else:\n return value", "docstring": "Parses a string value into a type specific value infering the correct type based on the data.\n\nArgs:\nvalue: The value to parse.\n\nReturns:\nThe representation of the value in the correct data type.", "source": "github-repos"} {"code": "def plot_lattice_vectors(lattice, ax=None, **kwargs):\n (ax, fig, plt) = get_ax3d_fig_plt(ax)\n if ('color' not in kwargs):\n kwargs['color'] = 'g'\n if ('linewidth' not in kwargs):\n kwargs['linewidth'] = 3\n vertex1 = lattice.get_cartesian_coords([0.0, 0.0, 0.0])\n vertex2 = lattice.get_cartesian_coords([1.0, 0.0, 0.0])\n ax.plot(*zip(vertex1, vertex2), **kwargs)\n vertex2 = lattice.get_cartesian_coords([0.0, 1.0, 0.0])\n ax.plot(*zip(vertex1, vertex2), **kwargs)\n vertex2 = lattice.get_cartesian_coords([0.0, 0.0, 1.0])\n ax.plot(*zip(vertex1, vertex2), **kwargs)\n return (fig, ax)", "docstring": "Adds the basis vectors of the lattice provided to a matplotlib Axes\n\nArgs:\nlattice: Lattice object\nax: matplotlib :class:`Axes` or None if a new figure should be created.\nkwargs: kwargs passed to the matplotlib function 'plot'. 
Color defaults to green\nand linewidth to 3.\n\nReturns:\nmatplotlib figure and matplotlib ax", "source": "codesearchnet"} {"code": "def reload_workspace(source: g3_utils.Source | None=None, *, restrict: py_utils.StrOrStrList | None=None, verbose: bool=False, reload_mode: inplace_reload.ReloadMode | str=inplace_reload.ReloadMode.INVALIDATE) -> None:\n from etils import g3_utils\n from etils.ecolab import adhoc_imports\n citc_info = g3_utils.citc_info_from_source(source)\n if isinstance(citc_info, g3_utils.PendingCl):\n citc_info = citc_info.workspace\n restrict = py_utils.normalize_str_to_list(restrict)\n if not isinstance(citc_info, g3_utils.Workspace):\n if (prev_modules := _find_modules_imported_in_different_source(source)):\n prev_module_name = next(iter(prev_modules))\n prev_source = sys.modules[prev_module_name]._etils_workspace_reload_source\n raise ValueError(f'Source was changed from {prev_source!r} to {source!r}. `reload_workspace=True` cannot auto-infer which modules to reload. Please restart your kernel.')\n else:\n return\n modules_to_reload = _get_modules_to_reload(restrict=restrict, citc_info=citc_info)\n if not modules_to_reload:\n return\n with adhoc_imports.adhoc(source, reload=modules_to_reload, restrict=restrict, restrict_reload=False, reload_recursive=False, reload_mode=reload_mode, verbose=verbose, collapse_prefix=f'Reload workspace ({len(modules_to_reload)} modules): '):\n for module_name in modules_to_reload:\n module = importlib.import_module(module_name)\n module._etils_workspace_reload_source = source\n update_global_namespace(reload=modules_to_reload, verbose=verbose)", "docstring": "Reload all modified files in the current workspace.\n\nThis function look at all edited files in the given workspace which are also\nimported and reload them.\n\nThis is a no-op if the source is not a user workspace.\n\nArgs:\nsource: Same as `ecolab.adhoc`\nrestrict: Same as `ecolab.adhoc`\nverbose: Same as `ecolab.adhoc`\nreload_mode: Same as `ecolab.adhoc`", "source": "github-repos"} {"code": "def get_orbit(name, date):\n if (name not in [x.name for x in Bsp().top.list]):\n raise UnknownBodyError(name)\n for (a, b) in Bsp().top.steps(name):\n if (b.name not in _propagator_cache):\n propagator = type(('%sBspPropagator' % b.name), (GenericBspPropagator,), {'src': a, 'dst': b})\n center = Pck()[b.full_name.title()]\n propagator.propagate(date).as_frame(b.name, center=center)\n _propagator_cache[b.name] = propagator\n if (Bsp().top not in _propagator_cache):\n _propagator_cache[Bsp().top.name] = EarthPropagator()\n return _propagator_cache[name].propagate(date)", "docstring": "Retrieve the orbit of a solar system object\n\nArgs:\nname (str): The name of the body desired. 
For exact nomenclature, see\n:py:func:`available_planets`\ndate (Date): Date at which the state vector will be extracted\nReturn:\nOrbit: Orbit of the desired object, in the reference frame in which it is declared in\nthe .bsp file", "source": "codesearchnet"} {"code": "def get_structural_variant(self, variant):\n query = {'chrom': variant['chrom'], 'end_chrom': variant['end_chrom'], 'sv_type': variant['sv_type'], '$and': [{'pos_left': {'$lte': variant['pos']}}, {'pos_right': {'$gte': variant['pos']}}]}\n res = self.db.structural_variant.find(query).sort('pos_left', 1)\n match = None\n distance = None\n closest_hit = None\n for hit in res:\n if (hit['end_left'] > variant['end']):\n continue\n if (hit['end_right'] < variant['end']):\n continue\n distance = (abs((variant['pos'] - ((hit['pos_left'] + hit['pos_right']) / 2))) + abs((variant['end'] - ((hit['end_left'] + hit['end_right']) / 2))))\n if (closest_hit is None):\n match = hit\n closest_hit = distance\n continue\n if (distance < closest_hit):\n match = hit\n closest_hit = distance\n return match", "docstring": "Check if there are any overlapping sv clusters\n\nSearch the sv variants with chrom start end_chrom end and sv_type\n\nArgs:\nvariant (dict): A variant dictionary\n\nReturns:\nvariant (dict): A variant dictionary", "source": "codesearchnet"} {"code": "def post_path(self, path: str, path_data: Union[dict, None], post_data: Any) -> dict:\n \n path = self._insert_vars(path, path_data or {})\n path = self.BASE_URL + path\n self._try_refresh_access_token()\n return self.session.post(path, json=post_data).json()", "docstring": "Modifies the ESI by an endpoint URL.\n\nThis method is not marked \"private\" as it _can_ be used\nby consuming code, but it's probably easier to call the\n`get_op` method instead.\n\nArgs:\npath: raw ESI URL path\npath_data: data to format the path with (can be None)\npost_data: data to send to ESI\n\nReturns:\nESI data", "source": "juraj-google-style"} {"code": "def remove(self, block_id):\n \n with self._mutex:\n entry = self._block_map[block_id]\n self._queue.remove(entry)", "docstring": "Remove a Processing Block from the queue.\n\nArgs:\nblock_id (str):", "source": "juraj-google-style"} {"code": "def initialize(self):\n \n self.fonttype = self.font_types['bitmap']\n self.send(chr(27)+chr(64))", "docstring": "Calling this function initializes the printer.\n\nArgs:\nNone\nReturns:\nNone\nRaises:\nNone", "source": "juraj-google-style"} {"code": "def get_decomposition(self, comp):\n \n facet, simplex = self._get_facet_and_simplex(comp)\n decomp_amts = simplex.bary_coords(self.pd_coords(comp))\n return {self.qhull_entries[f]: amt\n for f, amt in zip(facet, decomp_amts)\n if abs(amt) > PhaseDiagram.numerical_tol}", "docstring": "Provides the decomposition at a particular composition.\n\nArgs:\ncomp: A composition\n\nReturns:\nDecomposition as a dict of {Entry: amount}", "source": "juraj-google-style"} {"code": "def _dqdv_combinded_frame(cell, **kwargs):\n cycles = cell.get_cap(method='forth-and-forth', categorical_column=True, label_cycle_number=True)\n ica_df = dqdv_cycles(cycles, **kwargs)\n assert isinstance(ica_df, pd.DataFrame)\n return ica_df", "docstring": "Returns full cycle dqdv data for all cycles as one pd.DataFrame.\n\nArgs:\ncell: CellpyData-object\n\nReturns:\npandas.DataFrame with the following columns:\ncycle: cycle number\nvoltage: voltage\ndq: the incremental capacity", "source": "codesearchnet"} {"code": "def _prepare_run_watch_config(self, fetches, feed_dict):\n debug_urls = 
self.prepare_run_debug_urls(fetches, feed_dict)\n if self._watch_fn is None:\n watch_options = WatchOptions()\n else:\n watch_options = self._watch_fn(fetches, feed_dict)\n if isinstance(watch_options, tuple):\n watch_options = WatchOptions(*watch_options)\n return (debug_urls, watch_options)", "docstring": "Get the debug_urls, and node/op allowlists for the current run() call.\n\nArgs:\nfetches: Same as the `fetches` argument to `Session.run()`.\nfeed_dict: Same as the `feed_dict argument` to `Session.run()`.\n\nReturns:\ndebug_urls: (str or list of str) Debug URLs for the current run() call.\nCurrently, the list consists of only one URL that is a file:// URL.\nwatch_options: (WatchOptions) The return value of a watch_fn, containing\noptions including debug_ops, and allowlists.", "source": "github-repos"} {"code": "def _map_condition(self, wire_map, condition):\n \n if condition is None:\n new_condition = None\n else:\n \n \n \n bit0 = (condition[0], 0)\n new_condition = (wire_map.get(bit0, bit0)[0], condition[1])\n return new_condition", "docstring": "Use the wire_map dict to change the condition tuple's creg name.\n\nArgs:\nwire_map (dict): a map from wires to wires\ncondition (tuple): (ClassicalRegister,int)\nReturns:\ntuple(ClassicalRegister,int): new condition", "source": "juraj-google-style"} {"code": "def in_cache(self, objpath, metahash):\n try:\n self.path_in_cache(objpath, metahash)\n return True\n except CacheMiss:\n return False", "docstring": "Returns true if object is cached.\n\nArgs:\nobjpath: Filename relative to buildroot.\nmetahash: hash object", "source": "codesearchnet"} {"code": "def stop_capture(self):\n raise NotImplementedError('Base class should not be called directly!')", "docstring": "This function stops a capture and guarantees that the capture is\nsaved to the capture file configured during the start_capture() method.\nDepending on the type of the sniffer the file may previously contain\npartial results (e.g. for a local sniffer) or may not exist until the\nstop_capture() method is executed (e.g. for a remote sniffer).\n\nDepending on the type/subtype and configuration of the sniffer the\ncapture may terminate on its own without requiring a call to this\nfunction. In such a case it is still necessary to call either this\nfunction or the wait_for_capture() function to make sure that the\ncapture file is moved to the correct location.\n\nRaises:\nNoPermissionError: No permission when trying to stop a capture\nand save the capture file.", "source": "github-repos"} {"code": "def add_space(self, line):\n \n if not isinstance(self.last_item, Space):\n space = Space(self._structure)\n self._structure.append(space)\n self.last_item.add_line(line)\n return self", "docstring": "Add a Space object to the section\n\nUsed during initial parsing mainly\n\nArgs:\nline (str): one line that defines the space, maybe whitespaces", "source": "juraj-google-style"} {"code": "def get_or_create_table(self, project_id, dataset_id, table_id, schema, create_disposition, write_disposition, additional_create_parameters=None):\n from apache_beam.io.gcp.bigquery import BigQueryDisposition\n found_table = None\n try:\n found_table = self.get_table(project_id, dataset_id, table_id)\n except HttpError as exn:\n if exn.status_code == 404:\n if create_disposition == BigQueryDisposition.CREATE_NEVER:\n raise RuntimeError('Table %s:%s.%s not found but create disposition is CREATE_NEVER.' 
% (project_id, dataset_id, table_id))\n else:\n raise\n if found_table and write_disposition in (BigQueryDisposition.WRITE_EMPTY, BigQueryDisposition.WRITE_TRUNCATE):\n if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:\n self._delete_table(project_id, dataset_id, table_id)\n elif write_disposition == BigQueryDisposition.WRITE_EMPTY and (not self._is_table_empty(project_id, dataset_id, table_id)):\n raise RuntimeError('Table %s:%s.%s is not empty but write disposition is WRITE_EMPTY.' % (project_id, dataset_id, table_id))\n if schema is None and found_table is None:\n raise RuntimeError('Table %s:%s.%s requires a schema. None can be inferred because the table does not exist.' % (project_id, dataset_id, table_id))\n if found_table and write_disposition != BigQueryDisposition.WRITE_TRUNCATE:\n return found_table\n else:\n created_table = None\n try:\n created_table = self._create_table(project_id=project_id, dataset_id=dataset_id, table_id=table_id, schema=schema or found_table.schema, additional_parameters=additional_create_parameters)\n except HttpError as exn:\n if exn.status_code == 409:\n _LOGGER.debug('Skipping Creation. Table %s:%s.%s already exists.' % (project_id, dataset_id, table_id))\n created_table = self.get_table(project_id, dataset_id, table_id)\n else:\n raise\n _LOGGER.info('Created table %s.%s.%s with schema %s. Result: %s.', project_id, dataset_id, table_id, schema or found_table.schema, created_table)\n if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:\n _LOGGER.warning('Sleeping for 150 seconds before the write as ' + 'BigQuery inserts can be routed to deleted table ' + 'for 2 mins after the delete and create.')\n time.sleep(150)\n return created_table\n else:\n return created_table", "docstring": "Gets or creates a table based on create and write dispositions.\n\nThe function mimics the behavior of BigQuery import jobs when using the\nsame create and write dispositions.\n\nArgs:\nproject_id: The project id owning the table.\ndataset_id: The dataset id owning the table.\ntable_id: The table id.\nschema: A bigquery.TableSchema instance or None.\ncreate_disposition: CREATE_NEVER or CREATE_IF_NEEDED.\nwrite_disposition: WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE.\n\nReturns:\nA bigquery.Table instance if table was found or created.\n\nRaises:\n`RuntimeError`: For various mismatches between the state of the table and\nthe create/write dispositions passed in. 
For example if the table is not\nempty and WRITE_EMPTY was specified then an error will be raised since\nthe table was expected to be empty.", "source": "github-repos"} {"code": "def _add_doc_value(self, field_name: str, jsonpath: str) -> None:\n path = self.origin_doc.etk.parse_json_path(jsonpath)\n matches = path.find(self.origin_doc.value)\n all_valid = True\n invalid = []\n for a_match in matches:\n if a_match.value:\n valid = self._add_value(field_name, a_match.value, provenance_path=str(a_match.full_path))\n if (not valid):\n invalid.append(((field_name + ':') + str(a_match.value)))\n all_valid = (all_valid and valid)\n if (not all_valid):\n raise KgValueError(('Some kg value type invalid according to schema: ' + json.dumps(invalid)))", "docstring": "Add a value to knowledge graph by giving a jsonpath\n\nArgs:\nfield_name: str\njsonpath: str\n\nReturns:", "source": "codesearchnet"} {"code": "def HashFile(self, fd, byte_count):\n \n while byte_count > 0:\n buf_size = min(byte_count, constants.CLIENT_MAX_BUFFER_SIZE)\n buf = fd.read(buf_size)\n if not buf:\n break\n\n self.HashBuffer(buf)\n byte_count -= buf_size", "docstring": "Updates underlying hashers with a given file.\n\nArgs:\nfd: A file object that is going to be fed to the hashers.\nbyte_count: A maximum number of bytes that are going to be processed.", "source": "juraj-google-style"} {"code": "def op_and(self, *elements):\n \n expression = self.add_operator(Operator(';'))\n for element in elements:\n expression.add_element(element)\n return expression", "docstring": "Update the ``Expression`` by joining the specified additional\n``elements`` using an \"AND\" ``Operator``\n\nArgs:\n*elements (BaseExpression): The ``Expression`` and/or\n``Constraint`` elements which the \"AND\" ``Operator`` applies\nto.\n\nReturns:\nExpression: ``self`` or related ``Expression``.", "source": "juraj-google-style"} {"code": "def add_dataset(self, task_name, dataset=None, *, aliases=None):\n self._datasets.append((dataset if (dataset is not None) else TaskData()))\n last_index = (len(self._datasets) - 1)\n self._aliases[task_name] = last_index\n if (aliases is not None):\n for alias in aliases:\n self._aliases[alias] = last_index\n if (len(self._datasets) == 1):\n self._default_index = 0", "docstring": "Add a new dataset to the MultiTaskData.\n\nArgs:\ntask_name (str): The name of the task from which the dataset was received.\ndataset (TaskData): The dataset that should be added.\naliases (list): A list of aliases that should be registered with the dataset.", "source": "codesearchnet"} {"code": "def setOutputHandler(self, outputhandler):\n\n class OutputHandlerInternal(amplpython.OutputHandler):\n\n def output(self, kind, msg):\n outputhandler.output(kind, msg)\n self._outputhandler = outputhandler\n self._outputhandler_internal = OutputHandlerInternal()\n lock_and_call((lambda : self._impl.setOutputHandler(self._outputhandler_internal)), self._lock)", "docstring": "Sets a new output handler.\n\nArgs:\noutputhandler: The function handling the AMPL output derived from\ninterpreting user commands.", "source": "codesearchnet"} {"code": "def handle_subscribed_event(self, event_obj, event_name):\n (handler, args) = self.handlers[event_name]\n self.executor.submit(handler, event_obj, *args)", "docstring": "Execute the registered handler of an event.\n\nRetrieve the handler and its arguments, and execute the handler in a\nnew thread.\n\nArgs:\nevent_obj: Json object of the event.\nevent_name: Name of the event to call handler for.", "source": 
"codesearchnet"} {"code": "def clear_tc(self, owner, data, clear_type):\n \n batch = self.tcex.batch(owner, action='Delete')\n tc_type = data.get('type')\n path = data.get('path')\n if tc_type in self.tcex.group_types:\n name = self.tcex.playbook.read(data.get('name'))\n name = self.path_data(name, path)\n if name is not None:\n print(\n 'Deleting ThreatConnect Group: {}{}{}'.format(\n c.Style.BRIGHT, c.Fore.MAGENTA, name\n )\n )\n self.log.info(\n '[{}] Deleting ThreatConnect {} with name: {}.'.format(\n clear_type, tc_type, name\n )\n )\n batch.group(tc_type, name)\n elif tc_type in self.tcex.indicator_types:\n if data.get('summary') is not None:\n summary = self.tcex.playbook.read(data.get('summary'))\n else:\n resource = self.tcex.resource(tc_type)\n summary = resource.summary(data)\n summary = self.path_data(summary, path)\n if summary is not None:\n print(\n 'Deleting ThreatConnect Indicator: {}{}{}'.format(\n c.Style.BRIGHT, c.Fore.MAGENTA, summary\n )\n )\n self.log.info(\n '[{}] Deleting ThreatConnect {} with value: {}.'.format(\n clear_type, tc_type, summary\n )\n )\n batch.indicator(tc_type, summary)\n batch_results = batch.submit()\n self.log.debug('[{}] Batch Results: {}'.format(clear_type, batch_results))\n for error in batch_results.get('errors') or []:\n self.log.error('[{}] Batch Error: {}'.format(clear_type, error))", "docstring": "Delete threat intel from ThreatConnect platform.\n\nArgs:\nowner (str): The ThreatConnect owner.\ndata (dict): The data for the threat intel to clear.\nclear_type (str): The type of clear action.", "source": "juraj-google-style"} {"code": "def AddRun(self, path, name=None):\n name = (name or path)\n accumulator = None\n with self._accumulators_mutex:\n if ((name not in self._accumulators) or (self._paths[name] != path)):\n if ((name in self._paths) and (self._paths[name] != path)):\n logger.warn('Conflict for name %s: old path %s, new path %s', name, self._paths[name], path)\n logger.info('Constructing EventAccumulator for %s', path)\n accumulator = event_accumulator.EventAccumulator(path, size_guidance=self._size_guidance, tensor_size_guidance=self._tensor_size_guidance, purge_orphaned_data=self.purge_orphaned_data)\n self._accumulators[name] = accumulator\n self._paths[name] = path\n if accumulator:\n if self._reload_called:\n accumulator.Reload()\n return self", "docstring": "Add a run to the multiplexer.\n\nIf the name is not specified, it is the same as the path.\n\nIf a run by that name exists, and we are already watching the right path,\ndo nothing. If we are watching a different path, replace the event\naccumulator.\n\nIf `Reload` has been called, it will `Reload` the newly created\naccumulators.\n\nArgs:\npath: Path to the event files (or event directory) for given run.\nname: Name of the run to add. 
If not provided, is set to path.\n\nReturns:\nThe `EventMultiplexer`.", "source": "codesearchnet"} {"code": "def parse_hgnc_line(line, header):\n hgnc_gene = {}\n line = line.rstrip().split('\\t')\n raw_info = dict(zip(header, line))\n if ('Withdrawn' in raw_info['status']):\n return hgnc_gene\n hgnc_symbol = raw_info['symbol']\n hgnc_gene['hgnc_symbol'] = hgnc_symbol\n hgnc_gene['hgnc_id'] = int(raw_info['hgnc_id'].split(':')[(- 1)])\n hgnc_gene['description'] = raw_info['name']\n aliases = set([hgnc_symbol, hgnc_symbol.upper()])\n previous_names = raw_info['prev_symbol']\n if previous_names:\n for alias in previous_names.strip('\"').split('|'):\n aliases.add(alias)\n alias_symbols = raw_info['alias_symbol']\n if alias_symbols:\n for alias in alias_symbols.strip('\"').split('|'):\n aliases.add(alias)\n hgnc_gene['previous_symbols'] = list(aliases)\n hgnc_gene['ensembl_gene_id'] = raw_info.get('ensembl_gene_id')\n omim_id = raw_info.get('omim_id')\n if omim_id:\n hgnc_gene['omim_id'] = int(omim_id.strip('\"').split('|')[0])\n else:\n hgnc_gene['omim_id'] = None\n entrez_id = hgnc_gene['entrez_id'] = raw_info.get('entrez_id')\n if entrez_id:\n hgnc_gene['entrez_id'] = int(entrez_id)\n else:\n hgnc_gene['entrez_id'] = None\n ref_seq = raw_info.get('refseq_accession')\n if ref_seq:\n hgnc_gene['ref_seq'] = ref_seq.strip('\"').split('|')\n else:\n hgnc_gene['ref_seq'] = []\n uniprot_ids = raw_info.get('uniprot_ids')\n if uniprot_ids:\n hgnc_gene['uniprot_ids'] = uniprot_ids.strip('\"\"').split('|')\n else:\n hgnc_gene['uniprot_ids'] = []\n ucsc_id = raw_info.get('ucsc_id')\n if ucsc_id:\n hgnc_gene['ucsc_id'] = ucsc_id\n else:\n hgnc_gene['ucsc_id'] = None\n vega_id = raw_info.get('vega_id')\n if vega_id:\n hgnc_gene['vega_id'] = vega_id\n else:\n hgnc_gene['vega_id'] = None\n return hgnc_gene", "docstring": "Parse an hgnc formated line\n\nArgs:\nline(list): A list with hgnc gene info\nheader(list): A list with the header info\n\nReturns:\nhgnc_info(dict): A dictionary with the relevant info", "source": "codesearchnet"} {"code": "def insert(self, part):\n \n params = {k: str(v) for k,v in part.params.items()}\n res=c.create_assembly_instance(self.uri.as_dict(), part.uri.as_dict(), params)\n return res", "docstring": "Insert a part into this assembly.\n\nArgs:\n- part (onshapepy.part.Part) A Part instance that will be inserted.\n\nReturns:\n- requests.Response: Onshape response data", "source": "juraj-google-style"} {"code": "def __init__(self, structure, element):\n \n self.structure = structure\n self.element = element\n\n framework = list(self.structure.symbol_set)\n get_voronoi = TopographyAnalyzer(self.structure, framework, [], check_volume=False)\n get_voronoi.cluster_nodes()\n get_voronoi.remove_collisions()\n\n \n struct_to_trim = self.structure.copy()\n for poss_inter in get_voronoi.vnodes:\n struct_to_trim.append(self.element, poss_inter.frac_coords, coords_are_cartesian=False)\n\n symmetry_finder = SpacegroupAnalyzer(struct_to_trim, symprec=1e-1)\n equiv_sites_list = symmetry_finder.get_symmetrized_structure().equivalent_sites\n\n \n \n pdc = PointDefectComparator()\n self.unique_defect_seq = []\n for poss_site_list in equiv_sites_list:\n poss_site = poss_site_list[0]\n if poss_site not in self.structure:\n now_defect = Interstitial( self.structure, poss_site)\n append_defect = True\n for unique_defect in self.unique_defect_seq:\n if pdc.are_equal( now_defect, unique_defect):\n append_defect = False\n if append_defect:\n self.unique_defect_seq.append( now_defect)\n\n self.count_def = 
0", "docstring": "Initializes an Interstitial generator using Voronoi sites\nArgs:\nstructure (Structure): pymatgen structure object\nelement (str or Element or Specie): element for the interstitial", "source": "juraj-google-style"} {"code": "def ParseOptions(cls, options, configuration_object):\n \n if not isinstance(configuration_object, tools.CLITool):\n raise errors.BadConfigObject(\n 'Configuration object is not an instance of CLITool')\n\n yara_rules_string = None\n\n path = getattr(options, 'yara_rules_path', None)\n if path:\n try:\n with io.open(path, 'rt', encoding='utf-8') as rules_file:\n yara_rules_string = rules_file.read()\n\n except IOError as exception:\n raise errors.BadConfigObject(\n 'Unable to read Yara rules file: {0:s} with error: {1!s}'.format(\n path, exception))\n\n try:\n \n \n \n yara.compile(source=yara_rules_string)\n\n except yara.Error as exception:\n raise errors.BadConfigObject(\n 'Unable to parse Yara rules in: {0:s} with error: {1!s}'.format(\n path, exception))\n\n setattr(configuration_object, '_yara_rules_string', yara_rules_string)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.", "source": "juraj-google-style"} {"code": "def cancelRealTimeBars(self, bars: RealTimeBarList):\n self.client.cancelRealTimeBars(bars.reqId)\n self.wrapper.endSubscription(bars)", "docstring": "Cancel the realtime bars subscription.\n\nArgs:\nbars: The bar list that was obtained from ``reqRealTimeBars``.", "source": "codesearchnet"} {"code": "def save(self, path):\n \n with open(path, 'w') as f:\n f.write(self.contents())", "docstring": "Save svg as file(.svg)\n\nArgs:\npath (str): destination to save file", "source": "juraj-google-style"} {"code": "def triangulate(points): \n \n \n seen = set() \n uniqpoints = [ p for p in points if str( p[:2] ) not in seen and not seen.add( str( p[:2] ) )]\n classpoints = [_Point(*point[:2]) for point in uniqpoints]\n\n \n triangle_ids = tesselator.computeDelaunayTriangulation(classpoints)\n\n \n triangles = [[uniqpoints[i] for i in triangle] for triangle in triangle_ids]\n \n return triangles", "docstring": "Connects an input list of xy tuples with lines forming a set of\nsmallest possible Delauney triangles between them.\n\nArguments:\n\n- **points**: A list of xy or xyz point tuples to triangulate.\n\nReturns:\n\n- A list of triangle polygons. 
If the input coordinate points contained\na third z value then the output triangles will also have these z values.", "source": "juraj-google-style"} {"code": "def _refresh_grpc(operations_stub, operation_name):\n request_pb = operations_pb2.GetOperationRequest(name=operation_name)\n return operations_stub.GetOperation(request_pb)", "docstring": "Refresh an operation using a gRPC client.\n\nArgs:\noperations_stub (google.longrunning.operations_pb2.OperationsStub):\nThe gRPC operations stub.\noperation_name (str): The name of the operation.\n\nReturns:\ngoogle.longrunning.operations_pb2.Operation: The operation.", "source": "codesearchnet"} {"code": "def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: torch.LongTensor=None):\n batch_size, num_images, num_channels, height, width = pixel_values.shape\n pixel_values = pixel_values.to(dtype=self.dtype)\n pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])\n nb_values_per_image = pixel_values.shape[1:].numel()\n real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image\n pixel_values = pixel_values[real_images_inds].contiguous()\n if pixel_attention_mask is None:\n pixel_attention_mask = torch.ones(size=(pixel_values.size(0), pixel_values.size(2), pixel_values.size(3)), dtype=torch.bool, device=pixel_values.device)\n else:\n pixel_attention_mask = pixel_attention_mask.view(batch_size * num_images, *pixel_attention_mask.shape[2:])\n pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()\n patch_size = self.config.vision_config.patch_size\n patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)\n patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)\n patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) == patch_size * patch_size).bool()\n image_hidden_states = self.vision_model(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)\n image_hidden_states = image_hidden_states.last_hidden_state\n image_hidden_states = self.connector(image_hidden_states, attention_mask=patch_attention_mask.view(pixel_values.size(0), -1))\n image_hidden_states = image_hidden_states.view(-1, image_hidden_states.shape[-1])\n return image_hidden_states", "docstring": "Encodes images into continuous embeddings that can be forwarded to the language model.\n\nArgs:\npixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nThe tensors corresponding to the input images.\npixel_attention_mask (`torch.LongTensor`, *optional*):\nThe attention mask indicating padded regions in the image.", "source": "github-repos"} {"code": "def __init__(self, query, num_splits=0):\n super().__init__()\n if not query.project:\n raise ValueError('query.project cannot be empty')\n if not query:\n raise ValueError('query cannot be empty')\n if num_splits < 0:\n raise ValueError('num_splits must be greater than or equal 0')\n self._project = query.project\n self._datastore_namespace = query.namespace\n self._query = query\n self._num_splits = num_splits", "docstring": "Initialize the `ReadFromDatastore` transform.\n\nThis transform outputs elements of type\n:class:`~apache_beam.io.gcp.datastore.v1new.types.Entity`.\n\nArgs:\nquery: (:class:`~apache_beam.io.gcp.datastore.v1new.types.Query`) query\nused to fetch entities.\nnum_splits: (:class:`int`) (optional) Number of splits for the query.", "source": "github-repos"} {"code": "def ParseFileObject(self, 
parser_mediator, file_object):\n \n volume = pyfsntfs.volume()\n try:\n volume.open_file_object(file_object)\n except IOError as exception:\n parser_mediator.ProduceExtractionWarning(\n 'unable to open NTFS volume with error: {0!s}'.format(exception))\n\n try:\n usn_change_journal = volume.get_usn_change_journal()\n self._ParseUSNChangeJournal(parser_mediator, usn_change_journal)\n finally:\n volume.close()", "docstring": "Parses a NTFS $UsnJrnl metadata file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): file-like object.", "source": "juraj-google-style"} {"code": "def conversations_setTopic(\n self, *, channel: str, topic: str, **kwargs\n ) -> SlackResponse:\n \n kwargs.update({\"channel\": channel, \"topic\": topic})\n return self.api_call(\"conversations.setTopic\", json=kwargs)", "docstring": "Sets the topic for a conversation.\n\nArgs:\nchannel (str): The channel id. e.g. 'C1234567890'\ntopic (str): The new topic for the channel. e.g. 'My Topic'", "source": "juraj-google-style"} {"code": "def setProperty(self, orgresource, protect, dummy = 7046):\n \n\n url = nurls['setProperty']\n\n data = {'userid': self.user_id,\n 'useridx': self.useridx,\n 'orgresource': orgresource,\n 'protect': protect,\n 'dummy': dummy,\n }\n\n r = self.session.post(url = url, data = data)\n\n return resultManager(r.text)", "docstring": "SetProperty\n\nArgs:\norgresource: File path\nprotect: 'Y' or 'N', 중요 표시\n\nReturns:\nInteger number: # of version list\nFalse: Failed to get property", "source": "juraj-google-style"} {"code": "def new(namespace, name, wdl, synopsis,\n documentation=None, api_url=fapi.PROD_API_ROOT):\n \n r = fapi.update_workflow(namespace, name, synopsis,\n wdl, documentation, api_url)\n fapi._check_response_code(r, 201)\n d = r.json()\n return Method(namespace, name, d[\"snapshotId\"])", "docstring": "Create new FireCloud method.\n\nIf the namespace + name already exists, a new snapshot is created.\n\nArgs:\nnamespace (str): Method namespace for this method\nname (str): Method name\nwdl (file): WDL description\nsynopsis (str): Short description of task\ndocumentation (file): Extra documentation for method", "source": "juraj-google-style"} {"code": "def _update_input(self, index, tensor) -> None:\n if not isinstance(tensor, tensor_lib.Tensor):\n raise TypeError('tensor must be a Tensor: %s' % tensor)\n _assert_same_graph(self, tensor)\n self._inputs_val = None\n with self.graph._c_graph.get() as c_graph:\n pywrap_tf_session.UpdateEdge(c_graph, tensor._as_tf_output(), self._tf_input(index))", "docstring": "Update the input to this operation at the given index.\n\nNOTE: This is for TF internal use only. Please don't use it.\n\nArgs:\nindex: the index of the input to update.\ntensor: the Tensor to be used as the input at the given index.\n\nRaises:\nTypeError: if tensor is not a Tensor,\nor if input tensor type is not convertible to dtype.\nValueError: if the Tensor is from a different graph.", "source": "github-repos"} {"code": "def get_submission_ids(self, tournament=1):\n query = '\\n query($tournament: Int!) 
{\\n rounds(tournament: $tournament\\n number: 0) {\\n leaderboard {\\n username\\n submissionId\\n }\\n }\\n }\\n '\n arguments = {'tournament': tournament}\n data = self.raw_query(query, arguments)['data']['rounds'][0]\n if (data is None):\n return None\n mapping = {item['username']: item['submissionId'] for item in data['leaderboard']}\n return mapping", "docstring": "Get dict with username->submission_id mapping.\n\nArgs:\ntournament (int): ID of the tournament (optional, defaults to 1)\n\nReturns:\ndict: username->submission_id mapping, string->string\n\nExample:\n>>> NumerAPI().get_submission_ids()\n{'1337ai': '93c46857-fed9-4594-981e-82db2b358daf',\n'1x0r': '108c7601-822c-4910-835d-241da93e2e24',\n...\n}", "source": "codesearchnet"} {"code": "def __init__(self, meshes: List[layout_lib.Mesh], is_async=True, in_flight_nodes_limit=8):\n if any((not isinstance(mesh, layout_lib.Mesh) for mesh in meshes)):\n raise TypeError('Expected a flat list of Mesh objects, got {}'.format(meshes))\n global _next_device_number\n ctx = context.context()\n with _next_device_number_lock:\n self.name = '{}/device:CUSTOM:{}'.format(ctx.host_address_space(), _next_device_number)\n _next_device_number += 1\n device, device_info = _pywrap_dtensor_device.Allocate(self.name, is_async, in_flight_nodes_limit)\n context.register_custom_device(device, self.name, device_info)\n self._device_info = device_info\n self._current_output_layout = None\n self._current_default_mesh = None\n self._meshes = set()\n self._mesh_lock = threading.Lock()\n for mesh in meshes:\n self._register_mesh(mesh)", "docstring": "Create a new DTensorDevice which executes ops on `underlying_device`.\n\nArgs:\nmeshes: A list of `Mesh` objects indicating groups of devices to execute\non. These may also be registered lazily.\nis_async: Indicates whether DTensor operations on this client will return\nimmediately (with \"non-ready\" handles) or block until executed. This is\non by default and is exposed as an option for ease of debugging.\nin_flight_nodes_limit: Indicates the limit of in-flight nodes before\nenqueueing of async operations to DTensorDevice is blocked. This limit\nis per mesh. 0 for no limits from DTensor. Default is 8.", "source": "github-repos"} {"code": "def sign(self, message):\n \n message = _helpers._to_bytes(message, encoding='utf-8')\n return PKCS1_v1_5.new(self._key).sign(SHA256.new(message))", "docstring": "Signs a message.\n\nArgs:\nmessage: string, Message to be signed.\n\nReturns:\nstring, The signature of the message for the given key.", "source": "juraj-google-style"} {"code": "def add_pagination_meta(self, params, meta):\n meta['page_size'] = params['page_size']\n meta['page'] = params['page']\n meta['prev'] = ('page={0}&page_size={1}'.format((params['page'] - 1), params['page_size']) if (meta['page'] > 0) else None)\n meta['next'] = ('page={0}&page_size={1}'.format((params['page'] + 1), params['page_size']) if meta.get('has_more', True) else None)", "docstring": "Extend default meta dictionary value with pagination hints.\n\nNote:\nThis method handler attaches values to ``meta`` dictionary without\nchanging it's reference. 
This means that you should never replace\n``meta`` dictionary with any other dict instance but simply modify\nits content.\n\nArgs:\nparams (dict): dictionary of decoded parameter values\nmeta (dict): dictionary of meta values attached to response", "source": "codesearchnet"} {"code": "def _get_class_frame_source(class_name):\n for frame_info in inspect.stack():\n try:\n with open(frame_info[1]) as fp:\n src = ''.join(fp.readlines()[(frame_info[2] - 1):])\n except IOError:\n continue\n if re.search('\\\\bclass\\\\b\\\\s+\\\\b{}\\\\b'.format(class_name), src):\n reader = six.StringIO(src).readline\n tokens = tokenize.generate_tokens(reader)\n source_tokens = []\n indent_level = 0\n base_indent_level = 0\n has_base_level = False\n for (token, value, _, _, _) in tokens:\n source_tokens.append((token, value))\n if (token == tokenize.INDENT):\n indent_level += 1\n elif (token == tokenize.DEDENT):\n indent_level -= 1\n if (has_base_level and (indent_level <= base_indent_level)):\n return (tokenize.untokenize(source_tokens), frame_info[0].f_globals, frame_info[0].f_locals)\n elif (not has_base_level):\n has_base_level = True\n base_indent_level = indent_level\n raise TypeError('Unable to retrieve source for class \"{}\"'.format(class_name))", "docstring": "Return the source code for a class by checking the frame stack.\n\nThis is necessary because it is not possible to get the source of a class\nbeing created by a metaclass directly.\n\nArgs:\nclass_name: The class to look for on the stack.\n\nReturns:\nThe source code for the requested class if the class was found and the\nsource was accessible.", "source": "codesearchnet"} {"code": "def defaultset(self, annotationtype):\n if (inspect.isclass(annotationtype) or isinstance(annotationtype, AbstractElement)):\n annotationtype = annotationtype.ANNOTATIONTYPE\n try:\n return list(self.annotationdefaults[annotationtype].keys())[0]\n except KeyError:\n raise NoDefaultError\n except IndexError:\n raise NoDefaultError", "docstring": "Obtain the default set for the specified annotation type.\n\nArguments:\nannotationtype: The type of annotation, this is conveyed by passing the corresponding annototion class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.\n\nReturns:\nthe set (str)\n\nRaises:\n:class:`NoDefaultError` if the annotation type does not exist or if there is ambiguity (multiple sets for the same type)", "source": "codesearchnet"} {"code": "def add(self, pattern: Pattern) -> int:\n inner = pattern.expression\n if (self.operation is None):\n if ((not isinstance(inner, Operation)) or isinstance(inner, CommutativeOperation)):\n raise TypeError('Pattern must be a non-commutative operation.')\n self.operation = type(inner)\n elif (not isinstance(inner, self.operation)):\n raise TypeError('All patterns must be the same operation, expected {} but got {}'.format(self.operation, type(inner)))\n if (op_len(inner) < 3):\n raise ValueError('Pattern has not enough operands.')\n operands = list(op_iter(inner))\n first_name = self._check_wildcard_and_get_name(operands[0])\n last_name = self._check_wildcard_and_get_name(operands[(- 1)])\n index = len(self._patterns)\n self._patterns.append((pattern, first_name, last_name))\n flatterm = FlatTerm.merged(*(FlatTerm(o) for o in operands[1:(- 1)]))\n self._net.add(flatterm, index)\n return index", "docstring": "Add a pattern that will be recognized by the matcher.\n\nArgs:\npattern:\nThe pattern to add.\n\nReturns:\nAn internal index for the 
pattern.\n\nRaises:\nValueError:\nIf the pattern does not have the correct form.\nTypeError:\nIf the pattern is not a non-commutative operation.", "source": "codesearchnet"} {"code": "def assert_type(tensor, tf_type, message=None, name=None):\n tf_type = dtypes.as_dtype(tf_type)\n with ops.name_scope(name, 'assert_type', [tensor]):\n if not isinstance(tensor, sparse_tensor.SparseTensor):\n tensor = ops.convert_to_tensor(tensor, name='tensor')\n if tensor.dtype != tf_type:\n raise TypeError(f'{_message_prefix(message)}{getattr(tensor, 'name', 'tensor')} must be of type {tf_type!r}; got {tensor.dtype!r}')\n return control_flow_ops.no_op('statically_determined_correct_type')", "docstring": "Statically asserts that the given `Tensor` is of the specified type.\n\nArgs:\ntensor: A `Tensor` or `SparseTensor`.\ntf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,\netc).\nmessage: A string to prefix to the default message.\nname: A name to give this `Op`. Defaults to \"assert_type\"\n\nRaises:\nTypeError: If the tensors data type doesn't match `tf_type`.\n\nReturns:\nA `no_op` that does nothing. Type can be determined statically.", "source": "github-repos"} {"code": "async def _get_or_fetch_conversation(self, conv_id):\n conv = self._conv_dict.get(conv_id, None)\n if (conv is None):\n logger.info('Fetching unknown conversation %s', conv_id)\n res = (await self._client.get_conversation(hangouts_pb2.GetConversationRequest(request_header=self._client.get_request_header(), conversation_spec=hangouts_pb2.ConversationSpec(conversation_id=hangouts_pb2.ConversationId(id=conv_id)), include_event=False)))\n conv_state = res.conversation_state\n event_cont_token = None\n if conv_state.HasField('event_continuation_token'):\n event_cont_token = conv_state.event_continuation_token\n return self._add_conversation(conv_state.conversation, event_cont_token=event_cont_token)\n else:\n return conv", "docstring": "Get a cached conversation or fetch a missing conversation.\n\nArgs:\nconv_id: string, conversation identifier\n\nRaises:\nNetworkError: If the request to fetch the conversation fails.\n\nReturns:\n:class:`.Conversation` with matching ID.", "source": "codesearchnet"} {"code": "def graph(self, node_source, edge_source, layout_provider, **kwargs):\n kw = _graph(node_source, edge_source, **kwargs)\n graph_renderer = GraphRenderer(layout_provider=layout_provider, **kw)\n self.renderers.append(graph_renderer)\n return graph_renderer", "docstring": "Creates a network graph using the given node, edge and layout provider.\n\nArgs:\nnode_source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source\nfor the graph nodes. An attempt will be made to convert the object to\n:class:`~bokeh.models.sources.ColumnDataSource` if needed. If none is supplied, one is created\nfor the user automatically.\n\nedge_source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source\nfor the graph edges. An attempt will be made to convert the object to\n:class:`~bokeh.models.sources.ColumnDataSource` if needed. 
If none is supplied, one is created\nfor the user automatically.\n\nlayout_provider (:class:`~bokeh.models.graphs.LayoutProvider`) : a ``LayoutProvider`` instance to\nprovide the graph coordinates in Cartesian space.\n\n**kwargs: :ref:`userguide_styling_line_properties` and :ref:`userguide_styling_fill_properties`", "source": "codesearchnet"} {"code": "def CheckConversion(self, data_old, data_expected):\n converter = upgrade_schema_lib.Converter()\n with tempfile.NamedTemporaryFile(suffix='.json', mode='w+') as in_json, tempfile.NamedTemporaryFile(suffix='.json', mode='w+') as out_json, tempfile.NamedTemporaryFile(suffix='.bin', mode='w+b') as out_bin, tempfile.NamedTemporaryFile(suffix='.tflite', mode='w+b') as out_tflite:\n JsonDumpAndFlush(data_old, in_json)\n converter.Convert(in_json.name, out_json.name)\n converter.Convert(in_json.name, out_tflite.name)\n converter.Convert(out_tflite.name, out_bin.name)\n self.assertEqual(open(out_bin.name, 'rb').read(), open(out_tflite.name, 'rb').read())\n converted_schema = json.load(out_json)\n self.assertEqual(converted_schema, data_expected)", "docstring": "Given a data dictionary, test upgrading to current version.\n\nArgs:\ndata_old: TFLite model as a dictionary (arbitrary version).\ndata_expected: TFLite model as a dictionary (upgraded).", "source": "github-repos"} {"code": "def estimate_univariate_ess_standard_error(chain, batch_size_generator=None, compute_method=None):\n sigma = ((monte_carlo_standard_error(chain, batch_size_generator=batch_size_generator, compute_method=compute_method) ** 2) * len(chain))\n lambda_ = np.var(chain, dtype=np.float64)\n return (len(chain) * (lambda_ / sigma))", "docstring": "r\"\"\"Compute the univariate ESS using the standard error method.\n\nThis computes the ESS using:\n\n.. math::\n\nESS(X) = n * \\frac{\\lambda^{2}}{\\sigma^{2}}\n\nWhere :math:`\\lambda` is the standard deviation of the chain and :math:`\\sigma` is estimated using the monte carlo\nstandard error (which in turn is, by default, estimated using a batch means estimator).\n\nArgs:\nchain (ndarray): the Markov chain\nbatch_size_generator (UniVariateESSBatchSizeGenerator): the method that generates that batch sizes\nwe will use. 
Per default it uses the :class:`SquareRootSingleBatch` method.\ncompute_method (ComputeMonteCarloStandardError): the method used to compute the standard error.\nBy default we will use the :class:`BatchMeansMCSE` method\n\nReturns:\nfloat: the estimated ESS", "source": "codesearchnet"} {"code": "def get_room_id(self, room_alias):\n content = self._send('GET', '/directory/room/{}'.format(quote(room_alias)))\n return content.get('room_id', None)", "docstring": "Get room id from its alias.\n\nArgs:\nroom_alias (str): The room alias name.\n\nReturns:\nWanted room's id.", "source": "codesearchnet"} {"code": "def do_batch_status(args):\n rest_client = RestClient(args.url, args.user)\n batch_ids = args.batch_ids.split(',')\n if (args.wait and (args.wait > 0)):\n statuses = rest_client.get_statuses(batch_ids, args.wait)\n else:\n statuses = rest_client.get_statuses(batch_ids)\n if (args.format == 'yaml'):\n fmt.print_yaml(statuses)\n elif (args.format == 'json'):\n fmt.print_json(statuses)\n else:\n raise AssertionError('Missing handler: {}'.format(args.format))", "docstring": "Runs the batch-status command, printing output to the console\n\nArgs:\nargs: The parsed arguments sent to the command at runtime", "source": "codesearchnet"} {"code": "def __init__(self, obj, methods):\n \n self._unproxied_object = obj\n self._methods = methods", "docstring": "Initialize the MongoReconnectProxy.\n\nArgs:\nobj: The object for which all calls should be wrapped in the AutoReconnect\nexception handling block.\nmethods (set): The list of method names that should be wrapped.", "source": "juraj-google-style"} {"code": "def pretty_conjunction(conjunction):\n if not conjunction:\n return 'true'\n elif len(conjunction) == 1:\n return conjunction[0]\n else:\n return '(' + ' & '.join(conjunction) + ')'", "docstring": "Pretty-print a conjunction. Use parentheses as necessary.\n\nE.g. [\"a\", \"b\"] -> \"(a & b)\"\n\nArgs:\nconjunction: List of strings.\n\nReturns:\nA pretty-printed string.", "source": "github-repos"} {"code": "def AddAttribute(self, attribute, value=None, age=None):\n \n if \"w\" not in self.mode:\n raise IOError(\"Writing attribute %s to read only object.\" % attribute)\n\n if value is None:\n value = attribute\n attribute = value.attribute_instance\n\n \n \n if self.mode != \"w\" and attribute.lock_protected and not self.transaction:\n raise IOError(\"Object must be locked to write attribute %s.\" % attribute)\n\n self._CheckAttribute(attribute, value)\n \n if attribute.versioned:\n if attribute.creates_new_object_version:\n self._new_version = True\n\n \n if age:\n value.age = age\n else:\n value.age = rdfvalue.RDFDatetime.Now()\n\n \n \n else:\n self._to_delete.add(attribute)\n self.synced_attributes.pop(attribute, None)\n self.new_attributes.pop(attribute, None)\n value.age = 0\n\n self._AddAttributeToCache(attribute, value, self.new_attributes)\n self._dirty = True", "docstring": "Add an additional attribute to this object.\n\nIf value is None, attribute is expected to be already initialized with a\nvalue. For example:\n\nfd.AddAttribute(fd.Schema.CONTAINS(\"some data\"))\n\nArgs:\nattribute: The attribute name or an RDFValue derived from the attribute.\nvalue: The value the attribute will be set to.\nage: Age (timestamp) of the attribute. 
If None, current time is used.\n\nRaises:\nIOError: If this object is read only.", "source": "juraj-google-style"} {"code": "def register(self, identified_with, identifier, user):\n \n self.kv_store.set(\n self._get_storage_key(identified_with, identifier),\n self.serialization.dumps(user).encode(),\n )", "docstring": "Register new key for given client identifier.\n\nThis is only a helper method that allows to register new\nuser objects for client identities (keys, tokens, addresses etc.).\n\nArgs:\nidentified_with (object): authentication middleware used\nto identify the user.\nidentifier (str): user identifier.\nuser (str): user object to be stored in the backend.", "source": "juraj-google-style"} {"code": "def apply_pending(self, panel_obj, version):\n \n\n updates = {}\n new_panel = deepcopy(panel_obj)\n new_panel['pending'] = []\n new_panel['date'] = dt.datetime.now()\n info_fields = ['disease_associated_transcripts', 'inheritance_models', 'reduced_penetrance',\n 'mosaicism', 'database_entry_version', 'comment']\n new_genes = []\n\n for update in panel_obj.get('pending', []):\n hgnc_id = update['hgnc_id']\n\n \n if update['action'] != 'add':\n updates[hgnc_id] = update\n continue\n info = update.get('info', {})\n gene_obj = {\n 'hgnc_id': hgnc_id,\n 'symbol': update['symbol']\n }\n\n for field in info_fields:\n if field in info:\n gene_obj[field] = info[field]\n new_genes.append(gene_obj)\n\n for gene in panel_obj['genes']:\n hgnc_id = gene['hgnc_id']\n\n if hgnc_id not in updates:\n new_genes.append(gene)\n continue\n\n current_update = updates[hgnc_id]\n action = current_update['action']\n info = current_update['info']\n\n \n if action == 'delete':\n continue\n\n elif action == 'edit':\n for field in info_fields:\n if field in info:\n gene[field] = info[field]\n new_genes.append(gene)\n\n new_panel['genes'] = new_genes\n new_panel['version'] = float(version)\n\n inserted_id = None\n \n if new_panel['version'] == panel_obj['version']:\n \n result = self.panel_collection.find_one_and_replace(\n {'_id':panel_obj['_id']},\n new_panel,\n return_document=pymongo.ReturnDocument.AFTER\n )\n inserted_id = result['_id']\n else: \n new_panel.pop('_id')\n\n \n panel_obj['is_archived'] = True\n self.update_panel(panel_obj=panel_obj, date_obj=panel_obj['date'])\n\n \n inserted_id = self.panel_collection.insert_one(new_panel).inserted_id\n\n return inserted_id", "docstring": "Apply the pending changes to an existing gene panel or create a new version of the same panel.\n\nArgs:\npanel_obj(dict): panel in database to update\nversion(double): panel version to update\n\nReturns:\ninserted_id(str): id of updated panel or the new one", "source": "juraj-google-style"} {"code": "def add_block(self, block):\n \n self[-1]['__blocks__'].append(block)\n self[-1]['__names__'].append(block.raw())", "docstring": "Add block element to scope\nArgs:\nblock (Block): Block object", "source": "juraj-google-style"} {"code": "def to_tensors(self, value: Any) -> List[core.Tensor]:\n del value\n return []", "docstring": "Breaks down a value of this type into Tensors.\n\nFor a TraceType instance, the number of tensors generated for corresponding\nvalue should be constant.\n\nArgs:\nvalue: A value belonging to this TraceType\n\nReturns:\nList of Tensors.", "source": "github-repos"} {"code": "def setdefault(msg_or_dict, key, value):\n if (not get(msg_or_dict, key, default=None)):\n set(msg_or_dict, key, value)", "docstring": "Set the key on a protobuf Message or dictionary to a given value if the\ncurrent value is 
falsy.\n\nBecause protobuf Messages do not distinguish between unset values and\nfalsy ones particularly well (by design), this method treats any falsy\nvalue (e.g. 0, empty list) as a target to be overwritten, on both Messages\nand dictionaries.\n\nArgs:\nmsg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the\nobject.\nkey (str): The key on the object in question.\nvalue (Any): The value to set.\n\nRaises:\nTypeError: If ``msg_or_dict`` is not a Message or dictionary.", "source": "codesearchnet"} {"code": "def __init__(self, optimizer, scope='meta-optimizer', summary_labels=(), **kwargs):\n \n self.optimizer = Optimizer.from_spec(spec=optimizer, kwargs=kwargs)\n\n super(MetaOptimizer, self).__init__(scope=scope, summary_labels=summary_labels)", "docstring": "Creates a new meta optimizer instance.\n\nArgs:\noptimizer: The optimizer which is modified by this meta optimizer.", "source": "juraj-google-style"} {"code": "def insert(table, columns, values):\n rows = len(values)\n cells = len(columns) * len(values)\n return _Mutator(mutation=Mutation(insert=batch._make_write_pb(table, columns, values)), operation=WriteMutation._OPERATION_INSERT, rows=rows, cells=cells, kwargs={'table': table, 'columns': columns, 'values': values})", "docstring": "Insert one or more new table rows.\n\nArgs:\ntable: Name of the table to be modified.\ncolumns: Name of the table columns to be modified.\nvalues: Values to be modified.", "source": "github-repos"} {"code": "def fprime(self, w, *args):\n \n\n x0 = args[0]\n x1 = args[1]\n\n n0 = x0.shape[0]\n n1 = x1.shape[0]\n\n \n n = max(n0, n1) * 10\n idx0 = np.random.choice(range(n0), size=n)\n idx1 = np.random.choice(range(n1), size=n)\n\n \n b = np.ones((n, 1))\n i1 = self.i + 1\n h = self.h\n h1 = h + 1\n\n w2 = w[-h1:].reshape(h1, 1)\n w1 = w[:-h1].reshape(i1, h)\n\n if sparse.issparse(x0):\n x0 = x0.tocsr()[idx0]\n x1 = x1.tocsr()[idx1]\n xb0 = sparse.hstack((x0, b))\n xb1 = sparse.hstack((x1, b))\n else:\n x0 = x0[idx0]\n x1 = x1[idx1]\n xb0 = np.hstack((x0, b))\n xb1 = np.hstack((x1, b))\n\n z0 = np.hstack((sigm(xb0.dot(w1)), b))\n z1 = np.hstack((sigm(xb1.dot(w1)), b))\n y0 = z0.dot(w2)\n y1 = z1.dot(w2)\n\n \n \n e = 1 - (y1 - y0)\n dy = e / n\n\n \n \n \n dw1 = -(xb1.T.dot(dy.dot(w2[:-1].reshape(1, h)) * dsigm(xb1.dot(w1))) -\n xb0.T.dot(dy.dot(w2[:-1].reshape(1, h)) * dsigm(xb0.dot(w1)))\n ).reshape(i1 * h) + self.l1 * w[:-h1] / (i1 * h)\n dw2 = -(z1 - z0).T.dot(dy).reshape(h1) + self.l2 * w[-h1:] / h1\n\n return np.append(dw1, dw2)", "docstring": "Return the derivatives of the cost function for predictions.\n\nArgs:\nw (array of float): weight vectors such that:\nw[:-h1] -- weights between the input and h layers\nw[-h1:] -- weights between the h and output layers\nargs: features (args[0]) and target (args[1])\n\nReturns:\ngradients of the cost function for predictions", "source": "juraj-google-style"} {"code": "def convert_tokens_to_ids(self, tokens: Union[str, Iterable[str]]) -> Union[int, list[int]]:\n if isinstance(tokens, str):\n return self._convert_token_to_id_with_added_voc(tokens)\n return [self._convert_token_to_id_with_added_voc(token) for token in tokens]", "docstring": "Converts a token string (or a sequence of tokens) in a single integer id (or a Iterable of ids), using the\nvocabulary.\n\nArgs:\ntokens (`str` or `Iterable[str]`): One or several token(s) to convert to token id(s).\n\nReturns:\n`int` or `List[int]`: The token id or list of token ids.", "source": "github-repos"} {"code": "def _add_wire(self, wire):\n if (wire not 
in self.wires):\n self.wires.append(wire)\n self._max_node_id += 1\n input_map_wire = self.input_map[wire] = self._max_node_id\n self._max_node_id += 1\n output_map_wire = self._max_node_id\n wire_name = ('%s[%s]' % (wire[0].name, wire[1]))\n inp_node = DAGNode(data_dict={'type': 'in', 'name': wire_name, 'wire': wire}, nid=input_map_wire)\n outp_node = DAGNode(data_dict={'type': 'out', 'name': wire_name, 'wire': wire}, nid=output_map_wire)\n self._id_to_node[input_map_wire] = inp_node\n self._id_to_node[output_map_wire] = outp_node\n self.input_map[wire] = inp_node\n self.output_map[wire] = outp_node\n self._multi_graph.add_node(inp_node)\n self._multi_graph.add_node(outp_node)\n self._multi_graph.add_edge(inp_node, outp_node)\n self._multi_graph.adj[inp_node][outp_node][0]['name'] = ('%s[%s]' % (wire[0].name, wire[1]))\n self._multi_graph.adj[inp_node][outp_node][0]['wire'] = wire\n else:\n raise DAGCircuitError(('duplicate wire %s' % (wire,)))", "docstring": "Add a qubit or bit to the circuit.\n\nArgs:\nwire (tuple): (Register,int) containing a register instance and index\nThis adds a pair of in and out nodes connected by an edge.\n\nRaises:\nDAGCircuitError: if trying to add duplicate wire", "source": "codesearchnet"} {"code": "def __init__(self,\n exclude_module=None,\n exclude_class=None,\n exclude_function=None,\n nested_class=False,\n missing_doc=True):\n \n self.exclude_module = exclude_module\n self.exclude_class = exclude_class\n self.exclude_function = exclude_function\n self.nested_class = nested_class\n self.missing_doc = missing_doc", "docstring": "Init method.\n\nArgs:\nexclude_module (list): list of Ex instances.\nexclude_class (list): list of Ex instances.\nexclude_function (list): list of Ex instances.\nnested_class (bool): whether to get nested classes in classes.\nmissing_doc (bool): whether to get doc even when empty.", "source": "juraj-google-style"} {"code": "def extract_archive(archive_path, dest):\n \n \n if not os.path.isdir(dest):\n os.makedirs(dest)\n\n try:\n tmpfolder = None\n\n if (not tf.gfile.Exists(archive_path)) or tf.gfile.IsDirectory(archive_path):\n raise ValueError('archive path %s is not a file' % archive_path)\n\n if archive_path.startswith('gs:\n \n tmpfolder = tempfile.mkdtemp()\n cmd_args = ['gsutil', 'cp', archive_path, tmpfolder]\n _shell_process.run_and_monitor(cmd_args, os.getpid())\n archive_path = os.path.join(tmpfolder, os.path.name(archive_path))\n\n if archive_path.lower().endswith('.tar.gz'):\n flags = '-xzf'\n elif archive_path.lower().endswith('.tar'):\n flags = '-xf'\n else:\n raise ValueError('Only tar.gz or tar.Z files are supported.')\n\n cmd_args = ['tar', flags, archive_path, '-C', dest]\n _shell_process.run_and_monitor(cmd_args, os.getpid())\n finally:\n if tmpfolder:\n shutil.rmtree(tmpfolder)", "docstring": "Extract a local or GCS archive file to a folder.\n\nArgs:\narchive_path: local or gcs path to a *.tar.gz or *.tar file\ndest: local folder the archive will be extracted to", "source": "juraj-google-style"} {"code": "def _transform_feature(self, inputs):\n id_weight_pair = self.categorical_column._get_sparse_tensors(inputs)\n id_tensor = id_weight_pair.id_tensor\n weight_tensor = id_weight_pair.weight_tensor\n if weight_tensor is not None:\n weighted_column = sparse_ops.sparse_merge(sp_ids=id_tensor, sp_values=weight_tensor, vocab_size=int(self._variable_shape[-1]))\n weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0], weighted_column.dense_shape)\n return array_ops.scatter_nd(weighted_column.indices, 
weighted_column.values, weighted_column.dense_shape)\n dense_id_tensor = sparse_ops.sparse_tensor_to_dense(id_tensor, default_value=-1)\n one_hot_id_tensor = array_ops.one_hot(dense_id_tensor, depth=self._variable_shape[-1], on_value=1.0, off_value=0.0)\n return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])", "docstring": "Returns dense `Tensor` representing feature.\n\nArgs:\ninputs: A `_LazyBuilder` object to access inputs.\n\nReturns:\nTransformed feature `Tensor`.\n\nRaises:\nValueError: if input rank is not known at graph building time.", "source": "github-repos"} {"code": "def from_string(cls, string):\n \n lines = string.split(\"\\n\")\n timestep = int(lines[1])\n natoms = int(lines[3])\n box_arr = np.loadtxt(StringIO(\"\\n\".join(lines[5:8])))\n bounds = box_arr[:, :2]\n tilt = None\n if \"xy xz yz\" in lines[4]:\n tilt = box_arr[:, 2]\n x = (0, tilt[0], tilt[1], tilt[0] + tilt[1])\n y = (0, tilt[2])\n bounds -= np.array([[min(x), max(x)], [min(y), max(y)], [0, 0]])\n box = LammpsBox(bounds, tilt)\n data_head = lines[8].replace(\"ITEM: ATOMS\", \"\").split()\n data = pd.read_csv(StringIO(\"\\n\".join(lines[9:])), names=data_head,\n delim_whitespace=True)\n return cls(timestep, natoms, box, data)", "docstring": "Constructor from string parsing.\n\nArgs:\nstring (str): Input string.", "source": "juraj-google-style"} {"code": "def facade(projectmainfn, **kwargs):\n site_url = Configuration._create(**kwargs)\n logger.info('--------------------------------------------------')\n logger.info(('> Using HDX Python API Library %s' % Configuration.apiversion))\n logger.info(('> HDX Site: %s' % site_url))\n UserAgent.user_agent = Configuration.read().user_agent\n projectmainfn()", "docstring": "Facade to simplify project setup that calls project main function\n\nArgs:\nprojectmainfn ((None) -> None): main function of project\n**kwargs: configuration parameters to pass to HDX Configuration class\n\nReturns:\nNone", "source": "codesearchnet"} {"code": "def model_loader(gem_file_path, gem_file_type):\n \n\n if gem_file_type.lower() == 'xml' or gem_file_type.lower() == 'sbml':\n model = read_sbml_model(gem_file_path)\n elif gem_file_type.lower() == 'mat':\n model = load_matlab_model(gem_file_path)\n elif gem_file_type.lower() == 'json':\n model = load_json_model(gem_file_path)\n else:\n raise ValueError('File type must be \"sbml\", \"xml\", \"mat\", or \"json\".')\n\n return model", "docstring": "Consolidated function to load a GEM using COBRApy. 
Specify the file type being loaded.\n\nArgs:\ngem_file_path (str): Path to model file\ngem_file_type (str): GEM model type - ``sbml`` (or ``xml``), ``mat``, or ``json`` format\n\nReturns:\nCOBRApy Model object.", "source": "juraj-google-style"} {"code": "def extract_objects_from_source(self, text, type_filter=None):\n \n objects = parse_vhdl(text)\n self._register_array_types(objects)\n\n if type_filter:\n objects = [o for o in objects if isinstance(o, type_filter)]\n\n return objects", "docstring": "Extract object declarations from a text buffer\n\nArgs:\ntext (str): Source code to parse\ntype_filter (class, optional): Object class to filter results\nReturns:\nList of parsed objects.", "source": "juraj-google-style"} {"code": "def deserialize_ndarray(d):\n \n if 'data' in d:\n x = np.fromstring(\n base64.b64decode(d['data']),\n dtype=d['dtype'])\n x.shape = d['shape']\n return x\n elif 'value' in d:\n return np.array(d['value'], dtype=d['dtype'])\n elif 'npy' in d:\n return deserialize_ndarray_npy(d)\n else:\n raise ValueError('Malformed np.ndarray encoding.')", "docstring": "Deserializes a JSONified :obj:`numpy.ndarray`. Can handle arrays serialized\nusing any of the methods in this module: :obj:`\"npy\"`, :obj:`\"b64\"`,\n:obj:`\"readable\"`.\n\nArgs:\nd (`dict`): A dictionary representation of an :obj:`ndarray` object.\n\nReturns:\nAn :obj:`ndarray` object.", "source": "juraj-google-style"} {"code": "def _ws_on_error(self, ws: websocket.WebSocketApp, error: Exception):\n self.logger.error(f'Got error from websocket connection: {str(error)}')", "docstring": "Callback for receiving errors from the websocket connection\n\nArgs:\nws: websocket connection\nerror: exception raised", "source": "codesearchnet"} {"code": "def add_site_property(self, property_name, values):\n if (len(values) != len(self.sites)):\n raise ValueError('Values must be same length as sites.')\n for (site, val) in zip(self.sites, values):\n site.properties[property_name] = val", "docstring": "Adds a property to a site.\n\nArgs:\nproperty_name (str): The name of the property to add.\nvalues (list): A sequence of values. 
Must be same length as\nnumber of sites.", "source": "codesearchnet"} {"code": "def _tokens_to_subtoken_ids(self, tokens):\n ret = []\n for token in tokens:\n ret.extend(self._token_to_subtoken_ids(token))\n return ret", "docstring": "Converts a list of tokens to a list of subtoken ids.\n\nArgs:\ntokens: a list of strings.\nReturns:\na list of integers in the range [0, vocab_size)", "source": "codesearchnet"} {"code": "def AddDir(self, dirpath):\n \n if dirpath not in self._dirs:\n self._dirs.add(dirpath)\n return True\n return False", "docstring": "Adds a directory path as a source.\n\nArgs:\ndirpath: a string representing a path to the directory.\n\nReturns:\nTrue if the directory is not an already existing source.", "source": "juraj-google-style"} {"code": "def send(self, request):\n \n self._connection.connection.rpush(self._request_key, pickle.dumps(request))\n resp_key = '{}:{}'.format(SIGNAL_REDIS_PREFIX, request.uid)\n\n while True:\n if self._connection.polling_time > 0.0:\n sleep(self._connection.polling_time)\n\n response_data = self._connection.connection.get(resp_key)\n if response_data is not None:\n self._connection.connection.delete(resp_key)\n break\n\n return pickle.loads(response_data)", "docstring": "Send a request to the server and wait for its response.\n\nArgs:\nrequest (Request): Reference to a request object that is sent to the server.\n\nReturns:\nResponse: The response from the server to the request.", "source": "juraj-google-style"} {"code": "def IsHuntStarted(self):\n state = self.hunt_obj.Get(self.hunt_obj.Schema.STATE)\n if (state != 'STARTED'):\n return False\n if self.CheckExpiry():\n return False\n return True", "docstring": "Is this hunt considered started?\n\nThis method is used to check if new clients should be processed by\nthis hunt. Note that child flow responses are always processed but\nnew clients are not allowed to be scheduled unless the hunt is\nstarted.\n\nReturns:\nIf a new client is allowed to be scheduled on this hunt.", "source": "codesearchnet"} {"code": "class EncodecOutput(ModelOutput):\n audio_codes: Optional[torch.LongTensor] = None\n audio_values: Optional[torch.FloatTensor] = None", "docstring": "Args:\naudio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):\nDiscret code embeddings computed using `model.encode`.\naudio_values (`torch.FlaotTensor` of shape `(batch_size, sequence_length)`, *optional*)\nDecoded audio values, obtained using the decoder part of Encodec.", "source": "github-repos"} {"code": "def to_dc(data):\n root = odict['metadata':odict[('@xmlns:xsi':'http:\n for (key, val) in _convert_metadata(_remove_none(data)).iteritems():\n if (val is None):\n continue\n if (isinstance(val, basestring) and (not val.strip())):\n continue\n if isinstance(val, str):\n val = val.decode('utf-8')\n root['metadata'][key] = val\n return unparse(root, pretty=True, indent=' ')", "docstring": "Convert WA-KAT `data` to Dublin core XML.\n\nArgs:\ndata (dict): Nested WA-KAT data. 
See tests for example.\n\nReturns:\nunicode: XML with dublin core.", "source": "codesearchnet"} {"code": "def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:\n outputs = self.groupvit(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n return outputs", "docstring": "Returns:\n\nExamples:\n\n```python\n>>> from transformers import CLIPTokenizer, TFGroupViTTextModel\n\n>>> tokenizer = CLIPTokenizer.from_pretrained(\"nvidia/groupvit-gcc-yfcc\")\n>>> model = TFGroupViTTextModel.from_pretrained(\"nvidia/groupvit-gcc-yfcc\")\n\n>>> inputs = tokenizer([\"a photo of a cat\", \"a photo of a dog\"], padding=True, return_tensors=\"tf\")\n\n>>> outputs = model(**inputs)\n>>> last_hidden_state = outputs.last_hidden_state\n>>> pooled_output = outputs.pooler_output # pooled (EOS token) states\n```", "source": "github-repos"} {"code": "def create_commit(profile, message, tree, parents):\n resource = '/commits'\n payload = {'message': message, 'tree': tree, 'parents': parents}\n data = api.post_request(profile, resource, payload)\n return prepare(data)", "docstring": "Create a commit.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nmessage\nThe commit message to give to the commit.\n\ntree\nThe SHA of the tree to assign to the commit.\n\nparents\nA list enumerating the SHAs of the new commit's parent commits.\n\nReturns:\nA dict with data about the commit.", "source": "codesearchnet"} {"code": "def add_fileobj(self, fileobj, path, compress, flags=None):\n \n f = file_iter(fileobj)\n flags = flags or os.stat(path) & 0o777\n return self.add_stream(f, path, compress, flags)", "docstring": "Add the contents of a file object to the MAR file.\n\nArgs:\nfileobj (file-like object): open file object\npath (str): name of this file in the MAR file\ncompress (str): One of 'xz', 'bz2', or None. Defaults to None.\nflags (int): permission of this file in the MAR file. 
Defaults to the permissions of `path`", "source": "juraj-google-style"} {"code": "def do_scan_trigger(sk, if_index, driver_id, mcid):\n _LOGGER.debug('Joining group %d.', mcid)\n ret = nl_socket_add_membership(sk, mcid)\n if (ret < 0):\n return ret\n msg = nlmsg_alloc()\n genlmsg_put(msg, 0, 0, driver_id, 0, 0, nl80211.NL80211_CMD_TRIGGER_SCAN, 0)\n nla_put_u32(msg, nl80211.NL80211_ATTR_IFINDEX, if_index)\n ssids_to_scan = nlmsg_alloc()\n nla_put(ssids_to_scan, 1, 0, b'')\n nla_put_nested(msg, nl80211.NL80211_ATTR_SCAN_SSIDS, ssids_to_scan)\n err = ctypes.c_int(1)\n results = ctypes.c_int((- 1))\n cb = libnl.handlers.nl_cb_alloc(libnl.handlers.NL_CB_DEFAULT)\n libnl.handlers.nl_cb_set(cb, libnl.handlers.NL_CB_VALID, libnl.handlers.NL_CB_CUSTOM, callback_trigger, results)\n libnl.handlers.nl_cb_err(cb, libnl.handlers.NL_CB_CUSTOM, error_handler, err)\n libnl.handlers.nl_cb_set(cb, libnl.handlers.NL_CB_ACK, libnl.handlers.NL_CB_CUSTOM, ack_handler, err)\n libnl.handlers.nl_cb_set(cb, libnl.handlers.NL_CB_SEQ_CHECK, libnl.handlers.NL_CB_CUSTOM, (lambda *_: libnl.handlers.NL_OK), None)\n _LOGGER.debug('Sending NL80211_CMD_TRIGGER_SCAN...')\n ret = nl_send_auto(sk, msg)\n if (ret < 0):\n return ret\n while (err.value > 0):\n _LOGGER.debug('Retrieving NL80211_CMD_TRIGGER_SCAN acknowledgement...')\n ret = nl_recvmsgs(sk, cb)\n if (ret < 0):\n return ret\n if (err.value < 0):\n error('Unknown error {0} ({1})'.format(err.value, errmsg[abs(err.value)]))\n while (results.value < 0):\n _LOGGER.debug('Retrieving NL80211_CMD_TRIGGER_SCAN final response...')\n ret = nl_recvmsgs(sk, cb)\n if (ret < 0):\n return ret\n if (results.value > 0):\n error('The kernel aborted the scan.')\n _LOGGER.debug('Leaving group %d.', mcid)\n return nl_socket_drop_membership(sk, mcid)", "docstring": "Issue a scan request to the kernel and wait for it to reply with a signal.\n\nThis function issues NL80211_CMD_TRIGGER_SCAN which requires root privileges.\n\nThe way NL80211 works is first you issue NL80211_CMD_TRIGGER_SCAN and wait for the kernel to signal that the scan is\ndone. When that signal occurs, data is not yet available. The signal tells us if the scan was aborted or if it was\nsuccessful (if new scan results are waiting). 
This function handles that simple signal.\n\nMay exit the program (sys.exit()) if a fatal error occurs.\n\nPositional arguments:\nsk -- nl_sock class instance (from nl_socket_alloc()).\nif_index -- interface index (integer).\ndriver_id -- nl80211 driver ID from genl_ctrl_resolve() (integer).\nmcid -- nl80211 scanning group ID from genl_ctrl_resolve_grp() (integer).\n\nReturns:\n0 on success or a negative error code.", "source": "codesearchnet"} {"code": "def _from_list_record(data):\n return [Schema._get_field_entry(('Column%d' % (i + 1)), value) for (i, value) in enumerate(data)]", "docstring": "Infer a BigQuery table schema from a list of values.\n\nArgs:\ndata: The list of values.\nReturns:\nA list of dictionaries containing field 'name' and 'type' entries, suitable for use in a\nBigQuery Tables resource schema.", "source": "codesearchnet"} {"code": "def BuildArtifactsRegistry(cls, artifact_definitions_path, custom_artifacts_path):\n if (artifact_definitions_path and (not os.path.isdir(artifact_definitions_path))):\n raise errors.BadConfigOption('No such artifacts filter file: {0:s}.'.format(artifact_definitions_path))\n if (custom_artifacts_path and (not os.path.isfile(custom_artifacts_path))):\n raise errors.BadConfigOption('No such artifacts filter file: {0:s}.'.format(custom_artifacts_path))\n registry = artifacts_registry.ArtifactDefinitionsRegistry()\n reader = artifacts_reader.YamlArtifactsReader()\n try:\n registry.ReadFromDirectory(reader, artifact_definitions_path)\n except (KeyError, artifacts_errors.FormatError) as exception:\n raise errors.BadConfigOption('Unable to read artifact definitions from: {0:s} with error: {1!s}'.format(artifact_definitions_path, exception))\n if custom_artifacts_path:\n try:\n registry.ReadFromFile(reader, custom_artifacts_path)\n except (KeyError, artifacts_errors.FormatError) as exception:\n raise errors.BadConfigOption('Unable to read artifact definitions from: {0:s} with error: {1!s}'.format(custom_artifacts_path, exception))\n return registry", "docstring": "Build Find Specs from artifacts or filter file if available.\n\nArgs:\nartifact_definitions_path (str): path to artifact definitions file.\ncustom_artifacts_path (str): path to custom artifact definitions file.\n\nReturns:\nartifacts.ArtifactDefinitionsRegistry: artifact definitions registry.\n\nRaises:\nRuntimeError: if no valid FindSpecs are built.", "source": "codesearchnet"} {"code": "def get_input_shape_at(self, node_index):\n return self._get_node_attribute_at_index(node_index, 'input_shapes', 'input shape')", "docstring": "Retrieves the input shape(s) of a layer at a given node.\n\nArgs:\nnode_index: Integer, index of the node\nfrom which to retrieve the attribute.\nE.g. 
`node_index=0` will correspond to the\nfirst time the layer was called.\n\nReturns:\nA shape tuple\n(or list of shape tuples if the layer has multiple inputs).\n\nRaises:\nRuntimeError: If called in Eager mode.", "source": "github-repos"} {"code": "def _prepare_4d_attention_mask_for_sdpa(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int]=None):\n _, key_value_length = mask.shape\n tgt_len = tgt_len if tgt_len is not None else key_value_length\n is_tracing = torch.jit.is_tracing() or isinstance(mask, torch.fx.Proxy) or is_torchdynamo_compiling()\n if not is_tracing and torch.all(mask == 1):\n return None\n else:\n return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)", "docstring": "Creates a non-causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n`(batch_size, key_value_length)`\n\nArgs:\nmask (`torch.Tensor`):\nA 2D attention mask of shape `(batch_size, key_value_length)`\ndtype (`torch.dtype`):\nThe torch dtype the created mask shall have.\ntgt_len (`int`):\nThe target length or query length the created mask shall have.", "source": "github-repos"} {"code": "def length(self):\n\n def ProcessContentRange(content_range):\n (_, _, range_spec) = content_range.partition(' ')\n (byte_range, _, _) = range_spec.partition('/')\n (start, _, end) = byte_range.partition('-')\n return ((int(end) - int(start)) + 1)\n if (('-content-encoding' in self.info) and ('content-range' in self.info)):\n return ProcessContentRange(self.info['content-range'])\n elif ('content-length' in self.info):\n return int(self.info.get('content-length'))\n elif ('content-range' in self.info):\n return ProcessContentRange(self.info['content-range'])\n return len(self.content)", "docstring": "Return the length of this response.\n\nWe expose this as an attribute since using len() directly can fail\nfor responses larger than sys.maxint.\n\nReturns:\nResponse length (as int or long)", "source": "codesearchnet"} {"code": "def _get_checksum():\n digest = hashlib.sha256()\n with open(RPM_PATH, 'rb') as rpm_db_fh:\n while True:\n buff = rpm_db_fh.read(4096)\n if (not buff):\n break\n digest.update(buff)\n return digest.hexdigest()", "docstring": "Get the checksum of the RPM Database.\n\nReturns:\nhexdigest", "source": "codesearchnet"} {"code": "def get_open_file(self, file_des):\n \n if not is_int_type(file_des):\n raise TypeError('an integer is required')\n if (file_des >= len(self.open_files) or\n self.open_files[file_des] is None):\n self.raise_os_error(errno.EBADF, str(file_des))\n return self.open_files[file_des][0]", "docstring": "Return an open file.\n\nArgs:\nfile_des: File descriptor of the open file.\n\nRaises:\nOSError: an invalid file descriptor.\nTypeError: filedes is not an integer.\n\nReturns:\nOpen file object.", "source": "juraj-google-style"} {"code": "class MMBTConfig:\n\n def __init__(self, config, num_labels=None, modal_hidden_size=2048):\n self.__dict__ = config.__dict__\n self.modal_hidden_size = modal_hidden_size\n if num_labels:\n self.num_labels = num_labels", "docstring": "This is the configuration class to store the configuration of a [`MMBTModel`]. It is used to instantiate a MMBT\nmodel according to the specified arguments, defining the model architecture.\n\nArgs:\nconfig ([`PreTrainedConfig`]):\nConfig of the underlying Transformer models. 
Its values are copied over to use a single config.\nnum_labels (`int`, *optional*):\nSize of final Linear layer for classification.\nmodal_hidden_size (`int`, *optional*, defaults to 2048):\nEmbedding dimension of the non-text modality encoder.", "source": "github-repos"} {"code": "def _restore_and_log_checkpoint(self, actor):\n \n actor_id = self._worker.actor_id\n try:\n checkpoints = ray.actor.get_checkpoints_for_actor(actor_id)\n if len(checkpoints) > 0:\n \n \n checkpoint_id = actor.load_checkpoint(actor_id, checkpoints)\n if checkpoint_id is not None:\n \n \n msg = (\n \"`load_checkpoint` must return a checkpoint id that \" +\n \"exists in the `available_checkpoints` list, or eone.\")\n assert any(checkpoint_id == checkpoint.checkpoint_id\n for checkpoint in checkpoints), msg\n \n \n (self._worker.raylet_client.\n notify_actor_resumed_from_checkpoint(\n actor_id, checkpoint_id))\n except Exception:\n \n traceback_str = ray.utils.format_error_message(\n traceback.format_exc())\n ray.utils.push_error_to_driver(\n self._worker,\n ray_constants.CHECKPOINT_PUSH_ERROR,\n traceback_str,\n driver_id=self._worker.task_driver_id)", "docstring": "Restore an actor from a checkpoint if available and log any errors.\n\nThis should only be called on workers that have just executed an actor\ncreation task.\n\nArgs:\nactor: The actor to restore from a checkpoint.", "source": "juraj-google-style"} {"code": "def GetPasswdMap(self, since=None):\n return PasswdUpdateGetter().GetUpdates(self, self.conf['passwd_url'], since)", "docstring": "Return the passwd map from this source.\n\nArgs:\nsince: Get data only changed since this timestamp (inclusive) or None\nfor all data.\n\nReturns:\ninstance of passwd.PasswdMap", "source": "github-repos"} {"code": "def evaluate(self, node: InstanceNode) -> XPathValue:\n return self._eval(XPathContext(node, node, 1, 1))", "docstring": "Evaluate the receiver and return the result.\n\nArgs:\nnode: Context node for XPath evaluation.\n\nRaises:\nXPathTypeError: If a subexpression of the receiver is of a wrong\ntype.", "source": "codesearchnet"} {"code": "def __init__(self, cookie_identifier):\n \n data_type = '{0:s}:{1:s}'.format(self.DATA_TYPE, cookie_identifier)\n\n super(GoogleAnalyticsEventData, self).__init__(data_type=data_type)\n self.cookie_name = None\n self.domain_hash = None\n self.pages_viewed = None\n self.sessions = None\n self.sources = None\n self.url = None\n self.visitor_id = None", "docstring": "Initializes event data.\n\nArgs:\ncookie_identifier (str): unique identifier of the cookie.", "source": "juraj-google-style"} {"code": "def wigner_data(q_result, meas_qubits, labels, shots=None):\n num = len(meas_qubits)\n dim = (2 ** num)\n p = [(0.5 + (0.5 * np.sqrt(3))), (0.5 - (0.5 * np.sqrt(3)))]\n parity = 1\n for i in range(num):\n parity = np.kron(parity, p)\n w = ([0] * len(labels))\n wpt = 0\n counts = [marginal_counts(q_result.get_counts(circ), meas_qubits) for circ in labels]\n for entry in counts:\n x = ([0] * dim)\n for i in range(dim):\n if (bin(i)[2:].zfill(num) in entry):\n x[i] = float(entry[bin(i)[2:].zfill(num)])\n if (shots is None):\n shots = np.sum(x)\n for i in range(dim):\n w[wpt] = (w[wpt] + ((x[i] / shots) * parity[i]))\n wpt += 1\n return w", "docstring": "Get the value of the Wigner function from measurement results.\n\nArgs:\nq_result (Result): Results from execution of a state tomography\ncircuits on a backend.\nmeas_qubits (list[int]): a list of the qubit indexes measured.\nlabels (list[str]): a list of names of the circuits\nshots 
(int): number of shots\n\nReturns:\nlist: The values of the Wigner function at measured points in\nphase space", "source": "codesearchnet"} {"code": "def scrape_bandcamp_url(url, num_tracks=sys.maxsize, folders=False, custom_path=''):\n filenames = []\n album_data = get_bandcamp_metadata(url)\n if (type(album_data) is list):\n for album_url in album_data:\n filenames.append(scrape_bandcamp_url(album_url, num_tracks, folders, custom_path))\n return filenames\n artist = album_data['artist']\n album_name = album_data['album_name']\n if folders:\n if album_name:\n directory = ((artist + ' - ') + album_name)\n else:\n directory = artist\n directory = sanitize_filename(directory)\n directory = join(custom_path, directory)\n if (not exists(directory)):\n mkdir(directory)\n for (i, track) in enumerate(album_data['trackinfo']):\n if (i > (num_tracks - 1)):\n continue\n try:\n track_name = track['title']\n if track['track_num']:\n track_number = str(track['track_num']).zfill(2)\n else:\n track_number = None\n if (track_number and folders):\n track_filename = ('%s - %s.mp3' % (track_number, track_name))\n else:\n track_filename = ('%s.mp3' % track_name)\n track_filename = sanitize_filename(track_filename)\n if folders:\n path = join(directory, track_filename)\n else:\n path = join(custom_path, ((sanitize_filename(artist) + ' - ') + track_filename))\n if exists(path):\n puts_safe((colored.yellow('Track already downloaded: ') + colored.white(track_name)))\n continue\n if (not track['file']):\n puts_safe((colored.yellow('Track unavailble for scraping: ') + colored.white(track_name)))\n continue\n puts_safe((colored.green('Downloading') + colored.white((': ' + track_name))))\n path = download_file(track['file']['mp3-128'], path)\n album_year = album_data['album_release_date']\n if album_year:\n album_year = datetime.strptime(album_year, '%d %b %Y %H:%M:%S GMT').year\n tag_file(path, artist, track_name, album=album_name, year=album_year, genre=album_data['genre'], artwork_url=album_data['artFullsizeUrl'], track_number=track_number, url=album_data['url'])\n filenames.append(path)\n except Exception as e:\n puts_safe((colored.red('Problem downloading ') + colored.white(track_name)))\n print(e)\n return filenames", "docstring": "Pull out artist and track info from a Bandcamp URL.\n\nReturns:\nlist: filenames to open", "source": "codesearchnet"} {"code": "def get_categorical_feature_names(example):\n features = get_example_features(example)\n return sorted([feature_name for feature_name in features if (features[feature_name].WhichOneof('kind') == 'bytes_list')])", "docstring": "Returns a list of feature names for byte type features.\n\nArgs:\nexample: An example.\n\nReturns:\nA list of categorical feature names (e.g. 
['education', 'marital_status'] )", "source": "codesearchnet"} {"code": "def validate(self, institute, case, user, link, variant, validate_type):\n if (not (validate_type in SANGER_OPTIONS)):\n LOG.warning('Invalid validation string: %s', validate_type)\n LOG.info('Validation options: %s', ', '.join(SANGER_OPTIONS))\n return\n updated_variant = self.variant_collection.find_one_and_update({'_id': variant['_id']}, {'$set': {'validation': validate_type}}, return_document=pymongo.ReturnDocument.AFTER)\n self.create_event(institute=institute, case=case, user=user, link=link, category='variant', verb='validate', variant=variant, subject=variant['display_name'])\n return updated_variant", "docstring": "Mark validation status for a variant.\n\nArguments:\ninstitute (dict): A Institute object\ncase (dict): Case object\nuser (dict): A User object\nlink (str): The url to be used in the event\nvariant (dict): A variant object\nvalidate_type(str): The outcome of validation.\nchoices=('True positive', 'False positive')\n\nReturns:\nupdated_variant(dict)", "source": "codesearchnet"} {"code": "def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):\n \n \n \n \n \n \n if (self._last_header > header_path and\n Match(r'^\\s*\n return False\n return True", "docstring": "Check if a header is in alphabetical order with the previous header.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nheader_path: Canonicalized header to be checked.\n\nReturns:\nReturns true if the header is in alphabetical order.", "source": "juraj-google-style"} {"code": "def __init__(self, coder=coders.FastPrimitivesCoder(), events=None, output_tags=None, endpoint=None):\n super().__init__()\n assert coder is not None\n self.coder = coder\n self.watermarks = {None: timestamp.MIN_TIMESTAMP}\n self.output_tags = set(output_tags) if output_tags else set()\n self._events = [] if events is None else list(events)\n self._endpoint = endpoint\n event_tags = set((e.tag for e in self._events if isinstance(e, (WatermarkEvent, ElementEvent))))\n assert event_tags.issubset(self.output_tags), '{} is not a subset of {}'.format(event_tags, output_tags)\n assert not (self._events and self._endpoint), 'Only either events or an endpoint can be given at once.'", "docstring": "Args:\ncoder: (apache_beam.Coder) the coder to encode/decode elements.\nevents: (List[Event]) a list of instructions for the TestStream to\nexecute. If specified, the events tags must exist in the output_tags.\noutput_tags: (List[str]) Initial set of outputs. 
If no event references an\noutput tag, no output will be produced for that tag.\nendpoint: (str) a URL locating a TestStreamService.", "source": "github-repos"} {"code": "def save_image(image_url, image_directory, image_name):\n image_type = get_image_type(image_url)\n if (image_type is None):\n raise ImageErrorException(image_url)\n full_image_file_name = os.path.join(image_directory, ((image_name + '.') + image_type))\n if os.path.exists(image_url):\n shutil.copy(image_url, full_image_file_name)\n return image_type\n try:\n with open(full_image_file_name, 'wb') as f:\n user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0'\n request_headers = {'User-Agent': user_agent}\n requests_object = requests.get(image_url, headers=request_headers)\n try:\n content = requests_object.content\n f.write(content)\n except AttributeError:\n raise ImageErrorException(image_url)\n except IOError:\n raise ImageErrorException(image_url)\n return image_type", "docstring": "Saves an online image from image_url to image_directory with the name image_name.\nReturns the extension of the image saved, which is determined dynamically.\n\nArgs:\nimage_url (str): The url of the image.\nimage_directory (str): The directory to save the image in.\nimage_name (str): The file name to save the image as.\n\nRaises:\nImageErrorException: Raised if unable to save the image at image_url", "source": "codesearchnet"} {"code": "def GetProcessedTaskByIdentifier(self, task_identifier):\n \n with self._lock:\n task = self._tasks_processing.get(task_identifier, None)\n if not task:\n task = self._tasks_queued.get(task_identifier, None)\n if not task:\n task = self._tasks_abandoned.get(task_identifier, None)\n if not task:\n raise KeyError('Status of task {0:s} is unknown'.format(\n task_identifier))\n\n return task", "docstring": "Retrieves a task that has been processed.\n\nArgs:\ntask_identifier (str): unique identifier of the task.\n\nReturns:\nTask: a task that has been processed.\n\nRaises:\nKeyError: if the task was not processing, queued or abandoned.", "source": "juraj-google-style"} {"code": "def delete(self, key):\n self._cur_batch.delete(key)\n self._num_mutations += 1\n if (self._num_mutations >= MAX_MUTATIONS_IN_BATCH):\n self.commit()\n self.begin()", "docstring": "Adds deletion of the entity with given key to the mutation buffer.\n\nIf mutation buffer reaches its capacity then this method commit all pending\nmutations from the buffer and emties it.\n\nArgs:\nkey: key of the entity which should be deleted", "source": "codesearchnet"} {"code": "def output_stream(self, instruction_id, transform_id):\n raise NotImplementedError(type(self))", "docstring": "Returns an output stream writing elements to transform_id.\n\nArgs:\ninstruction_id: which instruction this stream belongs to\ntransform_id: the transform_id of the returned stream", "source": "github-repos"} {"code": "def listup_sentence(self, data, counter=0):\n delimiter = self.delimiter_list[counter]\n sentence_list = []\n [sentence_list.append((sentence + delimiter)) for sentence in data.split(delimiter) if (sentence != '')]\n if ((counter + 1) < len(self.delimiter_list)):\n sentence_list_r = []\n [sentence_list_r.extend(self.listup_sentence(sentence, (counter + 1))) for sentence in sentence_list]\n sentence_list = sentence_list_r\n return sentence_list", "docstring": "Divide string into sentence list.\n\nArgs:\ndata: string.\ncounter: recursive counter.\n\nReturns:\nList of sentences.", "source": "codesearchnet"} {"code": "def 
_create_plugin(self, config):\n \n if config is None:\n raise ValueError('No plugin config to create plugin from.')\n\n name = config.pop('name', None)\n if name is None:\n raise(cfg.AitConfigMissing('plugin name'))\n\n \n \n module_name = name.rsplit('.', 1)[0]\n class_name = name.rsplit('.', 1)[-1]\n if class_name in [x.name for x in (self.outbound_streams +\n self.inbound_streams +\n self.servers +\n self.plugins)]:\n raise ValueError(\n 'Plugin \"{}\" already loaded. Only one plugin of a given name is allowed'.\n format(class_name)\n )\n\n plugin_inputs = config.pop('inputs', None)\n if plugin_inputs is None:\n log.warn('No plugin inputs specified for {}'.format(name))\n plugin_inputs = [ ]\n\n subscribers = config.pop('outputs', None)\n if subscribers is None:\n log.warn('No plugin outputs specified for {}'.format(name))\n subscribers = [ ]\n\n \n module = import_module(module_name)\n plugin_class = getattr(module, class_name)\n instance = plugin_class(plugin_inputs,\n subscribers,\n zmq_args={'zmq_context': self.broker.context,\n 'zmq_proxy_xsub_url': self.broker.XSUB_URL,\n 'zmq_proxy_xpub_url': self.broker.XPUB_URL},\n **config\n )\n\n return instance", "docstring": "Creates a plugin from its config.\n\nParams:\nconfig: plugin configuration as read by ait.config\nReturns:\nplugin: a Plugin\nRaises:\nValueError: if any of the required config values are missing", "source": "juraj-google-style"} {"code": "def DEFINE_enum(name, default, enum_values, help, flag_values=FLAGS, module_name=None, **args):\n DEFINE_flag(EnumFlag(name, default, help, enum_values, **args), flag_values, module_name)", "docstring": "Registers a flag whose value can be any string from enum_values.\n\nArgs:\nname: A string, the flag name.\ndefault: The default value of the flag.\nenum_values: A list of strings with the possible values for the flag.\nhelp: A help string.\nflag_values: FlagValues object with which the flag will be registered.\nmodule_name: A string, the name of the Python module declaring this flag.\nIf not provided, it will be computed using the stack trace of this call.\n**args: Dictionary with extra keyword args that are passed to the\nFlag __init__.", "source": "codesearchnet"} {"code": "def ParseFileObject(self, parser_mediator, file_object):\n \n pe_data = file_object.read()\n try:\n pefile_object = pefile.PE(data=pe_data, fast_load=True)\n pefile_object.parse_data_directories(\n directories=[\n pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'],\n pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'],\n pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_RESOURCE'],\n pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT'],])\n except:\n raise errors.UnableToParseFile()\n\n event_data = PEEventData()\n event_data.imphash = pefile_object.get_imphash()\n event_data.pe_type = self._GetPEType(pefile_object)\n event_data.section_names = self._GetSectionNames(pefile_object)\n\n \n event_data.data_type = 'pe:compilation:compilation_time'\n\n timestamp = getattr(pefile_object.FILE_HEADER, 'TimeDateStamp', None)\n \n date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_CREATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n for dll_name, timestamp in self._GetImportTimestamps(pefile_object):\n if timestamp:\n event_data.dll_name = dll_name\n event_data.data_type = 'pe:import:import_time'\n\n date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = 
time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n for dll_name, timestamp in self._GetDelayImportTimestamps(pefile_object):\n if timestamp:\n event_data.dll_name = dll_name\n event_data.data_type = 'pe:delay_import:import_time'\n\n date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n event_data.dll_name = None\n\n for timestamp in self._GetResourceTimestamps(pefile_object):\n if timestamp:\n event_data.data_type = 'pe:resource:creation_time'\n\n date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n timestamp = self._GetLoadConfigTimestamp(pefile_object)\n if timestamp:\n event_data.data_type = 'pe:load_config:modification_time'\n\n date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a Portable Executable (PE) file-like object.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nfile_object (dfvfs.FileIO): a file-like object.\n\nRaises:\nUnableToParseFile: when the file cannot be parsed.", "source": "juraj-google-style"} {"code": "def get(self, page=0, size=10):\n \n dash_list = r_db.zrevrange(config.DASH_ID_KEY, 0, -1, True)\n id_list = dash_list[page * size : page * size + size]\n dash_meta = []\n data = []\n if id_list:\n dash_meta = r_db.hmget(config.DASH_META_KEY, [i[0] for i in id_list])\n data = [json.loads(i) for i in dash_meta]\n\n return build_response(dict(data=data, code=200))", "docstring": "Get dashboard meta info from in page `page` and page size is `size`.\n\nArgs:\npage: page number.\nsize: size number.\n\nReturns:\nlist of dict containing the dash_id and accordingly meta info.\nmaybe empty list [] when page * size > total dashes in db. that's reasonable.", "source": "juraj-google-style"} {"code": "def name(self, value):\n \n if value == self._defaults['name'] and 'name' in self._values:\n del self._values['name']\n else:\n self._values['name'] = value", "docstring": "The name property.\n\nArgs:\nvalue (string). 
the property value.", "source": "juraj-google-style"} {"code": "def clean_video_data(_data):\n \n\n data = _data.copy()\n\n \n title = data.get('title')\n if title:\n data['title'] = clean_title(title)\n\n return data", "docstring": "Clean video data:\n-> cleans title\n-> ...\n\nArgs:\n_data (dict): Information about the video.\n\nReturns:\ndict: Refined video data.", "source": "juraj-google-style"} {"code": "def _parse_logging(log_values: dict, service_config: dict):\n for (log_key, log_value) in log_values.items():\n if ('driver' in log_key):\n service_config['log_driver'] = log_value\n if ('options' in log_key):\n service_config['log_driver_options'] = log_value", "docstring": "Parse log key.\n\nArgs:\nlog_values (dict): logging configuration values\nservice_config (dict): Service specification", "source": "codesearchnet"} {"code": "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Cancels a build in progress.\n\nArgs:\nrequest: (CancelBuildRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(Build) The response message.", "source": "github-repos"} {"code": "def __init__(self,\n *packages,\n build_tree=True,\n build_dependencies=True,\n enforce_init=True):\n \n self.finder = Finder()\n self.specs = []\n self.not_found = []\n self.enforce_init = enforce_init\n\n specs = []\n for package in packages:\n spec = self.finder.find(package, enforce_init=enforce_init)\n if spec:\n specs.append(spec)\n else:\n self.not_found.append(package)\n\n if not specs:\n print('** dependenpy: DSM empty.', file=sys.stderr)\n\n self.specs = PackageSpec.combine(specs)\n\n for m in self.not_found:\n print('** dependenpy: Not found: %s.' % m, file=sys.stderr)\n\n super().__init__(build_tree)\n\n if build_tree and build_dependencies:\n self.build_dependencies()", "docstring": "Initialization method.\n\nArgs:\n*packages (args): list of packages to search for.\nbuild_tree (bool): auto-build the tree or not.\nbuild_dependencies (bool): auto-build the dependencies or not.\nenforce_init (bool):\nif True, only treat directories if they contain an\n``__init__.py`` file.", "source": "juraj-google-style"} {"code": "def AssertType(value, expected_type):\n \n if not isinstance(value, expected_type):\n message = \"Expected type `%r`, but got value `%r` of type `%s`\"\n message %= (expected_type, value, type(value))\n raise TypeError(message)", "docstring": "Ensures that given value has certain type.\n\nArgs:\nvalue: A value to assert the type for.\nexpected_type: An expected type for the given value.\n\nRaises:\nTypeError: If given value does not have the expected type.", "source": "juraj-google-style"} {"code": "def get_simple_date(datestring):\n simple_date = re.compile('\\\\d{1,2}(\\\\.)\\\\d{1,2}')\n date = simple_date.search(datestring)\n if date:\n dates = date.group().split('.')\n if (len(dates[0]) == 1):\n dates[0] = add_zero(dates[0])\n if (len(dates[1]) == 1):\n dates[1] = add_zero(dates[1])\n if date_is_valid(dates):\n return ('.'.join(dates) + '.')\n return 'Failed'", "docstring": "Transforms a datestring into shorter date 7.9.2017 > 07.09\n\nExpects the datestring to be format 07.09.2017. 
If this is not the\ncase, returns string \"Failed\".\n\nKeyword arguments:\ndatestring -- a string\n\nReturns:\nString -- The date in format \"dd.MM.\" or \"Failed\"", "source": "codesearchnet"} {"code": "def _prepare_method(self, pandas_func, **kwargs):\n \n if self._is_transposed:\n\n def helper(df, internal_indices=[]):\n if len(internal_indices) > 0:\n return pandas_func(\n df.T, internal_indices=internal_indices, **kwargs\n )\n return pandas_func(df.T, **kwargs)\n\n else:\n\n def helper(df, internal_indices=[]):\n if len(internal_indices) > 0:\n return pandas_func(df, internal_indices=internal_indices, **kwargs)\n return pandas_func(df, **kwargs)\n\n return helper", "docstring": "Prepares methods given various metadata.\nArgs:\npandas_func: The function to prepare.\n\nReturns\nHelper function which handles potential transpose.", "source": "juraj-google-style"} {"code": "def _parse_by_pattern(self, lines, pattern):\n for line in lines:\n match = pattern.match(line)\n if match:\n params = match.groupdict()\n if (not params):\n params = match.groups()\n (yield self._create_output_from_match(params))", "docstring": "Match pattern line by line and return Results.\n\nUse ``_create_output_from_match`` to convert pattern match groups to\nResult instances.\n\nArgs:\nlines (iterable): Output lines to be parsed.\npattern: Compiled pattern to match against lines.\nresult_fn (function): Receive results of one match and return a\nResult.\n\nReturn:\ngenerator: Result instances.", "source": "codesearchnet"} {"code": "def _required_constraint_name(table: str, field, key):\n return '{table}_{field}_required_{postfix}'.format(table=table, field=field.column, postfix=key)", "docstring": "Gets the name for a CONSTRAINT that applies\nto a single hstore key.\n\nArguments:\ntable:\nThe name of the table the field is\na part of.\n\nfield:\nThe hstore field to create a\nUNIQUE INDEX for.\n\nkey:\nThe name of the hstore key\nto create the name for.\n\nReturns:\nThe name for the UNIQUE index.", "source": "codesearchnet"} {"code": "def _ValidateCacheFileMetadataHeader(self, cache_file_metadata_header):\n return ((cache_file_metadata_header.key_size > 0) and (cache_file_metadata_header.key_size < self._MAXIMUM_URL_LENGTH) and (cache_file_metadata_header.format_version == 1) and (cache_file_metadata_header.last_fetched_time > 0) and (cache_file_metadata_header.fetch_count > 0))", "docstring": "Determines whether the cache file metadata header is valid.\n\nArgs:\ncache_file_metadata_header (firefox_cache2_file_metadata_header): cache\nfile metadata header.\n\nReturns:\nbool: True if the cache file metadata header is valid.", "source": "codesearchnet"} {"code": "def strip_alias(data_type):\n \n while hasattr(data_type, 'data_type'):\n if is_alias(data_type.data_type):\n data_type.data_type = data_type.data_type.data_type\n break\n data_type = data_type.data_type", "docstring": "Strip alias from a data_type chain - this function should be\nused *after* aliases are resolved (see resolve_aliases fn):\n\nLoops through given data type chain (unwraps types), replaces\nfirst alias with underlying type, and then terminates.\n\nNote: Stops on encountering the first alias as it assumes\nintermediate aliases are already removed.\n\nArgs:\ndata_type (DataType): The target DataType chain to strip.\nReturn:\nNone", "source": "juraj-google-style"} {"code": "def times(self, factor):\n if (factor == 0):\n return self.__class__()\n if (factor < 0):\n raise ValueError('The factor must no be negative.')\n result = self.__copy__()\n 
_elements = result._elements\n for element in _elements:\n _elements[element] *= factor\n result._total *= factor\n return result", "docstring": "Return a new set with each element's multiplicity multiplied with the given scalar factor.\n\n>>> ms = Multiset('aab')\n>>> sorted(ms.times(2))\n['a', 'a', 'a', 'a', 'b', 'b']\n\nYou can also use the ``*`` operator for the same effect:\n\n>>> sorted(ms * 3)\n['a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b']\n\nFor a variant of the operation which modifies the multiset in place see\n:meth:`times_update`.\n\nArgs:\nfactor: The factor to multiply each multiplicity with.", "source": "codesearchnet"} {"code": "def perturb_function(f, var, k, delta):\n num_elts = tf.size(var)\n old_value = var.read_value()\n perturbation_direction = tf.one_hot(k, num_elts, 1.0, 0.0, None, var.dtype)\n perturbation = tf.reshape(tf.cast(delta, var.dtype) * perturbation_direction, tf.shape(var))\n var.assign(old_value + perturbation)\n f_value = f()\n var.assign(old_value)\n return f_value", "docstring": "Evaluates the function with a specified variable perturbed.\n\nArgs:\nf: Callable taking no arguments and returning a possibly nested structure\nwhose atomic elements are `tf.Tensor`.\nvar: `tf.Variable` to perturb.\nk: Entry of `var` to perturb.\ndelta: Amount to perturb entry `k` of `var`.\n\nReturn:\nf_value: Return of `f()` evaluated while `var` is perturbed.", "source": "github-repos"} {"code": "def noise_get_fbm(\n n: tcod.noise.Noise,\n f: Sequence[float],\n oc: float,\n typ: int = NOISE_DEFAULT,\n) -> float:\n \n return float(\n lib.TCOD_noise_get_fbm_ex(n.noise_c, ffi.new(\"float[4]\", f), oc, typ)\n )", "docstring": "Return the fractal Brownian motion sampled from the ``f`` coordinate.\n\nArgs:\nn (Noise): A Noise instance.\nf (Sequence[float]): The point to sample the noise from.\ntyp (int): The noise algorithm to use.\noctaves (float): The level of level. Should be more than 1.\n\nReturns:\nfloat: The sampled noise value.", "source": "juraj-google-style"} {"code": "def add(self, text, checked=False, sort=None):\n \n if self.parent is None:\n raise exception.InvalidException('Item has no parent')\n node = self.parent.add(text, checked, sort)\n self.indent(node)\n return node", "docstring": "Add a new sub item to the list. 
This item must already be attached to a list.\n\nArgs:\ntext (str): The text.\nchecked (bool): Whether this item is checked.\nsort (int): Item id for sorting.", "source": "juraj-google-style"} {"code": "def add_gene_ids(self, genes_list):\n \n orig_num_genes = len(self.genes)\n\n for g in list(set(genes_list)):\n if not self.genes.has_id(g):\n new_gene = GenePro(id=g, pdb_file_type=self.pdb_file_type, root_dir=self.genes_dir)\n if self.model:\n self.model.genes.append(new_gene)\n else:\n self.genes.append(new_gene)\n\n log.info('Added {} genes to GEM-PRO project'.format(len(self.genes)-orig_num_genes))", "docstring": "Add gene IDs manually into the GEM-PRO project.\n\nArgs:\ngenes_list (list): List of gene IDs as strings.", "source": "juraj-google-style"} {"code": "async def runCmdLine(self, line):\n \n opts = self.getCmdOpts(line)\n return await self.runCmdOpts(opts)", "docstring": "Run a line of command input for this command.\n\nArgs:\nline (str): Line to execute\n\nExamples:\nRun the foo command with some arguments:\n\nawait foo.runCmdLine('foo --opt baz woot.com')", "source": "juraj-google-style"} {"code": "def getenv(key, value=None):\n \n\n key = path2fsn(key)\n if is_win and PY2:\n return environ.get(key, value)\n return os.getenv(key, value)", "docstring": "Like `os.getenv` but returns unicode under Windows + Python 2\n\nArgs:\nkey (pathlike): The env var to get\nvalue (object): The value to return if the env var does not exist\nReturns:\n`fsnative` or `object`:\nThe env var or the passed value if it doesn't exist", "source": "juraj-google-style"} {"code": "def empty(shape, dtype=None):\n return backend.numpy.empty(shape, dtype=dtype)", "docstring": "Return a tensor of given shape and type filled with uninitialized data.\n\nArgs:\nshape: Shape of the empty tensor.\ndtype: Desired data type of the empty tensor.\n\nReturns:\nThe empty tensor.", "source": "github-repos"} {"code": "def get_jax_iterator(self):\n raise NotImplementedError", "docstring": "Get a Python iterable for the `DataAdapter`, that yields arrays that\nthat can be fed to JAX. 
NumPy arrays are preferred for performance.\n\nReturns:\nA Python iterator.", "source": "github-repos"} {"code": "def add_file(self, file_obj):\n BalancedDiscStorage._check_interface(file_obj)\n file_hash = self._get_hash(file_obj)\n dir_path = self._create_dir_path(file_hash)\n final_path = os.path.join(dir_path, file_hash)\n\n def copy_to_file(from_file, to_path):\n with open(to_path, 'wb') as out_file:\n for part in self._get_file_iterator(from_file):\n out_file.write(part)\n try:\n copy_to_file(from_file=file_obj, to_path=final_path)\n except Exception:\n os.unlink(final_path)\n raise\n return PathAndHash(path=final_path, hash=file_hash)", "docstring": "Add new file into the storage.\n\nArgs:\nfile_obj (file): Opened file-like object.\n\nReturns:\nobj: Path where the file-like object is stored contained with hash\\\nin :class:`.PathAndHash` object.\n\nRaises:\nAssertionError: If the `file_obj` is not file-like object.\nIOError: If the file couldn't be added to storage.", "source": "codesearchnet"} {"code": "def _add_extension_value_to_message(extension: message.Message, msg: message.Message, message_field: descriptor.FieldDescriptor) -> None:\n if message_field.type != descriptor.FieldDescriptor.TYPE_MESSAGE:\n raise fhir_errors.InvalidFhirError(f'{msg.DESCRIPTOR.full_name} is not a FHIR extension type.')\n extension_field = extension.DESCRIPTOR.fields_by_name['extension']\n if proto_utils.field_content_length(extension, extension_field) > 0:\n raise fhir_errors.InvalidFhirError(f'No child extensions should be set on {extension.DESCRIPTOR.full_name}.')\n value_field = _get_populated_extension_value_field(extension)\n if annotation_utils.is_choice_type_field(message_field):\n choice_message = proto_utils.get_value_at_field(msg, message_field)\n choice_descriptor = choice_message.DESCRIPTOR\n for choice_field in choice_descriptor.fields:\n if value_field.message_type.full_name == choice_field.message_type.full_name:\n _add_extension_value_to_message(extension, choice_message, choice_field)\n return\n raise ValueError(f'No field on Choice Type {choice_descriptor.full_name} for extension {extension.DESCRIPTOR.full_name}.')\n if annotation_utils.has_fhir_valueset_url(message_field.message_type):\n typed_code = proto_utils.set_in_parent_or_add(msg, message_field)\n codes.copy_code(cast(Any, extension).value.code, typed_code)\n return\n if fhir_types.is_type_or_profile_of_coding(message_field.message_type):\n typed_coding = proto_utils.set_in_parent_or_add(msg, message_field)\n codes.copy_coding(cast(Any, extension).value.coding, typed_coding)\n return\n if not proto_utils.are_same_message_type(value_field.message_type, message_field.message_type):\n raise ValueError(f'Missing expected value of type {message_field.message_type.full_name} in extension {extension.DESCRIPTOR.full_name}.')\n value = proto_utils.get_value_at_field(cast(Any, extension).value, value_field)\n if proto_utils.field_is_repeated(message_field):\n proto_utils.append_value_at_field(msg, message_field, value)\n else:\n proto_utils.set_value_at_field(msg, message_field, value)", "docstring": "Serialize the provided extension and add it to the message.\n\nArgs:\nextension: The FHIR extension to serialize.\nmsg: The message to add the serialized extension to.\nmessage_field: The field on the message to set.\n\nRaises:\nInvalidFhirError: In the event that the field to be set is not a singular\nmessage type, or if the provided extension is not singular (has nested\nextensions).", "source": "github-repos"} {"code": "def 
create_parser():\n    parser = argparse.ArgumentParser(prog=pylink.__title__, description=pylink.__description__, epilog=pylink.__copyright__)\n    parser.add_argument('--version', action='version', version=('%(prog)s ' + pylink.__version__))\n    parser.add_argument('-v', '--verbose', action='count', default=0, help='increase output verbosity')\n    kwargs = {}\n    kwargs['title'] = 'command'\n    kwargs['description'] = 'specify subcommand to run'\n    kwargs['help'] = 'subcommands'\n    subparsers = parser.add_subparsers(**kwargs)\n    for command in commands():\n        kwargs = {}\n        kwargs['name'] = command.name\n        kwargs['description'] = command.description\n        kwargs['help'] = command.help\n        subparser = subparsers.add_parser(**kwargs)\n        subparser.set_defaults(command=command.run)\n        command.add_arguments(subparser)\n    return parser", "docstring": "Builds the command parser.\n\nThis needs to be exported in order for Sphinx to document it correctly.\n\nReturns:\nAn instance of an ``argparse.ArgumentParser`` that parses all the\ncommands supported by the PyLink CLI.", "source": "codesearchnet"} {"code": "def get_minimal_fba_flux(self, objective):\n        \n        \n        vs_wt = self._v_wt.set(self._model.reactions)\n        zs = self._z.set(self._model.reactions)\n\n        wt_obj_flux = self.get_fba_obj_flux(objective)\n\n        with self.constraints() as constr:\n            constr.add(\n                zs >= vs_wt, vs_wt >= -zs,\n                self._v_wt[objective] >= wt_obj_flux)\n        self._prob.set_objective(self._z.sum(self._model.reactions))\n        result = self._solve(lp.ObjectiveSense.Minimize)\n\n        fba_fluxes = {}\n        for key in self._model.reactions:\n            fba_fluxes[key] = result.get_value(self._v_wt[key])\n        return fba_fluxes", "docstring": "Find the FBA solution that minimizes all the flux values.\n\nMaximize the objective flux then minimize all other fluxes\nwhile keeping the objective flux at the maximum.\n\nArgs:\nobjective: The objective reaction that is maximized.\n\nReturns:\nA dictionary of all the reactions and their minimized fluxes.", "source": "juraj-google-style"} {"code": "def get_rml_processors(es_defs):\n    proc_defs = es_defs.get('kds_esRmlProcessor', [])\n    if proc_defs:\n        new_defs = []\n        for proc in proc_defs:\n            params = proc['kds_rmlProcessorParams'][0]\n            proc_kwargs = {}\n            if params.get('kds_rtn_format'):\n                proc_kwargs['rtn_format'] = params.get('kds_rtn_format')[0]\n            new_def = dict(name=proc['rdfs_label'][0], subj=params['kds_subjectKwarg'][0], proc_kwargs=proc_kwargs, force=proc.get('kds_forceNested', [False])[0], processor=CFG.rml.get_processor(proc['rdfs_label'][0], proc['kds_esRmlMapping'], proc['rdf_type'][0]))\n            new_defs.append(new_def)\n        es_defs['kds_esRmlProcessor'] = new_defs\n    return es_defs", "docstring": "Returns the es_defs with the instantiated rml_processor\n\nArgs:\n-----\nes_defs: the rdf_class elasticsearch definitions\ncls_name: the name of the tied class", "source": "codesearchnet"} {"code": "def _stdout_list_split(retcode, stdout='', splitstring='\\\\n'):\n    if (retcode == 0):\n        ret = stdout.split(splitstring)\n        return ret\n    else:\n        return False", "docstring": "Evaluates Open vSwitch command's retcode value.\n\nArgs:\nretcode: Value of retcode field from response, should be 0, 1 or 2.\nstdout: Value of stdout field from response.\nsplitstring: String used to split the stdout; defaults to a new line.\n\nReturns:\nList or False.", "source": "codesearchnet"} {"code": "def _get_domain(self):\n        try:\n            return self.libvirt_con.lookupByName(self._libvirt_name())\n        except libvirt.libvirtError as e:\n            raise vm_plugin.LagoVMDoesNotExistError(str(e))", "docstring": "Return the object
representation of this provider VM.\n\nReturns:\nlibvirt.virDomain: Libvirt domain object\n\nRaises:\n:exc:`~lago.plugins.vm.LagoFailedToGetVMStateError:\nIf the VM exist, but the query returned an error.", "source": "codesearchnet"} {"code": "def isostr_to_datetime(dt_str):\n \n if len(dt_str) <= 20:\n return datetime.datetime.strptime(dt_str, \"%Y-%m-%dT%H:%M:%SZ\")\n else:\n dt_str = dt_str.split(\".\")\n return isostr_to_datetime(\"%sZ\" % dt_str[0])", "docstring": "Converts iso formated text string into a datetime object\n\nArgs:\ndt_str (str): ISO formated text string\nReturns:\n:obj:`datetime.datetime`", "source": "juraj-google-style"} {"code": "def issue(self, invoice_id, **kwargs):\n url = '{}/{}/issue'.format(self.base_url, invoice_id)\n return self.post_url(url, {}, **kwargs)", "docstring": "Issues an invoice in draft state\n\nArgs:\ninvoice_id : Id for delete the invoice\nReturns:\nIts response is the invoice entity, similar to create/update API response. Its status now would be issued.", "source": "codesearchnet"} {"code": "def _load_methods(package):\n global _methods\n _methods[package] = None\n from acorn.config import settings\n from acorn.logging.descriptors import _obj_getattr\n spack = settings(package)\n if (spack is not None):\n if spack.has_section('analysis.methods'):\n _methods[package] = {}\n from importlib import import_module\n mappings = dict(spack.items('analysis.methods'))\n for (fqdn, target) in mappings.items():\n rootname = target.split('.')[0]\n root = import_module(rootname)\n caller = _obj_getattr(root, target)\n _methods[package][fqdn] = caller", "docstring": "Loads the mappings from method call result to analysis.\n\nArgs:\npackage (str): name of the package to load for.", "source": "codesearchnet"} {"code": "def sg_reshape(tensor, opt):\n r\n assert opt.shape is not None, 'shape is mandatory.'\n return tf.reshape(tensor, opt.shape, name=opt.name)", "docstring": "r\"\"\"Reshapes a tensor.\n\nSee `tf.reshape()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` (automatically given by chain).\nopt:\nshape: A tuple/list of integers. The destination shape.\nname: If provided, replace current tensor's name.\n\nReturns:\nA `Tensor`.", "source": "juraj-google-style"} {"code": "def __init__(self, config: Mask2FormerConfig, weight_dict: Dict[str, float]):\n super().__init__()\n requires_backends(self, ['scipy'])\n self.num_labels = config.num_labels\n self.weight_dict = weight_dict\n self.eos_coef = config.no_object_weight\n empty_weight = torch.ones(self.num_labels + 1)\n empty_weight[-1] = self.eos_coef\n self.register_buffer('empty_weight', empty_weight)\n self.num_points = config.train_num_points\n self.oversample_ratio = config.oversample_ratio\n self.importance_sample_ratio = config.importance_sample_ratio\n self.matcher = Mask2FormerHungarianMatcher(cost_class=1.0, cost_dice=config.dice_weight, cost_mask=config.mask_weight, num_points=self.num_points)", "docstring": "The Mask2Former Loss. The loss is computed very similar to DETR. 
The process happens in two steps: 1) we\ncompute hungarian assignment between ground truth masks and the outputs of the model 2) we supervise each pair\nof matched ground-truth / prediction (supervise class and mask)\n\nArgs:\nconfig (`Mask2FormerConfig`):\nThe configuration for Mask2Former model also containing loss calculation specific parameters.\nweight_dict (`Dict[str, float]`):\nA dictionary of weights to be applied to the different losses.", "source": "github-repos"} {"code": "def create_saturated_interstitial_structure(interstitial_def, dist_tol=0.1):\n sga = SpacegroupAnalyzer(interstitial_def.bulk_structure.copy())\n sg_ops = sga.get_symmetry_operations(cartesian=True)\n saturated_defect_struct = interstitial_def.bulk_structure.copy()\n saturated_defect_struct.DISTANCE_TOLERANCE = dist_tol\n for sgo in sg_ops:\n new_interstit_coords = sgo.operate(interstitial_def.site.coords[:])\n poss_new_site = PeriodicSite(interstitial_def.site.specie, new_interstit_coords, saturated_defect_struct.lattice, to_unit_cell=True, coords_are_cartesian=True)\n try:\n saturated_defect_struct.append(poss_new_site.specie, poss_new_site.coords, coords_are_cartesian=True, validate_proximity=True)\n except ValueError:\n pass\n saturated_sga = SpacegroupAnalyzer(saturated_defect_struct)\n if (saturated_sga.get_space_group_number() != sga.get_space_group_number()):\n raise ValueError('Warning! Interstitial sublattice generation has changed space group symmetry. I recommend reducing dist_tol and trying again...')\n return saturated_defect_struct", "docstring": "this takes a Interstitial defect object and generates the\nsublattice for it based on the structure's space group.\nUseful for understanding multiplicity of an interstitial\ndefect in thermodynamic analysis.\n\nNOTE: if large relaxation happens to interstitial or\ndefect involves a complex then there maybe additional\ndegrees of freedom that need to be considered for\nthe multiplicity.\n\nArgs:\ndist_tol: changing distance tolerance of saturated structure,\nallowing for possibly overlapping sites\nbut ensuring space group is maintained\n\nReturns:\nStructure object decorated with interstitial site equivalents", "source": "codesearchnet"} {"code": "def latest_malicious(self, ips):\n \n api_name = 'opendns-latest_malicious'\n fmt_url_path = u'ips/{0}/latest_domains'\n return self._multi_get(api_name, fmt_url_path, ips)", "docstring": "Get the a list of malicious domains related to input ips.\n\nArgs:\nips: an enumerable of strings as ips\nReturns:\nAn enumerable of strings for the malicious domains", "source": "juraj-google-style"} {"code": "def _quadratic_interpolation_step(x1, x2, x3, y1, y2, y3):\n r2 = (x2 - x1) / (y2 - y1)\n r3 = (x3 - x1) / (y3 - y1)\n return -x1 * tf.math.divide_no_nan(x3 * r3 - x2 * r2, r3 * r2 * (x3 - x2))", "docstring": "Returns the step size to use when using quadratic interpolation.\n\nThis function is meant for exclusive use by the `_brent_loop_body` function.\nIt does not guard against divisions by zero, and instead assumes that `y1` is\ndistinct from `y2` and `y3`. 
The `_brent_loop_body` function guarantees this\nproperty.\n\nArgs:\nx1: `Tensor` of any shape and real dtype containing the first position used\nfor extrapolation.\nx2: `Tensor` of the same shape and dtype as `x1` containing the second\nposition used for extrapolation.\nx3: `Tensor` of the same shape and dtype as `x1` containing the third\nposition used for extrapolation.\ny1: `Tensor` containing the value of the interpolated function at `x1`.\ny2: `Tensor` containing the value of the interpolated function at `x2`.\ny3: `Tensor` containing the value of the interpolated function at `x3`.\n\nReturns:\nA `Tensor` with the same shape and dtype as `x1`.", "source": "github-repos"} {"code": "def num_work_units_completed(self, name=None):\n    if self._reader_ref.dtype == dtypes.resource:\n      return gen_io_ops.reader_num_work_units_completed_v2(self._reader_ref, name=name)\n    else:\n      return gen_io_ops.reader_num_work_units_completed(self._reader_ref, name=name)", "docstring": "Returns the number of work units this reader has finished processing.\n\nArgs:\nname: A name for the operation (optional).\n\nReturns:\nAn int64 Tensor.", "source": "github-repos"} {"code": "def _guess_fmt_from_bytes(inp):\n    stripped = inp.strip()\n    fmt = None\n    ini_section_header_re = re.compile(b'^\\\\[([\\\\w-]+)\\\\]')\n    if (len(stripped) == 0):\n        fmt = 'yaml'\n    elif stripped.startswith(b'<'):\n        fmt = 'xml'\n    else:\n        for l in stripped.splitlines():\n            line = l.strip()\n            if ((not line.startswith(b'#')) and line):\n                break\n        if ini_section_header_re.match(line):\n            fmt = 'ini'\n        else:\n            fmt = 'yaml'\n    return fmt", "docstring": "Try to guess format of given bytestring.\n\nArgs:\ninp: byte string to guess format of\nReturns:\nguessed format", "source": "codesearchnet"} {"code": "def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n        tstream = BytearrayStream()\n        self.hashing_algorithm.write(tstream, kmip_version=kmip_version)\n        self.digest_value.write(tstream, kmip_version=kmip_version)\n        self.key_format_type.write(tstream, kmip_version=kmip_version)\n        self.length = tstream.length()\n        super(Digest, self).write(ostream, kmip_version=kmip_version)\n        ostream.write(tstream.buffer)", "docstring": "Write the data encoding the Digest object to a stream.\n\nArgs:\nostream (Stream): A data stream in which to encode object data,\nsupporting a write method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded.
Optional,\ndefaults to KMIP 1.0.", "source": "codesearchnet"} {"code": "def is_valid(self, addr, protocol='http', timeout=5):\n start = time.time()\n try:\n r = requests.get(self.test_url[protocol], timeout=timeout, proxies={protocol: ('http:\n except KeyboardInterrupt:\n raise\n except requests.exceptions.Timeout:\n return {'valid': False, 'msg': 'timeout'}\n except:\n return {'valid': False, 'msg': 'exception'}\n else:\n if (r.status_code == 200):\n response_time = (time.time() - start)\n return {'valid': True, 'response_time': response_time}\n else:\n return {'valid': False, 'msg': 'status code: {}'.format(r.status_code)}", "docstring": "Check if a proxy is valid\n\nArgs:\naddr: A string in the form of 'ip:port'\nprotocol: Either 'http' or 'https', different test urls will be used\naccording to protocol.\ntimeout: A integer indicating the timeout of connecting the test url.\n\nReturns:\ndict: If the proxy is valid, returns {'valid': True, 'response_time': xx}\notherwise returns {'valid': False, 'msg': 'xxxxxx'}.", "source": "codesearchnet"} {"code": "def Resolve(self, subject, attribute):\n for (_, value, timestamp) in self.ResolveMulti(subject, [attribute], timestamp=self.NEWEST_TIMESTAMP):\n return (value, timestamp)\n return (None, 0)", "docstring": "Retrieve a value set for a subject's attribute.\n\nThis method is easy to use but always gets the latest version of the\nattribute. It is more flexible and efficient to use the other Resolve\nmethods.\n\nArgs:\nsubject: The subject URN.\nattribute: The attribute.\n\nReturns:\nA (value, timestamp in microseconds) stored in the datastore cell, or\n(None, 0). Value will be the same type as originally stored with Set().\n\nRaises:\nAccessError: if anything goes wrong.", "source": "codesearchnet"} {"code": "def make_tar_stream(build_context, buffer):\n tf = tarfile.TarFile(fileobj=buffer, mode='w')\n for (context_path, fileobj) in build_context.items():\n if (getattr(fileobj, 'localpath', None) is not None):\n tf.add(fileobj.localpath, arcname=context_path)\n else:\n tar_add_bytes(tf, context_path, fileobj.read('rb'))\n tf.close()", "docstring": "Write a tar stream of the build context to the provided buffer\n\nArgs:\nbuild_context (Mapping[str, pyccc.FileReferenceBase]): dict mapping filenames to file references\nbuffer (io.BytesIO): writable binary mode buffer", "source": "codesearchnet"} {"code": "def scopes(self):\n if (not self.__scopes):\n self.__scopes = Scopes(self.__connection)\n return self.__scopes", "docstring": "Gets the Scopes API client.\n\nReturns:\nScopes:", "source": "codesearchnet"} {"code": "def convert_saved_model(saved_model_path, exported_names, show_debug_info=False):\n return pywrap_mlir.experimental_convert_saved_model_to_mlir(saved_model_path, exported_names, show_debug_info)", "docstring": "Converts a SavedModel to MLIR module.\n\nArgs:\nsaved_model_path: Path to SavedModel.\nexported_names: Names to export.\nshow_debug_info: Whether to include locations in the emitted textual form.\n\nReturns:\nA textual representation of the MLIR module corresponding to the\nSavedModel.", "source": "github-repos"} {"code": "def check_missing(self, args):\n \n return [opt.name for opt in self\n if (opt.name not in args) and (opt.default is None)]", "docstring": "Returns the names of all options that are required but were not specified.\n\nAll options that don't have a default value are required in order to run the\nworkflow.\n\nArgs:\nargs (dict): A dictionary of the provided arguments that is checked for\nmissing 
options.\n\nReturns:\nlist: A list with the names of the options that are missing from the\nprovided arguments.", "source": "juraj-google-style"} {"code": "def parse_package_string(path):\n \n parts = path.split('.')\n\n \n if parts[-1][0].isupper():\n return \".\".join(parts[:-1]), parts[-1]\n\n return path, \"\"", "docstring": "Parse the effect package string.\nCan contain the package python path or path to effect class in an effect package.\n\nExamples::\n\n# Path to effect pacakge\nexamples.cubes\n\n# Path to effect class\nexamples.cubes.Cubes\n\nArgs:\npath: python path to effect package. May also include effect class name.\n\nReturns:\ntuple: (package_path, effect_class)", "source": "juraj-google-style"} {"code": "def mark_bool_flags_as_mutual_exclusive(flag_names, required=False,\n flag_values=_flagvalues.FLAGS):\n \n for flag_name in flag_names:\n if not flag_values[flag_name].boolean:\n raise _exceptions.ValidationError(\n 'Flag --{} is not Boolean, which is required for flags used in '\n 'mark_bool_flags_as_mutual_exclusive.'.format(flag_name))\n\n def validate_boolean_mutual_exclusion(flags_dict):\n flag_count = sum(bool(val) for val in flags_dict.values())\n if flag_count == 1 or (not required and flag_count == 0):\n return True\n raise _exceptions.ValidationError(\n '{} one of ({}) must be True.'.format(\n 'Exactly' if required else 'At most', ', '.join(flag_names)))\n\n register_multi_flags_validator(\n flag_names, validate_boolean_mutual_exclusion, flag_values=flag_values)", "docstring": "Ensures that only one flag among flag_names is True.\n\nArgs:\nflag_names: [str], names of the flags.\nrequired: bool. If true, exactly one flag must be True. Otherwise, at most\none flag can be True, and it is valid for all flags to be False.\nflag_values: flags.FlagValues, optional FlagValues instance where the flags\nare defined.", "source": "juraj-google-style"} {"code": "def get_type(mime=None, ext=None):\n for kind in types:\n if ((kind.extension is ext) or (kind.mime is mime)):\n return kind\n return None", "docstring": "Returns the file type instance searching by\nMIME type or file extension.\n\nArgs:\next: file extension string. E.g: jpg, png, mp4, mp3\nmime: MIME string. E.g: image/jpeg, video/mpeg\n\nReturns:\nThe matched file type instance. 
Otherwise None.", "source": "codesearchnet"} {"code": "def _ensure_unique_tensor_objects(parameter_positions, args):\n s = set()\n for i, t in enumerate(args):\n if i in parameter_positions:\n tid = ops.tensor_id(t)\n if tid in s:\n args[i] = gen_array_ops.identity(args[i])\n else:\n s.add(tid)\n return args", "docstring": "Make each of the parameter_positions in args a unique tensor_lib.Tensor object.\n\nEnsure that each parameter is treated independently.\nFor example:\n\ndef f(x, y): return x * y\ng = gradients_function(f)\none = tf.constant(1.)\n\ng(one, one) should return [1., 1.]\n(even though the two arguments are the same Tensor object).\n\nArgs:\nparameter_positions: List of indices into args defining the arguments to\ndifferentiate against.\nargs: A list of arguments to the function to be differentiated.\n\nReturns:\nargs, possibly edited in-place.", "source": "github-repos"} {"code": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n if token_ids_1 is None:\n return [1] + [0] * len(token_ids_0) + [1]\n return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]", "docstring": "Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nalready_has_special_tokens (`bool`, *optional*, defaults to `False`):\nWhether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.", "source": "github-repos"} {"code": "def custom_licenses(self):\n \n buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n result = self._dll.JLINK_EMU_GetLicenses(buf, self.MAX_BUF_SIZE)\n if result < 0:\n raise errors.JLinkException(result)\n return ctypes.string_at(buf).decode()", "docstring": "Returns a string of the installed licenses the J-Link has.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nString of the contents of the custom licenses the J-Link has.", "source": "juraj-google-style"} {"code": "def _GetISO8601String(self, structure):\n time_zone_offset = structure.time_zone_offset\n try:\n time_zone_offset_hours = int(time_zone_offset[1:3], 10)\n time_zone_offset_minutes = int(time_zone_offset[3:5], 10)\n except (IndexError, TypeError, ValueError) as exception:\n raise ValueError('unable to parse time zone offset with error: {0!s}.'.format(exception))\n try:\n iso8601 = '{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}.{6:03d}{7:s}{8:02d}:{9:02d}'.format(structure.year, structure.month, structure.day, structure.hours, structure.minutes, structure.seconds, structure.microseconds, time_zone_offset[0], time_zone_offset_hours, time_zone_offset_minutes)\n except ValueError as exception:\n raise ValueError('unable to format date time string with error: {0!s}.'.format(exception))\n return iso8601", "docstring": "Retrieves an ISO 8601 date time string from the structure.\n\nThe date and time values in Google Drive Sync log files are formatted as:\n\"2018-01-24 18:25:08,454 -0800\".\n\nArgs:\nstructure (pyparsing.ParseResults): structure of 
tokens derived from a\nline of a text file.\n\nReturns:\nstr: ISO 8601 date time string.\n\nRaises:\nValueError: if the structure cannot be converted into a date time string.", "source": "codesearchnet"} {"code": "def _get_model_field(self, name: str):\n \n\n field_name = self._normalize_field_name(name)\n\n \n \n if field_name == 'pk' and self.query.model._meta.pk:\n return self.query.model._meta.pk\n\n for field in self.query.model._meta.local_concrete_fields:\n if field.name == field_name or field.column == field_name:\n return field\n\n return None", "docstring": "Gets the field on a model with the specified name.\n\nArguments:\nname:\nThe name of the field to look for.\n\nThis can be both the actual field name, or\nthe name of the column, both will work :)\n\nReturns:\nThe field with the specified name or None if\nno such field exists.", "source": "juraj-google-style"} {"code": "def get_test_data(train_samples, test_samples, input_shape, num_classes, random_seed=None):\n np.random.seed(random_seed)\n total_samples = train_samples + test_samples\n samples_per_class = total_samples \n y = np.array([i for i in range(num_classes) for _ in range(samples_per_class)], dtype=np.int32)\n extra_samples = total_samples - len(y)\n y_extra = np.array([i % num_classes for i in range(extra_samples)], dtype=np.int64)\n y = np.concatenate([y, y_extra])\n templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)\n x = np.zeros((total_samples,) + input_shape, dtype=np.float32)\n for i in range(total_samples):\n x[i] = templates[y[i]] + np.random.normal(loc=0, scale=1.0, size=input_shape)\n indices = np.arange(total_samples)\n np.random.shuffle(indices)\n x, y = (x[indices], y[indices])\n x_train, y_train, x_test, y_test = ([], [], [], [])\n for cls in range(num_classes):\n cls_indices = np.where(y == cls)[0]\n np.random.shuffle(cls_indices)\n train_count = int(train_samples / num_classes)\n x_train.extend(x[cls_indices[:train_count]])\n y_train.extend(y[cls_indices[:train_count]])\n x_test.extend(x[cls_indices[train_count:]])\n y_test.extend(y[cls_indices[train_count:]])\n x_train, y_train = (np.array(x_train), np.array(y_train))\n x_test, y_test = (np.array(x_test), np.array(y_test))\n train_indices = np.arange(len(x_train))\n test_indices = np.arange(len(x_test))\n np.random.shuffle(train_indices)\n np.random.shuffle(test_indices)\n x_train, y_train = (x_train[train_indices], y_train[train_indices])\n x_test, y_test = (x_test[test_indices], y_test[test_indices])\n return ((x_train, y_train), (x_test, y_test))", "docstring": "Generates balanced, stratified synthetic test data to train a model on.\n\nArgs:\ntrain_samples: Integer, how many training samples to generate.\ntest_samples: Integer, how many test samples to generate.\ninput_shape: Tuple of integers, shape of the inputs.\nnum_classes: Integer, number of classes for the data and targets.\nrandom_seed: Integer, random seed used by Numpy to generate data.\n\nReturns:\nA tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.", "source": "github-repos"} {"code": "def _geodetic_to_cartesian(cls, lat, lon, alt):\n C = (Earth.r / np.sqrt((1 - ((Earth.e * np.sin(lat)) ** 2))))\n S = ((Earth.r * (1 - (Earth.e ** 2))) / np.sqrt((1 - ((Earth.e * np.sin(lat)) ** 2))))\n r_d = ((C + alt) * np.cos(lat))\n r_k = ((S + alt) * np.sin(lat))\n norm = np.sqrt(((r_d ** 2) + (r_k ** 2)))\n return (norm * np.array([(np.cos(lat) * np.cos(lon)), (np.cos(lat) * np.sin(lon)), np.sin(lat)]))", "docstring": "Conversion from latitude, longitude and 
altitude coordinates to\ncartesian with respect to an ellipsoid\n\nArgs:\nlat (float): Latitude in radians\nlon (float): Longitude in radians\nalt (float): Altitude to sea level in meters\n\nReturn:\nnumpy.array: 3D element (in meters)", "source": "codesearchnet"} {"code": "def masks_to_boxes(masks: torch.Tensor) -> torch.Tensor:\n if masks.numel() == 0:\n return torch.zeros((0, 4), device=masks.device)\n h, w = masks.shape[-2:]\n y = torch.arange(0, h, dtype=torch.float32, device=masks.device)\n x = torch.arange(0, w, dtype=torch.float32, device=masks.device)\n y, x = torch.meshgrid(y, x, indexing='ij')\n x_mask = masks * torch.unsqueeze(x, 0)\n x_max = x_mask.view(x_mask.shape[0], -1).max(-1)[0]\n x_min = torch.where(masks, x.unsqueeze(0), torch.tensor(100000000.0, device=masks.device)).view(masks.shape[0], -1).min(-1)[0]\n y_mask = masks * torch.unsqueeze(y, 0)\n y_max = y_mask.view(y_mask.shape[0], -1).max(-1)[0]\n y_min = torch.where(masks, y.unsqueeze(0), torch.tensor(100000000.0, device=masks.device)).view(masks.shape[0], -1).min(-1)[0]\n return torch.stack([x_min, y_min, x_max, y_max], 1)", "docstring": "Compute the bounding boxes around the provided panoptic segmentation masks.\n\nArgs:\nmasks: masks in format `[number_masks, height, width]` where N is the number of masks\n\nReturns:\nboxes: bounding boxes in format `[number_masks, 4]` in xyxy format", "source": "github-repos"} {"code": "def most_specific_common_supertype(self, others: Sequence['TraceType']) -> Optional['TraceType']:", "docstring": "Returns the most specific supertype of `self` and `others`, if exists.\n\nThe returned `TraceType` is a supertype of `self` and `others`, that is,\nthey are all subtypes (see `is_subtype_of`) of it.\nIt is also most specific, that is, there it has no subtype that is also\na common supertype of `self` and `others`.\n\nIf `self` and `others` have no common supertype, this returns `None`.\n\nArgs:\nothers: A sequence of TraceTypes.\n\nExample:\n```python\nclass Dimension(TraceType):\ndef __init__(self, value: Optional[int]):\nself.value = value\n\ndef most_specific_common_supertype(self, other):\n# Either the value is the same or other has a generalized value that\n# can represent any specific ones.\nif self.value == other.value:\nreturn self.value\nelse:\nreturn Dimension(None)\n```", "source": "github-repos"} {"code": "def segment_text(text, seg_regex=SEG_REGEX):\n \n for m in seg_regex.finditer(text):\n yield m.group(0)", "docstring": "Return an iterator of segments in the text.\n\nArgs:\ntext (unicode): string of IPA Unicode text\nseg_regex (_regex.Pattern): compiled regex defining a segment (base +\nmodifiers)\n\nReturn:\ngenerator: segments in the input text", "source": "juraj-google-style"} {"code": "def __init__(self,\n tb_context,\n max_domain_discrete_len=10):\n \n self._tb_context = tb_context\n self._experiment_from_tag = None\n self._experiment_from_tag_lock = threading.Lock()\n self._max_domain_discrete_len = max_domain_discrete_len", "docstring": "Instantiates a context.\n\nArgs:\ntb_context: base_plugin.TBContext. The \"base\" context we extend.\nmax_domain_discrete_len: int. Only used when computing the experiment\nfrom the session runs. 
The maximum number of disticnt values a string\nhyperparameter can have for us to populate its 'domain_discrete' field.\nTypically, only tests should specify a value for this parameter.", "source": "juraj-google-style"} {"code": "def Match(self, file_entry):\n if (not file_entry):\n return False\n filename = file_entry.name.lower()\n return (filename == self._filename)", "docstring": "Determines if a file entry matches the filter.\n\nArgs:\nfile_entry (dfvfs.FileEntry): a file entry.\n\nReturns:\nbool: True if the file entry matches the filter.", "source": "codesearchnet"} {"code": "def union(self, other):\n if (not isinstance(other, self.__class__)):\n m = 'You can only union striplogs with each other.'\n raise StriplogError(m)\n result = []\n for iv in deepcopy(self):\n for jv in other:\n if iv.any_overlaps(jv):\n iv = iv.union(jv)\n result.append(iv)\n return Striplog(result)", "docstring": "Makes a striplog of all unions.\n\nArgs:\nStriplog. The striplog instance to union with.\n\nReturns:\nStriplog. The result of the union.", "source": "codesearchnet"} {"code": "def from_tool_cancellation(cls, *, function_call_id: str, **kwargs) -> 'ProcessorPart':\n part = genai_types.Part.from_function_response(name='tool_cancellation', response={'function_call_id': function_call_id})\n if 'role' in kwargs and kwargs['role'].upper() != 'MODEL':\n logging.warning('Role {kwargs[\"role\"]} is not supported for tool cancellation. Overriding it with the model role.')\n extra_args = kwargs\n extra_args['role'] = 'MODEL'\n return cls(part, **extra_args)", "docstring": "Constructs a ProcessorPart from a tool cancellation id.\n\nThe role is overridden to MODEL.\n\nArgs:\nfunction_call_id: The id of the function call to be cancelled.\n**kwargs: Additional arguments for the ProcessorPart constructor.\n\nReturns:\nA ProcessorPart of type tool cancellation.", "source": "github-repos"} {"code": "def get_sites_in_sphere(self, pt, r):\n \n neighbors = []\n for site in self._sites:\n dist = site.distance_from_point(pt)\n if dist <= r:\n neighbors.append((site, dist))\n return neighbors", "docstring": "Find all sites within a sphere from a point.\n\nArgs:\npt (3x1 array): Cartesian coordinates of center of sphere.\nr (float): Radius of sphere.\n\nReturns:\n[(site, dist) ...] since most of the time, subsequent processing\nrequires the distance.", "source": "juraj-google-style"} {"code": "def checkedThread(self, target: Callable[..., Any], args: Optional[tuple[Any, ...]]=None, kwargs: Optional[dict[str, Any]]=None) -> _CheckedThread:\n ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)\n self._threads.append(ret)\n return ret", "docstring": "Returns a Thread wrapper that asserts 'target' completes successfully.\n\nThis method should be used to create all threads in test cases, as\notherwise there is a risk that a thread will silently fail, and/or\nassertions made in the thread will not be respected.\n\nArgs:\ntarget: A callable object to be executed in the thread.\nargs: The argument tuple for the target invocation. 
Defaults to ().\nkwargs: A dictionary of keyword arguments for the target invocation.\nDefaults to {}.\n\nReturns:\nA wrapper for threading.Thread that supports start() and join() methods.", "source": "github-repos"} {"code": "def dispatch_for_types(op, *types):\n\n def decorator(func):\n _TypeBasedDispatcher(get_compatible_func(op, func), types).register(op)\n return func\n return decorator", "docstring": "Decorator to declare that a Python function overrides an op for a type.\n\nThe decorated function is used to override `op` if any of the arguments or\nkeyword arguments (including elements of lists or tuples) have one of the\nspecified types.\n\nExample:\n\n```python\n@dispatch_for_types(math_ops.add, RaggedTensor, RaggedTensorValue)\ndef ragged_add(x, y, name=None): ...\n```\n\nArgs:\nop: Python function: the operation that should be overridden.\n*types: The argument types for which this function should be used.", "source": "github-repos"} {"code": "def __init__(self, values): \n \n if isinstance(values, list):\n self.values = [to_str(v) for v in values]\n else:\n self.values = [to_str(values)]", "docstring": "Initialize a ``CategoricalParameter``.\n\nArgs:\nvalues (list or object): The possible values for the hyperparameter. This input will\nbe converted into a list of strings.", "source": "juraj-google-style"} {"code": "def logs(self, **kwargs):\n return self.client.api.logs(self.id, **kwargs)", "docstring": "Get logs from this container. Similar to the ``docker logs`` command.\n\nThe ``stream`` parameter makes the ``logs`` function return a blocking\ngenerator you can iterate over to retrieve log output as it happens.\n\nArgs:\nstdout (bool): Get ``STDOUT``. Default ``True``\nstderr (bool): Get ``STDERR``. Default ``True``\nstream (bool): Stream the response. Default ``False``\ntimestamps (bool): Show timestamps. Default ``False``\ntail (str or int): Output specified number of lines at the end of\nlogs. Either an integer of number of lines or the string\n``all``. Default ``all``\nsince (datetime or int): Show logs since a given datetime or\ninteger epoch (in seconds)\nfollow (bool): Follow log output. 
Default ``False``\nuntil (datetime or int): Show logs that occurred before the given\ndatetime or integer epoch (in seconds)\n\nReturns:\n(generator or str): Logs from the container.\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "codesearchnet"} {"code": "def VerifyScripts(verifiable):\n \n try:\n hashes = verifiable.GetScriptHashesForVerifying()\n except Exception as e:\n logger.debug(\"couldn't get script hashes %s \" % e)\n return False\n\n if len(hashes) != len(verifiable.Scripts):\n logger.debug(f\"hash - verification script length mismatch ({len(hashes)}/{len(verifiable.Scripts)})\")\n return False\n\n blockchain = GetBlockchain()\n\n for i in range(0, len(hashes)):\n verification = verifiable.Scripts[i].VerificationScript\n\n if len(verification) == 0:\n sb = ScriptBuilder()\n sb.EmitAppCall(hashes[i].Data)\n verification = sb.ms.getvalue()\n else:\n verification_hash = Crypto.ToScriptHash(verification, unhex=False)\n if hashes[i] != verification_hash:\n logger.debug(f\"hash {hashes[i]} does not match verification hash {verification_hash}\")\n return False\n\n state_reader = GetStateReader()\n script_table = CachedScriptTable(DBCollection(blockchain._db, DBPrefix.ST_Contract, ContractState))\n\n engine = ApplicationEngine(TriggerType.Verification, verifiable, script_table, state_reader, Fixed8.Zero())\n engine.LoadScript(verification)\n invocation = verifiable.Scripts[i].InvocationScript\n engine.LoadScript(invocation)\n\n try:\n success = engine.Execute()\n state_reader.ExecutionCompleted(engine, success)\n except Exception as e:\n state_reader.ExecutionCompleted(engine, False, e)\n\n if engine.ResultStack.Count != 1 or not engine.ResultStack.Pop().GetBoolean():\n Helper.EmitServiceEvents(state_reader)\n if engine.ResultStack.Count > 0:\n logger.debug(f\"Result stack failure! Count: {engine.ResultStack.Count} bool value: {engine.ResultStack.Pop().GetBoolean()}\")\n else:\n logger.debug(f\"Result stack failure! Count: {engine.ResultStack.Count}\")\n return False\n\n Helper.EmitServiceEvents(state_reader)\n\n return True", "docstring": "Verify the scripts of the provided `verifiable` object.\n\nArgs:\nverifiable (neo.IO.Mixins.VerifiableMixin):\n\nReturns:\nbool: True if verification is successful. False otherwise.", "source": "juraj-google-style"} {"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n \n super(LocateResponsePayload, self).read(\n input_buffer,\n kmip_version=kmip_version\n )\n local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n\n if self.is_tag_next(enums.Tags.LOCATED_ITEMS, local_buffer):\n self._located_items = primitives.Integer(\n tag=enums.Tags.LOCATED_ITEMS\n )\n self._located_items.read(\n local_buffer,\n kmip_version=kmip_version\n )\n\n self._unique_identifiers = []\n while self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):\n unique_identifier = primitives.TextString(\n tag=enums.Tags.UNIQUE_IDENTIFIER\n )\n unique_identifier.read(local_buffer, kmip_version=kmip_version)\n self._unique_identifiers.append(unique_identifier)\n\n self.is_oversized(local_buffer)", "docstring": "Read the data encoding the Locate response payload and decode it\ninto its constituent parts.\n\nArgs:\ninput_buffer (stream): A data buffer containing encoded object\ndata, supporting a read method.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. 
Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"} {"code": "def verify_controller_module(module):\n required_attributes = ('create', 'destroy', 'MOBLY_CONTROLLER_CONFIG_NAME')\n for attr in required_attributes:\n if (not hasattr(module, attr)):\n raise signals.ControllerError(('Module %s missing required controller module attribute %s.' % (module.__name__, attr)))\n if (not getattr(module, attr)):\n raise signals.ControllerError(('Controller interface %s in %s cannot be null.' % (attr, module.__name__)))", "docstring": "Verifies a module object follows the required interface for\ncontrollers.\n\nThe interface is explained in the docstring of\n`base_test.BaseTestClass.register_controller`.\n\nArgs:\nmodule: An object that is a controller module. This is usually\nimported with import statements or loaded by importlib.\n\nRaises:\nControllerError: if the module does not match the Mobly controller\ninterface, or one of the required members is null.", "source": "codesearchnet"} {"code": "def __init__(self, cur_fst, cur_node):\n \n self.cur_node = cur_node\n self.cur_fst = cur_fst", "docstring": "Initialization function\nArgs:\nsid (int): The state identifier\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def has_intersection(self, other):\n return bool(lib.SDL_HasIntersection(self._ptr, other._ptr))", "docstring": "Return whether this rectangle intersects with another rectangle.\n\nArgs:\nother (Rect): The rectangle to test intersection with.\n\nReturns:\nbool: True if there is an intersection, False otherwise.", "source": "codesearchnet"} {"code": "def _calculateCrcString(inputstring):\n _checkString(inputstring, description='input CRC string')\n register = 65535\n for char in inputstring:\n register = ((register >> 8) ^ _CRC16TABLE[((register ^ ord(char)) & 255)])\n return _numToTwoByteString(register, LsbFirst=True)", "docstring": "Calculate CRC-16 for Modbus.\n\nArgs:\ninputstring (str): An arbitrary-length message (without the CRC).\n\nReturns:\nA two-byte CRC string, where the least significant byte is first.", "source": "codesearchnet"} {"code": "def __init__(self, persistent=False, watch_accessed_variables=True):\n self._tape = None\n self._persistent = persistent\n self._watch_accessed_variables = watch_accessed_variables\n self._watched_variables = ()\n self._recording = False", "docstring": "Creates a new GradientTape.\n\nArgs:\npersistent: Boolean controlling whether a persistent gradient tape\nis created. False by default, which means at most one call can\nbe made to the gradient() method on this object.\nwatch_accessed_variables: Boolean controlling whether the tape will\nautomatically `watch` any (trainable) variables accessed while the tape\nis active. 
Defaults to True meaning gradients can be requested from any\nresult computed in the tape derived from reading a trainable `Variable`.\nIf False users must explicitly `watch` any `Variable`s they want to\nrequest gradients from.", "source": "github-repos"} {"code": "def load_stereo_chemical_props() -> Tuple[Mapping[str, List[Bond]], Mapping[str, List[Bond]], Mapping[str, List[BondAngle]]]:\n stereo_chemical_props = resources.read_text('openfold.resources', 'stereo_chemical_props.txt')\n lines_iter = iter(stereo_chemical_props.splitlines())\n residue_bonds: Dict[str, List[Bond]] = {}\n next(lines_iter)\n for line in lines_iter:\n if line.strip() == '-':\n break\n bond, resname, bond_length, stddev = line.split()\n atom1, atom2 = bond.split('-')\n if resname not in residue_bonds:\n residue_bonds[resname] = []\n residue_bonds[resname].append(Bond(atom1, atom2, float(bond_length), float(stddev)))\n residue_bonds['UNK'] = []\n residue_bond_angles: Dict[str, List[BondAngle]] = {}\n next(lines_iter)\n next(lines_iter)\n for line in lines_iter:\n if line.strip() == '-':\n break\n bond, resname, angle_degree, stddev_degree = line.split()\n atom1, atom2, atom3 = bond.split('-')\n if resname not in residue_bond_angles:\n residue_bond_angles[resname] = []\n residue_bond_angles[resname].append(BondAngle(atom1, atom2, atom3, float(angle_degree) / 180.0 * np.pi, float(stddev_degree) / 180.0 * np.pi))\n residue_bond_angles['UNK'] = []\n\n def make_bond_key(atom1_name: str, atom2_name: str) -> str:\n \n return '-'.join(sorted([atom1_name, atom2_name]))\n residue_virtual_bonds: Dict[str, List[Bond]] = {}\n for resname, bond_angles in residue_bond_angles.items():\n bond_cache: Dict[str, Bond] = {}\n for b in residue_bonds[resname]:\n bond_cache[make_bond_key(b.atom1_name, b.atom2_name)] = b\n residue_virtual_bonds[resname] = []\n for ba in bond_angles:\n bond1 = bond_cache[make_bond_key(ba.atom1_name, ba.atom2_name)]\n bond2 = bond_cache[make_bond_key(ba.atom2_name, ba.atom3name)]\n gamma = ba.angle_rad\n length = np.sqrt(bond1.length ** 2 + bond2.length ** 2 - 2 * bond1.length * bond2.length * np.cos(gamma))\n dl_outer = 0.5 / length\n dl_dgamma = 2 * bond1.length * bond2.length * np.sin(gamma) * dl_outer\n dl_db1 = (2 * bond1.length - 2 * bond2.length * np.cos(gamma)) * dl_outer\n dl_db2 = (2 * bond2.length - 2 * bond1.length * np.cos(gamma)) * dl_outer\n stddev = np.sqrt((dl_dgamma * ba.stddev) ** 2 + (dl_db1 * bond1.stddev) ** 2 + (dl_db2 * bond2.stddev) ** 2)\n residue_virtual_bonds[resname].append(Bond(ba.atom1_name, ba.atom3name, length, stddev))\n return (residue_bonds, residue_virtual_bonds, residue_bond_angles)", "docstring": "Load stereo_chemical_props.txt into a nice structure.\n\nLoad literature values for bond lengths and bond angles and translate bond angles into the length of the opposite\nedge of the triangle (\"residue_virtual_bonds\").\n\nReturns:\nresidue_bonds: dict that maps resname --> list of Bond tuples residue_virtual_bonds: dict that maps resname -->\nlist of Bond tuples residue_bond_angles: dict that maps resname --> list of BondAngle tuples", "source": "github-repos"} {"code": "def StartClients(cls, hunt_id, client_ids, token=None):\n \n token = token or access_control.ACLToken(username=\"Hunt\", reason=\"hunting\")\n\n with queue_manager.QueueManager(token=token) as flow_manager:\n for client_id in client_ids:\n \n \n \n state = rdf_flow_runner.RequestState(\n id=random.UInt32(),\n session_id=hunt_id,\n client_id=client_id,\n next_state=\"AddClient\")\n\n \n 
flow_manager.QueueRequest(state)\n\n \n msg = rdf_flows.GrrMessage(\n session_id=hunt_id,\n request_id=state.id,\n response_id=1,\n auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,\n type=rdf_flows.GrrMessage.Type.STATUS,\n payload=rdf_flows.GrrStatus())\n\n flow_manager.QueueResponse(msg)\n\n \n flow_manager.QueueNotification(session_id=hunt_id)", "docstring": "This method is called by the foreman for each client it discovers.\n\nNote that this function is performance sensitive since it is called by the\nforeman for every client which needs to be scheduled.\n\nArgs:\nhunt_id: The hunt to schedule.\nclient_ids: List of clients that should be added to the hunt.\ntoken: An optional access token to use.", "source": "juraj-google-style"} {"code": "def data_period_end_day(self, value=None):\n \n if value is not None:\n try:\n value = str(value)\n except ValueError:\n raise ValueError(\n 'value {} need to be of type str '\n 'for field `data_period_end_day`'.format(value))\n if ',' in value:\n raise ValueError('value should not contain a comma '\n 'for field `data_period_end_day`')\n\n self._data_period_end_day = value", "docstring": "Corresponds to IDD Field `data_period_end_day`\n\nArgs:\nvalue (str): value for IDD Field `data_period_end_day`\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"} {"code": "def _get_parser_call_method(func):\n \n func_name = func.__name__\n parser = func.parser\n\n def inner_call(instance=None, args=None):\n \n _LOG.debug(\"Calling %s.parser.call\", func_name)\n \n \n if func_name == \"__init__\":\n raise ParseThisError((\"To use 'create_parser' on the\"\n \"'__init__' you need to decorate the \"\n \"class with '@parse_class'\"))\n namespace = parser.parse_args(_get_args_to_parse(args, sys.argv))\n if instance is None:\n \n \n args_name = _get_args_name_from_parser(parser)\n return _call(func, args_name, namespace)\n return _call_method_from_namespace(instance, func_name, namespace)\n\n return inner_call", "docstring": "Returns the method that is linked to the 'call' method of the parser\n\nArgs:\nfunc: the decorated function\n\nRaises:\nParseThisError if the decorated method is __init__, __init__ can\nonly be decorated in a class decorated by parse_class", "source": "juraj-google-style"} {"code": "def capabilities(self):\n caps = []\n for cap in DeviceCapability:\n if self._libinput.libinput_device_has_capability(self._handle, cap):\n caps.append(cap)\n return tuple(caps)", "docstring": "A tuple of capabilities this device supports.\n\nReturns:\n(~libinput.constant.DeviceCapability): Device capabilities.", "source": "codesearchnet"} {"code": "def control_dependencies(self, control_inputs):\n if control_inputs is None:\n return super().control_dependencies(control_inputs)\n filtered_control_inputs = []\n for c in control_inputs:\n if isinstance(c, indexed_slices.IndexedSlices) or (hasattr(c, '_handle') and hasattr(c, 'op')):\n c = c.op\n graph_element = ops._as_graph_element(c)\n if graph_element is None:\n graph_element = c\n if graph_element is not None and getattr(graph_element, 'graph', None) is not self:\n self._function_captures.control.add(graph_element)\n else:\n filtered_control_inputs.append(graph_element)\n return super().control_dependencies(filtered_control_inputs)", "docstring": "Handles control dependencies.\n\nFuncGraph wraps Graph's control_dependencies logic by first filtering out\nany 
external tensors / operations and storing them in the graph's\ncontrol_captures member. Any consumers of this function graph must then\ndecide how to handle the control captures.\n\nArgs:\ncontrol_inputs: A list of `Operation` or `Tensor` objects which must be\nexecuted or computed before running the operations defined in the\ncontext. Can also be `None` to clear the control dependencies.\n\nReturns:\nA context manager that specifies control dependencies for all\noperations constructed within the context.\n\nRaises:\nTypeError: If `control_inputs` is not a list of `Operation` or\n`Tensor` objects.", "source": "github-repos"} {"code": "def reverse(self, transfer_id, data={}, **kwargs):\n url = '{}/{}/reversals'.format(self.base_url, transfer_id)\n return self.post_url(url, data, **kwargs)", "docstring": "Reverse Transfer from given id\n\nArgs:\ntransfer_id : Id for which transfer object has to be reversed\n\nReturns:\nTransfer Dict which was reversed", "source": "codesearchnet"} {"code": "def stations_listeners(stations):\n stations = (stations if isinstance(stations, (list, tuple)) else [stations])\n listeners = []\n for sta in stations:\n listeners.append(StationSignalListener(sta))\n listeners.append(StationMaxListener(sta))\n if (sta.mask is not None):\n listeners.append(StationMaskListener(sta))\n return listeners", "docstring": "Function for creating listeners for a a list of station\n\nArgs:\nstations (iterable): List of TopocentricFrame\nReturn:\nlist of Listener", "source": "codesearchnet"} {"code": "def url_to_dir_parts(url, include_protocol=False, include_hostname=False, alt_char=False):\n assert isinstance(url, str), 'Expect str. Got {}.'.format(type(url))\n url_split_result = urllib.parse.urlsplit(url)\n parts = []\n if include_protocol:\n parts.append(url_split_result.scheme)\n if include_hostname:\n hostname = url_split_result.hostname\n if url_split_result.port:\n if alt_char:\n port_delim = '+'\n else:\n port_delim = ':'\n hostname = '{0}{1}{2}'.format(hostname, port_delim, url_split_result.port)\n parts.append(hostname)\n for path_part in url_split_result.path.split('/'):\n if path_part:\n parts.append(path_part)\n if ((not url.endswith('/')) and parts):\n parts.pop()\n return parts", "docstring": "Return a list of directory parts from a URL.\n\nArgs:\nurl (str): The URL.\ninclude_protocol (bool): If True, the scheme from the URL will be\nincluded.\ninclude_hostname (bool): If True, the hostname from the URL will be\nincluded.\nalt_char (bool): If True, the character for the port deliminator\nwill be ``+`` intead of ``:``.\n\nThis function does not include the filename and the paths are not\nsanitized.\n\nReturns:\nlist", "source": "codesearchnet"} {"code": "def _try_to_clean_garbage(self, writer_spec, exclude_list=()):\n \n \n \n tmpl = string.Template(self._TMPFILE_PREFIX)\n prefix = tmpl.substitute(\n id=self.status.mapreduce_id, shard=self.status.shard)\n bucket = self._get_tmp_gcs_bucket(writer_spec)\n account_id = self._get_tmp_account_id(writer_spec)\n for f in cloudstorage.listbucket(\"/%s/%s\" % (bucket, prefix),\n _account_id=account_id):\n if f.filename not in exclude_list:\n self._remove_tmpfile(f.filename, self.status.writer_spec)", "docstring": "Tries to remove any files created by this shard that aren't needed.\n\nArgs:\nwriter_spec: writer_spec for the MR.\nexclude_list: A list of filenames (strings) that should not be\nremoved.", "source": "juraj-google-style"} {"code": "def _as_arg_names(names_str):\n names = re.split(',| ', names_str)\n names = 
[name.strip() for name in names if name.strip()]\n for name in names:\n if not _is_arg_name(name):\n return None\n if not names:\n return None\n return names", "docstring": "Converts names_str to a list of arg names.\n\nExample:\n_as_arg_names(\"a, b, c\") == [\"a\", \"b\", \"c\"]\n\nArgs:\nnames_str: A string with multiple space or comma separated arg names.\nReturns:\nA list of arg names, or None if names_str doesn't look like a list of arg\nnames.", "source": "github-repos"} {"code": "def query(self, minhash, size):\n for (i, index) in enumerate(self.indexes):\n u = self.uppers[i]\n if (u is None):\n continue\n (b, r) = self._get_optimal_param(u, size)\n for key in index[r]._query_b(minhash, b):\n (yield key)", "docstring": "Giving the MinHash and size of the query set, retrieve\nkeys that references sets with containment with respect to\nthe query set greater than the threshold.\n\nArgs:\nminhash (datasketch.MinHash): The MinHash of the query set.\nsize (int): The size (number of unique items) of the query set.\n\nReturns:\n`iterator` of keys.", "source": "codesearchnet"} {"code": "def get_collection(key, scope=None) -> list[Any]:\n return get_default_graph().get_collection(key, scope)", "docstring": "Wrapper for `Graph.get_collection()` using the default graph.\n\nSee `tf.Graph.get_collection`\nfor more details.\n\nArgs:\nkey: The key for the collection. For example, the `GraphKeys` class contains\nmany standard names for collections.\nscope: (Optional.) If supplied, the resulting list is filtered to include\nonly items whose `name` attribute matches using `re.match`. Items without\na `name` attribute are never returned if a scope is supplied and the\nchoice or `re.match` means that a `scope` without special tokens filters\nby prefix.\n\nReturns:\nThe list of values in the collection with the given `name`, or\nan empty list if no value has been added to that collection. 
The\nlist contains the values in the order under which they were\ncollected.\n\n@compatibility(eager)\nCollections are not supported when eager execution is enabled.\n@end_compatibility", "source": "github-repos"} {"code": "def RebuildHttpConnections(http):\n if getattr(http, 'connections', None):\n for conn_key in list(http.connections.keys()):\n if (':' in conn_key):\n del http.connections[conn_key]", "docstring": "Rebuilds all http connections in the httplib2.Http instance.\n\nhttplib2 overloads the map in http.connections to contain two different\ntypes of values:\n{ scheme string: connection class } and\n{ scheme + authority string : actual http connection }\nHere we remove all of the entries for actual connections so that on the\nnext request httplib2 will rebuild them from the connection types.\n\nArgs:\nhttp: An httplib2.Http instance.", "source": "codesearchnet"} {"code": "def dump_json(json_info, json_file, overwrite=True):\n if overwrite:\n mode = 'w'\n else:\n mode = 'w+'\n try:\n with open(json_file, mode) as f:\n f.write(json.dumps(json_info))\n except BaseException as e:\n logging.error(e.message)", "docstring": "Dump a whole json record into the given file.\n\nOverwrite the file if the overwrite flag set.\n\nArgs:\njson_info (dict): Information dict to be dumped.\njson_file (str): File path to be dumped to.\noverwrite(boolean)", "source": "codesearchnet"} {"code": "def simplify_replacements(replacements):\n if len(replacements) <= 1:\n return replacements\n replacements.sort(key=lambda x: len(x[0]))\n idx = 0\n while idx < len(replacements):\n old, new = replacements[idx]\n j = idx + 1\n while j < len(replacements):\n old_2, new_2 = replacements[j]\n if old_2.replace(old, new) == new_2:\n replacements.pop(j)\n else:\n j += 1\n idx += 1\n return replacements", "docstring": "Simplify a list of replacement patterns to make sure there are no needless ones.\n\nFor instance in the sequence \"Bert->BertNew, BertConfig->BertNewConfig, bert->bert_new\", the replacement\n\"BertConfig->BertNewConfig\" is implied by \"Bert->BertNew\" so not needed.\n\nArgs:\nreplacements (`List[Tuple[str, str]]`): List of patterns (old, new)\n\nReturns:\n`List[Tuple[str, str]]`: The list of patterns simplified.", "source": "github-repos"} {"code": "def on_execution(self, execution_index, execution):", "docstring": "Monitor method for top-level execution events.\n\nReturn values (if any) are ignored by the associated DebugDataReader.\n\nArgs:\nexecution_index: The index of the top-level execution event, as an int.\nexecution: An Execution data object, for a top-level op or function\nexecution event.", "source": "github-repos"} {"code": "def contains(self, key):\n \n try:\n self._api.objects_get(self._bucket, key)\n except datalab.utils.RequestException as e:\n if e.status == 404:\n return False\n raise e\n except Exception as e:\n raise e\n return True", "docstring": "Checks if the specified item exists.\n\nArgs:\nkey: the key of the item to lookup.\nReturns:\nTrue if the item exists; False otherwise.\nRaises:\nException if there was an error requesting information about the item.", "source": "juraj-google-style"} {"code": "def save_output_in_cache(name, filename, output):\n cache_filename = _get_cache_filename(name, filename)\n with _open_for_write(cache_filename) as f:\n f.write(output)", "docstring": "Saves output in the cache location.\n\nArgs:\nname: string: name of the linter.\nfilename: string: path of the filename for which we are saving the output.\noutput: string: full output (not yet 
filetered) of the lint command.", "source": "codesearchnet"} {"code": "def write_chunks(self, data, start, step, count) -> None:\n \n\n self.mglo.write_chunks(data, start, step, count)", "docstring": "Split data to count equal parts.\n\nWrite the chunks using offsets calculated from start, step and stop.\n\nArgs:\ndata (bytes): The data.\nstart (int): First offset.\nstep (int): Offset increment.\ncount (int): The number of offsets.", "source": "juraj-google-style"} {"code": "def AddWarning(self, warning):\n self._RaiseIfNotWritable()\n warning = self._PrepareAttributeContainer(warning)\n self._warnings.append(warning)\n self.number_of_warnings += 1", "docstring": "Adds a warnings.\n\nArgs:\nwarning (ExtractionWarning): warning.\n\nRaises:\nIOError: when the storage writer is closed.\nOSError: when the storage writer is closed.", "source": "codesearchnet"} {"code": "def clear_caches(self, hard=False):\n from rez.package_repository import package_repository_manager\n from rez.utils.memcached import memcached_client\n package_repository_manager.clear_caches()\n if hard:\n with memcached_client() as client:\n client.flush()", "docstring": "Clear all caches in Rez.\n\nRez caches package contents and iteration during a python session. Thus\nnewly released packages, and changes to existing packages, may not be\npicked up. You need to clear the cache for these changes to become\nvisible.\n\nArgs:\nhard (bool): Perform a 'hard' cache clear. This just means that the\nmemcached cache is also cleared. Generally this is not needed -\nthis option is for debugging purposes.", "source": "codesearchnet"} {"code": "def random_set_distribution(rnd: Optional[tcod.random.Random], dist: int) -> None:\n lib.TCOD_random_set_distribution((rnd.random_c if rnd else ffi.NULL), dist)", "docstring": "Change the distribution mode of a random number generator.\n\nArgs:\nrnd (Optional[Random]): A Random instance, or None to use the default.\ndist (int): The distribution mode to use. Should be DISTRIBUTION_*.", "source": "codesearchnet"} {"code": "def package_releases(self, project_name):\n try:\n return self._connection.package_releases(project_name)\n except Exception as err:\n raise PyPIClientError(err)", "docstring": "Retrieve the versions from PyPI by ``project_name``.\n\nArgs:\nproject_name (str): The name of the project we wish to retrieve\nthe versions of.\n\nReturns:\nlist: Of string versions.", "source": "codesearchnet"} {"code": "def exists(self, filename):\n \n result = True\n for repo in self._children:\n if not repo.exists(filename):\n result = False\n return result", "docstring": "Report whether a file exists on all distribution points.\n\nDetermines file type by extension.\n\nArgs:\nfilename: Filename you wish to check. (No path! 
e.g.:\n\"AdobeFlashPlayer-14.0.0.176.pkg\")\n\nReturns:\nBoolean", "source": "juraj-google-style"} {"code": "def FindStartOfExpressionInLine(line, endpos, stack):\n i = endpos\n while (i >= 0):\n char = line[i]\n if (char in ')]}'):\n stack.append(char)\n elif (char == '>'):\n if ((i > 0) and ((line[(i - 1)] == '-') or Match('\\\\s>=\\\\s', line[(i - 1):]) or Search('\\\\boperator\\\\s*$', line[0:i]))):\n i -= 1\n else:\n stack.append('>')\n elif (char == '<'):\n if ((i > 0) and (line[(i - 1)] == '<')):\n i -= 1\n elif (stack and (stack[(- 1)] == '>')):\n stack.pop()\n if (not stack):\n return (i, None)\n elif (char in '([{'):\n while (stack and (stack[(- 1)] == '>')):\n stack.pop()\n if (not stack):\n return ((- 1), None)\n if (((char == '(') and (stack[(- 1)] == ')')) or ((char == '[') and (stack[(- 1)] == ']')) or ((char == '{') and (stack[(- 1)] == '}'))):\n stack.pop()\n if (not stack):\n return (i, None)\n else:\n return ((- 1), None)\n elif (char == ';'):\n while (stack and (stack[(- 1)] == '>')):\n stack.pop()\n if (not stack):\n return ((- 1), None)\n i -= 1\n return ((- 1), stack)", "docstring": "Find position at the matching start of current expression.\n\nThis is almost the reverse of FindEndOfExpressionInLine, but note\nthat the input position and returned position differs by 1.\n\nArgs:\nline: a CleansedLines line.\nendpos: start searching at this position.\nstack: nesting stack at endpos.\n\nReturns:\nOn finding matching start: (index at matching start, None)\nOn finding an unclosed expression: (-1, None)\nOtherwise: (-1, new stack at beginning of this line)", "source": "codesearchnet"} {"code": "def upgrade_name(self, user_):\n if (user_.name_type > self.name_type):\n self.full_name = user_.full_name\n self.first_name = user_.first_name\n self.name_type = user_.name_type\n logger.debug('Added %s name to User \"%s\": %s', self.name_type.name.lower(), self.full_name, self)", "docstring": "Upgrade name type of this user.\n\nGoogle Voice participants often first appear with no name at all, and\nthen get upgraded unpredictably to numbers (\"+12125551212\") or names.\n\nArgs:\nuser_ (~hangups.user.User): User to upgrade with.", "source": "codesearchnet"} {"code": "def _CreateDynamicDisplayAdSettings(media_service, opener):\n \n image = _CreateImage(media_service, opener, 'https:\n\n logo = {\n 'type': 'IMAGE',\n 'mediaId': image['mediaId'],\n 'xsi_type': 'Image'\n }\n\n dynamic_settings = {\n 'landscapeLogoImage': logo,\n 'pricePrefix': 'as low as',\n 'promoText': 'Free shipping!',\n 'xsi_type': 'DynamicSettings',\n }\n\n return dynamic_settings", "docstring": "Creates settings for dynamic display ad.\n\nArgs:\nmedia_service: a SudsServiceProxy instance for AdWords's MediaService.\nopener: an OpenerDirector instance.\n\nReturns:\nThe dynamic display ad settings.", "source": "juraj-google-style"} {"code": "def run(self, sensor_graph, model):\n \n\n \n \n\n for node, inputs, outputs in sensor_graph.iterate_bfs():\n if node.num_inputs != 1:\n continue\n\n input_a, trigger_a = node.inputs[0]\n if input_a.selector.match_type not in [DataStream.InputType, DataStream.UnbufferedType]:\n continue\n\n if not isinstance(trigger_a, InputTrigger):\n continue\n\n if trigger_a.comp_string != u'==':\n continue\n\n if not trigger_a.use_count:\n continue\n\n if trigger_a.reference != 1:\n continue\n\n \n node.inputs[0] = (input_a, TrueTrigger())", "docstring": "Run this optimization pass on the sensor graph\n\nIf necessary, information on the device model being targeted\ncan be found in the 
associated model argument.\n\nArgs:\nsensor_graph (SensorGraph): The sensor graph to optimize\nmodel (DeviceModel): The device model we're using", "source": "juraj-google-style"} {"code": "def ekm_log(logstr, priority=3):\n if (priority <= ekmmeters_log_level):\n dt = datetime.datetime\n stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M.%f')\n ekmmeters_log_func(((('[EKM Meter Debug Message: ' + stamp) + '] -> ') + logstr))\n pass", "docstring": "Send string to module level log\n\nArgs:\nlogstr (str): string to print.\npriority (int): priority, supports 3 (default) and 4 (special).", "source": "codesearchnet"} {"code": "def get_uuid_string(low=None, high=None, **x):\n if ((low is None) or (high is None)):\n return None\n x = ''.join([parse_part(low), parse_part(high)])\n return '-'.join([x[:8], x[8:12], x[12:16], x[16:20], x[20:32]])", "docstring": "This method parses a UUID protobuf message type from its component\n'high' and 'low' longs into a standard formatted UUID string\n\nArgs:\nx (dict): containing keys, 'low' and 'high' corresponding to the UUID\nprotobuf message type\n\nReturns:\nstr: UUID formatted string", "source": "codesearchnet"} {"code": "def _IsHelpShortcut(component_trace, remaining_args):\n show_help = False\n if remaining_args:\n target = remaining_args[0]\n if target in ('-h', '--help'):\n component = component_trace.GetResult()\n if inspect.isclass(component) or inspect.isroutine(component):\n fn_spec = inspectutils.GetFullArgSpec(component)\n _, remaining_kwargs, _ = _ParseKeywordArgs(remaining_args, fn_spec)\n show_help = target in remaining_kwargs\n else:\n members = dict(inspect.getmembers(component))\n show_help = target not in members\n if show_help:\n component_trace.show_help = True\n command = f'{component_trace.GetCommand()} -- --help'\n print(f'INFO: Showing help with the command {shlex.quote(command)}.\\n', file=sys.stderr)\n return show_help", "docstring": "Determines if the user is trying to access help without '--' separator.\n\nFor example, mycmd.py --help instead of mycmd.py -- --help.\n\nArgs:\ncomponent_trace: (FireTrace) The trace for the Fire command.\nremaining_args: List of remaining args that haven't been consumed yet.\nReturns:\nTrue if help is requested, False otherwise.", "source": "github-repos"} {"code": "def update(self, **kwargs):\n \n return self.__class__(self.resource.update(kwargs),\n self.client,\n wallet=self.wallet)", "docstring": "Update the Account resource with specified content.\n\nArgs:\nname (str): Human-readable name for the account\n\nReturns: the updated Account object.", "source": "juraj-google-style"} {"code": "def decrypt(self, ciphertext):\n \n\n plaintext = self._rx_tinh.dec(ciphertext)\n if plaintext is None:\n logger.error('Message decryption failure')\n raise s_exc.CryptoErr(mesg='Message decryption failure')\n\n seqn = next(self._rx_sn)\n\n sn, mesg = s_msgpack.un(plaintext)\n if sn != seqn:\n logger.error('Message out of sequence: got %d expected %d', sn, seqn)\n raise s_exc.CryptoErr(mesg='Message out of sequence', expected=seqn, got=sn)\n\n return mesg", "docstring": "Decrypt a message, validating its sequence number is as we expect.\n\nArgs:\nciphertext (bytes): The message to decrypt and verify.\n\nReturns:\nmesg: A mesg.\n\nRaises:\ns_exc.CryptoErr: If the message decryption fails or the sequence number was unexpected.", "source": "juraj-google-style"} {"code": "def mix(self, ca, cb, xb):\n \n r = (1 - xb) * ca.red + xb * cb.red\n g = (1 - xb) * ca.green + xb * cb.green\n b = (1 - xb) * ca.blue + xb 
* cb.blue\n a = (1 - xb) * ca.alpha + xb * cb.alpha\n return gdk.RGBA(red=r, green=g, blue=b, alpha=a)", "docstring": "Mix colors.\n\nArgs:\nca (gdk.RGBA): first color\ncb (gdk.RGBA): second color\nxb (float): between 0.0 and 1.0\n\nReturn:\ngdk.RGBA: linear interpolation between ca and cb,\n0 or 1 return the unaltered 1st or 2nd color respectively,\nas in CSS.", "source": "juraj-google-style"} {"code": "def prepare_run_debug_urls(self, fetches, feed_dict):", "docstring": "Abstract method to be implemented by concrete subclasses.\n\nThis method prepares the run-specific debug URL(s).\n\nArgs:\nfetches: Same as the `fetches` argument to `Session.run()`\nfeed_dict: Same as the `feed_dict` argument to `Session.run()`\n\nReturns:\ndebug_urls: (`str` or `list` of `str`) Debug URLs to be used in\nthis `Session.run()` call.", "source": "github-repos"} {"code": "def cmvn(vec, variance_normalization=False):\n \n eps = 2**-30\n rows, cols = vec.shape\n\n \n norm = np.mean(vec, axis=0)\n norm_vec = np.tile(norm, (rows, 1))\n\n \n mean_subtracted = vec - norm_vec\n\n \n if variance_normalization:\n stdev = np.std(mean_subtracted, axis=0)\n stdev_vec = np.tile(stdev, (rows, 1))\n output = mean_subtracted / (stdev_vec + eps)\n else:\n output = mean_subtracted\n\n return output", "docstring": "This function is aimed to perform global cepstral mean and\nvariance normalization (CMVN) on input feature vector \"vec\".\nThe code assumes that there is one observation per row.\n\nArgs:\nvec (array): input feature matrix\n(size:(num_observation,num_features))\nvariance_normalization (bool): If the variance\nnormilization should be performed or not.\n\nReturn:\narray: The mean(or mean+variance) normalized feature vector.", "source": "juraj-google-style"} {"code": "def max_cation_removal(self):\n oxid_pot = sum([((Element(spec.symbol).max_oxidation_state - spec.oxi_state) * self.comp[spec]) for spec in self.comp if is_redox_active_intercalation(Element(spec.symbol))])\n oxid_limit = (oxid_pot / self.cation_charge)\n num_cation = self.comp[Specie(self.cation.symbol, self.cation_charge)]\n return min(oxid_limit, num_cation)", "docstring": "Maximum number of cation A that can be removed while maintaining charge-balance.\n\nReturns:\ninteger amount of cation. Depends on cell size (this is an 'extrinsic' function!)", "source": "codesearchnet"} {"code": "def __init__(self, scope, parent):\n \n CodeControlFlow.__init__(self, scope, parent, 'if')\n self.else_body = CodeBlock(scope, self, explicit=False)", "docstring": "Constructor for conditionals.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.", "source": "juraj-google-style"} {"code": "def Get(self, path, follow_symlink = True):\n \n key = self._Key(path=path, follow_symlink=follow_symlink)\n try:\n return self._cache[key]\n except KeyError:\n value = Stat.FromPath(path, follow_symlink=follow_symlink)\n self._cache[key] = value\n\n \n \n \n if not follow_symlink and not value.IsSymlink():\n self._cache[self._Key(path=path, follow_symlink=True)] = value\n\n return value", "docstring": "Stats given file or returns a cached result if available.\n\nArgs:\npath: A path to the file to perform `stat` on.\nfollow_symlink: True if `stat` of a symlink should be returned instead of\na file that it points to. 
For non-symlinks this setting has no effect.\n\nReturns:\n`Stat` object corresponding to the given path.", "source": "juraj-google-style"} {"code": "def user_agent(self, text, **kwargs):\n indicator_obj = UserAgent(text, **kwargs)\n return self._indicator(indicator_obj)", "docstring": "Add User Agent data to Batch object\n\nArgs:\ntext (str): The value for this Indicator.\nconfidence (str, kwargs): The threat confidence for this Indicator.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nlast_modified (str, kwargs): The date timestamp the Indicator was last modified.\nrating (str, kwargs): The threat rating for this Indicator.\nxid (str, kwargs): The external id for this Indicator.\n\nReturns:\nobj: An instance of UserAgent.", "source": "codesearchnet"} {"code": "def _create_dataset_reader(dataset_creator, filenames, num_parallel_reads=None, name=None):\n\n def read_one_file(filename):\n filename = ops.convert_to_tensor(filename, dtypes.string, name='filename')\n return dataset_creator(filename)\n if num_parallel_reads is None:\n return filenames.flat_map(read_one_file, name=name)\n elif num_parallel_reads == dataset_ops.AUTOTUNE:\n return filenames.interleave(read_one_file, num_parallel_calls=num_parallel_reads, name=name)\n else:\n return ParallelInterleaveDataset(filenames, read_one_file, cycle_length=num_parallel_reads, block_length=1, sloppy=False, buffer_output_elements=None, prefetch_input_elements=None, name=name)", "docstring": "Creates a dataset that reads the given files using the given reader.\n\nArgs:\ndataset_creator: A function that takes in a single file name and returns a\ndataset.\nfilenames: A `tf.data.Dataset` containing one or more filenames.\nnum_parallel_reads: The number of parallel reads we should do.\nname: (Optional.) A name for the tf.data operation.\n\nReturns:\nA `Dataset` that reads data from `filenames`.", "source": "github-repos"} {"code": "def select_best_paths(examples):\n \n possible_paths = {} \n\n \n for example in examples:\n dom = _create_dom(example[\"html\"])\n matching_elements = _match_elements(dom, example[\"vars\"])\n\n for key, match in matching_elements.items():\n if key not in possible_paths: \n possible_paths[key] = _collect_paths(match)\n\n \n for example in examples:\n dom = _create_dom(example[\"html\"])\n matching_elements = _match_elements(dom, example[\"vars\"])\n\n for key, paths in possible_paths.items():\n if key not in matching_elements:\n continue\n\n possible_paths[key] = filter(\n lambda path: _is_working_path(\n dom,\n path,\n matching_elements[key]\n ),\n paths\n )\n\n priorities = [\n \"find\",\n \"left_neighbour_tag\",\n \"right_neighbour_tag\",\n \"wfind\",\n \"match\",\n \"Chained\"\n ]\n priorities = dict(map(lambda x: (x[1], x[0]), enumerate(priorities)))\n\n \n for key in possible_paths.keys():\n possible_paths[key] = list(sorted(\n possible_paths[key],\n key=lambda x: priorities.get(x.call_type, 100)\n ))\n\n return possible_paths", "docstring": "Process `examples`, select only paths that works for every example. 
Select\nbest paths with highest priority.\n\nArgs:\nexamples (dict): Output from :func:`.read_config`.\n\nReturns:\nlist: List of :class:`.PathCall` and :class:`.Chained` objects.", "source": "juraj-google-style"} {"code": "def section(title, element_list):\n sect = {'Type': 'Section', 'Title': title}\n if isinstance(element_list, list):\n sect['Elements'] = element_list\n else:\n sect['Elements'] = [element_list]\n return sect", "docstring": "Returns a dictionary representing a new section. Sections\ncontain a list of elements that are displayed separately from\nthe global elements on the page.\n\nArgs:\ntitle: The title of the section to be displayed\nelement_list: The list of elements to display within the section\n\nReturns:\nA dictionary with metadata specifying that it is to be rendered as\na section containing multiple elements", "source": "codesearchnet"} {"code": "def send(self, request):\n self._connection.connection.rpush(self._request_key, pickle.dumps(request))\n resp_key = '{}:{}'.format(SIGNAL_REDIS_PREFIX, request.uid)\n while True:\n if (self._connection.polling_time > 0.0):\n sleep(self._connection.polling_time)\n response_data = self._connection.connection.get(resp_key)\n if (response_data is not None):\n self._connection.connection.delete(resp_key)\n break\n return pickle.loads(response_data)", "docstring": "Send a request to the server and wait for its response.\n\nArgs:\nrequest (Request): Reference to a request object that is sent to the server.\n\nReturns:\nResponse: The response from the server to the request.", "source": "codesearchnet"} {"code": "def _print_if_not_none(obj):\n if obj is not None:\n print(repr(obj))", "docstring": "Print like a notebook: Show the repr if the object is not None.\n\n`_patch_compile` Uses this on the final expression in each cell.\n\nThis way the outputs feel like notebooks.\n\nArgs:\nobj: the object to print.", "source": "github-repos"} {"code": "def HandleExceptionsAndRebuildHttpConnections(retry_args):\n retry_after = None\n if isinstance(retry_args.exc, (http_client.BadStatusLine, http_client.IncompleteRead, http_client.ResponseNotReady)):\n logging.debug('Caught HTTP error %s, retrying: %s', type(retry_args.exc).__name__, retry_args.exc)\n elif isinstance(retry_args.exc, socket.error):\n logging.debug('Caught socket error, retrying: %s', retry_args.exc)\n elif isinstance(retry_args.exc, socket.gaierror):\n logging.debug('Caught socket address error, retrying: %s', retry_args.exc)\n elif isinstance(retry_args.exc, socket.timeout):\n logging.debug('Caught socket timeout error, retrying: %s', retry_args.exc)\n elif isinstance(retry_args.exc, httplib2.ServerNotFoundError):\n logging.debug('Caught server not found error, retrying: %s', retry_args.exc)\n elif isinstance(retry_args.exc, ValueError):\n logging.debug('Response content was invalid (%s), retrying', retry_args.exc)\n elif (isinstance(retry_args.exc, TokenRefreshError) and hasattr(retry_args.exc, 'status') and ((retry_args.exc.status == TOO_MANY_REQUESTS) or (retry_args.exc.status >= 500))):\n logging.debug('Caught transient credential refresh error (%s), retrying', retry_args.exc)\n elif isinstance(retry_args.exc, exceptions.RequestError):\n logging.debug('Request returned no response, retrying')\n elif isinstance(retry_args.exc, exceptions.BadStatusCodeError):\n logging.debug('Response returned status %s, retrying', retry_args.exc.status_code)\n elif isinstance(retry_args.exc, exceptions.RetryAfterError):\n logging.debug('Response returned a retry-after header, 
retrying')\n retry_after = retry_args.exc.retry_after\n else:\n raise retry_args.exc\n RebuildHttpConnections(retry_args.http)\n logging.debug('Retrying request to url %s after exception %s', retry_args.http_request.url, retry_args.exc)\n time.sleep((retry_after or util.CalculateWaitForRetry(retry_args.num_retries, max_wait=retry_args.max_retry_wait)))", "docstring": "Exception handler for http failures.\n\nThis catches known failures and rebuilds the underlying HTTP connections.\n\nArgs:\nretry_args: An ExceptionRetryArgs tuple.", "source": "codesearchnet"} {"code": "def __init__(self, saved_model_handler, checkpoint_variables_path,\n check_collections=True):\n \n check_unique_tags(saved_model_handler.get_tags())\n if check_collections:\n check_collections_are_supported(\n saved_model_handler, _SUPPORTED_COLLECTIONS)\n self._saved_model_handler = saved_model_handler\n self._checkpoint_variables_path = checkpoint_variables_path\n self._module_attachments = {\n tags: saved_model_handler.get_attached_bytes_map(tags)\n for tags in saved_model_handler.get_tags()}", "docstring": "Private constructor.\n\nArgs:\nsaved_model_handler: SavedModelHandler backing up this Module definition.\ncheckpoint_variables_path: An optional string to the checkpoint where this\nModule variables are checkpointed. If given the variables initializers\nare overridden to load from it.\ncheck_collections: Whether to check collections are supported.\n\nRaises:\nValueError: if SavedModel contains any unexpected value.", "source": "juraj-google-style"} {"code": "def download_and_verify(url, md5sum, fname=None, chunk_size=1024, clobber=False, verbose=True):\n if (fname is None):\n fname = url.split('/')[(- 1)]\n if ((not clobber) and os.path.isfile(fname)):\n print('Checking existing file to see if MD5 sum matches ...')\n md5_existing = get_md5sum(fname, chunk_size=chunk_size)\n if (md5_existing == md5sum):\n print('File exists. 
Not overwriting.')\n return fname\n dir_name = os.path.dirname(fname)\n if (not os.path.exists(dir_name)):\n os.makedirs(dir_name)\n sig = hashlib.md5()\n if verbose:\n print('Downloading {} ...'.format(url))\n if (url.startswith('http:\n with contextlib.closing(requests.get(url, stream=True)) as r:\n try:\n r.raise_for_status()\n except requests.exceptions.HTTPError as error:\n print('Error connecting to URL: \"{}\"'.format(url))\n print(r.text)\n raise error\n with open(fname, 'wb') as f:\n content_length = r.headers.get('content-length')\n if (content_length is not None):\n content_length = int(content_length)\n bar = FileTransferProgressBar(content_length)\n for (k, chunk) in enumerate(r.iter_content(chunk_size=chunk_size)):\n f.write(chunk)\n sig.update(chunk)\n if verbose:\n bar_val = (chunk_size * (k + 1))\n if (content_length is not None):\n bar_val = min(bar_val, content_length)\n bar.update(bar_val)\n else:\n with contextlib.closing(urlopen(url)) as r:\n content_length = r.headers.get('content-length')\n if (content_length is not None):\n content_length = int(content_length)\n bar = FileTransferProgressBar(content_length)\n with open(fname, 'wb') as f:\n k = 0\n while True:\n chunk = r.read(chunk_size)\n if (not chunk):\n break\n f.write(chunk)\n sig.update(chunk)\n if verbose:\n k += 1\n bar_val = (chunk_size * k)\n if (content_length is not None):\n bar_val = min(bar_val, content_length)\n bar.update(bar_val)\n if (sig.hexdigest() != md5sum):\n raise DownloadError((('The MD5 sum of the downloaded file is incorrect.\\n' + ' download: {}\\n'.format(sig.hexdigest())) + ' expected: {}\\n'.format(md5sum)))\n return fname", "docstring": "Download a file and verify the MD5 sum.\n\nArgs:\nurl (str): The URL to download.\nmd5sum (str): The expected MD5 sum.\nfname (Optional[str]): The filename to store the downloaded file in.\nIf `None`, infer the filename from the URL. Defaults to `None`.\nchunk_size (Optional[int]): Process in chunks of this size (in Bytes).\nDefaults to 1024.\nclobber (Optional[bool]): If `True`, any existing, identical file will\nbe overwritten. If `False`, the MD5 sum of any existing file with\nthe destination filename will be checked. If the MD5 sum does not\nmatch, the existing file will be overwritten. Defaults to `False`.\nverbose (Optional[bool]): If `True` (the default), then a progress bar\nwill be shownd during downloads.\n\nReturns:\nThe filename the URL was downloaded to.\n\nRaises:\nDownloadError: The MD5 sum of the downloaded file does not match\n`md5sum`.\nrequests.exceptions.HTTPError: There was a problem connecting to the\nURL.", "source": "codesearchnet"} {"code": "def _fulfill(self, bits, ignore_nonpromised_bits=False):\n if self._allsubsfulfilled():\n if (not self._components):\n if ignore_nonpromised_bits:\n self._value = bits[self._bitstartselective:(self._bitstartselective + self._bitlength)]\n else:\n self._value = bits[self._bitstart:self._bitend]\n else:\n self._value = self._components[0][0]._value\n for (sub, offset) in self._components[1:]:\n self._value += sub._value\n if (self._parent is not None):\n self._parent._fulfill(None)", "docstring": "Supply the promise with the bits from its associated primitive's execution.\n\nThe fulfillment process must walk the promise chain backwards\nuntil it reaches the original promise and can supply the final\nvalue.\n\nThe data that comes in can either be all a bit read for every\nbit written by the associated primitive, or (if the primitive\nsupports it), only the bits that are used by promises. 
The\nignore_nonpromised_bits flag specifies which format the\nincoming data is in.\n\nArgs:\nbits: A bitarray (or compatible) containing the data read from the jtag controller's TDO pin.\nignore_nonpromised_bits: A boolean specifying if only promised bits are being returned (and thus the 2nd index of the promise must be used for slicing the incoming data).", "source": "codesearchnet"} {"code": "def rotate_texture(texture, rotation, x_offset=0.5, y_offset=0.5):\n (x, y) = texture\n x = (x.copy() - x_offset)\n y = (y.copy() - y_offset)\n angle = np.radians(rotation)\n x_rot = ((x * np.cos(angle)) + (y * np.sin(angle)))\n y_rot = ((x * (- np.sin(angle))) + (y * np.cos(angle)))\n return ((x_rot + x_offset), (y_rot + y_offset))", "docstring": "Rotates the given texture by a given angle.\n\nArgs:\ntexture (texture): the texture to rotate\nrotation (float): the angle of rotation in degrees\nx_offset (float): the x component of the center of rotation (optional)\ny_offset (float): the y component of the center of rotation (optional)\n\nReturns:\ntexture: A texture.", "source": "codesearchnet"} {"code": "def dominator_tree_to_dot(self, filename):\n \n def description(node):\n desc ='{}\\n'.format(node)\n desc += 'id: {}'.format(node.node_id)\n if node.dominance_frontier:\n desc += '\\ndominance frontier: {}'.format([n.node_id for n in node.dominance_frontier])\n return desc\n with open(filename, 'w', encoding='utf8') as f:\n f.write('digraph{\\n')\n for node in self.nodes:\n f.write('{}[label=\"{}\"];\\n'.format(node.node_id, description(node)))\n if node.immediate_dominator:\n f.write('{}->{};\\n'.format(node.immediate_dominator.node_id, node.node_id))\n\n f.write(\"}\\n\")", "docstring": "Export the dominator tree of the function to a dot file\nArgs:\nfilename (str)", "source": "juraj-google-style"} {"code": "def upload(self, local_fn: str, remote_fn: str='', dont_overwrite: bool=False):\n raise NotImplementedError()", "docstring": "Uploads given file to the task. If remote_fn is not specified, dumps it\ninto task current directory with the same name.\n\nArgs:\nlocal_fn: location of file locally\nremote_fn: location of file on task\ndont_overwrite: if True, will be no-op if target file exists", "source": "codesearchnet"} {"code": "def ng(self, wavelength):\n return (self.n(wavelength) - ((wavelength * 1e-09) * self.nDer1(wavelength)))", "docstring": "The group index with respect to wavelength.\n\nArgs:\nwavelength (float, list, None): The wavelength(s) the group\nindex will be evaluated at.\n\nReturns:\nfloat, list: The group index at the target wavelength(s).", "source": "codesearchnet"} {"code": "def get_contact(self, jid):\n \n try:\n return self.get_contacts()[jid.bare()]\n except KeyError:\n raise ContactNotFound\n except AttributeError:\n raise AttributeError(\"jid must be an aioxmpp.JID object\")", "docstring": "Returns a contact\n\nArgs:\njid (aioxmpp.JID): jid of the contact\n\nReturns:\ndict: the roster of contacts", "source": "juraj-google-style"} {"code": "def receiveds_not_parsed(receiveds):\n \n log.debug(\"Receiveds for this email are not parsed\")\n\n output = []\n counter = Counter()\n\n for i in receiveds[::-1]:\n j = {\"raw\": i.strip()}\n j[\"hop\"] = counter[\"hop\"] + 1\n counter[\"hop\"] += 1\n output.append(j)\n else:\n return output", "docstring": "If receiveds are not parsed, makes a new structure with raw\nfield. 
It's useful to have the same structure of receiveds\nparsed.\n\nArgs:\nreceiveds (list): list of raw receiveds headers\n\nReturns:\na list of not parsed receiveds headers with first hop in first position", "source": "juraj-google-style"} {"code": "def _binary_op(cls,\n x: 'TensorFluent',\n y: 'TensorFluent',\n op: Callable[[tf.Tensor, tf.Tensor], tf.Tensor],\n dtype: tf.DType) -> 'TensorFluent':\n \n \n s1 = x.scope.as_list()\n s2 = y.scope.as_list()\n scope, perm1, perm2 = TensorFluentScope.broadcast(s1, s2)\n if x.batch and perm1 != []:\n perm1 = [0] + [p+1 for p in perm1]\n if y.batch and perm2 != []:\n perm2 = [0] + [p+1 for p in perm2]\n x = x.transpose(perm1)\n y = y.transpose(perm2)\n\n \n reshape1, reshape2 = TensorFluentShape.broadcast(x.shape, y.shape)\n if reshape1 is not None:\n x = x.reshape(reshape1)\n if reshape2 is not None:\n y = y.reshape(reshape2)\n\n \n x = x.cast(dtype)\n y = y.cast(dtype)\n\n \n t = op(x.tensor, y.tensor)\n\n \n batch = x.batch or y.batch\n\n return TensorFluent(t, scope, batch=batch)", "docstring": "Returns a TensorFluent for the binary `op` applied to fluents `x` and `y`.\n\nArgs:\nx: The first operand.\ny: The second operand.\nop: The binary operator.\ndtype: The output's data type.\n\nReturns:\nA TensorFluent wrapping the binary operator's output.", "source": "juraj-google-style"} {"code": "def exec_resize(self, exec_id, height=None, width=None):\n \n\n if isinstance(exec_id, dict):\n exec_id = exec_id.get('Id')\n\n params = {'h': height, 'w': width}\n url = self._url(\"/exec/{0}/resize\", exec_id)\n res = self._post(url, params=params)\n self._raise_for_status(res)", "docstring": "Resize the tty session used by the specified exec command.\n\nArgs:\nexec_id (str): ID of the exec instance\nheight (int): Height of tty session\nwidth (int): Width of tty session", "source": "juraj-google-style"} {"code": "def Process(self, parser_mediator, plist_name, top_level, **kwargs):\n if (not plist_name.startswith(self.PLIST_PATH)):\n raise errors.WrongPlistPlugin(self.NAME, plist_name)\n super(AppleAccountPlugin, self).Process(parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level)", "docstring": "Check if it is a valid Apple account plist file name.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nplist_name (str): name of the plist.\ntop_level (dict[str, object]): plist top-level key.", "source": "codesearchnet"} {"code": "def to_gzipped_file(data, out=None):\n \n if not out:\n out = StringIO.StringIO()\n\n with gzip.GzipFile(fileobj=out, mode=\"w\") as f:\n f.write(data)\n\n out.seek(0)\n return out", "docstring": "Pack `data` to GZIP and write them to `out`. 
If `out` is not defined,\n:mod:`stringio` is used.\n\nArgs:\ndata (obj): Any packable data (str / unicode / whatever).\nout (file, default None): Optional opened file handler.\n\nReturns:\nobj: File handler with packed data seeked at the beginning.", "source": "juraj-google-style"} {"code": "def trace_region(self, region_index):\n cmd = enums.JLinkTraceCommand.GET_REGION_PROPS_EX\n region = structs.JLinkTraceRegion()\n region.RegionIndex = int(region_index)\n res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(region))\n if (res == 1):\n raise errors.JLinkException('Failed to get trace region.')\n return region", "docstring": "Retrieves the properties of a trace region.\n\nArgs:\nself (JLink): the ``JLink`` instance.\nregion_index (int): the trace region index.\n\nReturns:\nAn instance of ``JLinkTraceRegion`` describing the specified region.", "source": "codesearchnet"} {"code": "def create_dummy_func(func, dependency):\n assert (not building_rtfd())\n if isinstance(dependency, (list, tuple)):\n dependency = ','.join(dependency)\n\n def _dummy(*args, **kwargs):\n raise ImportError(\"Cannot import '{}', therefore '{}' is not available\".format(dependency, func))\n return _dummy", "docstring": "When a dependency of a function is not available, create a dummy function which throws ImportError when used.\n\nArgs:\nfunc (str): name of the function.\ndependency (str or list[str]): name(s) of the dependency.\n\nReturns:\nfunction: a function object", "source": "codesearchnet"} {"code": "def trigger_if_changed(self, obj, old):\n \n new_value = self.__get__(obj, obj.__class__)\n if not self.property.matches(old, new_value):\n self._trigger(obj, old, new_value)", "docstring": "Send a change event notification if the property is set to a\nvalue is not equal to ``old``.\n\nArgs:\nobj (HasProps)\nThe object the property is being set on.\n\nold (obj) :\nThe previous value of the property to compare\n\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def get_num_bytes(self, batch: Sequence[numpy.ndarray]) -> int:\n return sum((np_array.itemsize for np_array in batch))", "docstring": "Returns:\nThe number of bytes of data for a batch.", "source": "github-repos"} {"code": "def _atleast_nd(n, new_shape, *arys):\n\n def f(x):\n x = asarray(x)\n return asarray(np_utils.cond(np_utils.greater(n, array_ops.rank(x)), lambda: reshape(x, new_shape(n, array_ops.shape(x))), lambda: x))\n arys = list(map(f, arys))\n if len(arys) == 1:\n return arys[0]\n else:\n return arys", "docstring": "Reshape arrays to be at least `n`-dimensional.\n\nArgs:\nn: The minimal rank.\nnew_shape: a function that takes `n` and the old shape and returns the\ndesired new shape.\n*arys: ndarray(s) to be reshaped.\n\nReturns:\nThe reshaped array(s).", "source": "github-repos"} {"code": "def process(self, element, model_state=beam.DoFn.StateParam(BIRCH_MODEL_SPEC), collected_docs_state=beam.DoFn.StateParam(DATA_ITEMS_SPEC), collected_embeddings_state=beam.DoFn.StateParam(EMBEDDINGS_SPEC), update_counter_state=beam.DoFn.StateParam(UPDATE_COUNTER_SPEC), *args, **kwargs):\n clustering = model_state.read() or Birch(n_clusters=None, threshold=0.7)\n collected_documents = collected_docs_state.read() or {}\n collected_embeddings = collected_embeddings_state.read() or {}\n update_counter = update_counter_state.read() or Counter()\n _, doc = element\n doc_id = doc['id']\n embedding_vector = doc['embedding']\n collected_embeddings[doc_id] = embedding_vector\n collected_documents[doc_id] = {'id': doc_id, 'text': doc['text']}\n 
update_counter = len(collected_documents)\n clustering.partial_fit(np.atleast_2d(embedding_vector))\n cluster_labels = clustering.predict(np.array(list(collected_embeddings.values())))\n model_state.write(clustering)\n collected_docs_state.write(collected_documents)\n collected_embeddings_state.write(collected_embeddings)\n update_counter_state.write(update_counter)\n yield {'labels': cluster_labels, 'docs': collected_documents, 'id': list(collected_embeddings.keys()), 'counter': update_counter}", "docstring": "Takes the embedding of a document and updates the clustering model\n\nArgs:\nelement: The input element to be processed.\nmodel_state: This is the state of the clustering model. It is a stateful parameter,\nwhich means that it will be updated after each call to the process function.\ncollected_docs_state: This is a stateful dictionary that stores the documents that\nhave been processed so far.\ncollected_embeddings_state: This is a dictionary of document IDs and their embeddings.\nupdate_counter_state: This is a counter that keeps track of how many documents have been\nprocessed.", "source": "github-repos"} {"code": "def lresolve(self, path):\n \n path = make_string_path(path)\n if path == self.root.name:\n \n return self.root\n\n \n path = self._path_without_trailing_separators(path)\n path = self._original_path(path)\n\n parent_directory, child_name = self.splitpath(path)\n if not parent_directory:\n parent_directory = self.cwd\n try:\n parent_obj = self.resolve(parent_directory)\n assert parent_obj\n if not isinstance(parent_obj, FakeDirectory):\n if not self.is_windows_fs and isinstance(parent_obj, FakeFile):\n self.raise_io_error(errno.ENOTDIR, path)\n self.raise_io_error(errno.ENOENT, path)\n return parent_obj.get_entry(child_name)\n except KeyError:\n self.raise_io_error(errno.ENOENT, path)", "docstring": "Search for the specified object, resolving only parent links.\n\nThis is analogous to the stat/lstat difference. 
This resolves links\n*to* the object but not of the final object itself.\n\nArgs:\npath: Specifies target FakeFile object to retrieve.\n\nReturns:\nThe FakeFile object corresponding to path.\n\nRaises:\nIOError: if the object is not found.", "source": "juraj-google-style"} {"code": "def export(export_path, vocabulary, embeddings, num_oov_buckets, preprocess_text):\n tmpdir = tempfile.mkdtemp()\n vocabulary_file = os.path.join(tmpdir, 'tokens.txt')\n with tf.gfile.GFile(vocabulary_file, 'w') as f:\n f.write('\\n'.join(vocabulary))\n vocab_size = len(vocabulary)\n embeddings_dim = embeddings.shape[1]\n spec = make_module_spec(vocabulary_file, vocab_size, embeddings_dim, num_oov_buckets, preprocess_text)\n try:\n with tf.Graph().as_default():\n m = hub.Module(spec)\n p_embeddings = tf.placeholder(tf.float32)\n load_embeddings = tf.assign(m.variable_map[EMBEDDINGS_VAR_NAME], p_embeddings)\n with tf.Session() as sess:\n sess.run([load_embeddings], feed_dict={p_embeddings: embeddings})\n m.export(export_path, sess)\n finally:\n shutil.rmtree(tmpdir)", "docstring": "Exports a TF-Hub module that performs embedding lookups.\n\nArgs:\nexport_path: Location to export the module.\nvocabulary: List of the N tokens in the vocabulary.\nembeddings: Numpy array of shape [N+K,M] the first N rows are the\nM dimensional embeddings for the respective tokens and the next K\nrows are for the K out-of-vocabulary buckets.\nnum_oov_buckets: How many out-of-vocabulary buckets to add.\npreprocess_text: Whether to preprocess the input tensor by removing\npunctuation and splitting on spaces.", "source": "codesearchnet"} {"code": "def where(self, cond, other, **kwargs):\n assert isinstance(cond, type(self)), 'Must have the same DataManager subclass to perform this operation'\n if isinstance(other, type(self)):\n\n def where_builder_first_pass(cond, other, **kwargs):\n return cond.where(cond, other, **kwargs)\n\n def where_builder_second_pass(df, new_other, **kwargs):\n return df.where(new_other.eq(True), new_other, **kwargs)\n first_pass = cond._inter_manager_operations(other, 'left', where_builder_first_pass)\n final_pass = self._inter_manager_operations(first_pass, 'left', where_builder_second_pass)\n return self.__constructor__(final_pass.data, self.index, self.columns)\n else:\n axis = kwargs.get('axis', 0)\n if isinstance(other, pandas.Series):\n other.index = pandas.RangeIndex(len(other.index))\n\n def where_builder_series(df, cond):\n if (axis == 0):\n df.index = pandas.RangeIndex(len(df.index))\n cond.index = pandas.RangeIndex(len(cond.index))\n else:\n df.columns = pandas.RangeIndex(len(df.columns))\n cond.columns = pandas.RangeIndex(len(cond.columns))\n return df.where(cond, other, **kwargs)\n (reindexed_self, reindexed_cond, a) = self.copartition(axis, cond, 'left', False)\n reindexed_cond = reindexed_cond[0]\n new_data = reindexed_self.inter_data_operation(axis, (lambda l, r: where_builder_series(l, r)), reindexed_cond)\n return self.__constructor__(new_data, self.index, self.columns)", "docstring": "Gets values from this manager where cond is true else from other.\n\nArgs:\ncond: Condition on which to evaluate values.\n\nReturns:\nNew DataManager with updated data and index.", "source": "codesearchnet"} {"code": "def setup_sdk_logging(logfile=None, loglevel=logging.INFO):\n \n\n logging.root.setLevel(logging.DEBUG)\n logging.root.addHandler(logging.NullHandler())\n if logfile:\n fh = logging.FileHandler(logfile)\n fh.setLevel(loglevel)\n fh.setFormatter(get_default_log_formatter())\n 
logging.root.addHandler(fh)", "docstring": "Setup a NullHandler to the root logger. If ``logfile`` is passed,\nadditionally add a FileHandler in ``loglevel`` level.\n\nArgs:\nlogfile(str): A path to setup a log file.\nloglevel(int): :mod:`logging` log level.\n\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def get_number_of_image_tokens(self, height: int, width: int, images_kwargs=None):\n min_patches = images_kwargs.get('min_patches', None) or self.min_patches\n max_patches = images_kwargs.get('max_patches', None) or self.max_patches\n patch_size = images_kwargs.get('size', None) or self.size\n crop_to_patches = images_kwargs.get('crop_to_patches', None) or self.crop_to_patches\n num_patches = 1\n if crop_to_patches and max_patches > 1:\n num_columns, num_rows = get_optimal_tiled_canvas((height, width), (patch_size['height'], patch_size['width']), min_patches, max_patches)\n num_patches += num_columns * num_rows\n return num_patches", "docstring": "A utility that returns number patches for a given image size.\n\nArgs:\nheight (`int`):\nHeight of the input image.\nwidth (`int`):\nWidth of the input image.\nimages_kwargs (`dict`, *optional*)\nAny kwargs to override defaults of the image processor.\nReturns:\n`int`: Number of patches per image.", "source": "github-repos"} {"code": "def build_input(data, batch_size, dataset, train):\n \n image_size = 32\n depth = 3\n num_classes = 10 if dataset == \"cifar10\" else 100\n images, labels = data\n num_samples = images.shape[0] - images.shape[0] % batch_size\n dataset = tf.contrib.data.Dataset.from_tensor_slices(\n (images[:num_samples], labels[:num_samples]))\n\n def map_train(image, label):\n image = tf.image.resize_image_with_crop_or_pad(image, image_size + 4,\n image_size + 4)\n image = tf.random_crop(image, [image_size, image_size, 3])\n image = tf.image.random_flip_left_right(image)\n image = tf.image.per_image_standardization(image)\n return (image, label)\n\n def map_test(image, label):\n image = tf.image.resize_image_with_crop_or_pad(image, image_size,\n image_size)\n image = tf.image.per_image_standardization(image)\n return (image, label)\n\n dataset = dataset.map(map_train if train else map_test)\n dataset = dataset.batch(batch_size)\n dataset = dataset.repeat()\n if train:\n dataset = dataset.shuffle(buffer_size=16 * batch_size)\n images, labels = dataset.make_one_shot_iterator().get_next()\n images = tf.reshape(images, [batch_size, image_size, image_size, depth])\n labels = tf.reshape(labels, [batch_size, 1])\n indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])\n labels = tf.sparse_to_dense(\n tf.concat([indices, labels], 1),\n [batch_size, num_classes], 1.0, 0.0)\n\n assert len(images.get_shape()) == 4\n assert images.get_shape()[0] == batch_size\n assert images.get_shape()[-1] == 3\n assert len(labels.get_shape()) == 2\n assert labels.get_shape()[0] == batch_size\n assert labels.get_shape()[1] == num_classes\n if not train:\n tf.summary.image(\"images\", images)\n return images, labels", "docstring": "Build CIFAR image and labels.\n\nArgs:\ndata_path: Filename for cifar10 data.\nbatch_size: Input batch size.\ntrain: True if we are training and false if we are testing.\n\nReturns:\nimages: Batches of images of size\n[batch_size, image_size, image_size, 3].\nlabels: Batches of labels of size [batch_size, num_classes].\n\nRaises:\nValueError: When the specified dataset is not supported.", "source": "juraj-google-style"} {"code": "def _GetSerializedAttributeContainerList(self, container_type):\n \n 
container_list = self._serialized_attribute_containers.get(\n container_type, None)\n if not container_list:\n container_list = SerializedAttributeContainerList()\n self._serialized_attribute_containers[container_type] = container_list\n\n return container_list", "docstring": "Retrieves a serialized attribute container list.\n\nArgs:\ncontainer_type (str): attribute container type.\n\nReturns:\nSerializedAttributeContainerList: serialized attribute container list.", "source": "juraj-google-style"} {"code": "def _generate_graph_dicts(self):\n transforms = self._pipeline_proto.components.transforms\n vertex_dict = collections.defaultdict(dict)\n edge_dict = collections.defaultdict(dict)\n self._edge_to_vertex_pairs = collections.defaultdict(list)\n for _, transform in self._top_level_transforms():\n vertex_dict[self._decorate(transform.unique_name)] = {}\n for pcoll_id in transform.outputs.values():\n pcoll_node = None\n if self._pipeline_instrument:\n cacheable = self._pipeline_instrument.cacheables.get(pcoll_id)\n pcoll_node = cacheable.var if cacheable else None\n if not pcoll_node:\n pcoll_node = 'pcoll%s' % (hash(pcoll_id) % 10000)\n vertex_dict[pcoll_node] = {'shape': 'circle', 'label': ''}\n else:\n vertex_dict[pcoll_node] = {'shape': 'circle'}\n if pcoll_id not in self._consumers:\n self._edge_to_vertex_pairs[pcoll_id].append((self._decorate(transform.unique_name), pcoll_node))\n edge_dict[self._decorate(transform.unique_name), pcoll_node] = {}\n else:\n for consumer in self._consumers[pcoll_id]:\n producer_name = self._decorate(transform.unique_name)\n consumer_name = self._decorate(transforms[consumer].unique_name)\n self._edge_to_vertex_pairs[pcoll_id].append((producer_name, pcoll_node))\n edge_dict[producer_name, pcoll_node] = {}\n self._edge_to_vertex_pairs[pcoll_id].append((pcoll_node, consumer_name))\n edge_dict[pcoll_node, consumer_name] = {}\n return (vertex_dict, edge_dict)", "docstring": "From pipeline_proto and other info, generate the graph.\n\nReturns:\nvertex_dict: (Dict[str, Dict[str, str]]) vertex mapped to attributes.\nedge_dict: (Dict[(str, str), Dict[str, str]]) vertex pair mapped to the\nedge's attribute.", "source": "github-repos"} {"code": "def __init__(self, location=None, **kwargs):\n \n parent = None\n if 'parent' in kwargs:\n parent = kwargs['parent']\n del kwargs['parent']\n\n if parent:\n raise ValueError('Parent value set.')\n\n super(FakePathSpec, self).__init__(\n location=location, parent=parent, **kwargs)", "docstring": "Initializes a path specification.\n\nNote that the fake path specification cannot have a parent.\n\nArgs:\nlocation (Optional[str]): location e.g. 
/opt/dfvfs.\n\nRaises:\nValueError: when parent is set.", "source": "juraj-google-style"} {"code": "def nic_a_c(msg):\n \n tc = typecode(msg)\n\n if tc != 31:\n raise RuntimeError(\"%s: Not a status operation message, expecting TC = 31\" % msg)\n\n msgbin = common.hex2bin(msg)\n nic_a = int(msgbin[75])\n nic_c = int(msgbin[51])\n\n return nic_a, nic_c", "docstring": "Obtain NICa/c, navigation integrity category supplements a and c\n\nArgs:\nmsg (string): 28 bytes hexadecimal message string\n\nReturns:\n(int, int): NICa and NICc number (0 or 1)", "source": "juraj-google-style"} {"code": "def _project_single_observable(self, **kwargs: Dict[str, Any]) -> Hist:\n \n \n assert isinstance(self.output_attribute_name, str)\n\n \n output_hist, projection_name, projection_name_args, = self._project_observable(\n input_key = \"single_observable\",\n input_observable = self.observable_to_project_from,\n **kwargs,\n )\n \n output_hist_args = projection_name_args\n output_hist_args.update({ \n \"output_hist\": output_hist,\n \"projection_name\": projection_name\n })\n\n \n output_hist = self.output_hist(**output_hist_args) \n\n \n if not hasattr(self.output_observable, self.output_attribute_name):\n raise ValueError(f\"Attempted to assign hist to non-existent attribute {self.output_attribute_name} of object {self.output_observable}. Check the attribute name!\")\n \n setattr(self.output_observable, self.output_attribute_name, output_hist)\n\n \n return output_hist", "docstring": "Driver function for projecting and storing a single observable.\n\nArgs:\nkwargs (dict): Additional named args to be passed to projection_name(...) and output_key_name(...)\nReturns:\nThe projected histogram. The histogram is also stored in the output specified by ``output_observable``.", "source": "juraj-google-style"} {"code": "def _auto_kpath_labels(kpt_list):\n \n\n \n label_i = 1\n kpt_labels = {}\n for kpt in chain(*kpt_list):\n if tuple(kpt) in kpt_labels:\n continue\n else:\n kpt_labels.update({tuple(kpt): '({})'.format(label_i)})\n label_i += 1\n\n \n kpath_labels = [[kpt_labels[tuple(kpt)] for kpt in segment]\n for segment in kpt_list]\n\n return kpath_labels", "docstring": "Get a default set of labels (1), (2), (3)... 
for a k-point path\n\nRepeated points will be identified and the labels re-used.\n\nArgs:\nkpt_list (list): Nested list representing k-point path segments,\ne.g.::\n\n[[[0., 0., 0.], [0., 0., 0.5], [0., 0.5, 0.5]],\n[[0.5, 0.5, 0.], [0., 0., 0.]]]\n\nReturns:\nlist: Corresponding nested list of labels, e.g.::\n\n[['(1)', '(2)', '(3)'], ['(4)', '(1)']]", "source": "juraj-google-style"} {"code": "def get_high_accuracy_voronoi_nodes(structure, rad_dict, probe_rad=0.1):\n with ScratchDir('.'):\n name = 'temp_zeo1'\n zeo_inp_filename = (name + '.cssr')\n ZeoCssr(structure).write_file(zeo_inp_filename)\n rad_flag = True\n rad_file = (name + '.rad')\n with open(rad_file, 'w+') as fp:\n for el in rad_dict.keys():\n print('{} {}'.format(el, rad_dict[el].real), file=fp)\n atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)\n red_ha_vornet = prune_voronoi_network_close_node(atmnet)\n red_ha_vornet.analyze_writeto_XYZ(name, probe_rad, atmnet)\n voro_out_filename = (name + '_voro.xyz')\n voro_node_mol = ZeoVoronoiXYZ.from_file(voro_out_filename).molecule\n species = (['X'] * len(voro_node_mol.sites))\n coords = []\n prop = []\n for site in voro_node_mol.sites:\n coords.append(list(site.coords))\n prop.append(site.properties['voronoi_radius'])\n lattice = Lattice.from_lengths_and_angles(structure.lattice.abc, structure.lattice.angles)\n vor_node_struct = Structure(lattice, species, coords, coords_are_cartesian=True, to_unit_cell=True, site_properties={'voronoi_radius': prop})\n return vor_node_struct", "docstring": "Analyze the void space in the input structure using high accuracy\nvoronoi decomposition.\nCalls Zeo++ for Voronoi decomposition.\n\nArgs:\nstructure: pymatgen.core.structure.Structure\nrad_dict (optional): Dictionary of radii of elements in structure.\nIf not given, Zeo++ default values are used.\nNote: Zeo++ uses atomic radii of elements.\nFor ionic structures, pass rad_dict with ionic radii\nprobe_rad (optional): Sampling probe radius in Angstroms.\nDefault is 0.1 A\n\nReturns:\nvoronoi nodes as pymatgen.core.structure.Strucutre within the\nunit cell defined by the lattice of input structure\nvoronoi face centers as pymatgen.core.structure.Strucutre within the\nunit cell defined by the lattice of input structure", "source": "codesearchnet"} {"code": "def cost(self):\n total = 0.0\n for (family, rules) in self._excludes.iteritems():\n cost = sum((x.cost() for x in rules))\n if family:\n cost = (cost / float(10))\n total += cost\n return total", "docstring": "Get the approximate cost of this filter.\n\nCost is the total cost of the exclusion rules in this filter. The cost\nof family-specific filters is divided by 10.\n\nReturns:\nfloat: The approximate cost of the filter.", "source": "codesearchnet"} {"code": "def hardware_info(self, mask=4294967295):\n buf = (ctypes.c_uint32 * 32)()\n res = self._dll.JLINKARM_GetHWInfo(mask, ctypes.byref(buf))\n if (res != 0):\n raise errors.JLinkException(res)\n return list(buf)", "docstring": "Returns a list of 32 integer values corresponding to the bitfields\nspecifying the power consumption of the target.\n\nThe values returned by this function only have significance if the\nJ-Link is powering the target.\n\nThe words, indexed, have the following significance:\n0. If ``1``, target is powered via J-Link.\n1. Overcurrent bitfield:\n0: No overcurrent.\n1: Overcurrent happened. 2ms @ 3000mA\n2: Overcurrent happened. 10ms @ 1000mA\n3: Overcurrent happened. 40ms @ 400mA\n2. Power consumption of target (mA).\n3. 
Peak of target power consumption (mA).\n4. Peak of target power consumption during J-Link operation (mA).\n\nArgs:\nself (JLink): the ``JLink`` instance\nmask (int): bit mask to decide which hardware information words are\nreturned (defaults to all the words).\n\nReturns:\nList of bitfields specifying different states based on their index\nwithin the list and their value.\n\nRaises:\nJLinkException: on hardware error.", "source": "codesearchnet"} {"code": "def _match_greedily(reaction, compound_formula, score_func):\n \n uninstantiated_left, uninstantiated_right = _reaction_to_dicts(reaction)\n\n def compound_instances(uninstantiated):\n instances = []\n for compound, value in iteritems(uninstantiated):\n if value > 0:\n f = compound_formula[compound.name]\n instances.append(_CompoundInstance(compound, value, f))\n\n for inst in instances:\n uninstantiated[inst.compound] -= 1\n\n return instances\n\n def instantiate(uninstantiated, compound):\n n = uninstantiated[compound]\n if n > 0:\n f = compound_formula[compound.name]\n inst = _CompoundInstance(compound, n, f)\n uninstantiated[compound] -= 1\n return inst\n\n return None\n\n left = compound_instances(uninstantiated_left)\n right = compound_instances(uninstantiated_right)\n instances = left + right\n\n pairs = {}\n for inst1, inst2 in product(left, right):\n result = score_func(inst1, inst2)\n if result is not None:\n pairs[inst1, inst2] = result\n\n def inst_pair_sort_key(entry):\n \n (inst1, inst2), score = entry\n c1, c2 = inst1.compound, inst2.compound\n same_compound = c1.name == c2.name and c1.compartment != c2.compartment\n return same_compound, score, c1.name, c2.name\n\n transfer = {}\n while len(pairs) > 0:\n (inst1, inst2), _ = max(iteritems(pairs), key=inst_pair_sort_key)\n common = inst1.formula & inst2.formula\n\n key = (inst1.compound, inst1.index), (inst2.compound, inst2.index)\n if key not in transfer:\n transfer[key] = Formula()\n transfer[key] |= common\n\n for inst in (inst1, inst2):\n inst.formula -= common\n\n to_insert = set()\n\n inst = instantiate(uninstantiated_left, inst1.compound)\n if inst is not None:\n left.append(inst)\n instances.append(inst)\n to_insert.add(inst)\n\n inst = instantiate(uninstantiated_right, inst2.compound)\n if inst is not None:\n right.append(inst)\n instances.append(inst)\n to_insert.add(inst)\n\n to_update = {inst1, inst2}\n\n to_delete = set()\n for inst1, inst2 in pairs:\n if inst1 in to_update or inst2 in to_update:\n if len(inst1.formula) > 0 and len(inst2.formula) > 0:\n result = score_func(inst1, inst2)\n if result is None:\n to_delete.add((inst1, inst2))\n else:\n pairs[inst1, inst2] = result\n else:\n to_delete.add((inst1, inst2))\n\n for pair in to_delete:\n del pairs[pair]\n\n for inst1, inst2 in product(left, right):\n if inst1 in to_insert or inst2 in to_insert:\n result = score_func(inst1, inst2)\n if result is not None:\n pairs[inst1, inst2] = result\n\n balance = {}\n for inst in instances:\n if len(inst.formula) > 0:\n key = inst.compound, inst.index\n balance[key] = inst.formula\n\n return transfer, balance", "docstring": "Match compounds greedily based on score function.\n\nArgs:\nreaction: Reaction equation :class:`psamm.reaction.Reaction`.\ncompound_formula: Dictionary mapping compound IDs to\n:class:`psamm.formula.Formula`. 
Formulas must be flattened.\nscore_func: Function that takes two :class:`_CompoundInstance` and\nreturns the score.", "source": "juraj-google-style"} {"code": "def set_data(self, data={}, datetime_fields=[]):\n \n if datetime_fields:\n for field in datetime_fields:\n if field in data:\n data[field] = self._parse_datetime(data[field])\n\n super(CampfireEntity, self).set_data(data)", "docstring": "Set entity data\n\nArgs:\ndata (dict): Entity data\ndatetime_fields (array): Fields that should be parsed as datetimes", "source": "juraj-google-style"} {"code": "def path_new_using_map(\n m: tcod.map.Map, dcost: float = 1.41\n) -> tcod.path.AStar:\n \n return tcod.path.AStar(m, dcost)", "docstring": "Return a new AStar using the given Map.\n\nArgs:\nm (Map): A Map instance.\ndcost (float): The path-finding cost of diagonal movement.\nCan be set to 0 to disable diagonal movement.\nReturns:\nAStar: A new AStar instance.", "source": "juraj-google-style"} {"code": "def _find_furious_yaml(start, checked):\n directory = start\n while (directory not in checked):\n checked.add(directory)\n for fs_yaml_name in FURIOUS_YAML_NAMES:\n yaml_path = os.path.join(directory, fs_yaml_name)\n if os.path.exists(yaml_path):\n return yaml_path\n directory = os.path.dirname(directory)\n return None", "docstring": "Traverse the directory tree identified by start\nuntil a directory already in checked is encountered or the path\nof furious.yaml is found.\n\nChecked is present both to make the loop termination easy\nto reason about and so the same directories do not get\nrechecked\n\nArgs:\nstart: the path to start looking in and work upward from\nchecked: the set of already checked directories\n\nReturns:\nthe path of the furious.yaml file or None if it is not found", "source": "codesearchnet"} {"code": "def insert(self, start_time: int, schedule: ScheduleComponent) -> 'ScheduleComponent':\n return ops.insert(self, start_time, schedule)", "docstring": "Return a new schedule with `schedule` inserted within `self` at `start_time`.\n\nArgs:\nstart_time: time to be inserted\nschedule: schedule to be inserted", "source": "codesearchnet"} {"code": "def limit(self, count):\n return self.__class__(self._parent, projection=self._projection, field_filters=self._field_filters, orders=self._orders, limit=count, offset=self._offset, start_at=self._start_at, end_at=self._end_at)", "docstring": "Limit a query to return a fixed number of results.\n\nIf the current query already has a limit set, this will overwrite it.\n\nArgs:\ncount (int): Maximum number of documents to return that match\nthe query.\n\nReturns:\n~.firestore_v1beta1.query.Query: A limited query. 
Acts as a\ncopy of the current query, modified with the newly added\n\"limit\" filter.", "source": "codesearchnet"} {"code": "def move(self, destination):\n \n self.relocate(destination)\n shutil.move(self.path, destination)\n self._path = destination", "docstring": "Reconfigure and move the virtual environment to another path.\n\nArgs:\ndestination (str): The target path of the virtual environment.\n\nNote:\nUnlike `relocate`, this method *will* move the virtual to the\ngiven path.", "source": "juraj-google-style"} {"code": "async def send_message(\n self, request: str, response_expected: bool, **kwargs: Any\n ) -> Response:\n \n await self.socket.send(request)\n if response_expected:\n response_text = await self.socket.recv()\n return Response(response_text)\n return Response(\"\")", "docstring": "Transport the message to the server and return the response.\n\nArgs:\nrequest: The JSON-RPC request string.\nresponse_expected: Whether the request expects a response.\n\nReturns:\nA Response object.", "source": "juraj-google-style"} {"code": "def apply_default_prefetch(input_source_or_dataflow, trainer):\n \n if not isinstance(input_source_or_dataflow, InputSource):\n \n if type(trainer) == SimpleTrainer:\n input = FeedInput(input_source_or_dataflow)\n else:\n logger.info(\"Automatically applying QueueInput on the DataFlow.\")\n input = QueueInput(input_source_or_dataflow)\n else:\n input = input_source_or_dataflow\n if hasattr(trainer, 'devices'):\n towers = trainer.devices\n if len(towers) > 1:\n \n assert not isinstance(trainer, SimpleTrainer)\n\n if isinstance(input, FeedfreeInput) and \\\n not isinstance(input, (StagingInput, DummyConstantInput)):\n logger.info(\"Automatically applying StagingInput on the DataFlow.\")\n input = StagingInput(input)\n return input", "docstring": "Apply a set of default rules to make a fast :class:`InputSource`.\n\nArgs:\ninput_source_or_dataflow(InputSource | DataFlow):\ntrainer (Trainer):\n\nReturns:\nInputSource", "source": "juraj-google-style"} {"code": "def send_email_message(self, recipient, subject, html_message, text_message, sender_email, sender_name):\n if (not current_app.testing):\n try:\n from sendgrid.helpers.mail import Email, Content, Substitution, Mail\n from_email = Email(sender_email, sender_name)\n to_email = Email(recipient)\n text_content = Content('text/plain', text_message)\n html_content = Content('text/html', html_message)\n mail = Mail(from_email, subject, to_email, text_content)\n mail.add_content(html_content)\n response = self.sg.client.mail.send.post(request_body=mail.get())\n print(response.status_code)\n print(response.body)\n print(response.headers)\n except ImportError:\n raise ConfigError(SENDGRID_IMPORT_ERROR_MESSAGE)\n except Exception as e:\n print(e)\n print(e.body)\n raise", "docstring": "Send email message via sendgrid-python.\n\nArgs:\nrecipient: Email address or tuple of (Name, Email-address).\nsubject: Subject line.\nhtml_message: The message body in HTML.\ntext_message: The message body in plain text.", "source": "codesearchnet"} {"code": "def get_iso3_country_code(cls, country, use_live=True, exception=None):\n countriesdata = cls.countriesdata(use_live=use_live)\n countryupper = country.upper()\n len_countryupper = len(countryupper)\n if (len_countryupper == 3):\n if (countryupper in countriesdata['countries']):\n return countryupper\n elif (len_countryupper == 2):\n iso3 = countriesdata['iso2iso3'].get(countryupper)\n if (iso3 is not None):\n return iso3\n iso3 = 
countriesdata['countrynames2iso3'].get(countryupper)\n if (iso3 is not None):\n return iso3\n for candidate in cls.expand_countryname_abbrevs(countryupper):\n iso3 = countriesdata['countrynames2iso3'].get(candidate)\n if (iso3 is not None):\n return iso3\n if (exception is not None):\n raise exception\n return None", "docstring": "Get ISO3 code for cls. Only exact matches or None are returned.\n\nArgs:\ncountry (str): Country for which to get ISO3 code\nuse_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\nexception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\nReturns:\nOptional[str]: ISO3 country code or None", "source": "codesearchnet"} {"code": "def mkdir_p(path_to_dir):\n \n try:\n os.makedirs(path_to_dir)\n except OSError as e: \n if e.errno == EEXIST and os.path.isdir(path_to_dir):\n logger.debug(\n \"Directory %s already exists. Skipping.\" % path_to_dir)\n else:\n raise e", "docstring": "Make directory(ies).\n\nThis function behaves like mkdir -p.\n\nArgs:\npath_to_dir (:obj:`str`): Path to the directory to make.", "source": "juraj-google-style"} {"code": "def can_file_be_synced_on_current_platform(path):\n can_be_synced = True\n fullpath = os.path.join(os.environ['HOME'], path)\n library_path = os.path.join(os.environ['HOME'], 'Library/')\n if (platform.system() == constants.PLATFORM_LINUX):\n if fullpath.startswith(library_path):\n can_be_synced = False\n return can_be_synced", "docstring": "Check if the given path can be synced locally.\n\nCheck if it makes sense to sync the file at the given path on the current\nplatform.\nFor now we don't sync any file in the ~/Library folder on GNU/Linux.\nThere might be other exceptions in the future.\n\nArgs:\n(str): Path to the file or folder to check. If relative, prepend it\nwith the home folder.\n'abc' becomes '~/abc'\n'/def' stays '/def'\n\nReturns:\n(bool): True if given file can be synced", "source": "codesearchnet"} {"code": "def __init__(self, details):\n\t\t\n\n\t\t\n\t\tif not isinstance(details, dict):\n\t\t\traise ValueError('details in ' + self.__class__.__name__ + '.' 
+ sys._getframe().f_code.co_name + ' must be a dict')\n\n\t\t\n\t\tself._nodes = {}\n\t\tself._requires = {}\n\n\t\t\n\t\tfor k in tuple(details.keys()):\n\n\t\t\t\n\t\t\tif _standardField.match(k):\n\n\t\t\t\t\n\t\t\t\tif isinstance(details[k], _NodeInterface):\n\n\t\t\t\t\t\n\t\t\t\t\tself._nodes[k] = details[k]\n\n\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tself._nodes[k] = _child(details[k])\n\n\t\t\t\t\n\t\t\t\tdel details[k]\n\n\t\t\n\t\tif '__require__' in details:\n\t\t\tself.requires(details['__require__'])\n\t\t\tdel details['__require__']\n\n\t\t\n\t\tsuper(Parent, self).__init__(details, 'Parent')", "docstring": "Constructor\n\nInitialises the instance\n\nArguments:\ndetails {dict} -- Details describing the type of values allowed for\nthe node\n\nRaises:\nValueError\n\nReturns:\nParent", "source": "juraj-google-style"} {"code": "def execute(self):\n \n \n \n\n \n \n\n self.generate_workflow_description()\n\n \n if self.batch_values:\n self.id = self.workflow.launch_batch_workflow(self.definition)\n\n \n else:\n self.id = self.workflow.launch(self.definition)\n\n return self.id", "docstring": "Execute the workflow.\n\nArgs:\nNone\n\nReturns:\nWorkflow_id", "source": "juraj-google-style"} {"code": "def _ParseDLSPageHeader(self, file_object, page_offset):\n \n page_header_map = self._GetDataTypeMap('dls_page_header')\n\n try:\n page_header, page_size = self._ReadStructureFromFileObject(\n file_object, page_offset, page_header_map)\n except (ValueError, errors.ParseError) as exception:\n raise errors.ParseError(\n 'Unable to parse page header at offset: 0x{0:08x} '\n 'with error: {1!s}'.format(page_offset, exception))\n\n if page_header.signature not in self._DLS_SIGNATURES:\n raise errors.UnableToParseFile(\n 'Unsupported page header signature at offset: 0x{0:08x}'.format(\n page_offset))\n\n return page_header, page_size", "docstring": "Parses a DLS page header from a file-like object.\n\nArgs:\nfile_object (file): file-like object to read the header from.\npage_offset (int): offset of the start of the page header, relative\nto the start of the file.\n\nReturns:\ntuple: containing:\n\ndls_page_header: parsed record structure.\nint: header size.\n\nRaises:\nParseError: when the header cannot be parsed.", "source": "juraj-google-style"} {"code": "def parse_signed_request(self, signed_request):\n\n def _b64_normalize(s):\n appendix = ('=' * (4 - (len(s) % 4)))\n return (s.replace('-', '+').replace('_', '/') + appendix)\n sr = str(signed_request)\n logging.info(('parse signed request: %s' % sr))\n (enc_sig, enc_payload) = sr.split('.', 1)\n sig = base64.b64decode(_b64_normalize(enc_sig))\n data = _parse_json(base64.b64decode(_b64_normalize(enc_payload)))\n if (data['algorithm'] != u'HMAC-SHA256'):\n return None\n expected_sig = hmac.new(self.client_secret, enc_payload, hashlib.sha256).digest()\n if (expected_sig == sig):\n data.user_id = data.uid = data.get('user_id', None)\n data.access_token = data.get('oauth_token', None)\n expires = data.get('expires', None)\n if expires:\n data.expires = data.expires_in = (time.time() + expires)\n return data\n return None", "docstring": "parse signed request when using in-site app.\n\nReturns:\ndict object like { 'uid': 12345, 'access_token': 'ABC123XYZ', 'expires': unix-timestamp },\nor None if parse failed.", "source": "codesearchnet"} {"code": "def _parse_response(self, respond):\n mobj = self._max_qubit_error_re.match(respond.text)\n if mobj:\n raise RegisterSizeError('device register size must be <= {}'.format(mobj.group(1)))\n return True", 
"docstring": "parse text of response for HTTP errors\n\nThis parses the text of the response to decide whether to\nretry request or raise exception. At the moment this only\ndetects an exception condition.\n\nArgs:\nrespond (Response): requests.Response object\n\nReturns:\nbool: False if the request should be retried, True\nif not.\n\nRaises:\nRegisterSizeError", "source": "codesearchnet"} {"code": "def _build_state_value(request_handler, user):\n uri = request_handler.request.url\n token = xsrfutil.generate_token(xsrf_secret_key(), user.user_id(), action_id=str(uri))\n return ((uri + ':') + token)", "docstring": "Composes the value for the 'state' parameter.\n\nPacks the current request URI and an XSRF token into an opaque string that\ncan be passed to the authentication server via the 'state' parameter.\n\nArgs:\nrequest_handler: webapp.RequestHandler, The request.\nuser: google.appengine.api.users.User, The current user.\n\nReturns:\nThe state value as a string.", "source": "codesearchnet"} {"code": "def CreateShoppingCampaign(client, budget_id, merchant_id):\n campaign_service = client.GetService('CampaignService', 'v201809')\n campaign = {'name': ('Shopping campaign \n campaign_operations = [{'operator': 'ADD', 'operand': campaign}]\n campaign = campaign_service.mutate(campaign_operations)['value'][0]\n print(('Campaign with name \"%s\" and ID \"%s\" was added.' % (campaign['name'], campaign['id'])))\n return campaign", "docstring": "Creates a shopping campaign with the given budget and merchant IDs.\n\nArgs:\nclient: an AdWordsClient instance.\nbudget_id: the str ID of the budget to be associated with the shopping\ncampaign.\nmerchant_id: the str ID of the merchant account to be associated with the\nshopping campaign.\n\nReturns:\nThe created Shopping Campaign as a sudsobject.", "source": "codesearchnet"} {"code": "def get_children(decider, root):\n \n collected = []\n\n def follow(elem):\n\n if elem in collected:\n return\n\n \n cls = elem.__class__\n\n if hasattr(cls, '_tx_attrs') and decider(elem):\n collected.append(elem)\n\n if hasattr(cls, '_tx_attrs'):\n for attr_name, attr in cls._tx_attrs.items():\n \n if attr.cont:\n if attr.mult in (MULT_ONE, MULT_OPTIONAL):\n new_elem = getattr(elem, attr_name)\n if new_elem:\n follow(new_elem)\n else:\n new_elem_list = getattr(elem, attr_name)\n if new_elem_list:\n for new_elem in new_elem_list:\n follow(new_elem)\n\n follow(root)\n return collected", "docstring": "Returns a list of all model elements of type 'typ' starting from model\nelement 'root'. 
The search process will follow containment links only.\nNon-containing references shall not be followed.\n\nArgs:\ndecider(obj): a callable returning True if the object is of interest.\nroot (model object): Python model object which is the start of the\nsearch process.", "source": "juraj-google-style"} {"code": "def create_summary_metadata(display_name, description):\n content = plugin_data_pb2.ImagePluginData(version=PROTO_VERSION)\n metadata = summary_pb2.SummaryMetadata(display_name=display_name, summary_description=description, plugin_data=summary_pb2.SummaryMetadata.PluginData(plugin_name=PLUGIN_NAME, content=content.SerializeToString()))\n return metadata", "docstring": "Create a `summary_pb2.SummaryMetadata` proto for image plugin data.\n\nReturns:\nA `summary_pb2.SummaryMetadata` protobuf object.", "source": "codesearchnet"} {"code": "def set_pair(self, term1, term2, value, **kwargs):\n\n \n\n key = self.key(term1, term2)\n self.keys.update([term1, term2])\n self.pairs[key] = value", "docstring": "Set the value for a pair of terms.\n\nArgs:\nterm1 (str)\nterm2 (str)\nvalue (mixed)", "source": "juraj-google-style"} {"code": "def sender(self, jid: str):\n if ((jid is not None) and (not isinstance(jid, str))):\n raise TypeError(\"'sender' MUST be a string\")\n self._sender = (aioxmpp.JID.fromstr(jid) if (jid is not None) else None)", "docstring": "Set jid of the sender\n\nArgs:\njid (str): jid of the sender", "source": "codesearchnet"} {"code": "def make_rc(base_cfg, target_filename, additions=None, replacements=None):\n if (additions is None):\n additions = {}\n if (replacements is None):\n replacements = {}\n new_cfg = six.moves.configparser.ConfigParser()\n new_cfg._sections = copy.deepcopy(base_cfg._sections)\n new_sections = new_cfg._sections\n for (section, opts) in additions.items():\n curr_section = new_sections.setdefault(section, collections.OrderedDict())\n for (opt, opt_val) in opts.items():\n curr_val = curr_section.get(opt)\n if (curr_val is None):\n msg = _MISSING_OPTION_ADDITION.format(opt)\n raise KeyError(msg)\n curr_val = curr_val.rstrip(',')\n opt_val = _transform_opt(opt_val)\n curr_section[opt] = ('%s, %s' % (curr_val, opt_val))\n for (section, opts) in replacements.items():\n curr_section = new_sections.setdefault(section, collections.OrderedDict())\n for (opt, opt_val) in opts.items():\n curr_val = curr_section.get(opt)\n if (curr_val is None):\n msg = _MISSING_OPTION_REPLACE.format(opt)\n print(msg, file=sys.stderr)\n opt_val = _transform_opt(opt_val)\n curr_section[opt] = ('%s' % (opt_val,))\n with open(target_filename, 'w') as file_obj:\n new_cfg.write(file_obj)", "docstring": "Combines a base rc and additions into single file.\n\nArgs:\nbase_cfg (ConfigParser.ConfigParser): The configuration we are\nmerging into.\ntarget_filename (str): The filename where the new configuration\nwill be saved.\nadditions (dict): (Optional) The values added to the configuration.\nreplacements (dict): (Optional) The wholesale replacements for\nthe new configuration.\n\nRaises:\nKeyError: if one of the additions or replacements does not\nalready exist in the current config.", "source": "codesearchnet"} {"code": "def add_edge(self, ind_node, dep_node):\n \n graph = self.graph\n if ind_node not in graph:\n raise KeyError('independent node %s does not exist' % ind_node)\n if dep_node not in graph:\n raise KeyError('dependent node %s does not exist' % dep_node)\n test_graph = deepcopy(graph)\n test_graph[ind_node].add(dep_node)\n test_dag = DAG()\n test_dag.graph = test_graph\n 
is_valid, message = test_dag.validate()\n if is_valid:\n graph[ind_node].add(dep_node)\n else:\n raise DAGValidationError(message)", "docstring": "Add an edge (dependency) between the specified nodes.\n\nArgs:\nind_node (str): The independent node to add an edge to.\ndep_node (str): The dependent node that has a dependency on the\nind_node.\n\nRaises:\nKeyError: Either the ind_node, or dep_node do not exist.\nDAGValidationError: Raised if the resulting graph is invalid.", "source": "juraj-google-style"} {"code": "def inner_dim_sizes(self):\n return self._inner_dim_sizes", "docstring": "The inner dimension sizes for this shape.\n\nReturns:\nA 1-D integer `Tensor`.", "source": "github-repos"} {"code": "def _big_endian_int(bits: np.ndarray) -> int:\n \n result = 0\n for e in bits:\n result <<= 1\n if e:\n result |= 1\n return result", "docstring": "Returns the big-endian integer specified by the given bits.\n\nFor example, [True, False, False, True, False] becomes binary 10010 which\nis 18 in decimal.\n\nArgs:\nbits: Descending bits of the integer, with the 1s bit at the end.\n\nReturns:\nThe integer.", "source": "juraj-google-style"} {"code": "def reshape_by_blocks(x, x_shape, memory_block_size):\n \n x = tf.reshape(x, [\n x_shape[0], x_shape[1], x_shape[2] \n memory_block_size, x_shape[3]\n ])\n return x", "docstring": "Reshapes input by splitting its length over blocks of memory_block_size.\n\nArgs:\nx: a Tensor with shape [batch, heads, length, depth]\nx_shape: tf.TensorShape of x.\nmemory_block_size: Integer which divides length.\n\nReturns:\nTensor with shape\n[batch, heads, length // memory_block_size, memory_block_size, depth].", "source": "juraj-google-style"} {"code": "def pearson(logu, name=None):\n with tf.compat.v1.name_scope(name, 'pearson', [logu]):\n logu = tf.convert_to_tensor(value=logu, name='logu')\n return tf.square(tf.math.expm1(logu))", "docstring": "The Pearson Csiszar-function in log-space.\n\nA Csiszar-function is a member of,\n\n```none\nF = { f:R_+ to R : f convex }.\n```\n\nThe Pearson Csiszar-function is:\n\n```none\nf(u) = (u - 1)**2\n```\n\nWarning: this function makes non-log-space calculations and may therefore be\nnumerically unstable for `|logu| >> 0`.\n\nArgs:\nlogu: `float`-like `Tensor` representing `log(u)` from above.\nname: Python `str` name prefixed to Ops created by this function.\n\nReturns:\npearson_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at\n`u = exp(logu)`.", "source": "codesearchnet"} {"code": "class BufferedSlidingQuantileTracker(BufferedQuantileTracker):\n\n def __init__(self, window_size, q):\n super().__init__(window_mode=WindowMode.SLIDING, q=q, window_size=window_size)", "docstring": "Sliding window quantile tracker using a sorted list for quantile\ncalculation.\n\nWarning:\nMaintains a sorted list of values within the sliding window to calculate\nthe specified quantile. 
Memory consumption is bounded by the window size\nbut can still be significant for large windows.\n\nArgs:\nwindow_size: The size of the sliding window.\nq: The quantile to calculate, a float between 0 and 1 (inclusive).", "source": "github-repos"} {"code": "def FormatTime(fmt, stime=None):\n precondition.AssertType(fmt, str)\n precondition.AssertOptionalType(stime, time.struct_time)\n if (stime is None):\n strftime = time.strftime\n else:\n strftime = (lambda fmt: time.strftime(fmt, stime))\n if PY2:\n return strftime(fmt.encode('ascii')).decode('ascii')\n else:\n return strftime(fmt)", "docstring": "A compatibility wrapper for the `strftime` function.\n\nIt is guaranteed to always take unicode string as an argument and return an\nunicode string as a result.\n\nArgs:\nfmt: A format string specifying formatting of the output.\nstime: A time representation as returned by `gmtime` or `localtime`.\n\nReturns:\nA human-readable representation of `stime`.", "source": "codesearchnet"} {"code": "def post_comment(self, sharekey=None, comment=None):\n \n endpoint = '/api/sharedfile/{0}/comments'.format(sharekey)\n\n post_data = {'body': comment}\n\n data = self._make_request(\"POST\", endpoint=endpoint, data=post_data)\n return Comment.NewFromJSON(data)", "docstring": "Post a comment on behalf of the current user to the\nSharedFile with the given sharekey.\n\nArgs:\nsharekey (str): Sharekey of the SharedFile to which you'd like\nto post a comment.\ncomment (str): Text of the comment to post.\n\nReturns:\nComment object.", "source": "juraj-google-style"} {"code": "def __init__(self, scope, parent, name, result, paren=False):\n \n CodeExpression.__init__(self, scope, parent, name, result, paren)\n self.full_name = name\n self.arguments = ()\n self.method_of = None\n self.reference = None", "docstring": "Constructor for function calls.\n\nArgs:\nscope (CodeEntity): The program scope where this object belongs.\nparent (CodeEntity): This object's parent in the program tree.\nname (str): The name of the function in the program.\nresult (str): The return type of the expression in the program.\n\nKwargs:\nparen (bool): Whether the expression is enclosed in parentheses.", "source": "juraj-google-style"} {"code": "def to_string(self, verbose=0, title=None, **kwargs):\n \n from pprint import pformat\n s = pformat(self, **kwargs)\n if title is not None:\n return \"\\n\".join([marquee(title, mark=\"=\"), s])\n return s", "docstring": "String representation. 
kwargs are passed to `pprint.pformat`.\n\nArgs:\nverbose: Verbosity level\ntitle: Title string.", "source": "juraj-google-style"} {"code": "def update_data(self, index, data):\n \n\n datapack = self.built_embed.to_dict()[\"fields\"][index]\n self.built_embed.set_field_at(index, name=datapack[\"name\"], value=data, inline=datapack[\"inline\"])", "docstring": "Updates a particular datapack's data\n\nArgs:\nindex (int): The index of the datapack\ndata (str): The new value to set for this datapack", "source": "juraj-google-style"} {"code": "def get_rot_mats(self) -> torch.Tensor:\n if self._rot_mats is not None:\n return self._rot_mats\n elif self._quats is not None:\n return quat_to_rot(self._quats)\n else:\n raise ValueError('Both rotations are None')", "docstring": "Returns the underlying rotation as a rotation matrix tensor.\n\nReturns:\nThe rotation as a rotation matrix tensor", "source": "github-repos"} {"code": "def add_child(self, child):\n \n if not isinstance(child, Node):\n raise TypeError(\"child must be a Node\")\n self.children.append(child); child.parent = self", "docstring": "Add child to ``Node`` object\n\nArgs:\n``child`` (``Node``): The child ``Node`` to be added", "source": "juraj-google-style"} {"code": "def project(self, n):\n \n n = get_uvec(n)\n return self.einsum_sequence([n] * self.rank)", "docstring": "Convenience method for projection of a tensor into a\nvector. Returns the tensor dotted into a unit vector\nalong the input n.\n\nArgs:\nn (3x1 array-like): direction to project onto\n\nReturns (float):\nscalar value corresponding to the projection of\nthe tensor into the vector", "source": "juraj-google-style"} {"code": "def dense_message_pass(node_states, edge_matrices):\n \n batch_size, num_nodes, node_dim = common_layers.shape_list(node_states)\n\n \n h_flat = tf.reshape(\n node_states, [batch_size, num_nodes * node_dim, 1], name=\"h_flat\")\n\n messages = tf.reshape(\n tf.matmul(edge_matrices, h_flat), [batch_size * num_nodes, node_dim],\n name=\"messages_matmul\")\n\n message_bias = tf.get_variable(\"message_bias\", shape=node_dim)\n messages = messages + message_bias\n messages = tf.reshape(messages, [batch_size, num_nodes, node_dim])\n return messages", "docstring": "Computes a_t from h_{t-1}, see bottom of page 3 in the paper.\n\nArgs:\nnode_states: [B, L, D] tensor (h_{t-1})\nedge_matrices (tf.float32): [B, L*D, L*D]\n\nReturns:\nmessages (tf.float32): [B, L, D] For each pair\nof nodes in the graph a message is sent along both the incoming and\noutgoing edge.", "source": "juraj-google-style"} {"code": "def ms_to_str(ms, fractions=False):\n \n sgn = \"-\" if ms < 0 else \"\"\n h, m, s, ms = ms_to_times(abs(ms))\n if fractions:\n return sgn + \"{:01d}:{:02d}:{:02d}.{:03d}\".format(h, m, s, ms)\n else:\n return sgn + \"{:01d}:{:02d}:{:02d}\".format(h, m, s)", "docstring": "Prettyprint milliseconds to [-]H:MM:SS[.mmm]\n\nHandles huge and/or negative times. 
Non-negative times with ``fractions=True``\nare matched by :data:`pysubs2.time.TIMESTAMP`.\n\nArguments:\nms: Number of milliseconds (int, float or other numeric class).\nfractions: Whether to print up to millisecond precision.\n\nReturns:\nstr", "source": "juraj-google-style"} {"code": "def correct_dihedral(self, construction_table,\n use_lookup=None):\n \n if use_lookup is None:\n use_lookup = settings['defaults']['use_lookup']\n\n problem_index = self.check_dihedral(construction_table)\n bond_dict = self._give_val_sorted_bond_dict(use_lookup=use_lookup)\n c_table = construction_table.copy()\n for i in problem_index:\n loc_i = c_table.index.get_loc(i)\n b, a, problem_d = c_table.loc[i, ['b', 'a', 'd']]\n try:\n c_table.loc[i, 'd'] = (bond_dict[a] - {b, a, problem_d}\n - set(c_table.index[loc_i:]))[0]\n except IndexError:\n visited = set(c_table.index[loc_i:]) | {b, a, problem_d}\n tmp_bond_dict = OrderedDict([(j, bond_dict[j] - visited)\n for j in bond_dict[problem_d]])\n found = False\n while tmp_bond_dict and not found:\n new_tmp_bond_dict = OrderedDict()\n for new_d in tmp_bond_dict:\n if new_d in visited:\n continue\n angle = self.get_angle_degrees([b, a, new_d])[0]\n if 5 < angle < 175:\n found = True\n c_table.loc[i, 'd'] = new_d\n else:\n visited.add(new_d)\n for j in tmp_bond_dict[new_d]:\n new_tmp_bond_dict[j] = bond_dict[j] - visited\n tmp_bond_dict = new_tmp_bond_dict\n if not found:\n other_atoms = c_table.index[:loc_i].difference({b, a})\n molecule = self.get_distance_to(origin=i, sort=True,\n other_atoms=other_atoms)\n k = 0\n while not found and k < len(molecule):\n new_d = molecule.index[k]\n angle = self.get_angle_degrees([b, a, new_d])[0]\n if 5 < angle < 175:\n found = True\n c_table.loc[i, 'd'] = new_d\n k = k + 1\n if not found:\n message = ('The atom with index {} has no possibility '\n 'to get nonlinear reference atoms'.format)\n raise UndefinedCoordinateSystem(message(i))\n return c_table", "docstring": "Reindexe the dihedral defining atom if linear reference is used.\n\nUses :meth:`~Cartesian.check_dihedral` to obtain the problematic\nindices.\n\nArgs:\nconstruction_table (pd.DataFrame):\nuse_lookup (bool): Use a lookup variable for\n:meth:`~chemcoord.Cartesian.get_bonds`. The default is\nspecified in ``settings['defaults']['use_lookup']``\n\nReturns:\npd.DataFrame: Appropiately renamed construction table.", "source": "juraj-google-style"} {"code": "def init_with_validation(cls, function, arguments):\n func = FUNCTIONS[function]\n args = []\n for (arg, arg_type) in zip(arguments, func.args):\n if arg_type.values:\n if isinstance(arg, six.string_types):\n try:\n args.append([arg_type.values[arg]])\n except KeyError:\n raise KeyError(('Unknown argument value: %s, valid values: %s' % (arg, [v.name for v in arg_type.values])))\n else:\n if isinstance(arg, (list, tuple)):\n arg = arg[0]\n try:\n args.append([arg_type.values(arg)])\n except ValueError:\n raise ValueError(('Unknown argument value: %s, valid values: %s' % (arg, list(arg_type.values))))\n elif isinstance(arg, int):\n args.append([arg])\n else:\n args.append(list(arg))\n return cls(func.id, args)", "docstring": "Return a `FunctionCall` given some validation for the function and args.\n\nArgs:\nfunction: A function name or id, to be converted into a function id enum.\narguments: An iterable of function arguments. Arguments that are enum\ntypes can be passed by name. 
Arguments that only take one value (ie\nnot a point) don't need to be wrapped in a list.\n\nReturns:\nA new `FunctionCall` instance.\n\nRaises:\nKeyError: if the enum name doesn't exist.\nValueError: if the enum id doesn't exist.", "source": "codesearchnet"} {"code": "def get_release_date(pdb_id):\n pdb_id = pdb_id.upper()\n if (pdb_id not in _property_table().index):\n raise ValueError('PDB ID not in property table')\n else:\n release_date = _property_table().ix[(pdb_id, 'releaseDate')]\n if pd.isnull(release_date):\n log.debug('{}: no release date available')\n release_date = None\n return release_date", "docstring": "Quick way to get the release date of a PDB ID using the table of results from the REST service\n\nReturns None if the release date is not available.\n\nReturns:\nstr: Organism of a PDB ID", "source": "codesearchnet"} {"code": "def GetConsensusAddress(validators):\n \n vlen = len(validators)\n script = Contract.CreateMultiSigRedeemScript(vlen - int((vlen - 1) / 3), validators)\n return Crypto.ToScriptHash(script)", "docstring": "Get the script hash of the consensus node.\n\nArgs:\nvalidators (list): of Ellipticcurve.ECPoint's\n\nReturns:\nUInt160:", "source": "juraj-google-style"} {"code": "def fit(self, mol1, mol2):\n return (self.get_rmsd(mol1, mol2) < self._tolerance)", "docstring": "Fit two molecules.\n\nArgs:\nmol1: First molecule. OpenBabel OBMol or pymatgen Molecule object\nmol2: Second molecule. OpenBabel OBMol or pymatgen Molecule object\n\nReturns:\nA boolean value indicates whether two molecules are the same.", "source": "codesearchnet"} {"code": "def get_nsx_controller(self):\n \n urn = \"urn:brocade.com:mgmt:brocade-tunnels\"\n config = ET.Element(\"config\")\n ET.SubElement(config, \"nsx-controller\", xmlns=urn)\n output = self._callback(config, handler='get_config')\n result = {}\n element = ET.fromstring(str(output))\n for controller in element.iter('{%s}nsx-controller'%urn):\n result['name'] = controller.find('{%s}name'%urn).text\n isactivate = controller.find('{%s}activate'%urn)\n if isactivate is None:\n result['activate'] = False\n else:\n result['activate'] = True\n connection = controller.find('{%s}connection-addr'%urn)\n if connection is None:\n result['port'] = None\n result['address'] = None\n else:\n result['port'] = connection.find('{%s}port'%urn).text\n address = connection.find('{%s}address'%urn)\n if address is None:\n result['address'] = None\n else:\n result['address'] = address.text\n\n return result", "docstring": "Get/Set nsx controller name\n\nArgs:\nname: (str) : Name of the nsx-controller\ncallback (function): A function executed upon completion of the\nmethod.\n\nReturns: Return dictionary containing nsx-controller information.\nReturns blank dict if no nsx-controller is configured.\n\nRaises: None", "source": "juraj-google-style"} {"code": "def GetRelativePathForPathSpec(cls, path_spec, mount_path=None):\n if (not path_spec):\n return None\n location = getattr(path_spec, 'location', None)\n if ((not location) and path_spec.HasParent()):\n location = getattr(path_spec.parent, 'location', None)\n if (not location):\n return None\n data_stream = getattr(path_spec, 'data_stream', None)\n if data_stream:\n location = '{0:s}:{1:s}'.format(location, data_stream)\n if (path_spec.type_indicator != dfvfs_definitions.TYPE_INDICATOR_OS):\n return location\n if (mount_path and location.startswith(mount_path)):\n location = location[len(mount_path):]\n return location", "docstring": "Retrieves the relative path of a path specification.\n\nIf a mount 
path is defined the path will be relative to the mount point,\notherwise the path is relative to the root of the file system that is used\nby the path specification.\n\nArgs:\npath_spec (dfvfs.PathSpec): path specification.\nmount_path (Optional[str]): path where the file system that is used\nby the path specification is mounted, such as \"/mnt/image\". The\nmount path will be stripped from the absolute path defined by\nthe path specification.\n\nReturns:\nstr: relative path or None.", "source": "codesearchnet"} {"code": "def read_binary_array(self, key, b64decode=True, decode=False):\n \n data = None\n if key is not None:\n data = self.db.read(key.strip())\n if data is not None:\n data_decoded = []\n for d in json.loads(data, object_pairs_hook=OrderedDict):\n if b64decode:\n \n dd = base64.b64decode(d)\n if decode:\n \n try:\n dd = dd.decode('utf-8')\n except UnicodeDecodeError:\n \n dd = dd.decode('latin-1')\n data_decoded.append(dd)\n else:\n \n data_decoded.append(d)\n data = data_decoded\n else:\n self.tcex.log.warning(u'The key field was None.')\n return data", "docstring": "Read method of CRUD operation for binary array data.\n\nArgs:\nkey (string): The variable to read from the DB.\nb64decode (bool): If true the data will be base64 decoded.\ndecode (bool): If true the data will be decoded to a String.\n\nReturns:\n(list): Results retrieved from DB.", "source": "juraj-google-style"} {"code": "def _get_connection(self, cluster):\n \n \n \n if 'connection' not in cluster:\n cluster['connection'] = self._connection_class(\n socketTimeoutMS=self._network_timeout,\n w=1,\n j=self.j,\n **cluster['params'])\n\n return cluster['connection']", "docstring": "Return a connection to a Cluster.\n\nReturn a MongoClient or a MongoReplicaSetClient for the given Cluster.\nThis is done in a lazy manner (if there is already a Client connected to\nthe Cluster, it is returned and no other Client is created).\n\nArgs:\ncluster: A dict containing information about a cluster.\n\nReturns:\nA MongoClient or MongoReplicaSetClient instance connected to the\ndesired cluster", "source": "juraj-google-style"} {"code": "def get_root_dir(self):\n if os.path.isdir(self.root_path):\n return self.root_path\n else:\n return os.path.dirname(self.root_path)", "docstring": "Retrieve the absolute path to the root directory of this data source.\n\nReturns:\nstr: absolute path to the root directory of this data source.", "source": "codesearchnet"} {"code": "def expand_valid_values(valid_values):\n \n\n if '${GROUP_TYPES}' in valid_values:\n valid_values.remove('${GROUP_TYPES}')\n valid_values.extend(\n [\n 'Adversary',\n 'Campaign',\n 'Document',\n 'Email',\n 'Event',\n 'Incident',\n 'Intrusion Set',\n 'Signature',\n 'Task',\n 'Threat',\n ]\n )\n elif '${OWNERS}' in valid_values:\n valid_values.remove('${OWNERS}')\n valid_values.append('')\n elif '${USERS}' in valid_values:\n valid_values.remove('${USERS}')\n valid_values.append('')\n return valid_values", "docstring": "Expand supported playbook variables to their full list.\n\nArgs:\nvalid_values (list): The list of valid values for Choice or MultiChoice inputs.\n\nReturns:\nList: An expanded list of valid values for Choice or MultiChoice inputs.", "source": "juraj-google-style"} {"code": "def start(self, channel=None):\n \n\n super(EmulatedDevice, self).start(channel)\n self.emulator.start()", "docstring": "Start this emulated device.\n\nThis triggers the controller to call start on all peripheral tiles in\nthe device to make sure they start after the controller does and 
then\nit waits on each one to make sure they have finished initializing\nbefore returning.\n\nArgs:\nchannel (IOTilePushChannel): the channel with a stream and trace\nroutine for streaming and tracing data through a VirtualInterface", "source": "juraj-google-style"} {"code": "def distance_tt_point(a, b):\n \n return math.sqrt((b.lat-a.lat)**2 + (b.lon-a.lon)**2)", "docstring": "Euclidean distance between two (tracktotrip) points\n\nArgs:\na (:obj:`Point`)\nb (:obj:`Point`)\nReturns:\nfloat", "source": "juraj-google-style"} {"code": "def list_lb_nat_rules(access_token, subscription_id, resource_group, lb_name):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/loadBalancers/', lb_name, 'inboundNatRules?api-version=', NETWORK_API])\n return do_get(endpoint, access_token)", "docstring": "List the inbound NAT rules for a load balancer.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nlb_name (str): Name of the load balancer.\n\nReturns:\nHTTP response. JSON body of load balancer NAT rules.", "source": "codesearchnet"} {"code": "def from_text_audio_configs(cls, text_config: ClapTextConfig, audio_config: ClapAudioConfig, **kwargs):\n return cls(text_config=text_config.to_dict(), audio_config=audio_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`ClapConfig`] (or a derived class) from clap text model configuration and clap audio model\nconfiguration.\n\nReturns:\n[`ClapConfig`]: An instance of a configuration object", "source": "github-repos"} {"code": "def SerializeUnsigned(self, writer):\n \n writer.WriteByte(self.Type)\n writer.WriteByte(self.Version)\n self.SerializeExclusiveData(writer)\n\n if len(self.Attributes) > self.MAX_TX_ATTRIBUTES:\n raise Exception(\"Cannot have more than %s transaction attributes\" % self.MAX_TX_ATTRIBUTES)\n\n writer.WriteSerializableArray(self.Attributes)\n writer.WriteSerializableArray(self.inputs)\n writer.WriteSerializableArray(self.outputs)", "docstring": "Serialize object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"} {"code": "def _factored_dims(self, shape):\n \n if not self._factored or shape.ndims < 2:\n return None\n sorted_dims = sorted(shape.dims, key=lambda d: -d.size)\n if sorted_dims[1].size < self._min_dim_size_to_factor:\n return None\n return sorted_dims[:2]", "docstring": "Should we use a factored second moment estimator.\n\nBased on the shape of the variable.\nIf we factor the accumulator, then this function returns a list of two\nmtf.Dimensions to reduce over. 
We always pick the two largest dimensions.\nIf there are not two dimensions of size >= min_dim_size_to_factor, then we\ndo not factor.\n\nArgs:\nshape: a Shape\nReturns:\neither a list of 2 Dimensions or None", "source": "juraj-google-style"} {"code": "def CheckGradConfigsToTestExplicit():\n\n def Config(input_size, filter_size, out_size, stride=1, padding=None, dilations=None):\n return (input_size, filter_size, out_size, stride, padding, dilations)\n return [Config([2, 5, 8, 1], [4, 4, 1, 2], [2, 3, 10, 2], padding=[[0, 1], [2, 3]]), Config([4, 5, 5, 1], [2, 2, 1, 2], [4, 4, 5, 2], 2, padding=[[3, 1], [5, 0]]), Config([2, 4, 4, 2], [3, 1, 2, 2], [2, 7, 11, 4], padding=[[4, 1], [3, 4]]), Config([1, 15, 15, 2], [1, 3, 2, 1], [1, 18, 23, 2], padding=[[3, 0], [2, 8]]), Config([2, 15, 16, 1], [3, 3, 1, 2], [2, 5, 8, 2], 3, padding=[[0, 0], [10, 0]]), Config([2, 5, 8, 1], [3, 4, 1, 2], [2, 5, 10, 2], padding=[[3, 1], [2, 3]], dilations=[2, 1]), Config([2, 4, 3, 2], [3, 2, 2, 1], [2, 4, 3, 2], padding=[[2, 0], [1, 0]])]", "docstring": "Iterator for different convolution shapes, strides and explicit paddings.\n\ncompute_gradient_error() is very expensive. So the configs should be\nrelatively small.\n\nReturns:\nList of tuples (input_size, filter_size, out_size, stride, padding,\ndilations), the depthwise convolution parameters.", "source": "github-repos"} {"code": "def ioa(boxes1, boxes2):\n \n intersect = intersection(boxes1, boxes2)\n inv_areas = np.expand_dims(1.0 / area(boxes2), axis=0)\n return intersect * inv_areas", "docstring": "Computes pairwise intersection-over-area between box collections.\n\nIntersection-over-area (ioa) between two boxes box1 and box2 is defined as\ntheir intersection area over box2's area. Note that ioa is not symmetric,\nthat is, IOA(box1, box2) != IOA(box2, box1).\n\nArgs:\nboxes1: a numpy array with shape [N, 4] holding N boxes.\nboxes2: a numpy array with shape [M, 4] holding N boxes.\n\nReturns:\na numpy array with shape [N, M] representing pairwise ioa scores.", "source": "juraj-google-style"} {"code": "def run_inference(self, batch, model, inference_args=None):\n _validate_inference_args(inference_args)\n vectorized_batch = np.vstack(batch)\n predictions = hdbscan.approximate_predict(model, vectorized_batch)\n return [PredictionResult(x, y) for x, y in zip(batch, predictions)]", "docstring": "Runs inferences on a batch of numpy arrays.\n\nArgs:\nbatch: A sequence of examples as numpy arrays. They should\nbe single examples.\nmodel: A numpy model or pipeline. 
Must implement predict(X).\nWhere the parameter X is a numpy array.\ninference_args: Any additional arguments for an inference.\n\nReturns:\nAn Iterable of type PredictionResult.", "source": "github-repos"} {"code": "def volatility_surface(self, currency: List[str], asset: List[str]) -> volatility_surface.VolatilitySurface:\n dates = []\n strikes = []\n implied_vols = []\n for cur, s in zip(currency, asset):\n if s not in self.supported_assets(cur):\n raise ValueError(f'No data for asset {s}')\n data_spot = self._market_data_dict['equities'][cur][s]\n if 'volatility_surface' not in data_spot:\n raise ValueError(f\"No volatility surface 'volatility_surface' for asset {s}\")\n vol_surface = data_spot['volatility_surface']\n vol_dates = dateslib.convert_to_date_tensor(vol_surface['dates'])\n vol_strikes = tf.convert_to_tensor(vol_surface['strikes'], dtype=self._dtype, name='strikes')\n vols = tf.convert_to_tensor(vol_surface['implied_volatilities'], dtype=self._dtype, name='implied_volatilities')\n dates.append(vol_dates)\n strikes.append(vol_strikes)\n implied_vols.append(vols)\n dates = math.pad.pad_date_tensors(dates)\n dates = dateslib.DateTensor.stack(dates, axis=0)\n implied_vols = math.pad.pad_tensors(implied_vols)\n implied_vols = tf.stack(implied_vols, axis=0)\n strikes = math.pad.pad_tensors(strikes)\n strikes = tf.stack(strikes, axis=0)\n vol_surface = volatility_surface.VolatilitySurface(self.date, dates, strikes, implied_vols)\n return vol_surface", "docstring": "The volatility surface object for the lsit of assets.\n\nArgs:\ncurrency: A list of strings with currency names.\nasset: A list of strings with asset names.\n\nReturns:\nAn instance of `VolatilitySurface`.", "source": "github-repos"} {"code": "def label(self, input_grid):\n \n marked = self.find_local_maxima(input_grid)\n marked = np.where(marked >= 0, 1, 0)\n \n \n markers = splabel(marked)[0]\n return markers", "docstring": "Labels input grid using enhanced watershed algorithm.\n\nArgs:\ninput_grid (numpy.ndarray): Grid to be labeled.\n\nReturns:\nArray of labeled pixels", "source": "juraj-google-style"} {"code": "def to_json(self, indent=4):\n agregate = {'metas': self.metas}\n agregate.update({k: getattr(self, k) for k in self._rule_attrs})\n return json.dumps(agregate, indent=indent)", "docstring": "Serialize metas and reference attributes to a JSON string.\n\nKeyword Arguments:\nindent (int): Space indentation, default to ``4``.\n\nReturns:\nstring: JSON datas.", "source": "codesearchnet"} {"code": "def register_gate(name, gateclass, allow_overwrite=False):\n if hasattr(Circuit, name):\n if allow_overwrite:\n warnings.warn(f'Circuit has attribute `{name}`.')\n else:\n raise ValueError(f'Circuit has attribute `{name}`.')\n if name.startswith('run_with_'):\n if allow_overwrite:\n warnings.warn(f'Gate name `{name}` may conflict with run of backend.')\n else:\n raise ValueError(f\"Gate name `{name}` shall not start with 'run_with_'.\")\n if (not allow_overwrite):\n if (name in GATE_SET):\n raise ValueError(f\"Gate '{name}' is already exists in gate set.\")\n if (name in GLOBAL_MACROS):\n raise ValueError(f\"Macro '{name}' is already exists.\")\n GATE_SET[name] = gateclass", "docstring": "Register new gate to gate set.\n\nArgs:\nname (str): The name of gate.\ngateclass (type): The type object of gate.\nallow_overwrite (bool, optional): If True, allow to overwrite the existing gate.\nOtherwise, raise the ValueError.\n\nRaises:\nValueError: The name is duplicated with existing gate.\nWhen `allow_overwrite=True`, this 
error is not raised.", "source": "codesearchnet"} {"code": "def add(cls, model, commit=True):\n \n if not isinstance(model, cls):\n raise ValueError('%s is not of type %s' % (model, cls))\n cls.session.add(model)\n try:\n if commit:\n cls.session.commit()\n return model\n except:\n cls.session.rollback()\n raise", "docstring": "Adds a model instance to session and commits the\ntransaction.\n\nArgs:\n\nmodel: The instance to add.\n\nExamples:\n\n>>> customer = Customer.new(name=\"hari\", email=\"hari@gmail.com\")\n\n>>> Customer.add(customer)\nhari@gmail.com", "source": "juraj-google-style"} {"code": "def create_or_update(cls, video, language_code, metadata, file_data=None):\n \n try:\n video_transcript = cls.objects.get(video=video, language_code=language_code)\n retrieved = True\n except cls.DoesNotExist:\n video_transcript = cls(video=video, language_code=language_code)\n retrieved = False\n\n for prop, value in six.iteritems(metadata):\n if prop in ['language_code', 'file_format', 'provider']:\n setattr(video_transcript, prop, value)\n\n transcript_name = metadata.get('file_name')\n\n try:\n if transcript_name:\n video_transcript.transcript.name = transcript_name\n elif file_data:\n with closing(file_data) as transcript_file_data:\n file_name = '{uuid}.{ext}'.format(uuid=uuid4().hex, ext=video_transcript.file_format)\n video_transcript.transcript.save(file_name, transcript_file_data)\n\n video_transcript.save()\n except Exception:\n logger.exception(\n '[VAL] Transcript save failed to storage for video_id \"%s\" language code \"%s\"',\n video.edx_video_id,\n language_code\n )\n raise\n\n return video_transcript, not retrieved", "docstring": "Create or update Transcript object.\n\nArguments:\nvideo (Video): Video for which transcript is going to be saved.\nlanguage_code (str): language code for (to be created/updated) transcript\nmetadata (dict): A dict containing (to be overwritten) properties\nfile_data (InMemoryUploadedFile): File data to be saved\n\nReturns:\nReturns a tuple of (video_transcript, created).", "source": "juraj-google-style"} {"code": "def remove_binding(self, binding):\n \n \n \n query = { \"binding_id\" : binding.binding_id, \"instance_id\" : binding.instance.instance_id }\n \n \n try:\n result = self.broker.delete_one(query)\n except:\n raise ErrStorageMongoConnection(\"Remove Binding\")\n\n \n if result is not None and result.deleted_count == 1:\n binding.provisioned = False\n else:\n raise ErrStorageRemoveBinding(binding.binding_id)", "docstring": "Remove a binding\n\nRemove an object from the MongoDB storage for caching\n\nArgs:\nbinding (AtlasServiceBinding.Binding): binding\n\nRaises:\nErrStorageMongoConnection: Error during MongoDB communication.\nErrStorageRemoveBinding: Failed to remove the binding", "source": "juraj-google-style"} {"code": "def confirmdir(self, target_directory):\n try:\n directory = self.resolve(target_directory)\n except IOError as exc:\n self.raise_os_error(exc.errno, target_directory)\n if (not (directory.st_mode & S_IFDIR)):\n if (self.is_windows_fs and IS_PY2):\n error_nr = errno.EINVAL\n else:\n error_nr = errno.ENOTDIR\n self.raise_os_error(error_nr, target_directory, 267)\n return directory", "docstring": "Test that the target is actually a directory, raising OSError\nif not.\n\nArgs:\ntarget_directory: Path to the target directory within the fake\nfilesystem.\n\nReturns:\nThe FakeDirectory object corresponding to target_directory.\n\nRaises:\nOSError: if the target is not a directory.", "source": "codesearchnet"} {"code": "def 
_group_sentences(total_nb_sentences, group_length):\n sentences_groups = []\n current_sentence_group = []\n for i in range(0, total_nb_sentences):\n if ((i % group_length) == 0):\n if (len(current_sentence_group) > 0):\n sentences_groups.append(current_sentence_group)\n current_sentence_group = [i]\n else:\n current_sentence_group.append(i)\n if (len(current_sentence_group) > 0):\n sentences_groups.append(current_sentence_group)\n return sentences_groups", "docstring": "Split sentences in groups, given a specific group length.\n\nArgs:\ntotal_nb_sentences (int): Total available sentences.\ngroup_length (int): Limit of length for each group.\n\nReturns:\nlist: Contains groups (lists) of sentences.", "source": "codesearchnet"} {"code": "def parse(self, requires_cfg=True):\n \n self._parse_default()\n self._parse_config(requires_cfg)\n self._parse_env()", "docstring": "Parse the configuration sources into `Bison`.\n\nArgs:\nrequires_cfg (bool): Specify whether or not parsing should fail\nif a config file is not found. (default: True)", "source": "juraj-google-style"} {"code": "def latitude(self, value=0.0):\n if (value is not None):\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float for field `latitude`'.format(value))\n if (value < (- 90.0)):\n raise ValueError('value need to be greater or equal -90.0 for field `latitude`')\n if (value > 90.0):\n raise ValueError('value need to be smaller 90.0 for field `latitude`')\n self._latitude = value", "docstring": "Corresponds to IDD Field `latitude`\n\n+ is North, - is South, degree minutes represented in decimal (i.e. 30 minutes is .5)\n\nArgs:\nvalue (float): value for IDD Field `latitude`\nUnit: deg\nDefault value: 0.0\nvalue >= -90.0\nvalue <= 90.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"} {"code": "def p44(msg):\n \n d = hex2bin(data(msg))\n\n if d[34] == '0':\n return None\n\n p = bin2int(d[35:46]) \n\n return p", "docstring": "Static pressure.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nint: static pressure in hPa", "source": "juraj-google-style"} {"code": "def is_file_opened(self, filename=None):\n \n if filename is None:\n \n return len(self.data) > 0\n else:\n return self.has_filename(filename)", "docstring": "Return if filename is in the editor stack.\n\nArgs:\nfilename: Name of the file to search for. 
If filename is None,\nthen checks if any file is open.\n\nReturns:\nTrue: If filename is None and a file is open.\nFalse: If filename is None and no files are open.\nNone: If filename is not None and the file isn't found.\ninteger: Index of file name in editor stack.", "source": "juraj-google-style"} {"code": "def add_to_writer(self,\n writer: PdfFileWriter,\n start_recto: bool = True) -> None:\n \n if self.is_html:\n pdf = get_pdf_from_html(\n html=self.html,\n header_html=self.header_html,\n footer_html=self.footer_html,\n wkhtmltopdf_filename=self.wkhtmltopdf_filename,\n wkhtmltopdf_options=self.wkhtmltopdf_options)\n append_memory_pdf_to_writer(pdf, writer, start_recto=start_recto)\n elif self.is_filename:\n if start_recto and writer.getNumPages() % 2 != 0:\n writer.addBlankPage()\n writer.appendPagesFromReader(PdfFileReader(\n open(self.filename, 'rb')))\n else:\n raise AssertionError(\"PdfPlan: shouldn't get here!\")", "docstring": "Add the PDF described by this class to a PDF writer.\n\nArgs:\nwriter: a :class:`PyPDF2.PdfFileWriter`\nstart_recto: start a new right-hand page?", "source": "juraj-google-style"} {"code": "def prepare_run_debug_urls(self, fetches, feed_dict):\n return self._grpc_debug_server_urls", "docstring": "Implementation of abstract method in superclass.\n\nSee doc of `NonInteractiveDebugWrapperSession.prepare_run_debug_urls()`\nfor details.\n\nArgs:\nfetches: Same as the `fetches` argument to `Session.run()`\nfeed_dict: Same as the `feed_dict` argument to `Session.run()`\n\nReturns:\ndebug_urls: (`str` or `list` of `str`) file:// debug URLs to be used in\nthis `Session.run()` call.", "source": "github-repos"} {"code": "def path_size(p: tcod.path.AStar) -> int:\n \n return int(lib.TCOD_path_size(p._path_c))", "docstring": "Return the current length of the computed path.\n\nArgs:\np (AStar): An AStar instance.\nReturns:\nint: Length of the path.", "source": "juraj-google-style"} {"code": "def with_device(self, new_device: devices.Device, qubit_mapping: Callable[([ops.Qid], ops.Qid)]=(lambda e: e)) -> 'Circuit':\n return Circuit(moments=[ops.Moment((operation.transform_qubits(qubit_mapping) for operation in moment.operations)) for moment in self._moments], device=new_device)", "docstring": "Maps the current circuit onto a new device, and validates.\n\nArgs:\nnew_device: The new device that the circuit should be on.\nqubit_mapping: How to translate qubits from the old device into\nqubits on the new device.\n\nReturns:\nThe translated circuit.", "source": "codesearchnet"} {"code": "def data_file(file_fmt, info=None, **kwargs):\n \n if isinstance(info, dict):\n kwargs['hash_key'] = hashlib.sha256(json.dumps(info).encode('utf-8')).hexdigest()\n kwargs.update(info)\n\n return utils.fstr(fmt=file_fmt, **kwargs)", "docstring": "Data file name for given infomation\n\nArgs:\nfile_fmt: file format in terms of f-strings\ninfo: dict, to be hashed and then pass to f-string using 'hash_key'\nthese info will also be passed to f-strings\n**kwargs: arguments for f-strings\n\nReturns:\nstr: data file name", "source": "juraj-google-style"} {"code": "def output_classes(self):\n return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_classes(), self.element_spec)", "docstring": "Returns the class of each component of an element of this dataset.\n\nReturns:\nA (nested) structure of Python `type` objects corresponding to each\ncomponent of an element of this dataset.", "source": "github-repos"} {"code": "def structured_input_signature(self):\n return 
self._func_graph.structured_input_signature", "docstring": "Returns structured signature for this concrete function.\n\nReturns:\nA tuple `(args, kwargs)`, where:\n\n* `args` is a tuple that specifies the expected type or value each for\npositional argument.\n* `kwargs` is a dictionary that specifies the expected type or value\nfor each keyword-only argument.\n\nThe type or value for each argument is specified using one of the\nfollowing:\n\n* A `tf.TypeSpec`, indicating that a Tensor or other TensorFlow-native\nvalue is expected.\n* A Python value, such as an integer, indicating that an equal value\nis expected.\n* A nested structure of `tf.TypeSpec`s and Python values, indicating\nthat a corresponding nested structure is expected.", "source": "github-repos"} {"code": "def _parse(self, stream):\n builddata = json.load(stream)\n log.debug('This is a JSON build file.')\n if ('targets' not in builddata):\n log.warn('Warning: No targets defined here.')\n return\n for tdata in builddata['targets']:\n target = address.new(target=tdata.pop('name'), repo=self.target.repo, path=self.target.path)\n if ((target in self.node) and ('target_obj' in self.node[target])):\n raise error.ButcherError('Target is defined more than once: %s', target)\n rule_obj = targets.new(name=target, ruletype=tdata.pop('type'), **tdata)\n log.debug('New target: %s', target)\n self.add_node(target, {'target_obj': rule_obj})\n for dep in (rule_obj.composed_deps() or []):\n d_target = address.new(dep)\n if (not d_target.repo):\n d_target.repo = self.target.repo\n if ((d_target.repo == self.target.repo) and (not d_target.path)):\n d_target.path = self.target.path\n if (d_target not in self.nodes()):\n self.add_node(d_target)\n log.debug('New dep: %s -> %s', target, d_target)\n self.add_edge(target, d_target)", "docstring": "Parse a JSON BUILD file.\n\nArgs:\nbuilddata: dictionary of buildfile data\nreponame: name of the repo that it came from\npath: directory path within the repo", "source": "codesearchnet"} {"code": "def Categories(unicode_dir=_UNICODE_DIR):\n categories = {}\n\n def DoLine(codes, fields):\n 'Process single UnicodeData.txt line, updating categories.'\n category = fields[2]\n categories.setdefault(category, []).extend(codes)\n if (len(category) > 1):\n short = category[0]\n categories.setdefault(short, []).extend(codes)\n ReadUnicodeTable((unicode_dir + '/UnicodeData.txt'), 15, DoLine)\n return categories", "docstring": "Returns dict mapping category names to code lists.\n\nArgs:\nunicode_dir: Unicode data directory\n\nReturns:\ndict mapping category names to code lists", "source": "codesearchnet"} {"code": "def __method_descriptor(self, service, method_info,\n rosy_method, protorpc_method_info):\n \n descriptor = {}\n\n request_message_type = (resource_container.ResourceContainer.\n get_request_message(protorpc_method_info.remote))\n request_kind = self.__get_request_kind(method_info)\n remote_method = protorpc_method_info.remote\n\n descriptor['path'] = method_info.get_path(service.api_info)\n descriptor['httpMethod'] = method_info.http_method\n descriptor['rosyMethod'] = rosy_method\n descriptor['request'] = self.__request_message_descriptor(\n request_kind, request_message_type,\n method_info.method_id(service.api_info),\n descriptor['path'])\n descriptor['response'] = self.__response_message_descriptor(\n remote_method.response_type(), method_info.method_id(service.api_info))\n\n \n \n \n scopes = (method_info.scopes\n if method_info.scopes is not None\n else service.api_info.scopes)\n if scopes:\n 
descriptor['scopes'] = scopes\n audiences = (method_info.audiences\n if method_info.audiences is not None\n else service.api_info.audiences)\n if audiences:\n descriptor['audiences'] = audiences\n allowed_client_ids = (method_info.allowed_client_ids\n if method_info.allowed_client_ids is not None\n else service.api_info.allowed_client_ids)\n if allowed_client_ids:\n descriptor['clientIds'] = allowed_client_ids\n\n if remote_method.method.__doc__:\n descriptor['description'] = remote_method.method.__doc__\n\n auth_level = (method_info.auth_level\n if method_info.auth_level is not None\n else service.api_info.auth_level)\n if auth_level is not None:\n descriptor['authLevel'] = AUTH_LEVEL.reverse_mapping[auth_level]\n\n descriptor['useRequestUri'] = method_info.use_request_uri(service.api_info)\n\n return descriptor", "docstring": "Describes a method.\n\nArgs:\nservice: endpoints.Service, Implementation of the API as a service.\nmethod_info: _MethodInfo, Configuration for the method.\nrosy_method: string, ProtoRPC method name prefixed with the\nname of the service.\nprotorpc_method_info: protorpc.remote._RemoteMethodInfo, ProtoRPC\ndescription of the method.\n\nReturns:\nDictionary describing the method.", "source": "juraj-google-style"} {"code": "def diff_cleanupEfficiency(self, diffs):\n \n changes = False\n equalities = [] \n lastEquality = None \n pointer = 0 \n pre_ins = False \n pre_del = False \n post_ins = False \n post_del = False \n while pointer < len(diffs):\n if diffs[pointer][0] == self.DIFF_EQUAL: \n if (len(diffs[pointer][1]) < self.Diff_EditCost and\n (post_ins or post_del)):\n \n equalities.append(pointer)\n pre_ins = post_ins\n pre_del = post_del\n lastEquality = diffs[pointer][1]\n else:\n \n equalities = []\n lastEquality = None\n\n post_ins = post_del = False\n else: \n if diffs[pointer][0] == self.DIFF_DELETE:\n post_del = True\n else:\n post_ins = True\n\n \n \n \n \n \n \n\n if lastEquality and ((pre_ins and pre_del and post_ins and post_del) or\n ((len(lastEquality) < self.Diff_EditCost / 2) and\n (pre_ins + pre_del + post_ins + post_del) == 3)):\n \n diffs.insert(equalities[-1], (self.DIFF_DELETE, lastEquality))\n \n diffs[equalities[-1] + 1] = (self.DIFF_INSERT,\n diffs[equalities[-1] + 1][1])\n equalities.pop() \n lastEquality = None\n if pre_ins and pre_del:\n \n post_ins = post_del = True\n equalities = []\n else:\n if len(equalities):\n equalities.pop() \n if len(equalities):\n pointer = equalities[-1]\n else:\n pointer = -1\n post_ins = post_del = False\n changes = True\n pointer += 1\n\n if changes:\n self.diff_cleanupMerge(diffs)", "docstring": "Reduce the number of edits by eliminating operationally trivial\nequalities.\n\nArgs:\ndiffs: Array of diff tuples.", "source": "juraj-google-style"} {"code": "def get_well(self, uwi):\n if (uwi is None):\n raise ValueError('a UWI must be provided')\n matching_wells = [w for w in self if (w.uwi == uwi)]\n return (matching_wells[0] if (len(matching_wells) >= 1) else None)", "docstring": "Returns a Well object identified by UWI\n\nArgs:\nuwi (string): the UWI string for the well.\n\nReturns:\nwell", "source": "codesearchnet"} {"code": "def _date_to_datetime(value):\n \n if not isinstance(value, datetime.date):\n raise TypeError('Cannot convert to datetime expected date value; '\n 'received %s' % value)\n return datetime.datetime(value.year, value.month, value.day)", "docstring": "Convert a date to a datetime for Cloud Datastore storage.\n\nArgs:\nvalue: A datetime.date object.\n\nReturns:\nA datetime object with time 
set to 0:00.", "source": "juraj-google-style"} {"code": "def add_patch(ax, color, pts_per_edge, *edges):\n \n from matplotlib import patches\n from matplotlib import path as _path_mod\n\n s_vals = np.linspace(0.0, 1.0, pts_per_edge)\n \n all_points = []\n for edge in edges:\n points = edge.evaluate_multi(s_vals)\n \n \n all_points.append(points[:, 1:])\n \n first_edge = all_points[0]\n all_points.append(first_edge[:, [0]])\n \n polygon = np.asfortranarray(np.hstack(all_points))\n line, = ax.plot(polygon[0, :], polygon[1, :], color=color)\n \n color = line.get_color()\n \n \n path = _path_mod.Path(polygon.T)\n patch = patches.PathPatch(path, facecolor=color, alpha=0.625)\n ax.add_patch(patch)", "docstring": "Add a polygonal surface patch to a plot.\n\nArgs:\nax (matplotlib.artist.Artist): A matplotlib axis.\ncolor (Tuple[float, float, float]): Color as RGB profile.\npts_per_edge (int): Number of points to use in polygonal\napproximation of edge.\nedges (Tuple[~bezier.curve.Curve, ...]): Curved edges defining\na boundary.", "source": "juraj-google-style"} {"code": "def xray_driver_removed_handler(self, unused_channel, data):\n gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(data, 0)\n driver_data = gcs_entries.Entries(0)\n message = ray.gcs_utils.DriverTableData.GetRootAsDriverTableData(driver_data, 0)\n driver_id = message.DriverId()\n logger.info('Monitor: XRay Driver {} has been removed.'.format(binary_to_hex(driver_id)))\n self._xray_clean_up_entries_for_driver(driver_id)", "docstring": "Handle a notification that a driver has been removed.\n\nArgs:\nunused_channel: The message channel.\ndata: The message data.", "source": "codesearchnet"} {"code": "def subtract_afromb(*inputs, **kwargs):\n try:\n value_a = inputs[0].pop()\n value_b = inputs[1].pop()\n return [IOTileReading(0, 0, (value_b.value - value_a.value))]\n except StreamEmptyError:\n return []", "docstring": "Subtract stream a from stream b.\n\nReturns:\nlist(IOTileReading)", "source": "codesearchnet"} {"code": "def __init__(self, metadata_list, registry=None):\n \n self._metrics = {} \n\n if registry is None:\n self._registry = prometheus_client.CollectorRegistry(auto_describe=True)\n else:\n self._registry = registry\n\n self.lock = threading.RLock()\n\n super(PrometheusStatsCollector, self).__init__(metadata_list)", "docstring": "Instantiates a new PrometheusStatsCollector.\n\nArgs:\nmetadata_list: A list of MetricMetadata objects describing the metrics\nthat the StatsCollector will track.\nregistry: An instance of prometheus_client.CollectorRegistry. If None, a\nnew CollectorRegistry is instantiated. 
Use prometheus_client.REGISTRY\nfor the global default registry.", "source": "juraj-google-style"} {"code": "def load(filenames, prepare_data_iterator=True, batch_size=None, exclude_parameter=False, parameter_only=False):\n \n class Info:\n pass\n info = Info()\n\n proto = nnabla_pb2.NNablaProtoBuf()\n for filename in filenames:\n _, ext = os.path.splitext(filename)\n\n \n \n \n \n \n\n if ext in ['.nntxt', '.prototxt']:\n if not parameter_only:\n with open(filename, 'rt') as f:\n try:\n text_format.Merge(f.read(), proto)\n except:\n logger.critical('Failed to read {}.'.format(filename))\n logger.critical(\n '2 byte characters may be used for file name or folder name.')\n raise\n if len(proto.parameter) > 0:\n if not exclude_parameter:\n nn.load_parameters(filename)\n elif ext in ['.protobuf', '.h5']:\n if not exclude_parameter:\n nn.load_parameters(filename)\n else:\n logger.info('Skip loading parameter.')\n\n elif ext == '.nnp':\n try:\n tmpdir = tempfile.mkdtemp()\n with zipfile.ZipFile(filename, 'r') as nnp:\n for name in nnp.namelist():\n _, ext = os.path.splitext(name)\n if name == 'nnp_version.txt':\n nnp.extract(name, tmpdir)\n with open(os.path.join(tmpdir, name), 'rt') as f:\n pass \n elif ext in ['.nntxt', '.prototxt']:\n nnp.extract(name, tmpdir)\n if not parameter_only:\n with open(os.path.join(tmpdir, name), 'rt') as f:\n text_format.Merge(f.read(), proto)\n if len(proto.parameter) > 0:\n if not exclude_parameter:\n nn.load_parameters(\n os.path.join(tmpdir, name))\n elif ext in ['.protobuf', '.h5']:\n nnp.extract(name, tmpdir)\n if not exclude_parameter:\n nn.load_parameters(os.path.join(tmpdir, name))\n else:\n logger.info('Skip loading parameter.')\n finally:\n shutil.rmtree(tmpdir)\n\n default_context = None\n if proto.HasField('global_config'):\n info.global_config = _global_config(proto)\n default_context = info.global_config.default_context\n if 'cuda' in default_context.backend:\n import nnabla_ext.cudnn\n elif 'cuda:float' in default_context.backend:\n try:\n import nnabla_ext.cudnn\n except:\n pass\n else:\n import nnabla_ext.cpu\n default_context = nnabla_ext.cpu.context()\n\n comm = current_communicator()\n if comm:\n default_context.device_id = str(comm.rank)\n if proto.HasField('training_config'):\n info.training_config = _training_config(proto)\n\n info.datasets = _datasets(\n proto, prepare_data_iterator if prepare_data_iterator is not None else info.training_config.max_epoch > 0)\n\n info.networks = _networks(proto, default_context, batch_size)\n\n info.optimizers = _optimizers(\n proto, default_context, info.networks, info.datasets)\n\n info.monitors = _monitors(\n proto, default_context, info.networks, info.datasets)\n\n info.executors = _executors(proto, info.networks)\n\n return info", "docstring": "load\nLoad network information from files.\n\nArgs:\nfilenames (list): List of filenames.\nReturns:\ndict: Network information.", "source": "juraj-google-style"} {"code": "def metadata(self):\n if (self._info is None):\n try:\n self._info = self._api.objects_get(self._bucket, self._key)\n except Exception as e:\n raise e\n return (ObjectMetadata(self._info) if self._info else None)", "docstring": "Retrieves metadata about the object.\n\nReturns:\nAn ObjectMetadata instance with information about this object.\nRaises:\nException if there was an error requesting the object's metadata.", "source": "codesearchnet"} {"code": "def singularize(plural):\n \n if plural in UNCOUNTABLES:\n return plural\n for i in IRREGULAR:\n if i[1] == plural:\n return i[0]\n for i in 
SINGULARIZE_PATTERNS:\n if re.search(i[0], plural):\n return re.sub(i[0], i[1], plural)\n return plural", "docstring": "Convert plural word to its singular form.\n\nArgs:\nplural: A word in its plural form.\nReturns:\nThe word in its singular form.", "source": "juraj-google-style"} {"code": "def persist_config(run, session, cfg):\n \n from benchbuild.utils import schema as s\n\n for cfg_elem in cfg:\n session.add(\n s.Config(name=cfg_elem, value=cfg[cfg_elem], run_id=run.id))", "docstring": "Persist the configuration in as key-value pairs.\n\nArgs:\nrun: The run we attach the config to.\nsession: The db transaction we belong to.\ncfg: The configuration we want to persist.", "source": "juraj-google-style"} {"code": "def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n \n tstream = BytearrayStream()\n\n self.certificate_type.write(tstream, kmip_version=kmip_version)\n self.certificate_value.write(tstream, kmip_version=kmip_version)\n\n self.length = tstream.length()\n super(Certificate, self).write(ostream, kmip_version=kmip_version)\n ostream.write(tstream.buffer)", "docstring": "Write the data encoding the Certificate object to a stream.\n\nArgs:\nostream (Stream): A data stream in which to encode object data,\nsupporting a write method; usually a BytearrayStream object.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"} {"code": "def chempot_plot_addons(self, plt, xrange, ref_el, axes, pad=2.4, rect=[(- 0.047), 0, 0.84, 1], ylim=[]):\n plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.0)\n axes.set_xlabel(('Chemical potential $\\\\Delta\\\\mu_{%s}$ (eV)' % ref_el))\n ylim = (ylim if ylim else axes.get_ylim())\n plt.xticks(rotation=60)\n plt.ylim(ylim)\n xlim = axes.get_xlim()\n plt.xlim(xlim)\n plt.tight_layout(pad=pad, rect=rect)\n plt.plot([xrange[0], xrange[0]], ylim, '--k')\n plt.plot([xrange[1], xrange[1]], ylim, '--k')\n xy = [np.mean([xrange[1]]), np.mean(ylim)]\n plt.annotate(('%s-rich' % ref_el), xy=xy, xytext=xy, rotation=90, fontsize=17)\n xy = [np.mean([xlim[0]]), np.mean(ylim)]\n plt.annotate(('%s-poor' % ref_el), xy=xy, xytext=xy, rotation=90, fontsize=17)\n return plt", "docstring": "Helper function to a chempot plot look nicer.\n\nArgs:\nplt (Plot) Plot to add things to.\nxrange (list): xlim parameter\nref_el (str): Element of the referenced chempot.\naxes(axes) Axes object from matplotlib\npad (float) For tight layout\nrect (list): For tight layout\nylim (ylim parameter):\n\nreturn (Plot): Modified plot with addons.\nreturn (Plot): Modified plot with addons.", "source": "codesearchnet"} {"code": "def get_controller_info_records(self):\n info_records = []\n for controller_module_name in self._controller_objects.keys():\n with expects.expect_no_raises(('Failed to collect controller info from %s' % controller_module_name)):\n record = self._create_controller_info_record(controller_module_name)\n if record:\n info_records.append(record)\n return info_records", "docstring": "Get the info records for all the controller objects in the manager.\n\nNew info records for each controller object are created for every call\nso the latest info is included.\n\nReturns:\nList of records.ControllerInfoRecord objects. 
Each object contains\nthe info of a type of controller", "source": "codesearchnet"} {"code": "def __get_vtt_angles(self, pvals, nvals):\n angles = (np.arctan2(pvals, nvals) - (np.pi / 4))\n norm = np.maximum(np.minimum(angles, (np.pi - angles)), (((- 1) * np.pi) - angles))\n norm = csr_matrix(norm)\n for (key, value) in self.B.items():\n norm[(0, key)] = 0.0\n return norm", "docstring": "Fit the angles to the model\n\nArgs:\npvals (array-like) : positive values\nnvals (array-like) : negative values\n\nReturns: normalized coef_ values", "source": "codesearchnet"} {"code": "def bbox_transpose(bbox, axis, rows, cols):\n (x_min, y_min, x_max, y_max) = bbox\n if ((axis != 0) and (axis != 1)):\n raise ValueError('Axis must be either 0 or 1.')\n if (axis == 0):\n bbox = [y_min, x_min, y_max, x_max]\n if (axis == 1):\n bbox = [(1 - y_max), (1 - x_max), (1 - y_min), (1 - x_min)]\n return bbox", "docstring": "Transposes a bounding box along given axis.\n\nArgs:\nbbox (tuple): A tuple (x_min, y_min, x_max, y_max).\naxis (int): 0 - main axis, 1 - secondary axis.\nrows (int): Image rows.\ncols (int): Image cols.", "source": "codesearchnet"} {"code": "def qc_curve_group(self, tests, alias=None):\n keys = [k for (k, v) in self.data.items() if isinstance(v, Curve)]\n if (not keys):\n return {}\n all_tests = tests.get('all', tests.get('All', tests.get('ALL', [])))\n data = {test.__name__: test(self, keys, alias) for test in all_tests}\n results = {}\n for (i, key) in enumerate(keys):\n this = {}\n for (test, result) in data.items():\n this[test] = result[i]\n results[key] = this\n return results", "docstring": "Run tests on a cohort of curves.\n\nArgs:\nalias (dict): an alias dictionary, mapping mnemonics to lists of\nmnemonics.\n\nReturns:\ndict.", "source": "codesearchnet"} {"code": "def remove_from_queue(self, index):\n updid = '0'\n objid = ('Q:0/' + str((index + 1)))\n self.avTransport.RemoveTrackFromQueue([('InstanceID', 0), ('ObjectID', objid), ('UpdateID', updid)])", "docstring": "Remove a track from the queue by index. 
The index number is\nrequired as an argument, where the first index is 0.\n\nArgs:\nindex (int): The (0-based) index of the track to remove", "source": "codesearchnet"} {"code": "def __get_request(self, host, soup):\n \n\n url = URLHelper.make_absolute(host, self.__trim_grave_accent(soup[\"action\"])) if soup.has_attr(\"action\") else host\n method_original = soup[\"method\"] if soup.has_attr(\"method\") else \"get\"\n method = \"post\" if method_original.lower() == \"post\" else \"get\"\n data = self.__get_form_data(soup)\n\n return Request(url, method, data)", "docstring": "Build a request from the given soup form.\n\nArgs:\nhost str: The URL of the current queue item.\nsoup (obj): The BeautifulSoup form.\n\nReturns:\n:class:`nyawc.http.Request`: The new Request.", "source": "juraj-google-style"} {"code": "def __eq__(self, rhs):\n \n return self.key == rhs.key and self.sequence_equal(rhs)", "docstring": "Determine value equality with another grouping.\n\nArgs:\nrhs: The object on the right-hand-side of the comparison must\nsupport a property called 'key' and be iterable.\n\nReturns:\nTrue if the keys and sequences are equal, otherwise False.", "source": "juraj-google-style"} {"code": "def error_handler(self, handler):\n \n if not self.opened():\n handler = handler or util.noop\n self._error_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)\n self._dll.JLINKARM_SetErrorOutHandler(self._error_handler)", "docstring": "Setter for the error handler function.\n\nIf the DLL is open, this function is a no-op, so it should be called\nprior to calling ``open()``.\n\nArgs:\nself (JLink): the ``JLink`` instance\nhandler (function): function to call on error messages\n\nReturns:\n``None``", "source": "juraj-google-style"} {"code": "def __init__(self, parser, pytype_single_args):\n self._parser = parser\n self.pytype_single_args = pytype_single_args\n self._pytype_arg_map = pytype_config.args_map()", "docstring": "Initialize a parser.\n\nArgs:\nparser: An argparse.ArgumentParser or compatible object\npytype_single_args: Iterable of args that will be passed to pytype_single", "source": "github-repos"} {"code": "def activate_async(fn, _engine):\n \n @coroutine\n @functools.wraps(fn)\n def wrapper(*args, **kw):\n _engine.activate()\n try:\n if iscoroutinefunction(fn):\n yield from fn(*args, **kw) \n else:\n fn(*args, **kw)\n finally:\n _engine.disable()\n\n return wrapper", "docstring": "Async version of activate decorator\n\nArguments:\nfn (function): function that be wrapped by decorator.\n_engine (Engine): pook engine instance\n\nReturns:\nfunction: decorator wrapper function.", "source": "juraj-google-style"} {"code": "def _CheckWindowsPath(self, filename, artifact_definition, source, path):\n \n result = True\n\n number_of_forward_slashes = path.count('/')\n number_of_backslashes = path.count('\\\\')\n if (number_of_forward_slashes < number_of_backslashes and\n source.separator != '\\\\'):\n logging.warning((\n 'Incorrect path separator: {0:s} in path: {1:s} defined '\n 'by artifact definition: {2:s} in file: {3:s}').format(\n source.separator, path, artifact_definition.name,\n filename))\n result = False\n\n if source.separator != '\\\\':\n return result\n\n path_lower = path.lower()\n path_segments = path_lower.split(source.separator)\n if not path_segments:\n logging.warning((\n 'Empty path defined by artifact definition: {0:s} in file: '\n '{1:s}').format(artifact_definition.name, filename))\n result = False\n\n elif path_segments[0].startswith('%%users.') and path_segments[0] not in (\n 
'%%users.appdata%%', '%%users.homedir%%', '%%users.localappdata%%',\n '%%users.temp%%', '%%users.username%%', '%%users.userprofile%%'):\n logging.warning((\n 'Unsupported \"{0:s}\" in path: {1:s} defined by artifact '\n 'definition: {2:s} in file: {3:s}').format(\n path_segments[0], path, artifact_definition.name, filename))\n result = False\n\n elif path_segments[0] == '%%users.homedir%%':\n logging.warning((\n 'Replace \"%%users.homedir%%\" by \"%%users.userprofile%%\" in path: '\n '{0:s} defined by artifact definition: {1:s} in file: '\n '{2:s}').format(path, artifact_definition.name, filename))\n result = False\n\n elif path_lower.startswith('%%users.userprofile%%\\\\appdata\\\\local\\\\'):\n logging.warning((\n 'Replace \"%%users.userprofile%%\\\\AppData\\\\Local\" by '\n '\"%%users.localappdata%%\" in path: {0:s} defined by artifact '\n 'definition: {1:s} in file: {2:s}').format(\n path, artifact_definition.name, filename))\n result = False\n\n elif path_lower.startswith('%%users.userprofile%%\\\\appdata\\\\roaming\\\\'):\n logging.warning((\n 'Replace \"%%users.userprofile%%\\\\AppData\\\\Roaming\" by '\n '\"%%users.appdata%%\" in path: {0:s} defined by artifact '\n 'definition: {1:s} in file: {2:s}').format(\n path, artifact_definition.name, filename))\n result = False\n\n elif path_lower.startswith('%%users.userprofile%%\\\\application data\\\\'):\n logging.warning((\n 'Replace \"%%users.userprofile%%\\\\Application Data\" by '\n '\"%%users.appdata%%\" in path: {0:s} defined by artifact '\n 'definition: {1:s} in file: {2:s}').format(\n path, artifact_definition.name, filename))\n result = False\n\n elif path_lower.startswith(\n '%%users.userprofile%%\\\\local settings\\\\application data\\\\'):\n logging.warning((\n 'Replace \"%%users.userprofile%%\\\\Local Settings\\\\Application Data\" '\n 'by \"%%users.localappdata%%\" in path: {0:s} defined by artifact '\n 'definition: {1:s} in file: {2:s}').format(\n path, artifact_definition.name, filename))\n result = False\n\n for path_segment in path_segments:\n if path_segment.startswith('%%') and path_segment.endswith('%%'):\n if (path_segment.startswith('%%environ_') and\n path_segment not in self._SUPPORTED_WINDOWS_ENVIRONMENT_VARIABLES):\n result = False\n logging.warning((\n 'Artifact definition: {0:s} in file: {1:s} contains Windows '\n 'path that contains an unuspported environment variable: '\n '\"{2:s}\".').format(\n artifact_definition.name, filename, path_segment))\n\n elif (path_segment.startswith('%%users.') and\n path_segment not in self._SUPPORTED_WINDOWS_USERS_VARIABLES):\n result = False\n logging.warning((\n 'Artifact definition: {0:s} in file: {1:s} contains Windows '\n 'path that contains an unsupported users variable: '\n '\"{2:s}\". ').format(\n artifact_definition.name, filename, path_segment))\n\n return result", "docstring": "Checks if a path is a valid Windows path.\n\nArgs:\nfilename (str): name of the artifacts definition file.\nartifact_definition (ArtifactDefinition): artifact definition.\nsource (SourceType): source definition.\npath (str): path to validate.\n\nReturns:\nbool: True if the Windows path is valid.", "source": "juraj-google-style"} {"code": "def fully_qualify_alias_labels(label, aliases):\n \n for alias, full_name in aliases.items():\n if label == alias:\n return full_name\n elif label.startswith(alias+'.'):\n return full_name + label[len(alias):]\n return label", "docstring": "Replace any aliases in label with the fully qualified name.\n\nArgs:\nlabel -- A label : str representing a name (e.g. 
myos.system)\naliases -- A dict of {alias: real_name} (e.g. {'myos': 'os'})\n\n>>> fully_qualify_alias_labels('myos.mycall', {'myos':'os'})\n'os.mycall'", "source": "juraj-google-style"} {"code": "def ExamineEvent(self, mediator, event):\n pathspec = getattr(event, 'pathspec', None)\n if (pathspec is None):\n return\n if self._paths_with_hashes.get(pathspec, None):\n return\n hash_attributes = {}\n for (attribute_name, attribute_value) in event.GetAttributes():\n if attribute_name.endswith('_hash'):\n hash_attributes[attribute_name] = attribute_value\n self._paths_with_hashes[pathspec] = hash_attributes", "docstring": "Analyzes an event and creates extracts hashes as required.\n\nArgs:\nmediator (AnalysisMediator): mediates interactions between\nanalysis plugins and other components, such as storage and dfvfs.\nevent (EventObject): event to examine.", "source": "codesearchnet"} {"code": "def processMailList(platformNames=[], emails=[]):\n platforms = platform_selection.getPlatformsByName(platformNames, mode='mailfy')\n results = []\n for e in emails:\n for pla in platforms:\n entities = pla.getInfo(query=e, mode='mailfy')\n if (entities != {}):\n results += json.loads(entities)\n return results", "docstring": "Method to perform the email search.\n\nArgs:\n-----\nplatformNames: List of names of the platforms.\nemails: List of numbers to be queried.\n\nReturn:\n-------\nA list of verified emails.", "source": "codesearchnet"} {"code": "def env_problem(env_problem_name, **kwargs):\n \n\n ep_cls = Registries.env_problems[env_problem_name]\n ep = ep_cls()\n ep.initialize(**kwargs)\n return ep", "docstring": "Get and initialize the `EnvProblem` with the given name and batch size.\n\nArgs:\nenv_problem_name: string name of the registered env problem.\n**kwargs: forwarded to env problem's initialize method.\n\nReturns:\nan initialized EnvProblem with the given batch size.", "source": "juraj-google-style"} {"code": "def compare(array, other, op, ty_str):\n \n weld_obj = WeldObject(encoder_, decoder_)\n\n array_var = weld_obj.update(array)\n if isinstance(array, WeldObject):\n array_var = array.obj_id\n weld_obj.dependencies[array_var] = array\n\n \n \n if isinstance(other, str) or isinstance(other, WeldObject):\n other_var = weld_obj.update(other)\n if isinstance(other, WeldObject):\n other_var = tmp.obj_id\n weld_obj.dependencies[other_var] = other\n else:\n other_var = \"%s(%s)\" % (ty_str, str(other))\n\n weld_template = \n weld_obj.weld_code = weld_template % {\"array\": array_var,\n \"other\": other_var,\n \"op\": op, \"ty\": ty_str}\n\n return weld_obj", "docstring": "Performs passed-in comparison op between every element in the passed-in\narray and other, and returns an array of booleans.\n\nArgs:\narray (WeldObject / Numpy.ndarray): Input array\nother (WeldObject / Numpy.ndarray): Second input array\nop (str): Op string used for element-wise comparison (== >= <= !=)\nty (WeldType): Type of each element in the input array\n\nReturns:\nA WeldObject representing this computation", "source": "juraj-google-style"} {"code": "def predict(self, X, break_ties='random', return_probs=False, **kwargs):\n Y_s = self.predict_proba(X, **kwargs)\n self._check(Y_s, typ=list)\n self._check(Y_s[0], typ=np.ndarray)\n Y_p = []\n for Y_ts in Y_s:\n Y_tp = self._break_ties(Y_ts, break_ties)\n Y_p.append(Y_tp.astype(np.int))\n if return_probs:\n return (Y_p, Y_s)\n else:\n return Y_p", "docstring": "Predicts int labels for an input X on all tasks\n\nArgs:\nX: The input for the predict_proba method\nbreak_ties: A 
tie-breaking policy\nreturn_probs: Return the predicted probabilities as well\n\nReturns:\nY_p: A t-length list of n-dim np.ndarrays of predictions in [1, K_t]\n[Optionally: Y_s: A t-length list of [n, K_t] np.ndarrays of\npredicted probabilities]", "source": "codesearchnet"} {"code": "def min(self, value):\n \n if value == self._defaults['min'] and 'min' in self._values:\n del self._values['min']\n else:\n self._values['min'] = value", "docstring": "The min property.\n\nArgs:\nvalue (float). the property value.", "source": "juraj-google-style"} {"code": "def TerminateFlow(client_id,\n flow_id,\n reason=None,\n flow_state=rdf_flow_objects.Flow.FlowState.ERROR):\n \n\n to_terminate = [data_store.REL_DB.ReadFlowObject(client_id, flow_id)]\n\n while to_terminate:\n next_to_terminate = []\n for rdf_flow in to_terminate:\n _TerminateFlow(rdf_flow, reason=reason, flow_state=flow_state)\n next_to_terminate.extend(\n data_store.REL_DB.ReadChildFlowObjects(rdf_flow.client_id,\n rdf_flow.flow_id))\n to_terminate = next_to_terminate", "docstring": "Terminates a flow and all of its children.\n\nArgs:\nclient_id: Client ID of a flow to terminate.\nflow_id: Flow ID of a flow to terminate.\nreason: String with a termination reason.\nflow_state: Flow state to be assigned to a flow after termination. Defaults\nto FlowState.ERROR.", "source": "juraj-google-style"} {"code": "def put(self, source, rel_path, metadata=None):\n \n\n \n if not isinstance(rel_path, basestring):\n rel_path = rel_path.cache_key\n\n sink = self.put_stream(rel_path, metadata=metadata)\n\n try:\n copy_file_or_flo(source, sink)\n except (KeyboardInterrupt, SystemExit):\n path_ = self.path(rel_path)\n if os.path.exists(path_):\n os.remove(path_)\n raise\n\n sink.close()\n\n return os.path.join(self.cache_dir, rel_path)", "docstring": "Copy a file to the repository\n\nArgs:\nsource: Absolute path to the source file, or a file-like object\nrel_path: path relative to the root of the repository", "source": "juraj-google-style"} {"code": "def _check_module_is_image_embedding(module_spec):\n issues = []\n input_info_dict = module_spec.get_input_info_dict()\n if ((list(input_info_dict.keys()) != ['images']) or (input_info_dict['images'].dtype != tf.float32)):\n issues.append(\"Module 'default' signature must require a single input, which must have type float32 and name 'images'.\")\n else:\n try:\n image_util.get_expected_image_size(module_spec)\n except ValueError as e:\n issues.append(('Module does not support hub.get_expected_image_size(); original error was:\\n' + str(e)))\n output_info_dict = module_spec.get_output_info_dict()\n if ('default' not in output_info_dict):\n issues.append(\"Module 'default' signature must have a 'default' output.\")\n else:\n output_type = output_info_dict['default'].dtype\n output_shape = output_info_dict['default'].get_shape()\n if (not ((output_type == tf.float32) and (output_shape.ndims == 2) and output_shape.dims[1].value)):\n issues.append(\"Module 'default' signature must have a 'default' output of tf.Tensor(shape=(_,K), dtype=float32).\")\n if issues:\n raise ValueError(('Module is not usable as image embedding: %r' % issues))", "docstring": "Raises ValueError if `module_spec` is not usable as image embedding.\n\nArgs:\nmodule_spec: A `_ModuleSpec` to test.\n\nRaises:\nValueError: if `module_spec` default signature is not compatible with\nmappingan \"images\" input to a Tensor(float32, shape=(_,K)).", "source": "codesearchnet"} {"code": "def ParseInput(self, a_file):\n \n input_lines = 
a_file.read().splitlines()\n self.ParseLines(input_lines)", "docstring": "Consumes input extracting definitions.\n\nArgs:\na_file: The file like stream to parse.\n\nRaises:\nPDDMError if there are any issues.", "source": "juraj-google-style"} {"code": "def info(self, collector_id):\n cid = self.collector_id\n if collector_id:\n cid = collector_id\n url = '{0}/{1}'.format(self.url, cid)\n request = requests.get(url, auth=self.auth)\n return request.json()", "docstring": "Return a dict of collector.\n\nArgs:\ncollector_id (int): id of collector (optional)", "source": "codesearchnet"} {"code": "def enqueue(self, priority: int, item: TItem) -> bool:\n if (self._drop_set is not None):\n if ((priority, item) in self._drop_set):\n return False\n self._drop_set.add((priority, item))\n if (not self._buckets):\n self._buckets.append([item])\n self._offset = priority\n self._len = 1\n return True\n i = (priority - self._offset)\n if (i < 0):\n self._buckets[:0] = [[] for _ in range((- i))]\n self._offset = priority\n i = 0\n while (i >= len(self._buckets)):\n self._buckets.append([])\n self._buckets[i].append(item)\n self._len += 1\n return True", "docstring": "Adds an entry to the priority queue.\n\nIf drop_duplicate_entries is set and there is already a (priority, item)\nentry in the queue, then the enqueue is ignored. Check the return value\nto determine if an enqueue was kept or dropped.\n\nArgs:\npriority: The priority of the item. Lower priorities dequeue before\nhigher priorities.\nitem: The item associated with the given priority.\n\nReturns:\nTrue if the item was enqueued. False if drop_duplicate_entries is\nset and the item is already in the queue.", "source": "codesearchnet"} {"code": "def VerifyStructure(self, parser_mediator, line):\n structure = self.LOG_LINE\n try:\n parsed_structure = structure.parseString(line)\n except pyparsing.ParseException:\n logger.debug('Not a XChat scrollback log file')\n return False\n try:\n int(parsed_structure.timestamp, 10)\n except ValueError:\n logger.debug('Not a XChat scrollback log file, invalid timestamp string')\n return False\n return True", "docstring": "Verify that this file is a XChat scrollback log file.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nline (str): line from a text file.\n\nReturns:\nbool: True if the line was successfully parsed.", "source": "codesearchnet"} {"code": "def delete(self, personId):\n \n check_type(personId, basestring, may_be_none=False)\n\n \n self._session.delete(API_ENDPOINT + '/' + personId)", "docstring": "Remove a person from the system.\n\nOnly an admin can remove a person.\n\nArgs:\npersonId(basestring): The ID of the person to be deleted.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "juraj-google-style"} {"code": "def user_set_string(namespace, name, metric, ptransform=None):\n labels = create_labels(ptransform=ptransform, namespace=namespace, name=name)\n if isinstance(metric, StringSetData):\n metric = metric.string_set\n if isinstance(metric, set):\n metric = list(metric)\n if isinstance(metric, list):\n metric = coders.IterableCoder(coders.StrUtf8Coder()).encode(metric)\n return create_monitoring_info(USER_STRING_SET_URN, STRING_SET_TYPE, metric, labels)", "docstring": "Return the string set monitoring info for the URN, metric and labels.\n\nArgs:\nnamespace: User-defined namespace of StringSet.\nname: Name of StringSet.\nmetric: The 
StringSetData representing the metrics.\nptransform: The ptransform id used as a label.", "source": "github-repos"} {"code": "def get_absorbing_atom_symbol_index(absorbing_atom, structure):\n \n if isinstance(absorbing_atom, str):\n return absorbing_atom, structure.indices_from_symbol(absorbing_atom)[0]\n elif isinstance(absorbing_atom, int):\n return str(structure[absorbing_atom].specie), absorbing_atom\n else:\n raise ValueError(\"absorbing_atom must be either specie symbol or site index\")", "docstring": "Return the absorbing atom symboll and site index in the given structure.\n\nArgs:\nabsorbing_atom (str/int): symbol or site index\nstructure (Structure)\n\nReturns:\nstr, int: symbol and site index", "source": "juraj-google-style"} {"code": "def get_numpy_to_framework_fn(arr) -> Callable:\n if isinstance(arr, np.ndarray):\n return np.array\n if is_tf_available() and is_tf_tensor(arr):\n import tensorflow as tf\n return tf.convert_to_tensor\n if is_torch_available() and is_torch_tensor(arr):\n import torch\n return torch.tensor\n if is_flax_available() and is_jax_tensor(arr):\n import jax.numpy as jnp\n return jnp.array\n raise ValueError(f'Cannot convert arrays of type {type(arr)}')", "docstring": "Returns a function that converts a numpy array to the framework of the input array.\n\nArgs:\narr (`np.ndarray`): The array to convert.", "source": "github-repos"} {"code": "def newick(self):\n if (self.root.edge_length is None):\n suffix = ';'\n elif isinstance(self.root.edge_length, int):\n suffix = (':%d;' % self.root.edge_length)\n elif (isinstance(self.root.edge_length, float) and self.root.edge_length.is_integer()):\n suffix = (':%d;' % int(self.root.edge_length))\n else:\n suffix = (':%s;' % str(self.root.edge_length))\n if self.is_rooted:\n return ('[&R] %s%s' % (self.root.newick(), suffix))\n else:\n return ('%s%s' % (self.root.newick(), suffix))", "docstring": "Output this ``Tree`` as a Newick string\n\nReturns:\n``str``: Newick string of this ``Tree``", "source": "codesearchnet"} {"code": "def _process_in_collection_filter_directive(filter_operation_info, location, context, parameters):\n filtered_field_type = filter_operation_info.field_type\n filtered_field_name = filter_operation_info.field_name\n argument_inferred_type = GraphQLList(strip_non_null_from_type(filtered_field_type))\n (argument_expression, non_existence_expression) = _represent_argument(location, context, parameters[0], argument_inferred_type)\n filter_predicate = expressions.BinaryComposition(u'contains', argument_expression, expressions.LocalField(filtered_field_name))\n if (non_existence_expression is not None):\n filter_predicate = expressions.BinaryComposition(u'||', non_existence_expression, filter_predicate)\n return blocks.Filter(filter_predicate)", "docstring": "Return a Filter basic block that checks for a value's existence in a collection.\n\nArgs:\nfilter_operation_info: FilterOperationInfo object, containing the directive and field info\nof the field where the filter is to be applied.\nlocation: Location where this filter is used.\ncontext: dict, various per-compilation data (e.g. declared tags, whether the current block\nis optional, etc.). 
May be mutated in-place in this function!\nparameters: list of 1 element, specifying the collection in which the value must exist;\nif the collection is optional and missing, the check will return True\n\nReturns:\na Filter basic block that performs the collection existence check", "source": "codesearchnet"} {"code": "def buckingham_input(self, structure, keywords, library=None, uc=True, valence_dict=None):\n gin = self.keyword_line(*keywords)\n gin += self.structure_lines(structure, symm_flg=(not uc))\n if (not library):\n gin += self.buckingham_potential(structure, valence_dict)\n else:\n gin += self.library_line(library)\n return gin", "docstring": "Gets a GULP input for an oxide structure and buckingham potential\nfrom library.\n\nArgs:\nstructure: pymatgen.core.structure.Structure\nkeywords: GULP first line keywords.\nlibrary (Default=None): File containing the species and potential.\nuc (Default=True): Unit Cell Flag.\nvalence_dict: {El: valence}", "source": "codesearchnet"} {"code": "def ping(hostname: str, timeout_s: int=5) -> bool:\n if (sys.platform == 'win32'):\n timeout_ms = (timeout_s * 1000)\n args = ['ping', hostname, '-n', '1', '-w', str(timeout_ms)]\n elif sys.platform.startswith('linux'):\n args = ['ping', hostname, '-c', '1', '-w', str(timeout_s)]\n else:\n raise AssertionError(\"Don't know how to ping on this operating system\")\n proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n proc.communicate()\n retcode = proc.returncode\n return (retcode == 0)", "docstring": "Pings a host, using OS tools.\n\nArgs:\nhostname: host name or IP address\ntimeout_s: timeout in seconds\n\nReturns:\nwas the ping successful?", "source": "codesearchnet"} {"code": "def from_config(cls, path, directory=None):\n \n if not exists(path):\n raise REPPError('REPP config file not found: {}'.format(path))\n confdir = dirname(path)\n\n \n conf = io.open(path, encoding='utf-8').read()\n conf = re.sub(r';.*', '', conf).replace('\\n',' ')\n m = re.search(\n r'repp-modules\\s*:=\\s*((?:[-\\w]+\\s+)*[-\\w]+)\\s*\\.', conf)\n t = re.search(\n r'repp-tokenizer\\s*:=\\s*([-\\w]+)\\s*\\.', conf)\n a = re.search(\n r'repp-calls\\s*:=\\s*((?:[-\\w]+\\s+)*[-\\w]+)\\s*\\.', conf)\n f = re.search(\n r'format\\s*:=\\s*(\\w+)\\s*\\.', conf)\n d = re.search(\n r'repp-directory\\s*:=\\s*(.*)\\.\\s*$', conf)\n\n if m is None:\n raise REPPError('repp-modules option must be set')\n if t is None:\n raise REPPError('repp-tokenizer option must be set')\n\n mods = m.group(1).split()\n tok = t.group(1).strip()\n active = a.group(1).split() if a is not None else None\n fmt = f.group(1).strip() if f is not None else None\n\n if directory is None:\n if d is not None:\n directory = d.group(1).strip(' \"')\n elif exists(joinpath(confdir, tok + '.rpp')):\n directory = confdir\n elif exists(joinpath(confdir, 'rpp', tok + '.rpp')):\n directory = joinpath(confdir, 'rpp')\n elif exists(joinpath(confdir, '../rpp', tok + '.rpp')):\n directory = joinpath(confdir, '../rpp')\n else:\n raise REPPError('Could not find a suitable REPP directory.')\n\n \n return REPP.from_file(\n joinpath(directory, tok + '.rpp'),\n directory=directory,\n active=active\n )", "docstring": "Instantiate a REPP from a PET-style `.set` configuration file.\n\nThe *path* parameter points to the configuration file.\nSubmodules are loaded from *directory*. 
If *directory* is not\ngiven, it is the directory part of *path*.\n\nArgs:\npath (str): the path to the REPP configuration file\ndirectory (str, optional): the directory in which to search\nfor submodules", "source": "juraj-google-style"} {"code": "def __radd__(self, other):\n return self + other", "docstring": "Returns the sum of `other` and `self`.\n\nArgs:\nother: Another Dimension, or a value accepted by `as_dimension`.\n\nReturns:\nA Dimension whose value is the sum of `self` and `other`.", "source": "github-repos"} {"code": "def count_true_positive(truth, recommend):\n \n tp = 0\n for r in recommend:\n if r in truth:\n tp += 1\n return tp", "docstring": "Count number of true positives from given sets of samples.\n\nArgs:\ntruth (numpy 1d array): Set of truth samples.\nrecommend (numpy 1d array): Ordered set of recommended samples.\n\nReturns:\nint: Number of true positives.", "source": "juraj-google-style"} {"code": "def is_open(self):\n return bool(self._telnet_client.is_open)", "docstring": "This function returns the state of the telnet connection to the\nunderlying AttenuatorDevice.\n\nReturns:\nTrue if there is a successfully open connection to the\nAttenuatorDevice.", "source": "github-repos"} {"code": "def submit_snl(self, snl):\n try:\n snl = (snl if isinstance(snl, list) else [snl])\n jsondata = [s.as_dict() for s in snl]\n payload = {'snl': json.dumps(jsondata, cls=MontyEncoder)}\n response = self.session.post('{}/snl/submit'.format(self.preamble), data=payload)\n if (response.status_code in [200, 400]):\n resp = json.loads(response.text, cls=MontyDecoder)\n if resp['valid_response']:\n if resp.get('warning'):\n warnings.warn(resp['warning'])\n return resp['inserted_ids']\n else:\n raise MPRestError(resp['error'])\n raise MPRestError('REST error with status code {} and error {}'.format(response.status_code, response.text))\n except Exception as ex:\n raise MPRestError(str(ex))", "docstring": "Submits a list of StructureNL to the Materials Project site.\n\n.. note::\n\nAs of now, this MP REST feature is open only to a select group of\nusers. 
Opening up submissions to all users is being planned for\nthe future.\n\nArgs:\nsnl (StructureNL/[StructureNL]): A single StructureNL, or a list\nof StructureNL objects\n\nReturns:\nA list of inserted submission ids.\n\nRaises:\nMPRestError", "source": "codesearchnet"} {"code": "def NewFromJSON(data):\n return SharedFile(sharekey=data.get('sharekey', None), name=data.get('name', None), user=User.NewFromJSON(data.get('user', None)), title=data.get('title', None), description=data.get('description', None), posted_at=data.get('posted_at', None), permalink=data.get('permalink', None), width=data.get('width', None), height=data.get('height', None), views=data.get('views', 0), likes=data.get('likes', 0), saves=data.get('saves', 0), comments=data.get('comments', None), nsfw=data.get('nsfw', False), image_url=data.get('image_url', None), source_url=data.get('source_url', None), saved=data.get('saved', False), liked=data.get('liked', False))", "docstring": "Create a new SharedFile instance from a JSON dict.\n\nArgs:\ndata (dict): JSON dictionary representing a SharedFile.\n\nReturns:\nA SharedFile instance.", "source": "codesearchnet"} {"code": "def start(self):\n server_sock = self.start_listening_socket()\n host = self.get_server_ip()\n port = server_sock.getsockname()[1]\n addr = (host, port)\n logging.info('listening for reservations at {0}'.format(addr))\n\n def _listen(self, sock):\n CONNECTIONS = []\n CONNECTIONS.append(sock)\n while (not self.done):\n (read_socks, write_socks, err_socks) = select.select(CONNECTIONS, [], [], 60)\n for sock in read_socks:\n if (sock == server_sock):\n (client_sock, client_addr) = sock.accept()\n CONNECTIONS.append(client_sock)\n logging.debug('client connected from {0}'.format(client_addr))\n else:\n try:\n msg = self.receive(sock)\n self._handle_message(sock, msg)\n except Exception as e:\n logging.debug(e)\n sock.close()\n CONNECTIONS.remove(sock)\n server_sock.close()\n t = threading.Thread(target=_listen, args=(self, server_sock))\n t.daemon = True\n t.start()\n return addr", "docstring": "Start listener in a background thread\n\nReturns:\naddress of the Server as a tuple of (host, port)", "source": "codesearchnet"} {"code": "def depth_march_average_ground_temperature(self, value=None):\n if (value is not None):\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float for field `depth_march_average_ground_temperature`'.format(value))\n self._depth_march_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_march_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_march_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"} {"code": "def get_path(url):\n \n\n url = urlsplit(url)\n path = url.path\n if url.query:\n path += \"?{}\".format(url.query)\n return path", "docstring": "Get the path from a given url, including the querystring.\n\nArgs:\nurl (str)\nReturns:\nstr", "source": "juraj-google-style"} {"code": "def _get_bounds(self, layers):\n extent_query = 'SELECT ST_EXTENT(the_geom) AS the_geom FROM ({query}) AS t{idx}\\n'\n union_query = 'UNION ALL\\n'.join([extent_query.format(query=layer.orig_query, idx=idx) for (idx, layer) in enumerate(layers) if (not layer.is_basemap)])\n extent = self.sql_client.send(utils.minify_sql(('SELECT', ' ST_XMIN(ext) AS west,', ' ST_YMIN(ext) AS 
south,', ' ST_XMAX(ext) AS east,', ' ST_YMAX(ext) AS north', 'FROM (', ' SELECT ST_Extent(the_geom) AS ext', ' FROM ({union_query}) AS _wrap1', ') AS _wrap2')).format(union_query=union_query), do_post=False)\n return extent['rows'][0]", "docstring": "Return the bounds of all data layers involved in a cartoframes map.\n\nArgs:\nlayers (list): List of cartoframes layers. See `cartoframes.layers`\nfor all types.\n\nReturns:\ndict: Dictionary of northern, southern, eastern, and western bounds\nof the superset of data layers. Keys are `north`, `south`,\n`east`, and `west`. Units are in WGS84.", "source": "codesearchnet"} {"code": "def _build(self, inputs):\n input_shape = tuple(inputs.get_shape().as_list())\n if (len(input_shape) != 2):\n raise base.IncompatibleShapeError('{}: rank of shape must be 2 not: {}'.format(self.scope_name, len(input_shape)))\n if (input_shape[1] is None):\n raise base.IncompatibleShapeError('{}: Input size must be specified at module build time'.format(self.scope_name))\n if ((self._input_shape is not None) and (input_shape[1] != self._input_shape[1])):\n raise base.IncompatibleShapeError('{}: Input shape must be [batch_size, {}] not: [batch_size, {}]'.format(self.scope_name, self._input_shape[1], input_shape[1]))\n self._input_shape = input_shape\n dtype = inputs.dtype\n if ('w' not in self._initializers):\n self._initializers['w'] = create_linear_initializer(self._input_shape[1], dtype)\n if (('b' not in self._initializers) and self._use_bias):\n self._initializers['b'] = create_bias_initializer(self._input_shape[1], dtype)\n weight_shape = (self._input_shape[1], self.output_size)\n self._w = tf.get_variable('w', shape=weight_shape, dtype=dtype, initializer=self._initializers['w'], partitioner=self._partitioners.get('w', None), regularizer=self._regularizers.get('w', None))\n outputs = tf.matmul(inputs, self._w)\n if self._use_bias:\n bias_shape = (self.output_size,)\n self._b = tf.get_variable('b', shape=bias_shape, dtype=dtype, initializer=self._initializers['b'], partitioner=self._partitioners.get('b', None), regularizer=self._regularizers.get('b', None))\n outputs += self._b\n return outputs", "docstring": "Connects the Linear module into the graph, with input Tensor `inputs`.\n\nIf this is not the first time the module has been connected to the graph,\nthe Tensor provided here must have the same final dimension, in order for\nthe existing variables to be the correct size for the multiplication. The\nbatch size may differ for each connection.\n\nArgs:\ninputs: A 2D Tensor of size [batch_size, input_size].\n\nReturns:\nA 2D Tensor of size [batch_size, output_size].\n\nRaises:\nbase.IncompatibleShapeError: If the input is not a 2-D `Tensor` with\nthe size of the second dimension specified.\nbase.IncompatibleShapeError: If reconnecting an already connected module\ninto the graph, and the shape of the input is not compatible with\nprevious inputs.", "source": "codesearchnet"} {"code": "def _is_variant_with_internal_stacking(t):\n type_id = _variant_type_id(t)\n return type_id in _INTERNAL_STACKING_TYPE_IDS", "docstring": "Identifies variant tensors which pfor always maintains as scalars.\n\nFor these, the pfor tensor is recorded as \"stacked\" if the content of the\nvariant tensor (e.g. 
the elements of a TensorList) are all stacked.\n\nArgs:\nt: A tensor to identify.\nReturns:\nTrue if `t` is a TensorList/Optional, False not, None if unknown.", "source": "github-repos"} {"code": "def add_controller_info_record(self, controller_info_record):\n self.controller_info.append(controller_info_record)", "docstring": "Adds a controller info record to results.\n\nThis can be called multiple times for each test class.\n\nArgs:\ncontroller_info_record: ControllerInfoRecord object to be added to\nthe result.", "source": "github-repos"} {"code": "def compile_cpfs(self,\n scope: Dict[str, TensorFluent],\n batch_size: Optional[int] = None,\n noise: Optional[Noise] = None) -> Tuple[List[CPFPair], List[CPFPair]]:\n \n interm_fluents = self.compile_intermediate_cpfs(scope, batch_size, noise)\n scope.update(dict(interm_fluents))\n next_state_fluents = self.compile_state_cpfs(scope, batch_size, noise)\n return interm_fluents, next_state_fluents", "docstring": "Compiles the intermediate and next state fluent CPFs given the current `state` and `action` scope.\n\nArgs:\nscope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.\nbatch_size (Optional[int]): The batch size.\n\nReturns:\nTuple[List[CPFPair], List[CPFPair]]: A pair of lists of TensorFluent\nrepresenting the intermediate and state CPFs.", "source": "juraj-google-style"} {"code": "def _add_gmaf(self, variant_obj, info_dict):\n \n \n for transcript in variant_obj.transcripts:\n gmaf_raw = transcript.GMAF\n if gmaf_raw:\n gmaf = float(gmaf_raw.split(':')[-1])\n variant_obj.add_frequency('GMAF', gmaf)\n \n if not variant_obj.thousand_g:\n variant_obj.thousand_g = gmaf", "docstring": "Add the gmaf frequency\n\nArgs:\nvariant_obj (puzzle.models.Variant)\ninfo_dict (dict): A info dictionary", "source": "juraj-google-style"} {"code": "def extract_subtree(self, node):\n if (not isinstance(node, Node)):\n raise TypeError('node must be a Node')\n r = self.root\n self.root = node\n o = copy(self)\n self.root = r\n return o", "docstring": "Return a copy of the subtree rooted at ``node``\n\nArgs:\n``node`` (``Node``): The root of the desired subtree\n\nReturns:\n``Tree``: A copy of the subtree rooted at ``node``", "source": "codesearchnet"} {"code": "def variable(dims=1):\n if (dims == 1):\n return Poly({(1,): 1}, dim=1, shape=())\n return Poly({tuple(indices): indices for indices in numpy.eye(dims, dtype=int)}, dim=dims, shape=(dims,))", "docstring": "Simple constructor to create single variables to create polynomials.\n\nArgs:\ndims (int):\nNumber of dimensions in the array.\n\nReturns:\n(Poly):\nPolynomial array with unit components in each dimension.\n\nExamples:\n>>> print(variable())\nq0\n>>> print(variable(3))\n[q0, q1, q2]", "source": "codesearchnet"} {"code": "def run_example(example_coroutine, *extra_args):\n \n args = _get_parser(extra_args).parse_args()\n logging.basicConfig(level=logging.DEBUG if args.debug else logging.WARNING)\n \n \n cookies = hangups.auth.get_auth_stdin(args.token_path)\n client = hangups.Client(cookies)\n loop = asyncio.get_event_loop()\n task = asyncio.ensure_future(_async_main(example_coroutine, client, args),\n loop=loop)\n\n try:\n loop.run_until_complete(task)\n except KeyboardInterrupt:\n task.cancel()\n loop.run_until_complete(task)\n finally:\n loop.close()", "docstring": "Run a hangups example coroutine.\n\nArgs:\nexample_coroutine (coroutine): Coroutine to run with a connected\nhangups client and arguments namespace as arguments.\nextra_args (str): Any extra command line 
arguments required by the\nexample.", "source": "juraj-google-style"} {"code": "def ReleaseRecords(cls, ids, token):\n \n with data_store.DB.GetMutationPool() as mutation_pool:\n mutation_pool.QueueReleaseRecords(ids)", "docstring": "Release records identified by subjects.\n\nReleases any claim on the records identified by ids.\n\nArgs:\nids: A list of ids provided by ClaimRecords.\ntoken: The database access token to write with.\n\nRaises:\nLockError: If the queue is not locked.", "source": "juraj-google-style"} {"code": "def _help_handler(self, args, screen_info=None):\n _ = screen_info\n if not args:\n return self.get_help()\n elif len(args) == 1:\n return self.get_help(args[0])\n else:\n return RichTextLines(['ERROR: help takes only 0 or 1 input argument.'])", "docstring": "Command handler for \"help\".\n\n\"help\" is a common command that merits built-in support from this class.\n\nArgs:\nargs: Command line arguments to \"help\" (not including \"help\" itself).\nscreen_info: (dict) Information regarding the screen, e.g., the screen\nwidth in characters: {\"cols\": 80}\n\nReturns:\n(RichTextLines) Screen text output.", "source": "github-repos"} {"code": "def run_excel_to_html():\n parser = argparse.ArgumentParser(prog='excel_to_html')\n parser.add_argument('-p', nargs='?', help='Path to an excel file for conversion.')\n parser.add_argument('-s', nargs='?', help='The name of a sheet in our excel file. Defaults to \"Sheet1\".')\n parser.add_argument('-css', nargs='?', help='Space separated css classes to append to the table.')\n parser.add_argument('-m', action='store_true', help='Merge, attempt to combine merged cells.')\n parser.add_argument('-c', nargs='?', help='Caption for creating an accessible table.')\n parser.add_argument('-d', nargs='?', help='Two strings separated by a | character. The first string is for the html \"summary\" attribute and the second string is for the html \"details\" attribute. both values must be provided and nothing more.')\n parser.add_argument('-r', action='store_true', help='Row headers. 
Does the table have row headers?')\n args = parser.parse_args()\n inputs = {'p': args.p, 's': args.s, 'css': args.css, 'm': args.m, 'c': args.c, 'd': args.d, 'r': args.r}\n p = inputs['p']\n s = (inputs['s'] if inputs['s'] else 'Sheet1')\n css = (inputs['css'] if inputs['css'] else '')\n m = (inputs['m'] if inputs['m'] else False)\n c = (inputs['c'] if inputs['c'] else '')\n d = (inputs['d'].split('|') if inputs['d'] else [])\n r = (inputs['r'] if inputs['r'] else False)\n html = fp.excel_to_html(p, sheetname=s, css_classes=css, caption=c, details=d, row_headers=r, merge=m)\n print(html)", "docstring": "Run the excel_to_html function from the\ncommand-line.\n\nArgs:\n-p path to file\n-s name of the sheet to convert\n-css classes to apply\n-m attempt to combine merged cells\n-c caption for accessibility\n-su summary for accessibility\n-d details for accessibility\n\nExample use:\n\nexcel_to_html -p myfile.xlsx -s SheetName -css diablo-python -m true", "source": "codesearchnet"} {"code": "def execute(self):\n self.generate_workflow_description()\n if self.batch_values:\n self.id = self.workflow.launch_batch_workflow(self.definition)\n else:\n self.id = self.workflow.launch(self.definition)\n return self.id", "docstring": "Execute the workflow.\n\nArgs:\nNone\n\nReturns:\nWorkflow_id", "source": "codesearchnet"} {"code": "def update(self, settings):\n \n Instrument.update(self, settings)\n\n for key, value in settings.items():\n if key == 'test1':\n self._internal_state = value", "docstring": "updates the internal dictionary and sends changed values to instrument\nArgs:\nsettings: parameters to be set\n# mabe in the future:\n# Returns: boolean that is true if update successful", "source": "juraj-google-style"} {"code": "def _ParseKeysFromFindSpecs(self, parser_mediator, win_registry, find_specs):\n searcher = dfwinreg_registry_searcher.WinRegistrySearcher(win_registry)\n for registry_key_path in iter(searcher.Find(find_specs=find_specs)):\n if parser_mediator.abort:\n break\n registry_key = searcher.GetKeyByPath(registry_key_path)\n self._ParseKey(parser_mediator, registry_key)", "docstring": "Parses the Registry keys from FindSpecs.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nwin_registry (dfwinreg.WinRegistryKey): root Windows Registry key.\nfind_specs (dfwinreg.FindSpecs): Keys to search for.", "source": "codesearchnet"} {"code": "def build_wheel(dir_path: str, cwd: str, project_name: str, platform: str, collab: str=False) -> None:\n env = os.environ.copy()\n if is_windows():\n env['HOMEPATH'] = 'C:'\n env['project_name'] = project_name\n if collab == 'True':\n env['collaborator_build'] = True\n subprocess.run([sys.executable, 'tensorflow/tools/pip_package/setup.py', 'bdist_wheel', f'--dist-dir={dir_path}', f'--plat-name={platform}'], check=True, cwd=cwd, env=env)", "docstring": "Build the wheel in the target directory.\n\nArgs:\ndir_path: directory where the wheel will be stored\ncwd: path to directory with wheel source files\nproject_name: name to pass to setup.py.\nplatform: platform name to pass to setup.py.\ncollab: defines if this is a collab build", "source": "github-repos"} {"code": "def SetLookupHash(self, lookup_hash):\n if (lookup_hash not in self.SUPPORTED_HASHES):\n raise ValueError('Unsupported lookup hash: {0!s}'.format(lookup_hash))\n self.lookup_hash = lookup_hash", "docstring": "Sets the hash to query.\n\nArgs:\nlookup_hash (str): name of the hash attribute to look up.\n\nRaises:\nValueError: if the lookup hash is not supported.", "source": 
"codesearchnet"} {"code": "def deliver_tx(self, raw_transaction):\n self.abort_if_abci_chain_is_not_synced()\n logger.debug('deliver_tx: %s', raw_transaction)\n transaction = self.bigchaindb.is_valid_transaction(decode_transaction(raw_transaction), self.block_transactions)\n if (not transaction):\n logger.debug('deliver_tx: INVALID')\n return ResponseDeliverTx(code=CodeTypeError)\n else:\n logger.debug('storing tx')\n self.block_txn_ids.append(transaction.id)\n self.block_transactions.append(transaction)\n return ResponseDeliverTx(code=CodeTypeOk)", "docstring": "Validate the transaction before mutating the state.\n\nArgs:\nraw_tx: a raw string (in bytes) transaction.", "source": "codesearchnet"} {"code": "def FileEntryExistsByPathSpec(self, path_spec):\n \n volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(path_spec)\n\n \n \n if volume_index is None:\n location = getattr(path_spec, 'location', None)\n return location is not None and location == self.LOCATION_ROOT\n\n return 0 <= volume_index < self._fsapfs_container.number_of_volumes", "docstring": "Determines if a file entry for a path specification exists.\n\nArgs:\npath_spec (PathSpec): a path specification.\n\nReturns:\nbool: True if the file entry exists.", "source": "juraj-google-style"} {"code": "def _ConvertFileProtoToFileDescriptor(self, file_proto):\n if (file_proto.name not in self._file_descriptors):\n built_deps = list(self._GetDeps(file_proto.dependency))\n direct_deps = [self.FindFileByName(n) for n in file_proto.dependency]\n public_deps = [direct_deps[i] for i in file_proto.public_dependency]\n file_descriptor = descriptor.FileDescriptor(pool=self, name=file_proto.name, package=file_proto.package, syntax=file_proto.syntax, options=_OptionsOrNone(file_proto), serialized_pb=file_proto.SerializeToString(), dependencies=direct_deps, public_dependencies=public_deps)\n scope = {}\n for dependency in built_deps:\n scope.update(self._ExtractSymbols(dependency.message_types_by_name.values()))\n scope.update(((_PrefixWithDot(enum.full_name), enum) for enum in dependency.enum_types_by_name.values()))\n for message_type in file_proto.message_type:\n message_desc = self._ConvertMessageDescriptor(message_type, file_proto.package, file_descriptor, scope, file_proto.syntax)\n file_descriptor.message_types_by_name[message_desc.name] = message_desc\n for enum_type in file_proto.enum_type:\n file_descriptor.enum_types_by_name[enum_type.name] = self._ConvertEnumDescriptor(enum_type, file_proto.package, file_descriptor, None, scope)\n for (index, extension_proto) in enumerate(file_proto.extension):\n extension_desc = self._MakeFieldDescriptor(extension_proto, file_proto.package, index, is_extension=True)\n extension_desc.containing_type = self._GetTypeFromScope(file_descriptor.package, extension_proto.extendee, scope)\n self._SetFieldType(extension_proto, extension_desc, file_descriptor.package, scope)\n file_descriptor.extensions_by_name[extension_desc.name] = extension_desc\n for desc_proto in file_proto.message_type:\n self._SetAllFieldTypes(file_proto.package, desc_proto, scope)\n if file_proto.package:\n desc_proto_prefix = _PrefixWithDot(file_proto.package)\n else:\n desc_proto_prefix = ''\n for desc_proto in file_proto.message_type:\n desc = self._GetTypeFromScope(desc_proto_prefix, desc_proto.name, scope)\n file_descriptor.message_types_by_name[desc_proto.name] = desc\n for (index, service_proto) in enumerate(file_proto.service):\n file_descriptor.services_by_name[service_proto.name] = 
self._MakeServiceDescriptor(service_proto, index, scope, file_proto.package, file_descriptor)\n self.Add(file_proto)\n self._file_descriptors[file_proto.name] = file_descriptor\n return self._file_descriptors[file_proto.name]", "docstring": "Creates a FileDescriptor from a proto or returns a cached copy.\n\nThis method also has the side effect of loading all the symbols found in\nthe file into the appropriate dictionaries in the pool.\n\nArgs:\nfile_proto: The proto to convert.\n\nReturns:\nA FileDescriptor matching the passed in proto.", "source": "codesearchnet"} {"code": "def get_container_image_from_options(pipeline_options):\n worker_options = pipeline_options.view_as(WorkerOptions)\n if worker_options.sdk_container_image:\n return worker_options.sdk_container_image\n container_repo = names.DATAFLOW_CONTAINER_IMAGE_REPOSITORY\n image_name = '{repository}/beam_python{major}.{minor}_sdk'.format(repository=container_repo, major=sys.version_info[0], minor=sys.version_info[1])\n image_tag = _get_required_container_version()\n return image_name + ':' + image_tag", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nArgs:\npipeline_options (PipelineOptions): A container for pipeline options.\n\nReturns:\nstr: Container image for remote execution.", "source": "github-repos"} {"code": "def from_dict(cls, d):\n labels_dict = d['labels_dict']\n projections = {}\n structure = None\n if isinstance(list(d['bands'].values())[0], dict):\n eigenvals = {Spin(int(k)): np.array(d['bands'][k]['data']) for k in d['bands']}\n else:\n eigenvals = {Spin(int(k)): d['bands'][k] for k in d['bands']}\n if ('structure' in d):\n structure = Structure.from_dict(d['structure'])\n if d.get('projections'):\n projections = {Spin(int(spin)): np.array(v) for (spin, v) in d['projections'].items()}\n return BandStructure(d['kpoints'], eigenvals, Lattice(d['lattice_rec']['matrix']), d['efermi'], labels_dict, structure=structure, projections=projections)", "docstring": "Create from dict.\n\nArgs:\nA dict with all data for a band structure object.\n\nReturns:\nA BandStructure object", "source": "codesearchnet"} {"code": "def __init__(self, ascii_codepage='cp1252', key_path_prefix=''):\n \n super(FakeWinRegistryFile, self).__init__(\n ascii_codepage=ascii_codepage, key_path_prefix=key_path_prefix)\n self._root_key = None", "docstring": "Initializes a Windows Registry file.\n\nArgs:\nascii_codepage (str): ASCII string codepage.\nkey_path_prefix (str): Windows Registry key path prefix.", "source": "juraj-google-style"} {"code": "class ActivityRegularization(Layer):\n\n def __init__(self, l1=0.0, l2=0.0, **kwargs):\n super().__init__(activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs)\n self.supports_masking = True\n self.l1 = l1\n self.l2 = l2\n self._build_at_init()\n\n def call(self, inputs):\n return inputs\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n base_config = super().get_config()\n base_config.pop('activity_regularizer', None)\n config = {'l1': self.l1, 'l2': self.l2}\n return {**base_config, **config}", "docstring": "Layer that applies an update to the cost function based input activity.\n\nArgs:\nl1: L1 regularization factor (positive float).\nl2: L2 regularization factor (positive float).\n\nInput shape:\nArbitrary. 
Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.\n\nOutput shape:\nSame shape as input.", "source": "github-repos"} {"code": "def get_pages(self, url, params=None, **kwargs):\n check_type(url, basestring, may_be_none=False)\n check_type(params, dict)\n erc = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['GET'])\n response = self.request('GET', url, erc, params=params, **kwargs)\n while True:\n (yield extract_and_parse_json(response))\n if response.links.get('next'):\n next_url = response.links.get('next').get('url')\n next_url = _fix_next_url(next_url)\n response = self.request('GET', next_url, erc, **kwargs)\n else:\n break", "docstring": "Return a generator that GETs and yields pages of data.\n\nProvides native support for RFC5988 Web Linking.\n\nArgs:\nurl(basestring): The URL of the API endpoint.\nparams(dict): The parameters for the HTTP GET request.\n**kwargs:\nerc(int): The expected (success) response code for the request.\nothers: Passed on to the requests package.\n\nRaises:\nApiError: If anything other than the expected response code is\nreturned by the Webex Teams API endpoint.", "source": "codesearchnet"} {"code": "def _get_node_attribute_at_index(self, node_index, attr, attr_name):\n if not self._inbound_nodes:\n raise RuntimeError('The layer has never been called and thus has no defined ' + attr_name + '.')\n if not len(self._inbound_nodes) > node_index:\n raise ValueError('Asked to get ' + attr_name + ' at node ' + str(node_index) + ', but the layer has only ' + str(len(self._inbound_nodes)) + ' inbound nodes.')\n values = getattr(self._inbound_nodes[node_index], attr)\n if isinstance(values, list) and len(values) == 1:\n return values[0]\n else:\n return values", "docstring": "Private utility to retrieves an attribute (e.g. inputs) from a node.\n\nThis is used to implement the methods:\n- get_input_shape_at\n- get_output_shape_at\n- get_input_at\netc...\n\nArgs:\nnode_index: Integer index of the node from which\nto retrieve the attribute.\nattr: Exact node attribute name.\nattr_name: Human-readable attribute name, for error messages.\n\nReturns:\nThe layer's attribute `attr` at the node of index `node_index`.\n\nRaises:\nRuntimeError: If the layer has no inbound nodes, or if called in Eager\nmode.\nValueError: If the index provided does not match any node.", "source": "github-repos"} {"code": "def lenet5(images, labels):\n images = pt.wrap(images)\n with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=1e-05):\n return images.conv2d(5, 20).max_pool(2, 2).conv2d(5, 50).max_pool(2, 2).flatten().fully_connected(500).softmax_classifier(10, labels)", "docstring": "Creates a multi layer convolutional network.\n\nThe architecture is similar to that defined in LeNet 5.\nPlease change this to experiment with architectures.\n\nArgs:\nimages: The input images.\nlabels: The labels as dense one-hot vectors.\nReturns:\nA softmax result.", "source": "codesearchnet"} {"code": "def store_sample_set(self, md5_list):\n \n\n \n if not md5_list:\n print 'Warning: Trying to store an empty sample_set'\n return None\n\n \n md5_list = list(set(md5_list))\n\n for md5 in md5_list:\n if not self.has_sample(md5):\n raise RuntimeError('%s: Not found! 
All items in sample_set must be in the datastore' % (md5))\n set_md5 = hashlib.md5(str(md5_list)).hexdigest()\n self._store_work_results({'md5_list':md5_list}, 'sample_set', set_md5)\n return set_md5", "docstring": "Store a sample set (which is just a list of md5s).\n\nNote: All md5s must already be in the data store.\n\nArgs:\nmd5_list: a list of the md5s in this set (all must exist in data store)\n\nReturns:\nThe md5 of the set (the actual md5 of the set)", "source": "juraj-google-style"} {"code": "def run(self, xml, **kwargs):\n \n kwargs['output'] = self.__graph__()\n if isinstance(xml, str):\n try:\n self.source = etree.XML(xml)\n except ValueError:\n try:\n self.source = etree.XML(xml.encode())\n except:\n raise ValueError(\"Cannot run error {}\".format(sys.exc_info()[0]))\n else:\n self.source = xml\n super(XMLProcessor, self).run(**kwargs)\n self.output = kwargs['output']\n return kwargs['output']", "docstring": "Method takes either an etree.ElementTree or raw XML text\nas the first argument.\n\nArgs:\nxml(etree.ElementTree or text", "source": "juraj-google-style"} {"code": "def parity(number):\n \n sval = -1\n if verbose:\n print(80*\"*\")\n try:\n assert number == int(round(number))\n m = Model()\n m.hideOutput()\n\n \n \n \n \n \n x = m.addVar(\"x\", vtype=\"I\", lb=None, ub=None) \n n = m.addVar(\"n\", vtype=\"I\", lb=None)\n s = m.addVar(\"s\", vtype=\"B\")\n \n \n \n \n\n m.addCons(x==number)\n\n \n m.addCons(s == x-2*n)\n m.setObjective(s)\n m.optimize()\n\n assert m.getStatus() == \"optimal\"\n boolmod = m.getVal(s) == m.getVal(x)%2\n if verbose:\n for v in m.getVars():\n print(\"%*s: %d\" % (fmtlen, v,m.getVal(v)))\n print(\"%*d%%2 == %d?\" % (fmtlen, m.getVal(x), m.getVal(s)))\n print(\"%*s\" % (fmtlen, boolmod))\n\n xval = m.getVal(x)\n sval = m.getVal(s)\n sstr = sdic[sval]\n print(\"%*d is %s\" % (fmtlen, xval, sstr))\n except (AssertionError, TypeError):\n print(\"%*s is neither even nor odd!\" % (fmtlen, number.__repr__()))\n finally:\n if verbose:\n print(80*\"*\")\n print(\"\")\n return sval", "docstring": "Prints if a value is even/odd/neither per each value in a example list\n\nThis example is made for newcomers and motivated by:\n- modulus is unsupported for pyscipopt.scip.Variable and int\n- variables are non-integer by default\nBased on this: #172#issuecomment-394644046\n\nArgs:\nnumber: value which parity is checked\n\nReturns:\nsval: 1 if number is odd, 0 if number is even, -1 if neither", "source": "juraj-google-style"} {"code": "def add_untagged(self, *responses: 'Response') -> None:\n \n for resp in responses:\n try:\n merge_key = resp.merge_key\n except TypeError:\n self._untagged.append(resp)\n else:\n key = (type(resp), merge_key)\n try:\n untagged_idx = self._mergeable[key]\n except KeyError:\n untagged_idx = len(self._untagged)\n self._mergeable[key] = untagged_idx\n self._untagged.append(resp)\n else:\n merged = self._untagged[untagged_idx].merge(resp)\n self._untagged[untagged_idx] = merged\n self._raw = None", "docstring": "Add an untagged response. 
These responses are shown before the\nparent response.\n\nArgs:\nresponses: The untagged responses to add.", "source": "juraj-google-style"} {"code": "def partition(self, id_):\n \n from ..orm import Partition as OrmPartition\n from sqlalchemy import or_\n from ..identity import PartialPartitionName\n\n if isinstance(id_, PartitionIdentity):\n id_ = id_.id_\n elif isinstance(id_, PartialPartitionName):\n id_ = id_.promote(self.bundle.identity.name)\n\n session = self.bundle.dataset._database.session\n q = session\\\n .query(OrmPartition)\\\n .filter(OrmPartition.d_vid == self.bundle.dataset.vid)\\\n .filter(or_(OrmPartition.id == str(id_).encode('ascii'),\n OrmPartition.vid == str(id_).encode('ascii')))\n\n try:\n orm_partition = q.one()\n return self.bundle.wrap_partition(orm_partition)\n except NoResultFound:\n orm_partition = None\n\n if not orm_partition:\n q = session\\\n .query(OrmPartition)\\\n .filter(OrmPartition.d_vid == self.bundle.dataset.vid)\\\n .filter(OrmPartition.name == str(id_).encode('ascii'))\n\n try:\n orm_partition = q.one()\n return self.bundle.wrap_partition(orm_partition)\n except NoResultFound:\n orm_partition = None\n\n return orm_partition", "docstring": "Get a partition by the id number.\n\nArguments:\nid_ -- a partition id value\n\nReturns:\nA partitions.Partition object\n\nThrows:\na Sqlalchemy exception if the partition either does not exist or\nis not unique\n\nBecause this method works on the bundle, the id_ ( without version information )\nis equivalent to the vid ( with version information )", "source": "juraj-google-style"} {"code": "def _OpenPathSpec(self, path_specification, ascii_codepage='cp1252'):\n if (not path_specification):\n return None\n file_entry = self._file_system.GetFileEntryByPathSpec(path_specification)\n if (file_entry is None):\n return None\n file_object = file_entry.GetFileObject()\n if (file_object is None):\n return None\n registry_file = dfwinreg_regf.REGFWinRegistryFile(ascii_codepage=ascii_codepage)\n try:\n registry_file.Open(file_object)\n except IOError as exception:\n logger.warning('Unable to open Windows Registry file with error: {0!s}'.format(exception))\n file_object.close()\n return None\n return registry_file", "docstring": "Opens the Windows Registry file specified by the path specification.\n\nArgs:\npath_specification (dfvfs.PathSpec): path specification.\nascii_codepage (Optional[str]): ASCII string codepage.\n\nReturns:\nWinRegistryFile: Windows Registry file or None.", "source": "codesearchnet"} {"code": "def fromcube(cube, template):\n array = dc.zeros_like(template)\n (y, x) = (array.y.values, array.x.values)\n (gy, gx) = (cube.y.values, cube.x.values)\n iy = interp1d(gy, np.arange(len(gy)))(y)\n ix = interp1d(gx, np.arange(len(gx)))(x)\n for ch in range(len(cube.ch)):\n array[(:, ch)] = map_coordinates(cube.values[(:, :, ch)], (ix, iy))\n return array", "docstring": "Covert a decode cube to a decode array.\n\nArgs:\ncube (decode.cube): Decode cube to be cast.\ntemplate (decode.array): Decode array whose shape the cube is cast on.\n\nReturns:\ndecode array (decode.array): Decode array.\n\nNotes:\nThis functions is under development.", "source": "codesearchnet"} {"code": "def invertible_flatten2_numpy(unflat_arrs, axis=0):\n cumlen_list = np.cumsum([arr.shape[axis] for arr in unflat_arrs])\n flat_list = np.concatenate(unflat_arrs, axis=axis)\n return (flat_list, cumlen_list)", "docstring": "more numpy version\n\nTODO: move to vtool\n\nArgs:\nunflat_arrs (list): list of ndarrays\n\nReturns:\ntuple: (flat_list, 
cumlen_list)\n\nCommandLine:\npython -m utool.util_list --test-invertible_flatten2_numpy\n\nExample:\n>>> # ENABLE_DOCTET\n>>> from utool.util_list import * # NOQA\n>>> unflat_arrs = [np.array([1, 2, 1]), np.array([5, 9]), np.array([4])]\n>>> (flat_list, cumlen_list) = invertible_flatten2_numpy(unflat_arrs)\n>>> result = str((flat_list, cumlen_list))\n>>> print(result)\n(array([1, 2, 1, 5, 9, 4]), array([3, 5, 6]))", "source": "codesearchnet"} {"code": "def set_release_description(self, description, **kwargs):\n id = self.get_id().replace('/', '%2F')\n path = ('%s/%s/release' % (self.manager.path, id))\n data = {'description': description}\n if (self.release is None):\n try:\n server_data = self.manager.gitlab.http_post(path, post_data=data, **kwargs)\n except exc.GitlabHttpError as e:\n raise exc.GitlabCreateError(e.response_code, e.error_message)\n else:\n try:\n server_data = self.manager.gitlab.http_put(path, post_data=data, **kwargs)\n except exc.GitlabHttpError as e:\n raise exc.GitlabUpdateError(e.response_code, e.error_message)\n self.release = server_data", "docstring": "Set the release notes on the tag.\n\nIf the release doesn't exist yet, it will be created. If it already\nexists, its description will be updated.\n\nArgs:\ndescription (str): Description of the release.\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabCreateError: If the server fails to create the release\nGitlabUpdateError: If the server fails to update the release", "source": "codesearchnet"} {"code": "def load(fin, dtype=np.float32, max_vocab=None):\n vocab = {}\n arr = None\n i = 0\n for line in fin:\n if ((max_vocab is not None) and (i >= max_vocab)):\n break\n try:\n (token, v) = _parse_line(line, dtype)\n except (ValueError, IndexError):\n raise ParseError((b'Parsing error in line: ' + line))\n if (token in vocab):\n parse_warn((b'Duplicated vocabulary ' + token))\n continue\n if (arr is None):\n arr = np.array(v, dtype=dtype).reshape(1, (- 1))\n else:\n if (arr.shape[1] != len(v)):\n raise ParseError((b'Vector size did not match in line: ' + line))\n arr = np.append(arr, [v], axis=0)\n vocab[token] = i\n i += 1\n return (arr, vocab)", "docstring": "Load word embedding file.\n\nArgs:\nfin (File): File object to read. 
File should be open for reading ascii.\ndtype (numpy.dtype): Element data type to use for the array.\nmax_vocab (int): Number of vocabulary to read.\n\nReturns:\nnumpy.ndarray: Word embedding representation vectors\ndict: Mapping from words to vector indices.", "source": "codesearchnet"} {"code": "def normal_mean(data, variance):\n if (not isinstance(data, np.ndarray)):\n data = np.array(data)\n i_variance_2 = (1 / (variance ** 2))\n cmm = [0.0]\n cmm.extend(np.cumsum(data))\n cmm2 = [0.0]\n cmm2.extend(np.cumsum(np.abs(data)))\n\n def cost(start, end):\n ' Cost function for normal distribution with variable mean\\n\\n Args:\\n start (int): start index\\n end (int): end index\\n Returns:\\n float: Cost, from start to end\\n '\n cmm2_diff = (cmm2[end] - cmm2[start])\n cmm_diff = pow((cmm[end] - cmm[start]), 2)\n i_diff = (end - start)\n diff = (cmm2_diff - cmm_diff)\n return ((diff / i_diff) * i_variance_2)\n return cost", "docstring": "Creates a segment cost function for a time series with a\nNormal distribution with changing mean\n\nArgs:\ndata (:obj:`list` of float): 1D time series data\nvariance (float): variance\nReturns:\nfunction: Function with signature\n(int, int) -> float\nwhere the first arg is the starting index, and the second\nis the last arg. Returns the cost of that segment", "source": "codesearchnet"} {"code": "def normalize(x, axis=-1, order=2):\n l2 = np.atleast_1d(np.linalg.norm(x, order, axis))\n l2[l2 == 0] = 1\n return x / np.expand_dims(l2, axis)", "docstring": "Normalizes a Numpy array.\n\nArgs:\nx: Numpy array to normalize.\naxis: axis along which to normalize.\norder: Normalization order (e.g. `order=2` for L2 norm).\n\nReturns:\nA normalized copy of the array.", "source": "github-repos"} {"code": "def _invalid_triple_quote(self, quote, row, col=None):\n self.add_message('invalid-triple-quote', line=row, args=(quote, TRIPLE_QUOTE_OPTS.get(self.config.triple_quote)), **self.get_offset(col))", "docstring": "Add a message for an invalid triple quote.\n\nArgs:\nquote: The quote characters that were found.\nrow: The row number the quote characters were found on.\ncol: The column the quote characters were found on.", "source": "codesearchnet"} {"code": "def __parse(self, raw_string):\n \n self._args = []\n\n def escape_braces(origin_string):\n return origin_string.replace(\"{\", \"{{\").replace(\"}\", \"}}\")\n\n try:\n match_start_position = raw_string.index(\"$\", 0)\n begin_string = raw_string[0:match_start_position]\n self._string = escape_braces(begin_string)\n except ValueError:\n self._string = escape_braces(raw_string)\n return\n\n while match_start_position < len(raw_string):\n\n \n \n\n \n dollar_match = dolloar_regex_compile.match(raw_string, match_start_position)\n if dollar_match:\n match_start_position = dollar_match.end()\n self._string += \"$\"\n continue\n\n \n func_match = function_regex_compile.match(raw_string, match_start_position)\n if func_match:\n function_meta = parse_function_params(func_match.group(1))\n function_meta = {\n \"func_name\": func_match.group(1)\n }\n function_meta.update(parse_function_params(func_match.group(2)))\n lazy_func = LazyFunction(\n function_meta,\n self.functions_mapping,\n self.check_variables_set\n )\n self._args.append(lazy_func)\n match_start_position = func_match.end()\n self._string += \"{}\"\n continue\n\n \n var_match = variable_regex_compile.match(raw_string, match_start_position)\n if var_match:\n var_name = var_match.group(1) or var_match.group(2)\n \n if var_name not in self.check_variables_set:\n raise 
exceptions.VariableNotFound(var_name)\n\n self._args.append(var_name)\n match_start_position = var_match.end()\n self._string += \"{}\"\n continue\n\n curr_position = match_start_position\n try:\n \n match_start_position = raw_string.index(\"$\", curr_position+1)\n remain_string = raw_string[curr_position:match_start_position]\n except ValueError:\n remain_string = raw_string[curr_position:]\n \n match_start_position = len(raw_string)\n\n self._string += escape_braces(remain_string)", "docstring": "parse raw string, replace function and variable with {}\n\nArgs:\nraw_string(str): string with functions or varialbes\ne.g. \"ABC${func2($a, $b)}DE$c\"\n\nReturns:\nstring: \"ABC{}DE{}\"\nargs: [\"${func2($a, $b)}\", \"$c\"]", "source": "juraj-google-style"} {"code": "def view(self, filename=None, directory=None, cleanup=False):\n return self.render(filename=filename, directory=directory, view=True, cleanup=cleanup)", "docstring": "Save the source to file, open the rendered result in a viewer.\n\nArgs:\nfilename: Filename for saving the source (defaults to ``name`` + ``'.gv'``)\ndirectory: (Sub)directory for source saving and rendering.\ncleanup (bool): Delete the source file after rendering.\nReturns:\nThe (possibly relative) path of the rendered file.\nRaises:\ngraphviz.ExecutableNotFound: If the Graphviz executable is not found.\nsubprocess.CalledProcessError: If the exit status is non-zero.\nRuntimeError: If opening the viewer is not supported.\n\nShort-cut method for calling :meth:`.render` with ``view=True``.", "source": "codesearchnet"} {"code": "def _TravelTimes(self, triplist, index=0):\n\n def DistanceInTravelTime(dep_secs, arr_secs):\n t_dist = (arr_secs - dep_secs)\n if (t_dist < 0):\n t_dist = self._DUMMY_SEPARATOR\n return t_dist\n if (not triplist):\n return []\n if (0 < index < len(triplist)):\n trip = triplist[index]\n else:\n trip = triplist[0]\n t_dists2 = [DistanceInTravelTime(stop[3], tail[2]) for (stop, tail) in itertools.izip(trip.GetTimeStops(), trip.GetTimeStops()[1:])]\n return t_dists2", "docstring": "Calculate distances and plot stops.\n\nUses a timetable to approximate distances\nbetween stations\n\nArgs:\n# Class Trip is defined in transitfeed.py\ntriplist: [Trip, Trip, ...]\n# (Optional) Index of Triplist prefered for timetable Calculation\nindex: 3\n\nReturns:\n# One integer for each pair of stations\n# indicating the approximate distance\n[0,33,140, ... ,X]", "source": "codesearchnet"} {"code": "def module_import(module_path):\n \n try:\n \n module = __import__(module_path)\n\n \n \n components = module_path.split('.')\n\n \n \n \n \n for component in components[1:]:\n module = getattr(module, component)\n\n return module\n\n except ImportError:\n raise BadModulePathError(\n 'Unable to find module \"%s\".' 
% (module_path,))", "docstring": "Imports the module indicated in name\n\nArgs:\nmodule_path: string representing a module path such as\n'app.config' or 'app.extras.my_module'\nReturns:\nthe module matching name of the last component, ie: for\n'app.extras.my_module' it returns a\nreference to my_module\nRaises:\nBadModulePathError if the module is not found", "source": "juraj-google-style"} {"code": "def list(cls, session, endpoint_override=None, data=None):\n \n cls._check_implements('list')\n return cls(\n endpoint_override or '/%s.json' % cls.__endpoint__,\n data=data,\n session=session,\n )", "docstring": "Return records in a mailbox.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nendpoint_override (str, optional): Override the default\nendpoint using this.\ndata (dict, optional): Data to provide as request parameters.\n\nReturns:\nRequestPaginator(output_type=helpscout.BaseModel): Results\niterator.", "source": "juraj-google-style"} {"code": "def available_resources(self):\n available_resources_by_id = {}\n subscribe_clients = [redis_client.pubsub(ignore_subscribe_messages=True) for redis_client in self.redis_clients]\n for subscribe_client in subscribe_clients:\n subscribe_client.subscribe(ray.gcs_utils.XRAY_HEARTBEAT_CHANNEL)\n client_ids = self._live_client_ids()\n while (set(available_resources_by_id.keys()) != client_ids):\n for subscribe_client in subscribe_clients:\n raw_message = subscribe_client.get_message()\n if ((raw_message is None) or (raw_message['channel'] != ray.gcs_utils.XRAY_HEARTBEAT_CHANNEL)):\n continue\n data = raw_message['data']\n gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(data, 0)\n heartbeat_data = gcs_entries.Entries(0)\n message = ray.gcs_utils.HeartbeatTableData.GetRootAsHeartbeatTableData(heartbeat_data, 0)\n num_resources = message.ResourcesAvailableLabelLength()\n dynamic_resources = {}\n for i in range(num_resources):\n resource_id = decode(message.ResourcesAvailableLabel(i))\n dynamic_resources[resource_id] = message.ResourcesAvailableCapacity(i)\n client_id = ray.utils.binary_to_hex(message.ClientId())\n available_resources_by_id[client_id] = dynamic_resources\n client_ids = self._live_client_ids()\n for client_id in available_resources_by_id.keys():\n if (client_id not in client_ids):\n del available_resources_by_id[client_id]\n total_available_resources = defaultdict(int)\n for available_resources in available_resources_by_id.values():\n for (resource_id, num_available) in available_resources.items():\n total_available_resources[resource_id] += num_available\n for subscribe_client in subscribe_clients:\n subscribe_client.close()\n return dict(total_available_resources)", "docstring": "Get the current available cluster resources.\n\nThis is different from `cluster_resources` in that this will return\nidle (available) resources rather than total resources.\n\nNote that this information can grow stale as tasks start and finish.\n\nReturns:\nA dictionary mapping resource name to the total quantity of that\nresource in the cluster.", "source": "codesearchnet"} {"code": "def __init__(self, label, element=None, state=None):\n self.label = label\n self.state = state\n if element is not None:\n self.set_element(element)", "docstring": "Initialize a processing context object with an element and state.\n\nThe element represents one value from a PCollection that will be accessed\nby a DoFn object during pipeline execution, and state is an arbitrary object\nwhere counters and other pipeline state information can be 
passed in.\n\nDoFnProcessContext objects are also used as inputs to PartitionFn instances.\n\nArgs:\nlabel: label of the PCollection whose element is being processed.\nelement: element of a PCollection being processed using this context.\nstate: a DoFnState object with state to be passed in to the DoFn object.", "source": "github-repos"} {"code": "def __init__(self, prefix, section, filename=None):\n \n options = dict(prefix=prefix, section=section, filename=filename)\n\n super(Settings, self).__init__(**options)", "docstring": "Will try to read configuration from environment variables and ini\nfiles, if no value found in either of those ``None`` is\nreturned.\n\nArgs:\nprefix: The environment variable prefix.\nsection: The ini file section this configuration is scoped to\nfilename: The path to the ini file to use", "source": "juraj-google-style"} {"code": "def first(self):\n try:\n return next(self.data(sorted_by='energy', name='Sample'))\n except StopIteration:\n raise ValueError('{} is empty'.format(self.__class__.__name__))", "docstring": "Sample with the lowest-energy.\n\nRaises:\nValueError: If empty.\n\nExample:\n\n>>> sampleset = dimod.ExactSolver().sample_ising({'a': 1}, {('a', 'b'): 1})\n>>> sampleset.first\nSample(sample={'a': -1, 'b': 1}, energy=-2.0, num_occurrences=1)", "source": "codesearchnet"} {"code": "def GetLayerFromFeatureService(self, fs, layerName='', returnURLOnly=False):\n layers = None\n table = None\n layer = None\n sublayer = None\n try:\n layers = fs.layers\n if (((layers is None) or (len(layers) == 0)) and (fs.url is not None)):\n fs = arcrest.ags.FeatureService(url=fs.url)\n layers = fs.layers\n if (layers is not None):\n for layer in layers:\n if (layer.name == layerName):\n if returnURLOnly:\n return ((fs.url + '/') + str(layer.id))\n else:\n return layer\n elif (not (layer.subLayers is None)):\n for sublayer in layer.subLayers:\n if (sublayer == layerName):\n return sublayer\n if (fs.tables is not None):\n for table in fs.tables:\n if (table.name == layerName):\n if returnURLOnly:\n return ((fs.url + '/') + str(layer.id))\n else:\n return table\n return None\n except:\n (line, filename, synerror) = trace()\n raise common.ArcRestHelperError({'function': 'GetLayerFromFeatureService', 'line': line, 'filename': filename, 'synerror': synerror})\n finally:\n layers = None\n table = None\n layer = None\n sublayer = None\n del layers\n del table\n del layer\n del sublayer\n gc.collect()", "docstring": "Obtains a layer from a feature service by feature service reference.\n\nArgs:\nfs (FeatureService): The feature service from which to obtain the layer.\nlayerName (str): The name of the layer. Defaults to ``\"\"``.\nreturnURLOnly (bool): A boolean value to return the URL of the layer. 
Defaults to ``False``.\nReturns:\nWhen ``returnURLOnly`` is ``True``, the URL of the layer is returned.\n\nWhen ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`.", "source": "codesearchnet"} {"code": "def __init__(\n self,\n names,\n aggregation_type='concat',\n axis=1,\n named_tensors=None,\n scope='input',\n summary_labels=()\n ):\n \n self.names = names\n self.aggregation_type = aggregation_type\n self.axis = axis\n super(Input, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "Input layer.\n\nArgs:\nnames: A list of strings that name the inputs to merge\naxis: Axis to merge the inputs", "source": "juraj-google-style"} {"code": "def dbmax20years(self, value=None):\n \n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float '\n 'for field `dbmax20years`'.format(value))\n\n self._dbmax20years = value", "docstring": "Corresponds to IDD Field `dbmax20years`\n20-year return period values for maximum extreme dry-bulb temperature\n\nArgs:\nvalue (float): value for IDD Field `dbmax20years`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"} {"code": "def __setitem__(self, key, value):\n \n if key is None:\n key = self.default_key(value)\n if key in self:\n raise KeyError(\n \"key %s already registered in registry %s\" % (key, self._name))\n if not callable(value):\n raise ValueError(\"value must be callable\")\n self.validate(key, value)\n self._registry[key] = value\n self.on_set(key, value)", "docstring": "Validate, set, and (if successful) call `on_set` for the given item.\n\nArgs:\nkey: key to store value under. 
If `None`, `self.default_key(value)` is\nused.\nvalue: callable stored under the given key.\n\nRaises:\nKeyError: if key is already in registry.", "source": "juraj-google-style"} {"code": "def _get_blocks_containing_index(self, axis, index):\n if (not axis):\n ErrorMessage.catch_bugs_and_request_email((index > sum(self.block_widths)))\n cumulative_column_widths = np.array(self.block_widths).cumsum()\n block_idx = int(np.digitize(index, cumulative_column_widths))\n if (block_idx == len(cumulative_column_widths)):\n block_idx -= 1\n internal_idx = (index if (not block_idx) else (index - cumulative_column_widths[(block_idx - 1)]))\n else:\n ErrorMessage.catch_bugs_and_request_email((index > sum(self.block_lengths)))\n cumulative_row_lengths = np.array(self.block_lengths).cumsum()\n block_idx = int(np.digitize(index, cumulative_row_lengths))\n internal_idx = (index if (not block_idx) else (index - cumulative_row_lengths[(block_idx - 1)]))\n return (block_idx, internal_idx)", "docstring": "Convert a global index to a block index and local index.\n\nNote: This method is primarily used to convert a global index into a\npartition index (along the axis provided) and local index (useful\nfor `iloc` or similar operations.\n\nArgs:\naxis: The axis along which to get the indices\n(0 - columns, 1 - rows)\nindex: The global index to convert.\n\nReturns:\nA tuple containing (block index and internal index).", "source": "codesearchnet"} {"code": "def _get_job_resources(args):\n \n logging = param_util.build_logging_param(\n args.logging) if args.logging else None\n timeout = param_util.timeout_in_seconds(args.timeout)\n log_interval = param_util.log_interval_in_seconds(args.log_interval)\n\n return job_model.Resources(\n min_cores=args.min_cores,\n min_ram=args.min_ram,\n machine_type=args.machine_type,\n disk_size=args.disk_size,\n disk_type=args.disk_type,\n boot_disk_size=args.boot_disk_size,\n preemptible=args.preemptible,\n image=args.image,\n regions=args.regions,\n zones=args.zones,\n logging=logging,\n logging_path=None,\n service_account=args.service_account,\n scopes=args.scopes,\n keep_alive=args.keep_alive,\n cpu_platform=args.cpu_platform,\n network=args.network,\n subnetwork=args.subnetwork,\n use_private_address=args.use_private_address,\n accelerator_type=args.accelerator_type,\n accelerator_count=args.accelerator_count,\n nvidia_driver_version=args.nvidia_driver_version,\n timeout=timeout,\n log_interval=log_interval,\n ssh=args.ssh)", "docstring": "Extract job-global resources requirements from input args.\n\nArgs:\nargs: parsed command-line arguments\n\nReturns:\nResources object containing the requested resources for the job", "source": "juraj-google-style"} {"code": "def get_palette(self, num_labels: int) -> List[Tuple[int, int]]:\n return build_palette(num_labels)", "docstring": "Build a palette to map the prompt mask from a single channel to a 3 channel RGB.\n\nArgs:\nnum_labels (`int`):\nNumber of classes in the segmentation task (excluding the background).\n\nReturns:\n`List[Tuple[int, int]]`: Palette to map the prompt mask from a single channel to a 3 channel RGB.", "source": "github-repos"} {"code": "def run_step(context):\n \n logger.debug(\"started\")\n\n context.clear()\n logger.info(f\"Context wiped. 
New context size: {len(context)}\")\n\n logger.debug(\"done\")", "docstring": "Wipe the entire context.\n\nArgs:\nContext is a dictionary or dictionary-like.\nDoes not require any specific keys in context.", "source": "juraj-google-style"} {"code": "def from_nested_row_splits(cls, flat_values, nested_row_splits, name=None, validate=True):\n if not isinstance(validate, bool):\n raise TypeError(f'Argument `validate` must have type bool. Received {validate}.')\n if isinstance(nested_row_splits, tensor_lib.Tensor):\n raise TypeError(f'Argument `nested_row_splits` must be a list of Tensors. Received {nested_row_splits}.')\n with ops.name_scope(name, 'RaggedFromNestedRowSplits', [flat_values] + list(nested_row_splits)):\n result = flat_values\n for splits in reversed(nested_row_splits):\n result = cls.from_row_splits(result, splits, validate=validate)\n return result", "docstring": "Creates a `RaggedTensor` from a nested list of `row_splits` tensors.\n\nEquivalent to:\n\n```python\nresult = flat_values\nfor row_splits in reversed(nested_row_splits):\nresult = from_row_splits(result, row_splits)\n```\n\nArgs:\nflat_values: A potentially ragged tensor.\nnested_row_splits: A list of 1-D integer tensors. The `i`th tensor is\nused as the `row_splits` for the `i`th ragged dimension.\nname: A name prefix for the RaggedTensor (optional).\nvalidate: If true, then use assertions to check that the arguments form\na valid `RaggedTensor`. Note: these assertions incur a runtime cost,\nsince they must be checked for each tensor value.\n\nReturns:\nA `RaggedTensor` (or `flat_values` if `nested_row_splits` is empty).", "source": "github-repos"} {"code": "def regex(self, *patterns, **kwargs):\n start = kwargs.pop('start', 0)\n stop = kwargs.pop('stop', None)\n keys_only = kwargs.pop('keys_only', False)\n flags = kwargs.pop('flags', 0)\n results = {pattern: [] for pattern in patterns}\n stop = (stop if (stop is not None) else (- 1))\n for (i, line) in enumerate(self[start:stop]):\n for pattern in patterns:\n grps = re.search(pattern, line, flags=flags)\n if (grps and keys_only):\n results[pattern].append(i)\n elif (grps and grps.groups()):\n for group in grps.groups():\n results[pattern].append((i, group))\n elif grps:\n results[pattern].append((i, line))\n if (len(patterns) == 1):\n return results[patterns[0]]\n return results", "docstring": "Search the editor for lines matching the regular expression.\nre.MULTILINE is not currently supported.\n\nArgs:\n\\*patterns: Regular expressions to search each line for\nkeys_only (bool): Only return keys\nflags (re.FLAG): flags passed to re.search\n\nReturns:\nresults (dict): Dictionary of pattern keys, line values (or groups - default)", "source": "codesearchnet"} {"code": "def visit_inner_types(type_constraint, visitor, visitor_arg):\n if isinstance(type_constraint, TypeConstraint):\n return type_constraint.visit(visitor, visitor_arg)\n return visitor(type_constraint, visitor_arg)", "docstring": "Visitor pattern to visit all inner types of a type constraint.\n\nArgs:\ntype_constraint: A type constraint or a type.\nvisitor: A callable invoked for all nodes in the type tree comprising a\ncomposite type. 
The visitor will be called with the node visited and the\nvisitor argument specified here.\nvisitor_arg: Visitor callback second argument.\n\nNote:\nRaise and capture a StopIteration to terminate the visit, e.g.\n\n```\ndef visitor(type_constraint, visitor_arg):\nif ...:\nraise StopIteration\n\ntry:\nvisit_inner_types(type_constraint, visitor, visitor_arg)\nexcept StopIteration:\npass\n```", "source": "github-repos"} {"code": "def set_room_topic(self, room_id, topic, timestamp=None):\n \n body = {\n \"topic\": topic\n }\n return self.send_state_event(room_id, \"m.room.topic\", body, timestamp=timestamp)", "docstring": "Perform PUT /rooms/$room_id/state/m.room.topic\nArgs:\nroom_id (str): The room ID\ntopic (str): The new room topic\ntimestamp (int): Set origin_server_ts (For application services only)", "source": "juraj-google-style"} {"code": "def clean_output_files(self, follow_parents=True):\n paths = []\n if (self.status != self.S_OK):\n logger.warning('Calling task.clean_output_files on a task whose status != S_OK')\n self.tmpdir.clean()\n except_exts = set()\n for child in self.get_children():\n if (child.status == self.S_OK):\n continue\n i = [dep.node for dep in child.deps].index(self)\n except_exts.update(child.deps[i].exts)\n exts = self.gc.exts.difference(except_exts)\n paths += self.outdir.remove_exts(exts)\n if (not follow_parents):\n return paths\n for parent in self.get_parents():\n ext2nodes = collections.defaultdict(list)\n for child in parent.get_children():\n if (child.status == child.S_OK):\n continue\n i = [d.node for d in child.deps].index(parent)\n for ext in child.deps[i].exts:\n ext2nodes[ext].append(child)\n except_exts = [k for (k, lst) in ext2nodes.items() if lst]\n exts = self.gc.exts.difference(except_exts)\n paths += parent.outdir.remove_exts(exts)\n self.history.info(('Removed files: %s' % paths))\n return paths", "docstring": "This method is called when the task reaches S_OK. 
It removes all the output files\nproduced by the task that are not needed by its children as well as the output files\nproduced by its parents if no other node needs them.\n\nArgs:\nfollow_parents: If true, the output files of the parents nodes will be removed if possible.\n\nReturn:\nlist with the absolute paths of the files that have been removed.", "source": "codesearchnet"} {"code": "def _distort_color(image, color_ordering=0, scope=None):\n with tf.name_scope(scope, 'distort_color', [image]):\n if (color_ordering == 0):\n image = tf.image.random_brightness(image, max_delta=(32.0 / 255.0))\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n elif (color_ordering == 1):\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_brightness(image, max_delta=(32.0 / 255.0))\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n elif (color_ordering == 2):\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n image = tf.image.random_brightness(image, max_delta=(32.0 / 255.0))\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n elif (color_ordering == 3):\n image = tf.image.random_hue(image, max_delta=0.2)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n image = tf.image.random_brightness(image, max_delta=(32.0 / 255.0))\n else:\n raise ValueError('color_ordering must be in [0, 3]')\n return tf.clip_by_value(image, 0.0, 1.0)", "docstring": "Distort the color of a Tensor image.\n\nEach color distortion is non-commutative and thus ordering of the color ops\nmatters. 
Ideally we would randomly permute the ordering of the color ops.\nRather then adding that level of complication, we select a distinct ordering\nof color ops for each preprocessing thread.\n\nArgs:\nimage: 3-D Tensor containing single image in [0, 1].\ncolor_ordering: Python int, a type of distortion (valid values: 0-3).\nscope: Optional scope for name_scope.\nReturns:\n3-D Tensor color-distorted image on range [0, 1]\nRaises:\nValueError: if color_ordering not in [0, 3]", "source": "codesearchnet"} {"code": "def pretty_repr_top_level(obj: Any, *, force: bool=False) -> str:\n if isinstance(obj, str):\n return repr(obj)\n elif py_utils.is_namedtuple(obj):\n return Lines.make_block(header=obj.__class__.__name__, content={field_name: getattr(obj, field_name) for field_name in type(obj)._fields})\n elif type(obj) in (list, tuple):\n lines = Lines.make_block(content=obj, braces='[' if isinstance(obj, list) else '(')\n if isinstance(obj, tuple) and len(obj) == 1:\n lines = lines.removesuffix(')') + ',)'\n return lines\n elif type(obj) is dict:\n return Lines.make_block(content={repr(k): v for k, v in obj.items()}, braces='{', equal=': ')\n elif _is_datclass(obj, force=force):\n all_fields = dataclasses.fields(obj)\n return Lines.make_block(header=obj.__class__.__name__, content={field.name: getattr(obj, field.name) for field in all_fields if field.repr})\n elif _is_attr(obj, force=force):\n import attr\n all_fields = attr.fields_dict(type(obj))\n return Lines.make_block(header=obj.__class__.__name__, content={field.name: getattr(obj, field.name) for field in all_fields.values() if field.repr})\n else:\n return repr(obj)", "docstring": "Pretty `repr(obj)` for nested list, dict, dataclasses,...\n\nThis version do not use `@reprlib.recursive_repr()` to avoid bug when used\ninside `__repr__`:\n\n```python\nclass A:\ndef __repr__(self):\nreturn epy.pretty_repr_top_level(self)\n\nepy.pretty_repr(A()) # Do not display `...`\n```\n\nArgs:\nobj: Object to display\nforce: Force the pretty_repr, even if the object has a custom `__repr__`.\nThis is useful when the `__repr__` implementation itself want to call\n`pretty_repr(self)`.\n\nReturns:\nRepr", "source": "github-repos"} {"code": "def __init__(self, context_reading_spec, sequence_reading_spec): \n \n self._context_reading_spec = context_reading_spec\n self._sequence_reading_spec = sequence_reading_spec", "docstring": "Constructs a TFRecordSequenceExampleAdapter.\n\nArgs:\ncontext_reading_spec: `dict`, feature name to tf.FixedLenFeature or\ntf.VarLenFeature for the context features. Passed to\ntf.io.parse_single_sequence_example.\nsequence_reading_spec: `dict`, feature name to tf.FixedLenFeature or\ntf.VarLenFeature for the sequence features. Passed to\ntf.io.parse_single_sequence_example.", "source": "juraj-google-style"} {"code": "def square(times: np.ndarray, amp: complex, period: float, phase: float=0) -> np.ndarray:\n x = ((times / period) + (phase / np.pi))\n return (amp * ((2 * ((2 * np.floor(x)) - np.floor((2 * x)))) + 1).astype(np.complex_))", "docstring": "Continuous square wave.\n\nArgs:\ntimes: Times to output wave for.\namp: Pulse amplitude. 
Wave range is [-amp, amp].\nperiod: Pulse period, units of dt.\nphase: Pulse phase.", "source": "codesearchnet"} {"code": "def openning(input_rasterfilename, times):\n input_raster = RasterUtilClass.read_raster(input_rasterfilename)\n openning_raster = input_raster\n for i in range(times):\n openning_raster = RasterUtilClass.raster_erosion(openning_raster)\n for i in range(times):\n openning_raster = RasterUtilClass.raster_dilation(openning_raster)\n return openning_raster", "docstring": "Do openning.\n\nOpenning: Erode firstly, then Dilate.\n\nArgs:\ninput_rasterfilename: input original raster image filename.\ntimes: Erode and Dilate times.\n\nReturns:\nopenning_raster: raster image after open.", "source": "codesearchnet"} {"code": "def copy_files(self, files=None, path_patterns=None, symbolic_links=True, root=None, conflicts='fail', **kwargs):\n _files = self.get(return_type='objects', **kwargs)\n if files:\n _files = list(set(files).intersection(_files))\n for f in _files:\n f.copy(path_patterns, symbolic_link=symbolic_links, root=self.root, conflicts=conflicts)", "docstring": "Copies one or more BIDSFiles to new locations defined by each\nBIDSFile's entities and the specified path_patterns.\n\nArgs:\nfiles (list): Optional list of BIDSFile objects to write out. If\nnone provided, use files from running a get() query using\nremaining **kwargs.\npath_patterns (str, list): Write patterns to pass to each file's\nwrite_file method.\nsymbolic_links (bool): Whether to copy each file as a symbolic link\nor a deep copy.\nroot (str): Optional root directory that all patterns are relative\nto. Defaults to current working directory.\nconflicts (str): Defines the desired action when the output path\nalready exists. Must be one of:\n'fail': raises an exception\n'skip' does nothing\n'overwrite': overwrites the existing file\n'append': adds a suffix to each file copy, starting with 1\nkwargs (kwargs): Optional key word arguments to pass into a get()\nquery.", "source": "codesearchnet"} {"code": "def __init__(self, initial_loss_scale=2 ** 15, increment_period=2000, multiplier=2.0):\n super(DynamicLossScale, self).__init__()\n self._initial_loss_scale = float(initial_loss_scale)\n self._increment_period = int(increment_period)\n self._multiplier = float(multiplier)\n self._current_loss_scale = self._add_weight(name='current_loss_scale', dtype=dtypes.float32, initial_value=self._initial_loss_scale)\n self._num_good_steps = self._add_weight(name='good_steps', dtype=dtypes.int64, initial_value=0)", "docstring": "Creates the dynamic loss scale.\n\nArgs:\ninitial_loss_scale: A Python float. The loss scale to use at the\nbeginning. It's better to start this at a very high number, because a\nloss scale that is too high gets lowered far more quickly than a loss\nscale that is too low gets raised. The default is 2 ** 15, which is\napproximately half the maximum float16 value.\nincrement_period: Increases loss scale every `increment_period`\nconsecutive steps that finite gradients are encountered. If a nonfinite\ngradient is encountered, the count is reset back to zero.\nmultiplier: The multiplier to use when increasing or decreasing the loss\nscale.", "source": "github-repos"} {"code": "def to_dict(ramons, flatten=False):\n if (type(ramons) is not list):\n ramons = [ramons]\n out_ramons = {}\n for r in ramons:\n out_ramons[r.id] = {'id': r.id, 'type': _reverse_ramon_types[type(r)], 'metadata': vars(r)}\n return out_ramons", "docstring": "Converts a RAMON object list to a JSON-style dictionary. 
Useful for going\nfrom an array of RAMONs to a dictionary, indexed by ID.\n\nArguments:\nramons (RAMON[]): A list of RAMON objects\nflatten (boolean: False): Not implemented\n\nReturns:\ndict: A python dictionary of RAMON objects.", "source": "codesearchnet"} {"code": "def _ExpandDirectories(filenames):\n \n expanded = set()\n for filename in filenames:\n if not os.path.isdir(filename):\n expanded.add(filename)\n continue\n\n for root, _, files in os.walk(filename):\n for loopfile in files:\n fullname = os.path.join(root, loopfile)\n if fullname.startswith('.' + os.path.sep):\n fullname = fullname[len('.' + os.path.sep):]\n expanded.add(fullname)\n\n filtered = []\n for filename in expanded:\n if os.path.splitext(filename)[1][1:] in GetAllExtensions():\n filtered.append(filename)\n\n return filtered", "docstring": "Searches a list of filenames and replaces directories in the list with\nall files descending from those directories. Files with extensions not in\nthe valid extensions list are excluded.\n\nArgs:\nfilenames: A list of files or directories\n\nReturns:\nA list of all files that are members of filenames or descended from a\ndirectory in filenames", "source": "juraj-google-style"} {"code": "def _execute(self, command, data=None, unpack=True):\n if (not data):\n data = {}\n data.setdefault('element_id', self.element_id)\n return self._driver._execute(command, data, unpack)", "docstring": "Private method to execute command with data.\n\nArgs:\ncommand(Command): The defined command.\ndata(dict): The uri variable and body.\n\nReturns:\nThe unwrapped value field in the json response.", "source": "codesearchnet"} {"code": "def _get_ssh_config(config_path='~/.ssh/config'):\n ssh_config = paramiko.SSHConfig()\n try:\n with open(os.path.realpath(os.path.expanduser(config_path))) as f:\n ssh_config.parse(f)\n except IOError:\n pass\n return ssh_config", "docstring": "Extract the configuration located at ``config_path``.\n\nReturns:\nparamiko.SSHConfig: the configuration instance.", "source": "codesearchnet"} {"code": "def status(self):\n if self._future.running():\n _status = JobStatus.RUNNING\n elif self._future.cancelled():\n _status = JobStatus.CANCELLED\n elif self._future.done():\n _status = (JobStatus.DONE if (self._future.exception() is None) else JobStatus.ERROR)\n else:\n _status = JobStatus.INITIALIZING\n return _status", "docstring": "Gets the status of the job by querying the Python's future\n\nReturns:\nqiskit.providers.JobStatus: The current JobStatus\n\nRaises:\nJobError: If the future is in unexpected state\nconcurrent.futures.TimeoutError: if timeout occurred.", "source": "codesearchnet"} {"code": "def resolve_attr(obj, path):\n \n if not path:\n return obj\n head, _, tail = path.partition('.')\n head_obj = getattr(obj, head)\n return resolve_attr(head_obj, tail)", "docstring": "A recursive version of getattr for navigating dotted paths.\n\nArgs:\nobj: An object for which we want to retrieve a nested attribute.\npath: A dot separated string containing zero or more attribute names.\n\nReturns:\nThe attribute referred to by obj.a1.a2.a3...\n\nRaises:\nAttributeError: If there is no such attribute.", "source": "juraj-google-style"} {"code": "def svds_descending(M, k):\n \n u, s, vt = svds(M, k=k)\n \n u = u[:, ::-1]\n \n s = s[::-1]\n \n vt = vt[::-1, :]\n return u, np.diag(s), vt.T", "docstring": "In contrast to MATLAB, numpy's svds() arranges the singular\nvalues in ascending order. 
In order to have matching codes,\nwe wrap it around by a function which re-sorts the singular\nvalues and singular vectors.\nArgs:\nM: 2D numpy array; the matrix whose SVD is to be computed.\nk: Number of singular values to be computed.\n\nReturns:\nu, s, vt = svds(M, k=k)", "source": "juraj-google-style"} {"code": "def preprocess_input(features, target, train_config, preprocess_output_dir,\n model_type):\n \n\n target_name = train_config['target_column']\n key_name = train_config['key_column']\n\n \n \n \n \n \n with tf.name_scope('numerical_feature_preprocess'):\n if train_config['numerical_columns']:\n numerical_analysis_file = os.path.join(preprocess_output_dir,\n NUMERICAL_ANALYSIS)\n if not file_io.file_exists(numerical_analysis_file):\n raise ValueError('File %s not found in %s' %\n (NUMERICAL_ANALYSIS, preprocess_output_dir))\n\n numerical_anlysis = json.loads(\n python_portable_string(\n file_io.read_file_to_string(numerical_analysis_file)))\n\n for name in train_config['numerical_columns']:\n if name == target_name or name == key_name:\n continue\n\n transform_config = train_config['transforms'].get(name, {})\n transform_name = transform_config.get('transform', None)\n if transform_name == 'scale':\n value = float(transform_config.get('value', 1.0))\n features[name] = _scale_tensor(\n features[name],\n range_min=numerical_anlysis[name]['min'],\n range_max=numerical_anlysis[name]['max'],\n scale_min=-value,\n scale_max=value)\n elif transform_name == 'identity' or transform_name is None:\n pass\n else:\n raise ValueError(('For numerical variables, only scale '\n 'and identity are supported: '\n 'Error for %s') % name)\n\n \n if target is not None:\n with tf.name_scope('target_feature_preprocess'):\n if target_name in train_config['categorical_columns']:\n labels = train_config['vocab_stats'][target_name]['labels']\n table = tf.contrib.lookup.string_to_index_table_from_tensor(labels)\n target = table.lookup(target)\n \n\n \n \n with tf.name_scope('categorical_feature_preprocess'):\n for name in train_config['categorical_columns']:\n if name == key_name or name == target_name:\n continue\n transform_config = train_config['transforms'].get(name, {})\n transform_name = transform_config.get('transform', None)\n\n if is_dnn_model(model_type):\n if transform_name == 'embedding' or transform_name == 'one_hot' or transform_name is None:\n map_vocab = True\n else:\n raise ValueError('Unknown transform %s' % transform_name)\n elif is_linear_model(model_type):\n if (transform_name == 'one_hot' or transform_name is None):\n map_vocab = True\n elif transform_name == 'embedding':\n map_vocab = False\n else:\n raise ValueError('Unknown transform %s' % transform_name)\n if map_vocab:\n labels = train_config['vocab_stats'][name]['labels']\n table = tf.contrib.lookup.string_to_index_table_from_tensor(labels)\n features[name] = table.lookup(features[name])\n\n return features, target", "docstring": "Perform some transformations after reading in the input tensors.\n\nArgs:\nfeatures: dict of feature_name to tensor\ntarget: tensor\ntrain_config: our training config object\npreprocess_output_dir: folder should contain the vocab files.\nmodel_type: the tf model type.\n\nRaises:\nValueError: if wrong transforms are used\n\nReturns:\nNew features dict and new target tensor.", "source": "juraj-google-style"} {"code": "def ToStream(value):\n \n ms = StreamManager.GetStream()\n writer = BinaryWriter(ms)\n\n value.Serialize(writer)\n\n retVal = ms.getvalue()\n StreamManager.ReleaseStream(ms)\n\n return 
retVal", "docstring": "Serialize the given `value` to a an array of bytes.\n\nArgs:\nvalue (neo.IO.Mixins.SerializableMixin): object extending SerializableMixin.\n\nReturns:\nbytes: not hexlified", "source": "juraj-google-style"} {"code": "def check_usufy(self, query, **kwargs):\n \n data = self.launchQueryForMode(query=query, mode=\"usufy\")\n if self._somethingFound(data, mode=\"usufy\"):\n return data\n return None", "docstring": "Verifying a mailfy query in this platform.\n\nThis might be redefined in any class inheriting from Platform. The only\ncondition is that any of this should return a dictionary as defined.\n\nArgs:\n-----\nquery: The element to be searched.\nkwargs: Dictionary with extra parameters. Just in case.\n\nReturn:\n-------\nReturns the collected data if exists or None if not.", "source": "juraj-google-style"} {"code": "def split_pair(pair_string, separator, nullable_idx=1):\n \n\n pair = pair_string.split(separator, 1)\n if len(pair) == 1:\n if nullable_idx == 0:\n return [None, pair[0]]\n elif nullable_idx == 1:\n return [pair[0], None]\n else:\n raise IndexError('nullable_idx should be either 0 or 1.')\n else:\n return pair", "docstring": "Split a string into a pair, which can have one empty value.\n\nArgs:\npair_string: The string to be split.\nseparator: The separator to be used for splitting.\nnullable_idx: The location to be set to null if the separator is not in the\ninput string. Should be either 0 or 1.\n\nReturns:\nA list containing the pair.\n\nRaises:\nIndexError: If nullable_idx is not 0 or 1.", "source": "juraj-google-style"} {"code": "def _pool_one_shape(features_2d, area_width, area_height, batch_size, width, height, depth, fn=tf.reduce_max, name=None):\n with tf.name_scope(name, default_name='pool_one_shape'):\n images = []\n for y_shift in range(area_height):\n image_height = tf.maximum((((height - area_height) + 1) + y_shift), 0)\n for x_shift in range(area_width):\n image_width = tf.maximum((((width - area_width) + 1) + x_shift), 0)\n area = features_2d[(:, y_shift:image_height, x_shift:image_width, :)]\n flatten_area = tf.reshape(area, [batch_size, (- 1), depth, 1])\n images.append(flatten_area)\n image_tensor = tf.concat(images, axis=3)\n max_tensor = fn(image_tensor, axis=3)\n return max_tensor", "docstring": "Pools for an area in features_2d.\n\nArgs:\nfeatures_2d: a Tensor in a shape of [batch_size, height, width, depth].\narea_width: the max width allowed for an area.\narea_height: the max height allowed for an area.\nbatch_size: the batch size.\nwidth: the width of the memory.\nheight: the height of the memory.\ndepth: the depth of the features.\nfn: the TF function for the pooling.\nname: the op name.\nReturns:\npool_tensor: A Tensor of shape [batch_size, num_areas, depth]", "source": "codesearchnet"} {"code": "def raise_for_status(\n status: int, headers: MutableMapping, data: MutableMapping\n) -> None:\n \n if status != 200:\n if status == 429:\n\n if isinstance(data, str):\n error = data\n else:\n error = data.get(\"error\", \"ratelimited\")\n\n try:\n retry_after = int(headers.get(\"Retry-After\", 1))\n except ValueError:\n retry_after = 1\n raise exceptions.RateLimited(retry_after, error, status, headers, data)\n else:\n raise exceptions.HTTPException(status, headers, data)", "docstring": "Check request response status\n\nArgs:\nstatus: Response status\nheaders: Response headers\ndata: Response data\n\nRaises:\n:class:`slack.exceptions.RateLimited`: For 429 status code\n:class:`slack.exceptions:HTTPException`:", "source": 
"juraj-google-style"} {"code": "def annotate_and_average(self, gpl, expression_column, group_by_column, rename=True, force=False, merge_on_column=None, gsm_on=None, gpl_on=None):\n if ((gpl.name != self.metadata['platform_id'][0]) and (not force)):\n raise KeyError((('Platforms from GSM (%s) and from GPL (%s)' % (gpl.name, self.metadata['platform_id'])) + ' are incompatible. Use force=True to use this GPL.'))\n if ((merge_on_column is None) and (gpl_on is None) and (gsm_on is None)):\n raise Exception('You have to provide one of the two: merge_on_column or gpl_on and gsm_on parameters')\n if merge_on_column:\n logger.info('merge_on_column is not None. Using this option.')\n tmp_data = self.table.merge(gpl.table, on=merge_on_column, how='outer')\n tmp_data = tmp_data.groupby(group_by_column).mean()[[expression_column]]\n else:\n if ((gpl_on is None) or (gsm_on is None)):\n raise Exception('Please provide both gpl_on and gsm_on or provide merge_on_column only')\n tmp_data = self.table.merge(gpl.table, left_on=gsm_on, right_on=gpl_on, how='outer')\n tmp_data = tmp_data.groupby(group_by_column).mean()[[expression_column]]\n if rename:\n tmp_data.columns = [self.name]\n return tmp_data", "docstring": "Annotate GSM table with provided GPL.\n\nArgs:\ngpl (:obj:`GEOTypes.GPL`): Platform for annotations\nexpression_column (:obj:`str`): Column name which \"expressions\"\nare represented\ngroup_by_column (:obj:`str`): The data will be grouped and averaged\nover this column and only this column will be kept\nrename (:obj:`bool`): Rename output column to the\nself.name. Defaults to True.\nforce (:obj:`bool`): If the name of the GPL does not match the platform\nname in GSM proceed anyway. Defaults to False.\nmerge_on_column (:obj:`str`): Column to merge the data\non. Defaults to None.\ngsm_on (:obj:`str`): In the case columns to merge are different in GSM\nand GPL use this column in GSM. Defaults to None.\ngpl_on (:obj:`str`): In the case columns to merge are different in GSM\nand GPL use this column in GPL. Defaults to None.\n\nReturns:\n:obj:`pandas.DataFrame`: Annotated data", "source": "codesearchnet"} {"code": "def coalesce(*series):\n series = [pd.Series(s) for s in series]\n coalescer = pd.concat(series, axis=1)\n min_nonna = np.argmin(pd.isnull(coalescer).values, axis=1)\n min_nonna = [coalescer.columns[i] for i in min_nonna]\n return coalescer.lookup(np.arange(coalescer.shape[0]), min_nonna)", "docstring": "Takes the first non-NaN value in order across the specified series,\nreturning a new series. 
Mimics the coalesce function in dplyr and SQL.\n\nArgs:\n*series: Series objects, typically represented in their symbolic form\n(like X.series).\n\nExample:\ndf = pd.DataFrame({\n'a':[1,np.nan,np.nan,np.nan,np.nan],\n'b':[2,3,np.nan,np.nan,np.nan],\n'c':[np.nan,np.nan,4,5,np.nan],\n'd':[6,7,8,9,np.nan]\n})\ndf >> transmute(coal=coalesce(X.a, X.b, X.c, X.d))\n\ncoal\n0 1\n1 3\n2 4\n3 5\n4 np.nan", "source": "codesearchnet"} {"code": "def make_parser():\n parser = argparse.ArgumentParser(usage='%(prog)s [options] input [input ...]')\n parser.register('action', 'flatten', _FlattenAction)\n modes = parser.add_mutually_exclusive_group()\n modes.add_argument('--tree', dest='tree', action='store_true', default=False, help='Display import tree.')\n modes.add_argument('--unresolved', dest='unresolved', action='store_true', default=False, help='Display unresolved dependencies.')\n modes.add_argument('--generate-config', dest='generate_config', type=str, action='store', default='', help='Write out a dummy configuration file.')\n parser.add_argument('-v', '--verbosity', dest='verbosity', type=int, action='store', default=1, help='Set logging level: 0=ERROR, 1=WARNING (default), 2=INFO.')\n parser.add_argument('--config', dest='config', type=str, action='store', default='', help='Configuration file.')\n parser.add_argument('--version', action='store_true', dest='version', default=None, help='Display pytype version and exit.')\n types = config.make_converters()\n for option in [(('-x', '--exclude'), {'nargs': '*', 'action': 'flatten'}), (('inputs',), {'metavar': 'input', 'nargs': '*', 'action': 'flatten'}), (('-k', '--keep-going'), {'action': 'store_true', 'type': None}), (('-j', '--jobs'), {'action': 'store', 'metavar': 'N'}), (('--platform',),), (('-P', '--pythonpath'),), (('-V', '--python-version'),)]:\n _add_file_argument(parser, types, *option)\n output = parser.add_mutually_exclusive_group()\n _add_file_argument(output, types, ('-o', '--output'))\n output.add_argument('-n', '--no-cache', dest='no_cache', action='store_true', default=False, help='Send pytype output to a temporary directory.')\n wrapper = datatypes.ParserWrapper(parser)\n pytype_config.add_basic_options(wrapper)\n pytype_config.add_feature_flags(wrapper)\n return Parser(parser, pytype_single_args=wrapper.actions)", "docstring": "Make parser for command line args.\n\nReturns:\nA Parser object.", "source": "github-repos"} {"code": "def decode_jwt_payload(self, access_token=None):\n \n c = self.get_credentials()\n jwt = access_token or c.access_token\n try:\n _, payload, _ = jwt.split('.') \n rem = len(payload) % 4\n if rem > 0: \n payload += '=' * (4 - rem)\n try:\n decoded_jwt = b64decode(payload).decode(\"utf-8\")\n except TypeError as e:\n raise PanCloudError(\n \"Failed to base64 decode JWT: %s\" % e)\n else:\n try:\n x = loads(decoded_jwt)\n except ValueError as e:\n raise PanCloudError(\"Invalid JSON: %s\" % e)\n except (AttributeError, ValueError) as e:\n raise PanCloudError(\"Invalid JWT: %s\" % e)\n\n return x", "docstring": "Extract payload field from JWT.\n\nArgs:\naccess_token (str): Access token to decode. Defaults to ``None``.\n\nReturns:\ndict: JSON object that contains the claims conveyed by the JWT.", "source": "juraj-google-style"} {"code": "def iplot_state_qsphere(rho, figsize=None):\n html_template = Template('\\n
\\n ')\n javascript_template = Template('\\n