{"code": "def in_flight_request_count(self, node_id=None):\n \n if node_id is not None:\n conn = self._conns.get(node_id)\n if conn is None:\n return 0\n return len(conn.in_flight_requests)\n else:\n return sum([len(conn.in_flight_requests)\n for conn in list(self._conns.values())])", "docstring": "Get the number of in-flight requests for a node or all nodes.\n\nArguments:\nnode_id (int, optional): a specific node to check. If unspecified,\nreturn the total for all nodes\n\nReturns:\nint: pending in-flight requests for the node, or all nodes if None", "source": "juraj-google-style"}
{"code": "def set_size(self, w, h):\n self.attributes['width'] = str(w)\n self.attributes['height'] = str(h)", "docstring": "Sets the rectangle size.\n\nArgs:\nw (int): width of the rectangle\nh (int): height of the rectangle", "source": "codesearchnet"}
{"code": "def validate_stats(stats_path, schema_path, anomalies_path):\n print('Validating schema against the computed statistics.')\n schema = taxi.read_schema(schema_path)\n stats = tfdv.load_statistics(stats_path)\n anomalies = tfdv.validate_statistics(stats, schema)\n print('Detected following anomalies:')\n print(text_format.MessageToString(anomalies))\n print('Writing anomalies to anomalies path.')\n file_io.write_string_to_file(anomalies_path, text_format.MessageToString(anomalies))", "docstring": "Validates the statistics against the schema and materializes anomalies.\n\nArgs:\nstats_path: Location of the stats used to infer the schema.\nschema_path: Location of the schema to be used for validation.\nanomalies_path: Location where the detected anomalies are materialized.", "source": "github-repos"}
{"code": "def failed_rows_with_errors(self) -> PCollection[Tuple[str, dict, list]]:\n self.validate([WriteToBigQuery.Method.STREAMING_INSERTS, WriteToBigQuery.Method.STORAGE_WRITE_API], 'FAILED_ROWS_WITH_ERRORS')\n return self._failed_rows_with_errors", "docstring": "A ``[STREAMING_INSERTS, STORAGE_WRITE_API]`` method attribute\n\nReturns:\nA PCollection of rows that failed when inserting to BigQuery,\nalong with their errors.\n\nRaises:\nAttributeError: if accessed with a write method\nbesides ``[STREAMING_INSERTS, STORAGE_WRITE_API]``.", "source": "github-repos"}
{"code": "def run(self, dag):\n \n \n for node in dag.op_nodes():\n basic_insts = ['measure', 'reset', 'barrier', 'snapshot']\n if node.name in basic_insts:\n \n \n \n continue\n if node.name in self.basis: \n continue\n\n \n rule = node.op.definition\n if not rule:\n raise QiskitError(\"Cannot unroll the circuit to the given basis, %s. \"\n \"No rule to expand instruction %s.\" %\n (str(self.basis), node.op.name))\n\n \n \n decomposition = DAGCircuit()\n decomposition.add_qreg(rule[0][1][0][0])\n for inst in rule:\n decomposition.apply_operation_back(*inst)\n\n unrolled_dag = self.run(decomposition) \n dag.substitute_node_with_dag(node, unrolled_dag)\n return dag", "docstring": "Expand all op nodes to the given basis.\n\nArgs:\ndag(DAGCircuit): input dag\n\nRaises:\nQiskitError: if unable to unroll given the basis due to undefined\ndecomposition rules (such as a bad basis) or excessive recursion.\n\nReturns:\nDAGCircuit: output unrolled dag", "source": "juraj-google-style"}
{"code": "def _make_static_axis_non_negative_list(axis, ndims):\n axis = distribution_util.make_non_negative_axis(axis, ndims)\n axis_const = tf.get_static_value(axis)\n if (axis_const is None):\n raise ValueError(('Expected argument `axis` to be statically available. Found: %s' % axis))\n axis = (axis_const + np.zeros([1], dtype=axis_const.dtype))\n return list((int(dim) for dim in axis))", "docstring": "Convert possibly negatively indexed axis to non-negative list of ints.\n\nArgs:\naxis: Integer Tensor.\nndims: Number of dimensions into which axis indexes.\n\nReturns:\nA list of non-negative Python integers.\n\nRaises:\nValueError: If `axis` is not statically defined.", "source": "codesearchnet"}
{"code": "def _dump(self):\n return {'topic': self.topic, 'headers': self._headers, 'id': self.id, 'body': self.body, 'queue': self.queue}", "docstring": "Dump message attributes.\n\nReturns:\ndict: A dictionary of message attributes.", "source": "codesearchnet"}
{"code": "def quad_genz_keister_18(order):\n order = sorted(GENZ_KEISTER_18.keys())[order]\n (abscissas, weights) = GENZ_KEISTER_18[order]\n abscissas = numpy.array(abscissas)\n weights = numpy.array(weights)\n weights /= numpy.sum(weights)\n abscissas *= numpy.sqrt(2)\n return (abscissas, weights)", "docstring": "Hermite Genz-Keister 18 rule.\n\nArgs:\norder (int):\nThe quadrature order. Must be in the interval (0, 8).\n\nReturns:\n(:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):\nAbscissas and weights\n\nExamples:\n>>> abscissas, weights = quad_genz_keister_18(1)\n>>> print(numpy.around(abscissas, 4))\n[-1.7321 0. 1.7321]\n>>> print(numpy.around(weights, 4))\n[0.1667 0.6667 0.1667]", "source": "codesearchnet"}
{"code": "def _buckets_nearly_equal(a_dist, b_dist):\n \n a_type, a_buckets = _detect_bucket_option(a_dist)\n b_type, b_buckets = _detect_bucket_option(b_dist)\n if a_type != b_type:\n return False\n elif a_type == u'linearBuckets':\n return _linear_buckets_nearly_equal(a_buckets, b_buckets)\n elif a_type == u'exponentialBuckets':\n return _exponential_buckets_nearly_equal(a_buckets, b_buckets)\n elif a_type == u'explicitBuckets':\n return _explicit_buckets_nearly_equal(a_buckets, b_buckets)\n else:\n return False", "docstring": "Determines whether two `Distributions` are nearly equal.\n\nArgs:\na_dist (:class:`Distribution`): an instance\nb_dist (:class:`Distribution`): another instance\n\nReturn:\nboolean: `True` if the two instances are approximately equal, otherwise\nFalse", "source": "juraj-google-style"}
{"code": "def __init__(self, dist_cls_a, dist_cls_b):\n self._key = (dist_cls_a, dist_cls_b)", "docstring": "Initialize the KL registrar.\n\nArgs:\ndist_cls_a: the class of the first argument of the KL divergence.\ndist_cls_b: the class of the second argument of the KL divergence.", "source": "github-repos"}
{"code": "def add_values_to_bundle_safe(connection, bundle, values):\n \n for value in values:\n try:\n connection.addValueToBundle(bundle, value)\n except YouTrackException as e:\n if e.response.status == 409:\n print(\"Value with name [ %s ] already exists in bundle [ %s ]\" %\n (utf8encode(value.name), utf8encode(bundle.name)))\n else:\n raise e", "docstring": "Adds values to specified bundle. Checks, whether each value already contains in bundle. If yes, it is not added.\n\nArgs:\nconnection: An opened Connection instance.\nbundle: Bundle instance to add values in.\nvalues: Values, that should be added in bundle.\n\nRaises:\nYouTrackException: if something is wrong with queries.", "source": "juraj-google-style"}
{"code": "def plot_script(self, script):\n \n\n script.plot([self.matplotlibwidget_1.figure, self.matplotlibwidget_2.figure])\n self.matplotlibwidget_1.draw()\n self.matplotlibwidget_2.draw()", "docstring": "Calls the plot function of the script, and redraws both plots\nArgs:\nscript: script to be plotted", "source": "juraj-google-style"}
{"code": "def set_servo_position(self, goalposition, goaltime, led):\n \n goalposition_msb = int(goalposition) >> 8\n goalposition_lsb = int(goalposition) & 0xff\n\n data = []\n data.append(0x0C)\n data.append(self.servoid)\n data.append(I_JOG_REQ)\n data.append(goalposition_lsb)\n data.append(goalposition_msb)\n data.append(led)\n data.append(self.servoid)\n data.append(goaltime)\n send_data(data)", "docstring": "Set the position of Herkulex\n\nEnable torque using torque_on function before calling this\n\nArgs:\n\ngoalposition (int): The desired position, min-0 & max-1023\ngoaltime (int): the time taken to move from present\nposition to goalposition\nled (int): the LED color\n0x00 LED off\n0x04 GREEN\n0x08 BLUE\n0x10 RED", "source": "juraj-google-style"}
{"code": "def __init__(self, counter_name, delta=1):\n \n self.counter_name = counter_name\n self.delta = delta", "docstring": "Constructor.\n\nArgs:\ncounter_name: name of the counter as string\ndelta: increment delta as int.", "source": "juraj-google-style"}
{"code": "def tent_transform(value: types.FloatTensor) -> types.FloatTensor:\n return tf.where(value < 0.5, 2 * value, 2 * (1 - value))", "docstring": "Returns the tent transform of a given `Tensor`.\n\n#### Examples\n\n```python\nimport tensorflow as tf\nimport tf_quant_finance as tff\n\n# Example: Commputing the tent transform of a given vector.\n\ntff.math.qmc.utils.tent_transform(tf.constant([0, .2, .4, .6, .8, 1]))\n# ==> tf.Tensor([0, .4, .8, .8, .4, 0.], shape=(4,), dtype=float32)\n```\n\nArgs:\nvalue: Scalar `Tensor` of real values in the `[0, 1)` range.\n\nReturns:\n`Tensor` with the same `shape` as `value` equal to `2 ** value` if `value`\nis less than `0.5` or `2 * (1 - value)` otherwise.", "source": "github-repos"}
{"code": "def list_media_services(access_token, subscription_id):\n \n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/providers/microsoft.media/mediaservices?api-version=', MEDIA_API])\n return do_get(endpoint, access_token)", "docstring": "List the media services in a subscription.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\n\nReturns:\nHTTP response. JSON body.", "source": "juraj-google-style"}
{"code": "def icon_description(self, **kwargs):\n params = {'language': util.language_code(kwargs.get('lang'))}\n result = self.make_request('icon_description', {}, **params)\n if (not util.check_result(result)):\n return (False, result.get('message', 'UNKNOWN ERROR'))\n values = util.response_list(result, 'Data')\n return (True, [emtype.IconDescription(**a) for a in values])", "docstring": "Obtain a list of elements that have an associated icon.\n\nArgs:\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[IconDescription]), or\nmessage string in case of error.", "source": "codesearchnet"}
{"code": "def has_succeed(self):\n status_code = self._response.status_code\n if (status_code in [HTTP_CODE_ZERO, HTTP_CODE_SUCCESS, HTTP_CODE_CREATED, HTTP_CODE_EMPTY, HTTP_CODE_MULTIPLE_CHOICES]):\n return True\n if (status_code in [HTTP_CODE_BAD_REQUEST, HTTP_CODE_UNAUTHORIZED, HTTP_CODE_PERMISSION_DENIED, HTTP_CODE_NOT_FOUND, HTTP_CODE_METHOD_NOT_ALLOWED, HTTP_CODE_CONNECTION_TIMEOUT, HTTP_CODE_CONFLICT, HTTP_CODE_PRECONDITION_FAILED, HTTP_CODE_INTERNAL_SERVER_ERROR, HTTP_CODE_SERVICE_UNAVAILABLE]):\n return False\n raise Exception('Unknown status code %s.', status_code)", "docstring": "Check if the connection has succeed\n\nReturns:\nReturns True if connection has succeed.\nFalse otherwise.", "source": "codesearchnet"}
{"code": "def train_on_batch(model, inputs, targets, sample_weights=None, output_loss_metrics=None):\n inputs = training_utils_v1.cast_to_model_input_dtypes(inputs, model)\n outs, total_loss, output_losses, masks = _process_single_batch(model, inputs, targets, sample_weights=sample_weights, training=True, output_loss_metrics=output_loss_metrics)\n if not isinstance(outs, list):\n outs = [outs]\n metrics_results = _eager_metrics_fn(model, outs, targets, sample_weights=sample_weights, masks=masks)\n total_loss = nest.flatten(total_loss)\n return {'total_loss': total_loss, 'output_losses': output_losses, 'metrics': metrics_results}", "docstring": "Calculates the loss and gradient updates for one input batch.\n\nArgs:\nmodel: Model whose loss has to be calculated.\ninputs: Input batch data.\ntargets: Target batch data.\nsample_weights: Sample weight batch data.\noutput_loss_metrics: List of metrics that are used to aggregated output\nloss values.\n\nReturns:\nDict with three items:\n'total_loss': list with a single tensor for overall loss,\n'output_losses': list of tensors for loss corresponding to each of the\nmodel output. Could be a empty list when model has only one output.\n'metrics': list of tensors for metric specified.", "source": "github-repos"}
{"code": "def loads(s, single=False):\n corpus = etree.fromstring(s)\n if single:\n ds = _deserialize_dmrs(next(iter(corpus)))\n else:\n ds = (_deserialize_dmrs(dmrs_elem) for dmrs_elem in corpus)\n return ds", "docstring": "Deserialize DMRX string representations\n\nArgs:\ns (str): a DMRX string\nsingle (bool): if `True`, only return the first Xmrs object\nReturns:\na generator of Xmrs objects (unless *single* is `True`)", "source": "codesearchnet"}
{"code": "def verify_reset_restored_iterator(self, ds_fn, num_outputs, break_point=None, sparse_tensors=False, verify_exhausted=True, assert_items_equal=False):\n if context.executing_eagerly():\n self.skipTest('Eager mode iteration do not support re-initialization.')\n break_point = num_outputs \n expected = self.gen_outputs(ds_fn, [], num_outputs, sparse_tensors=sparse_tensors, verify_exhausted=verify_exhausted)\n self.gen_outputs(ds_fn, [], break_point, sparse_tensors=sparse_tensors, verify_exhausted=False)\n actual = []\n with ops.Graph().as_default() as g:\n saver = self._import_meta_graph()\n init_op, get_next_op = self._get_iterator_ops_from_collection(ds_fn, sparse_tensors=sparse_tensors)\n get_next_op = remove_variants(get_next_op)\n with self.session(graph=g) as sess:\n self._initialize(init_op, sess)\n self._restore(saver, sess)\n self._initialize(init_op, sess)\n for _ in range(num_outputs):\n actual.append(sess.run(get_next_op))\n if verify_exhausted:\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next_op)\n self.match(expected, actual, assert_items_equal=assert_items_equal)", "docstring": "Attempts to re-initialize a restored iterator.\n\nThis is useful when restoring a training checkpoint during validation.\n\nArgs:\nds_fn: 0-argument function that returns a Dataset.\nnum_outputs: Total number of outputs expected from this Dataset.\nbreak_point: Break point. Optional. Defaults to num_outputs/2.\nsparse_tensors: Whether dataset is built from SparseTensor(s).\nverify_exhausted: Whether to verify that the iterator has been exhausted\nafter producing `num_outputs` elements.\nassert_items_equal: Tests the output has the expected elements regardless\nof order.\n\nRaises:\nAssertionError if any test fails.", "source": "github-repos"}
{"code": "class RandomInvert(BaseImagePreprocessingLayer):\n _USE_BASE_FACTOR = False\n _FACTOR_BOUNDS = (0, 1)\n\n def __init__(self, factor=1.0, value_range=(0, 255), seed=None, data_format=None, **kwargs):\n super().__init__(data_format=data_format, **kwargs)\n self._set_factor(factor)\n self.value_range = value_range\n self.seed = seed\n self.generator = self.backend.random.SeedGenerator(seed)\n\n def get_random_transformation(self, data, training=True, seed=None):\n if not training:\n return None\n if isinstance(data, dict):\n images = data['images']\n else:\n images = data\n seed = seed or self._get_seed_generator(self.backend._backend)\n images_shape = self.backend.shape(images)\n rank = len(images_shape)\n if rank == 3:\n batch_size = 1\n elif rank == 4:\n batch_size = images_shape[0]\n else:\n raise ValueError(f'Expected the input image to be rank 3 or 4. Received inputs.shape={images_shape}')\n invert_probability = self.backend.random.uniform(shape=(batch_size,), minval=self.factor[0], maxval=self.factor[1], seed=seed)\n random_threshold = self.backend.random.uniform(shape=(batch_size,), minval=0, maxval=1, seed=seed)\n apply_inversion = random_threshold < invert_probability\n return {'apply_inversion': apply_inversion}\n\n def transform_images(self, images, transformation, training=True):\n if training:\n images = self.backend.cast(images, self.compute_dtype)\n apply_inversion = transformation['apply_inversion']\n return self.backend.numpy.where(apply_inversion[:, None, None, None], self.value_range[1] - images, images)\n return images\n\n def transform_labels(self, labels, transformation, training=True):\n return labels\n\n def transform_bounding_boxes(self, bounding_boxes, transformation, training=True):\n return bounding_boxes\n\n def transform_segmentation_masks(self, segmentation_masks, transformation, training=True):\n return segmentation_masks\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {'factor': self.factor, 'value_range': self.value_range, 'seed': self.seed}\n base_config = super().get_config()\n return {**base_config, **config}", "docstring": "Preprocessing layer for random inversion of image colors.\n\nThis layer randomly inverts the colors of input images with a specified\nprobability range. When applied, each image has a chance of having its\ncolors inverted, where the pixel values are transformed to their\ncomplementary values. Images that are not selected for inversion\nremain unchanged.\n\nArgs:\nfactor: A single float or a tuple of two floats.\n`factor` controls the probability of inverting the image colors.\nIf a tuple is provided, the value is sampled between the two values\nfor each image, where `factor[0]` is the minimum and `factor[1]` is\nthe maximum probability. If a single float is provided, a value\nbetween `0.0` and the provided float is sampled.\nDefaults to `(0, 1)`.\nvalue_range: a tuple or a list of two elements. The first value\nrepresents the lower bound for values in passed images, the second\nrepresents the upper bound. Images passed to the layer should have\nvalues within `value_range`. Defaults to `(0, 255)`.\nseed: Integer. Used to create a random seed.", "source": "github-repos"}
{"code": "def scroll(self, x, y):\n assert isinstance(x, _INTTYPES), ('x must be an integer, got %s' % repr(x))\n assert isinstance(y, _INTTYPES), ('y must be an integer, got %s' % repr(x))\n\n def getSlide(x, length):\n 'get the parameters needed to scroll the console in the given\\n direction with x\\n returns (x, length, srcx)\\n '\n if (x > 0):\n srcx = 0\n length -= x\n elif (x < 0):\n srcx = abs(x)\n x = 0\n length -= srcx\n else:\n srcx = 0\n return (x, length, srcx)\n\n def getCover(x, length):\n 'return the (x, width) ranges of what is covered and uncovered'\n cover = (0, length)\n uncover = None\n if (x > 0):\n cover = (x, (length - x))\n uncover = (0, x)\n elif (x < 0):\n x = abs(x)\n cover = (0, (length - x))\n uncover = ((length - x), x)\n return (cover, uncover)\n (width, height) = self.get_size()\n if ((abs(x) >= width) or (abs(y) >= height)):\n return self.clear()\n (coverX, uncoverX) = getCover(x, width)\n (coverY, uncoverY) = getCover(y, height)\n (x, width, srcx) = getSlide(x, width)\n (y, height, srcy) = getSlide(y, height)\n self.blit(self, x, y, width, height, srcx, srcy)\n if uncoverX:\n self.draw_rect(uncoverX[0], coverY[0], uncoverX[1], coverY[1], 32, self._fg, self._bg)\n if uncoverY:\n self.draw_rect(coverX[0], uncoverY[0], coverX[1], uncoverY[1], 32, self._fg, self._bg)\n if (uncoverX and uncoverY):\n self.draw_rect(uncoverX[0], uncoverY[0], uncoverX[1], uncoverY[1], 32, self._fg, self._bg)", "docstring": "Scroll the contents of the console in the direction of x,y.\n\nUncovered areas will be cleared to the default background color.\nDoes not move the virutal cursor.\n\nArgs:\nx (int): Distance to scroll along the x-axis.\ny (int): Distance to scroll along the y-axis.\n\nReturns:\nIterator[Tuple[int, int]]: An iterator over the (x, y) coordinates\nof any tile uncovered after scrolling.\n\n.. seealso:: :any:`set_colors`", "source": "codesearchnet"}
{"code": "def clean_decodes(ids, vocab_size, eos_id=1):\n ret = []\n for i in ids:\n if (i == eos_id):\n break\n if (i >= vocab_size):\n break\n ret.append(int(i))\n return ret", "docstring": "Stop at EOS or padding or OOV.\n\nArgs:\nids: a list of integers\nvocab_size: an integer\neos_id: EOS id\n\nReturns:\na list of integers", "source": "codesearchnet"}
{"code": "def pivot_and_annotate(self, values, gpl, annotation_column, gpl_on='ID', gsm_on='ID_REF'):\n if isinstance(gpl, GPL):\n annotation_table = gpl.table\n elif isinstance(gpl, DataFrame):\n annotation_table = gpl\n else:\n raise TypeError('gpl should be a GPL object or a pandas.DataFrame')\n pivoted_samples = self.pivot_samples(values=values, index=gsm_on)\n ndf = pivoted_samples.reset_index().merge(annotation_table[[gpl_on, annotation_column]], left_on=gsm_on, right_on=gpl_on).set_index(gsm_on)\n del ndf[gpl_on]\n ndf.columns.name = 'name'\n return ndf", "docstring": "Annotate GSM with provided GPL.\n\nArgs:\nvalues (:obj:`str`): Column to use as values eg. \"VALUES\"\ngpl (:obj:`pandas.DataFrame` or :obj:`GEOparse.GPL`): A Platform or\nDataFrame to annotate with.\nannotation_column (:obj:`str`): Column in table for annotation.\ngpl_on (:obj:`str`, optional): Use this column in GPL to merge.\nDefaults to \"ID\".\ngsm_on (:obj:`str`, optional): Use this column in GSM to merge.\nDefaults to \"ID_REF\".\n\nReturns:\npandas.DataFrame: Pivoted and annotated table of results", "source": "codesearchnet"}
{"code": "def __init__(self, num_packs=1):\n if num_packs < 0:\n raise ValueError('NCCL all-reduce requires num_packs >= 0, but {} is specified'.format(num_packs))\n super(NcclAllReduce, self).__init__(all_reduce_alg='nccl', num_packs=num_packs)", "docstring": "Initializes the object.\n\nArgs:\nnum_packs: a non-negative integer. The number of packs to split values\ninto. If zero, no packing will be done.\n\nRaises:\nValueError: if `num_packs` is negative.", "source": "github-repos"}
{"code": "def _create_initial_state(self, initial_ids, initial_cache):\n \n \n cur_index = tf.constant(0)\n\n \n alive_seq = _expand_to_beam_size(initial_ids, self.beam_size)\n alive_seq = tf.expand_dims(alive_seq, axis=2)\n\n \n \n initial_log_probs = tf.constant(\n [[0.] + [-float(\"inf\")] * (self.beam_size - 1)])\n alive_log_probs = tf.tile(initial_log_probs, [self.batch_size, 1])\n\n \n \n alive_cache = nest.map_structure(\n lambda t: _expand_to_beam_size(t, self.beam_size), initial_cache)\n\n \n finished_seq = tf.zeros(tf.shape(alive_seq), tf.int32)\n\n \n finished_scores = tf.ones([self.batch_size, self.beam_size]) * -INF\n\n \n finished_flags = tf.zeros([self.batch_size, self.beam_size], tf.bool)\n\n \n state = {\n _StateKeys.CUR_INDEX: cur_index,\n _StateKeys.ALIVE_SEQ: alive_seq,\n _StateKeys.ALIVE_LOG_PROBS: alive_log_probs,\n _StateKeys.ALIVE_CACHE: alive_cache,\n _StateKeys.FINISHED_SEQ: finished_seq,\n _StateKeys.FINISHED_SCORES: finished_scores,\n _StateKeys.FINISHED_FLAGS: finished_flags\n }\n\n \n \n \n \n \n state_shape_invariants = {\n _StateKeys.CUR_INDEX: tf.TensorShape([]),\n _StateKeys.ALIVE_SEQ: tf.TensorShape([None, self.beam_size, None]),\n _StateKeys.ALIVE_LOG_PROBS: tf.TensorShape([None, self.beam_size]),\n _StateKeys.ALIVE_CACHE: nest.map_structure(\n _get_shape_keep_last_dim, alive_cache),\n _StateKeys.FINISHED_SEQ: tf.TensorShape([None, self.beam_size, None]),\n _StateKeys.FINISHED_SCORES: tf.TensorShape([None, self.beam_size]),\n _StateKeys.FINISHED_FLAGS: tf.TensorShape([None, self.beam_size])\n }\n\n return state, state_shape_invariants", "docstring": "Return initial state dictionary and its shape invariants.\n\nArgs:\ninitial_ids: initial ids to pass into the symbols_to_logits_fn.\nint tensor with shape [batch_size, 1]\ninitial_cache: dictionary storing values to be passed into the\nsymbols_to_logits_fn.\n\nReturns:\nstate and shape invariant dictionaries with keys from _StateKeys", "source": "juraj-google-style"}
{"code": "def variables(self):\n current_graph = ops.get_default_graph()\n\n def _from_current_graph(variable):\n if variable._in_graph_mode:\n return variable.op.graph is current_graph\n else:\n return variable._graph_key == current_graph._graph_key\n optimizer_variables = [v for v in self._non_slot_variables() if _from_current_graph(v)]\n for _, variable_dict in self._slots.items():\n for _, slot_for_variable in variable_dict.items():\n if _from_current_graph(slot_for_variable):\n optimizer_variables.append(slot_for_variable)\n return sorted(optimizer_variables, key=lambda v: v.name)", "docstring": "A list of variables which encode the current state of `Optimizer`.\n\nIncludes slot variables and additional global variables created by the\noptimizer in the current default graph.\n\nReturns:\nA list of variables.", "source": "github-repos"}
{"code": "def _get_bond_data(line):\n line = line.split()\n length = float(line[2])\n sites = line[0].replace('/', '-').split('-')\n site_indices = tuple(((int(ind) - 1) for ind in sites[1:4:2]))\n species = tuple((re.split('\\\\d+', spec)[0] for spec in sites[0:3:2]))\n label = ('%s%d-%s%d' % (species[0], (site_indices[0] + 1), species[1], (site_indices[1] + 1)))\n return (label, length, site_indices)", "docstring": "Subroutine to extract bond label, site indices, and length from\na COPL header line. The site indices are zero-based, so they\ncan be easily used with a Structure object.\n\nExample header line: Fe-1/Fe-1-tr(-1,-1,-1) : 2.482 Ang.\n\nArgs:\nline: line in the COHPCAR header describing the bond.\n\nReturns:\nThe bond label, the bond length and a tuple of the site\nindices.", "source": "codesearchnet"}
{"code": "def get_dns_zone_ids(env='dev', facing='internal'):\n client = boto3.Session(profile_name=env).client('route53')\n zones = client.list_hosted_zones_by_name(DNSName='.'.join([env, DOMAIN]))\n zone_ids = []\n for zone in zones['HostedZones']:\n LOG.debug('Found Hosted Zone: %s', zone)\n if ((facing == 'external') or zone['Config']['PrivateZone']):\n LOG.info('Using %(Id)s for \"%(Name)s\", %(Config)s', zone)\n zone_ids.append(zone['Id'])\n LOG.debug('Zone IDs: %s', zone_ids)\n return zone_ids", "docstring": "Get Route 53 Hosted Zone IDs for _env_.\n\nArgs:\nenv (str): Deployment environment.\nfacing (str): Type of ELB, external or internal.\n\nReturns:\nlist: Hosted Zone IDs for _env_. Only *PrivateZone* when _facing_ is\ninternal.", "source": "codesearchnet"}
{"code": "def _mutation(candidate, rate=0.1):\n sample_index = np.random.choice(len(candidate))\n sample = candidate[sample_index]\n idx_list = []\n for i in range(int(max((len(sample) * rate), 1))):\n idx = np.random.choice(len(sample))\n idx_list.append(idx)\n field = sample[idx]\n field[np.argmax(field)] = 0\n bit = np.random.choice(field.shape[0])\n field[bit] = 1\n logger.info((LOGGING_PREFIX + 'Perform mutation on %sth at index=%s'), sample_index, str(idx_list))\n return sample", "docstring": "Perform mutation action to candidates.\n\nFor example, randomly change 10% of original sample\n\nArgs:\ncandidate: List of candidate genes (encodings).\nrate: Percentage of mutation bits\n\nExamples:\n>>> # Genes that represent 3 parameters\n>>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])\n>>> new_gene = _mutation([gene1])\n>>> # new_gene could be the gene1 with the 3rd parameter changed\n>>> # new_gene[0] = gene1[0]\n>>> # new_gene[1] = gene1[1]\n>>> # new_gene[2] = [0, 1] != gene1[2]\n\nReturns:\nNew gene (encoding)", "source": "codesearchnet"}
{"code": "def build_listen(self, listen_node):\n \n proxy_name = listen_node.listen_header.proxy_name.text\n service_address_node = listen_node.listen_header.service_address\n\n \n config_block_lines = self.__build_config_block(\n listen_node.config_block)\n\n \n host, port = '', ''\n if isinstance(service_address_node, pegnode.ServiceAddress):\n host = service_address_node.host.text\n port = service_address_node.port.text\n else:\n \n \n for line in config_block_lines:\n if isinstance(line, config.Bind):\n host, port = line.host, line.port\n break\n else:\n raise Exception(\n 'Not specify host and port in `listen` definition')\n return config.Listen(\n name=proxy_name, host=host, port=port,\n config_block=config_block_lines)", "docstring": "parse `listen` sections, and return a config.Listen\n\nArgs:\nlisten_node (TreeNode): Description\n\nReturns:\nconfig.Listen: an object", "source": "juraj-google-style"}
{"code": "def parent_index(self, relations=None):\n g = None\n if (relations is None):\n g = self.get_graph()\n else:\n g = self.get_filtered_graph(relations)\n l = []\n for n in g:\n l.append(([n] + list(g.predecessors(n))))\n return l", "docstring": "Returns a mapping of nodes to all direct parents\n\nArguments\n---------\nrelations : list[str]\nlist of relations used to filter\n\nReturns:\nlist\nlist of lists [[CLASS_1, PARENT_1,1, ..., PARENT_1,N], [CLASS_2, PARENT_2,1, PARENT_2,2, ... ] ... ]", "source": "codesearchnet"}
{"code": "def remove(text, exclude):\n exclude = ''.join((str(symbol) for symbol in exclude))\n return text.translate(str.maketrans('', '', exclude))", "docstring": "Remove ``exclude`` symbols from ``text``.\n\nExample:\n>>> remove(\"example text\", string.whitespace)\n'exampletext'\n\nArgs:\ntext (str): The text to modify\nexclude (iterable): The symbols to exclude\n\nReturns:\n``text`` with ``exclude`` symbols removed", "source": "codesearchnet"}
{"code": "def _ParseEntryArrayObject(self, file_object, file_offset):\n entry_array_object_map = self._GetDataTypeMap('systemd_journal_entry_array_object')\n try:\n (entry_array_object, _) = self._ReadStructureFromFileObject(file_object, file_offset, entry_array_object_map)\n except (ValueError, errors.ParseError) as exception:\n raise errors.ParseError('Unable to parse entry array object at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))\n if (entry_array_object.object_type != self._OBJECT_TYPE_ENTRY_ARRAY):\n raise errors.ParseError('Unsupported object type: {0:d}.'.format(entry_array_object.object_type))\n if (entry_array_object.object_flags != 0):\n raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(entry_array_object.object_flags))\n return entry_array_object", "docstring": "Parses an entry array object.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object.\nfile_offset (int): offset of the entry array object relative to the start\nof the file-like object.\n\nReturns:\nsystemd_journal_entry_array_object: entry array object.\n\nRaises:\nParseError: if the entry array object cannot be parsed.", "source": "codesearchnet"}
{"code": "def merge_tags(left, right, factory=Tags):\n if isinstance(left, Mapping):\n tags = dict(left)\n elif hasattr(left, 'tags'):\n tags = _tags_to_dict(left.tags)\n else:\n tags = _tags_to_dict(left)\n if isinstance(right, Mapping):\n tags.update(right)\n elif hasattr(left, 'tags'):\n tags.update(_tags_to_dict(right.tags))\n else:\n tags.update(_tags_to_dict(right))\n return factory(**tags)", "docstring": "Merge two sets of tags into a new troposphere object\n\nArgs:\nleft (Union[dict, troposphere.Tags]): dictionary or Tags object to be\nmerged with lower priority\nright (Union[dict, troposphere.Tags]): dictionary or Tags object to be\nmerged with higher priority\nfactory (type): Type of object to create. Defaults to the troposphere\nTags class.", "source": "codesearchnet"}
{"code": "def write_uint32(self, value, little_endian=True):\n if little_endian:\n endian = '<'\n else:\n endian = '>'\n return self.pack(('%sI' % endian), value)", "docstring": "Pack the value as an unsigned integer and write 4 bytes to the stream.\n\nArgs:\nvalue:\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "codesearchnet"}
{"code": "def _ParseItems(self, parser_mediator, msiecf_file):\n \n format_version = msiecf_file.format_version\n\n decode_error = False\n cache_directories = []\n for cache_directory_name in iter(msiecf_file.cache_directories):\n try:\n cache_directory_name = cache_directory_name.decode('ascii')\n except UnicodeDecodeError:\n decode_error = True\n cache_directory_name = cache_directory_name.decode(\n 'ascii', errors='replace')\n\n cache_directories.append(cache_directory_name)\n\n if decode_error:\n parser_mediator.ProduceExtractionWarning((\n 'unable to decode cache directory names. Characters that cannot '\n 'be decoded will be replaced with \"?\" or \"\\\\ufffd\".'))\n\n for item_index in range(0, msiecf_file.number_of_items):\n try:\n msiecf_item = msiecf_file.get_item(item_index)\n if isinstance(msiecf_item, pymsiecf.leak):\n self._ParseLeak(parser_mediator, cache_directories, msiecf_item)\n\n elif isinstance(msiecf_item, pymsiecf.redirected):\n self._ParseRedirected(parser_mediator, msiecf_item)\n\n elif isinstance(msiecf_item, pymsiecf.url):\n self._ParseUrl(\n parser_mediator, format_version, cache_directories, msiecf_item)\n\n except IOError as exception:\n parser_mediator.ProduceExtractionWarning(\n 'Unable to parse item: {0:d} with error: {1!s}'.format(\n item_index, exception))\n\n for item_index in range(0, msiecf_file.number_of_recovered_items):\n try:\n msiecf_item = msiecf_file.get_recovered_item(item_index)\n if isinstance(msiecf_item, pymsiecf.leak):\n self._ParseLeak(\n parser_mediator, cache_directories, msiecf_item, recovered=True)\n\n elif isinstance(msiecf_item, pymsiecf.redirected):\n self._ParseRedirected(parser_mediator, msiecf_item, recovered=True)\n\n elif isinstance(msiecf_item, pymsiecf.url):\n self._ParseUrl(\n parser_mediator, format_version, cache_directories, msiecf_item,\n recovered=True)\n\n except IOError as exception:\n parser_mediator.ProduceExtractionWarning(\n 'Unable to parse recovered item: {0:d} with error: {1!s}'.format(\n item_index, exception))", "docstring": "Parses a MSIE Cache File (MSIECF) items.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nmsiecf_file (pymsiecf.file): MSIECF file.", "source": "juraj-google-style"}
{"code": "def trace_cpu(self, graph, tensor_fetches, op_fetches=None):\n if isinstance(graph, func_graph.FuncGraph) or isinstance(graph, function._FuncGraph):\n logging.warning('Tensor Tracer is not supported for tracing FuncGraphs. Ignoring tracing.')\n return tensor_fetches\n if graph in TensorTracer._traced_graphs:\n logging.warning('Graph is already rewritten with tensor tracer, ignoring multiple calls.')\n return tensor_fetches\n else:\n TensorTracer._traced_graphs.add(graph)\n self._parameters = tensor_tracer_flags.TTParameters()\n self._tt_config.device_type = _DEVICE_TYPE_CPU\n self._tt_config.num_replicas = 1\n self._tt_config.num_replicas_per_host = 1\n self._tt_config.num_hosts = 1\n self._replica_id = 0\n if self._parameters.graph_dump_path:\n graph_io.write_graph(graph, self._parameters.graph_dump_path, 'graph_before_tt.pbtxt')\n with graph.as_default():\n tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches, on_tpu=False)\n if self._parameters.graph_dump_path:\n graph_io.write_graph(graph, self._parameters.graph_dump_path, 'graph_after_tt.pbtxt')\n return tensor_fetches", "docstring": "Traces the tensors generated by CPU Ops in a TF graph.\n\nArgs:\ngraph: the graph of Ops executed on the CPU.\ntensor_fetches: a (list,tuple,or a single object) of tensor fetches\nreturned by model_fn given to session.run. Function must be provided\nwith as least one tensor to fetch.\nop_fetches: A list of op fetches returned by model_fn given to\nsession.run. op_fetches and tensor_fetches are used to determine the\nnodes that will be executed. Can be None.\n\nReturns:\ntensor_fetches: an exact copy of tensor_fetches that has additional\ndependencies.", "source": "github-repos"}
{"code": "def fitness(self, width, height): \n \n assert(width > 0 and height > 0)\n \n rect, max_rect = self._select_position(width, height)\n if rect is None:\n return None\n\n \n return self._rect_fitness(max_rect, rect.width, rect.height)", "docstring": "Metric used to rate how much space is wasted if a rectangle is placed.\nReturns a value greater or equal to zero, the smaller the value the more\n'fit' is the rectangle. If the rectangle can't be placed, returns None.\n\nArguments:\nwidth (int, float): Rectangle width\nheight (int, float): Rectangle height\n\nReturns:\nint, float: Rectangle fitness\nNone: Rectangle can't be placed", "source": "juraj-google-style"}
{"code": "def _create_events_writer(self, directory):\n \n total_size = 0\n events_files = self._fetch_events_files_on_disk()\n for file_name in events_files:\n file_path = os.path.join(self._events_directory, file_name)\n total_size += tf.io.gfile.stat(file_path).length\n\n if total_size >= self.total_file_size_cap_bytes:\n \n \n for file_name in events_files:\n if total_size < self.total_file_size_cap_bytes:\n break\n\n file_path = os.path.join(self._events_directory, file_name)\n file_size = tf.io.gfile.stat(file_path).length\n try:\n tf.io.gfile.remove(file_path)\n total_size -= file_size\n logger.info(\n \"Deleted %s because events files take up over %d bytes\",\n file_path, self.total_file_size_cap_bytes)\n except IOError as err:\n logger.error(\"Deleting %s failed: %s\", file_path, err)\n\n \n self._events_file_count += 1\n file_path = \"%s.%d.%d\" % (\n os.path.join(directory, DEBUGGER_EVENTS_FILE_STARTING_TEXT),\n time.time(), self._events_file_count)\n logger.info(\"Creating events file %s\", file_path)\n return pywrap_tensorflow.EventsWriter(tf.compat.as_bytes(file_path))", "docstring": "Creates a new events writer.\n\nArgs:\ndirectory: The directory in which to write files containing events.\n\nReturns:\nA new events writer, which corresponds to a new events file.", "source": "juraj-google-style"}
{"code": "def iter_cast(inputs, dst_type, return_type=None):\n \n if not isinstance(inputs, collections_abc.Iterable):\n raise TypeError('inputs must be an iterable object')\n if not isinstance(dst_type, type):\n raise TypeError('\"dst_type\" must be a valid type')\n\n out_iterable = six.moves.map(dst_type, inputs)\n\n if return_type is None:\n return out_iterable\n else:\n return return_type(out_iterable)", "docstring": "Cast elements of an iterable object into some type.\n\nArgs:\ninputs (Iterable): The input object.\ndst_type (type): Destination type.\nreturn_type (type, optional): If specified, the output object will be\nconverted to this type, otherwise an iterator.\n\nReturns:\niterator or specified type: The converted object.", "source": "juraj-google-style"}
{"code": "def __init__(\n self, password=None, parent=None, recovery_password=None,\n startup_key=None, **kwargs):\n \n if not parent:\n raise ValueError('Missing parent value.')\n\n super(BDEPathSpec, self).__init__(parent=parent, **kwargs)\n self.password = password\n self.recovery_password = recovery_password\n self.startup_key = startup_key", "docstring": "Initializes a path specification.\n\nNote that the BDE path specification must have a parent.\n\nArgs:\npassword (Optional[str]): password.\nparent (Optional[PathSpec]): parent path specification.\nrecovery_password (Optional[str]): recovery password.\nstartup_key (Optional[str]): name of the startup key file.\n\nRaises:\nValueError: when parent is not set.", "source": "juraj-google-style"}
{"code": "def install_antivirus(version=None, latest=False, synch=False, skip_commit=False):\n if ((not version) and (latest is False)):\n raise CommandExecutionError('Version option must not be none.')\n if (synch is True):\n s = 'yes'\n else:\n s = 'no'\n if (skip_commit is True):\n c = 'yes'\n else:\n c = 'no'\n if (latest is True):\n query = {'type': 'op', 'cmd': '