Dataset columns:

repository: string (11 distinct values)
repo_id: string (length 1 to 3)
target_module_path: string (length 16 to 72)
prompt: string (length 407 to 21.7k)
relavent_test_path: string (length 51 to 97)
full_function: string (length 2.6k to 33.8k)
function_name: string (length 3 to 49)
context-complexity: string (3 distinct values)
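
Assuming these rows are published as a Hugging Face dataset, the following is a minimal sketch of loading and inspecting this schema with the `datasets` library; the dataset identifier and split name are placeholders, not the real ones.

from datasets import load_dataset

# Placeholder Hub identifier and split -- substitute the real values.
ds = load_dataset("org/repo-level-code-completion", split="test")

row = ds[0]
print(row["repository"], row["repo_id"], row["context-complexity"])
print(row["target_module_path"])    # file that contains the target function
print(row["function_name"])         # e.g. "_imshow.imshow"
print(row["relavent_test_path"])    # test file exercising the function
print(row["prompt"][:300])          # signature + docstring to be completed
print(row["full_function"][:300])   # reference implementation
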
repository: plotly.py
repo_id: 17
target_module_path: packages/python/plotly/plotly/express/_imshow.py
prompt:
def imshow(
    img,
    zmin=None,
    zmax=None,
    origin=None,
    labels={},
    x=None,
    y=None,
    animation_frame=None,
    facet_col=None,
    facet_col_wrap=None,
    facet_col_spacing=None,
    facet_row_spacing=None,
    color_continuous_scale=None,
    color_continuous_midpoint=None,
    range_color=None,
    title=None,
    template=None,
    width=None,
    height=None,
    aspect=None,
    contrast_rescaling=None,
    binary_string=None,
    binary_backend="auto",
    binary_compression_level=4,
    binary_format="png",
    text_auto=False,
) -> go.Figure:
    """
    Display an image, i.e. data on a 2D regular raster.
    Parameters
    ----------
    img: array-like image, or xarray
        The image data. Supported array shapes are
        - (M, N): an image with scalar data. The data is visualized
          using a colormap.
        - (M, N, 3): an image with RGB values.
        - (M, N, 4): an image with RGBA values, i.e. including transparency.
    zmin, zmax : scalar or iterable, optional
        zmin and zmax define the scalar range that the colormap covers. By default,
        zmin and zmax correspond to the min and max values of the datatype for integer
        datatypes (ie [0-255] for uint8 images, [0, 65535] for uint16 images, etc.). For
        a multichannel image of floats, the max of the image is computed and zmax is the
        smallest power of 256 (1, 255, 65535) greater than this max value,
        with a 5% tolerance. For a single-channel image, the max of the image is used.
        Overridden by range_color.
    origin : str, 'upper' or 'lower' (default 'upper')
        position of the [0, 0] pixel of the image array, in the upper left or lower left
        corner. The convention 'upper' is typically used for matrices and images.
    labels : dict with str keys and str values (default `{}`)
        Sets names used in the figure for axis titles (keys ``x`` and ``y``),
        colorbar title and hoverlabel (key ``color``). The values should correspond
        to the desired label to be displayed. If ``img`` is an xarray, dimension
        names are used for axis titles, and long name for the colorbar title
        (unless overridden in ``labels``). Possible keys are: x, y, and color.
    x, y: list-like, optional
        x and y are used to label the axes of single-channel heatmap visualizations and
        their lengths must match the lengths of the second and first dimensions of the
        img argument. They are auto-populated if the input is an xarray.
    animation_frame: int or str, optional (default None)
        axis number along which the image array is sliced to create an animation plot.
        If `img` is an xarray, `animation_frame` can be the name of one of the dimensions.
    facet_col: int or str, optional (default None)
        axis number along which the image array is sliced to create a facetted plot.
        If `img` is an xarray, `facet_col` can be the name of one of the dimensions.
    facet_col_wrap: int
        Maximum number of facet columns. Wraps the column variable at this width,
        so that the column facets span multiple rows.
        Ignored if `facet_col` is None.
    facet_col_spacing: float between 0 and 1
        Spacing between facet columns, in paper units. Default is 0.02.
    facet_row_spacing: float between 0 and 1
        Spacing between facet rows created when ``facet_col_wrap`` is used, in
        paper units. Default is 0.07.
    color_continuous_scale : str or list of str
        colormap used to map scalar data to colors (for a 2D image). This parameter is
        not used for RGB or RGBA images. If a string is provided, it should be the name
        of a known color scale, and if a list is provided, it should be a list of CSS-
        compatible colors.
    color_continuous_midpoint : number
        If set, computes the bounds of the continuous color scale to have the desired
        midpoint. Overridden by range_color or zmin and zmax.
    range_color : list of two numbers
        If provided, overrides auto-scaling on the continuous color scale, including
        overriding `color_continuous_midpoint`. Also overrides zmin and zmax. Used only
        for single-channel images.
    title : str
        The figure title.
    template : str or dict or plotly.graph_objects.layout.Template instance
        The figure template name or definition.
    width : number
        The figure width in pixels.
    height: number
        The figure height in pixels.
    aspect: 'equal', 'auto', or None
      - 'equal': Ensures a pixel aspect ratio of 1 (square pixels)
      - 'auto': The axes are kept fixed and the aspect ratio of pixels is
        adjusted so that the data fit in the axes. In general, this will
        result in non-square pixels.
      - if None, 'equal' is used for numpy arrays and 'auto' for xarrays
        (which have typically heterogeneous coordinates)
    contrast_rescaling: 'minmax', 'infer', or None
        how to determine data values corresponding to the bounds of the color
        range, when zmin or zmax are not passed. If `minmax`, the min and max
        values of the image are used. If `infer`, a heuristic based on the image
        data type is used.
    binary_string: bool, default None
        if True, the image data are first rescaled and encoded as uint8 and
        then passed to plotly.js as a b64 PNG string. If False, data are passed
        unchanged as a numerical array. Setting to True may lead to performance
        gains, at the cost of a loss of precision depending on the original data
        type. If None, binary_string is set to True for multichannel (e.g. RGB)
        arrays, and to False for single-channel (2D) arrays. 2D arrays are
        represented as grayscale and with no colorbar if binary_string is
        True.
    binary_backend: str, 'auto' (default), 'pil' or 'pypng'
        Third-party package for the transformation of numpy arrays to
        png b64 strings. If 'auto', Pillow is used if installed,  otherwise
        pypng.
    binary_compression_level: int, between 0 and 9 (default 4)
        png compression level to be passed to the backend when transforming an
        array to a png b64 string. Increasing `binary_compression_level` decreases the
        size of the png string, but the compression step takes more time. For most
        images it is not worth using levels greater than 5, but it's possible to
        test `len(fig.data[0].source)` and to time the execution of `imshow` to
        tune the level of compression. 0 means no compression (not recommended).
    binary_format: str, 'png' (default) or 'jpg'
        compression format used to generate b64 string. 'png' is recommended
        since it uses lossless compression, but 'jpg' (lossy) compression can
        result in smaller binary strings for natural images.
    text_auto: bool or str (default `False`)
        If `True` or a string, single-channel `img` values will be displayed as text.
        A string like `'.2f'` will be interpreted as a `texttemplate` numeric formatting directive.
    Returns
    -------
    fig : graph_objects.Figure containing the displayed image
    See also
    --------
    plotly.graph_objects.Image : image trace
    plotly.graph_objects.Heatmap : heatmap trace
    Notes
    -----
    In order to update and customize the returned figure, use
    `go.Figure.update_traces` or `go.Figure.update_layout`.
    If an xarray is passed, dimensions names and coordinates are used for
    axes labels and ticks.
    """
 
relavent_test_path: /usr/src/app/target_test_cases/failed_tests__imshow.imshow.txt
full_function:
def imshow(
    img,
    zmin=None,
    zmax=None,
    origin=None,
    labels={},
    x=None,
    y=None,
    animation_frame=None,
    facet_col=None,
    facet_col_wrap=None,
    facet_col_spacing=None,
    facet_row_spacing=None,
    color_continuous_scale=None,
    color_continuous_midpoint=None,
    range_color=None,
    title=None,
    template=None,
    width=None,
    height=None,
    aspect=None,
    contrast_rescaling=None,
    binary_string=None,
    binary_backend="auto",
    binary_compression_level=4,
    binary_format="png",
    text_auto=False,
) -> go.Figure:
    """
    Display an image, i.e. data on a 2D regular raster.
    Parameters
    ----------
    img: array-like image, or xarray
        The image data. Supported array shapes are
        - (M, N): an image with scalar data. The data is visualized
          using a colormap.
        - (M, N, 3): an image with RGB values.
        - (M, N, 4): an image with RGBA values, i.e. including transparency.
    zmin, zmax : scalar or iterable, optional
        zmin and zmax define the scalar range that the colormap covers. By default,
        zmin and zmax correspond to the min and max values of the datatype for integer
        datatypes (ie [0-255] for uint8 images, [0, 65535] for uint16 images, etc.). For
        a multichannel image of floats, the max of the image is computed and zmax is the
        smallest power of 256 (1, 255, 65535) greater than this max value,
        with a 5% tolerance. For a single-channel image, the max of the image is used.
        Overridden by range_color.
    origin : str, 'upper' or 'lower' (default 'upper')
        position of the [0, 0] pixel of the image array, in the upper left or lower left
        corner. The convention 'upper' is typically used for matrices and images.
    labels : dict with str keys and str values (default `{}`)
        Sets names used in the figure for axis titles (keys ``x`` and ``y``),
        colorbar title and hoverlabel (key ``color``). The values should correspond
        to the desired label to be displayed. If ``img`` is an xarray, dimension
        names are used for axis titles, and long name for the colorbar title
        (unless overridden in ``labels``). Possible keys are: x, y, and color.
    x, y: list-like, optional
        x and y are used to label the axes of single-channel heatmap visualizations and
        their lengths must match the lengths of the second and first dimensions of the
        img argument. They are auto-populated if the input is an xarray.
    animation_frame: int or str, optional (default None)
        axis number along which the image array is sliced to create an animation plot.
        If `img` is an xarray, `animation_frame` can be the name of one of the dimensions.
    facet_col: int or str, optional (default None)
        axis number along which the image array is sliced to create a facetted plot.
        If `img` is an xarray, `facet_col` can be the name of one of the dimensions.
    facet_col_wrap: int
        Maximum number of facet columns. Wraps the column variable at this width,
        so that the column facets span multiple rows.
        Ignored if `facet_col` is None.
    facet_col_spacing: float between 0 and 1
        Spacing between facet columns, in paper units. Default is 0.02.
    facet_row_spacing: float between 0 and 1
        Spacing between facet rows created when ``facet_col_wrap`` is used, in
        paper units. Default is 0.07.
    color_continuous_scale : str or list of str
        colormap used to map scalar data to colors (for a 2D image). This parameter is
        not used for RGB or RGBA images. If a string is provided, it should be the name
        of a known color scale, and if a list is provided, it should be a list of CSS-
        compatible colors.
    color_continuous_midpoint : number
        If set, computes the bounds of the continuous color scale to have the desired
        midpoint. Overridden by range_color or zmin and zmax.
    range_color : list of two numbers
        If provided, overrides auto-scaling on the continuous color scale, including
        overriding `color_continuous_midpoint`. Also overrides zmin and zmax. Used only
        for single-channel images.
    title : str
        The figure title.
    template : str or dict or plotly.graph_objects.layout.Template instance
        The figure template name or definition.
    width : number
        The figure width in pixels.
    height: number
        The figure height in pixels.
    aspect: 'equal', 'auto', or None
      - 'equal': Ensures a pixel aspect ratio of 1 (square pixels)
      - 'auto': The axes are kept fixed and the aspect ratio of pixels is
        adjusted so that the data fit in the axes. In general, this will
        result in non-square pixels.
      - if None, 'equal' is used for numpy arrays and 'auto' for xarrays
        (which have typically heterogeneous coordinates)
    contrast_rescaling: 'minmax', 'infer', or None
        how to determine data values corresponding to the bounds of the color
        range, when zmin or zmax are not passed. If `minmax`, the min and max
        values of the image are used. If `infer`, a heuristic based on the image
        data type is used.
    binary_string: bool, default None
        if True, the image data are first rescaled and encoded as uint8 and
        then passed to plotly.js as a b64 PNG string. If False, data are passed
        unchanged as a numerical array. Setting to True may lead to performance
        gains, at the cost of a loss of precision depending on the original data
        type. If None, binary_string is set to True for multichannel (e.g. RGB)
        arrays, and to False for single-channel (2D) arrays. 2D arrays are
        represented as grayscale and with no colorbar if binary_string is
        True.
    binary_backend: str, 'auto' (default), 'pil' or 'pypng'
        Third-party package for the transformation of numpy arrays to
        png b64 strings. If 'auto', Pillow is used if installed,  otherwise
        pypng.
    binary_compression_level: int, between 0 and 9 (default 4)
        png compression level to be passed to the backend when transforming an
        array to a png b64 string. Increasing `binary_compression_level` decreases the
        size of the png string, but the compression step takes more time. For most
        images it is not worth using levels greater than 5, but it's possible to
        test `len(fig.data[0].source)` and to time the execution of `imshow` to
        tune the level of compression. 0 means no compression (not recommended).
    binary_format: str, 'png' (default) or 'jpg'
        compression format used to generate b64 string. 'png' is recommended
        since it uses lossless compression, but 'jpg' (lossy) compression can
        result in smaller binary strings for natural images.
    text_auto: bool or str (default `False`)
        If `True` or a string, single-channel `img` values will be displayed as text.
        A string like `'.2f'` will be interpreted as a `texttemplate` numeric formatting directive.
    Returns
    -------
    fig : graph_objects.Figure containing the displayed image
    See also
    --------
    plotly.graph_objects.Image : image trace
    plotly.graph_objects.Heatmap : heatmap trace
    Notes
    -----
    In order to update and customize the returned figure, use
    `go.Figure.update_traces` or `go.Figure.update_layout`.
    If an xarray is passed, dimensions names and coordinates are used for
    axes labels and ticks.
    """
    args = locals()
    apply_default_cascade(args)
    labels = labels.copy()
    nslices_facet = 1
    if facet_col is not None:
        if isinstance(facet_col, str):
            facet_col = img.dims.index(facet_col)
        nslices_facet = img.shape[facet_col]
        facet_slices = range(nslices_facet)
        ncols = int(facet_col_wrap) if facet_col_wrap is not None else nslices_facet
        nrows = (
            nslices_facet // ncols + 1
            if nslices_facet % ncols
            else nslices_facet // ncols
        )
    else:
        nrows = 1
        ncols = 1
    if animation_frame is not None:
        if isinstance(animation_frame, str):
            animation_frame = img.dims.index(animation_frame)
        nslices_animation = img.shape[animation_frame]
        animation_slices = range(nslices_animation)
    slice_dimensions = (facet_col is not None) + (
        animation_frame is not None
    )  # 0, 1, or 2
    facet_label = None
    animation_label = None
    img_is_xarray = False
    # ----- Define x and y, set labels if img is an xarray -------------------
    if xarray_imported and isinstance(img, xarray.DataArray):
        dims = list(img.dims)
        img_is_xarray = True
        pop_indexes = []
        if facet_col is not None:
            facet_slices = img.coords[img.dims[facet_col]].values
            pop_indexes.append(facet_col)
            facet_label = img.dims[facet_col]
        if animation_frame is not None:
            animation_slices = img.coords[img.dims[animation_frame]].values
            pop_indexes.append(animation_frame)
            animation_label = img.dims[animation_frame]
        # Remove indices in sorted order.
        for index in sorted(pop_indexes, reverse=True):
            _ = dims.pop(index)
        y_label, x_label = dims[0], dims[1]
        # np.datetime64 is not handled correctly by go.Heatmap
        for ax in [x_label, y_label]:
            if np.issubdtype(img.coords[ax].dtype, np.datetime64):
                img.coords[ax] = img.coords[ax].astype(str)
        if x is None:
            x = img.coords[x_label].values
        if y is None:
            y = img.coords[y_label].values
        if aspect is None:
            aspect = "auto"
        if labels.get("x", None) is None:
            labels["x"] = x_label
        if labels.get("y", None) is None:
            labels["y"] = y_label
        if labels.get("animation_frame", None) is None:
            labels["animation_frame"] = animation_label
        if labels.get("facet_col", None) is None:
            labels["facet_col"] = facet_label
        if labels.get("color", None) is None:
            labels["color"] = xarray.plot.utils.label_from_attrs(img)
            labels["color"] = labels["color"].replace("\n", "<br>")
    else:
        if hasattr(img, "columns") and hasattr(img.columns, "__len__"):
            if x is None:
                x = img.columns
            if labels.get("x", None) is None and hasattr(img.columns, "name"):
                labels["x"] = img.columns.name or ""
        if hasattr(img, "index") and hasattr(img.index, "__len__"):
            if y is None:
                y = img.index
            if labels.get("y", None) is None and hasattr(img.index, "name"):
                labels["y"] = img.index.name or ""
        if labels.get("x", None) is None:
            labels["x"] = ""
        if labels.get("y", None) is None:
            labels["y"] = ""
        if labels.get("color", None) is None:
            labels["color"] = ""
        if aspect is None:
            aspect = "equal"
    # --- Set the value of binary_string (forbidden for pandas)
    if isinstance(img, pd.DataFrame):
        if binary_string:
            raise ValueError("Binary strings cannot be used with pandas arrays")
        is_dataframe = True
    else:
        is_dataframe = False
    # --------------- Starting from here img is always a numpy array --------
    img = np.asanyarray(img)
    # Reshape array so that animation dimension comes first, then facets, then images
    if facet_col is not None:
        img = np.moveaxis(img, facet_col, 0)
        if animation_frame is not None and animation_frame < facet_col:
            animation_frame += 1
        facet_col = True
    if animation_frame is not None:
        img = np.moveaxis(img, animation_frame, 0)
        animation_frame = True
        args["animation_frame"] = (
            "animation_frame"
            if labels.get("animation_frame") is None
            else labels["animation_frame"]
        )
    iterables = ()
    if animation_frame is not None:
        iterables += (range(nslices_animation),)
    if facet_col is not None:
        iterables += (range(nslices_facet),)
    # Default behaviour of binary_string: True for RGB images, False for 2D
    if binary_string is None:
        binary_string = img.ndim >= (3 + slice_dimensions) and not is_dataframe
    # Cast bools to uint8 (also one byte)
    if img.dtype == bool:
        img = 255 * img.astype(np.uint8)
    if range_color is not None:
        zmin = range_color[0]
        zmax = range_color[1]
    # -------- Contrast rescaling: either minmax or infer ------------------
    if contrast_rescaling is None:
        contrast_rescaling = "minmax" if img.ndim == (2 + slice_dimensions) else "infer"
    # We try to set zmin and zmax only if necessary, because traces have good defaults
    if contrast_rescaling == "minmax":
        # When using binary_string and minmax we need to set zmin and zmax to rescale the image
        if (zmin is not None or binary_string) and zmax is None:
            zmax = img.max()
        if (zmax is not None or binary_string) and zmin is None:
            zmin = img.min()
    else:
        # For uint8 data and infer we let zmin and zmax to be None if passed as None
        if zmax is None and img.dtype != np.uint8:
            zmax = _infer_zmax_from_type(img)
        if zmin is None and zmax is not None:
            zmin = 0
    # For 2d data, use Heatmap trace, unless binary_string is True
    if img.ndim == 2 + slice_dimensions and not binary_string:
        y_index = slice_dimensions
        if y is not None and img.shape[y_index] != len(y):
            raise ValueError(
                "The length of the y vector must match the length of the first "
                + "dimension of the img matrix."
            )
        x_index = slice_dimensions + 1
        if x is not None and img.shape[x_index] != len(x):
            raise ValueError(
                "The length of the x vector must match the length of the second "
                + "dimension of the img matrix."
            )
        texttemplate = None
        if text_auto is True:
            texttemplate = "%{z}"
        elif text_auto is not False:
            texttemplate = "%{z:" + text_auto + "}"
        traces = [
            go.Heatmap(
                x=x,
                y=y,
                z=img[index_tup],
                coloraxis="coloraxis1",
                name=str(i),
                texttemplate=texttemplate,
            )
            for i, index_tup in enumerate(itertools.product(*iterables))
        ]
        autorange = True if origin == "lower" else "reversed"
        layout = dict(yaxis=dict(autorange=autorange))
        if aspect == "equal":
            layout["xaxis"] = dict(scaleanchor="y", constrain="domain")
            layout["yaxis"]["constrain"] = "domain"
        colorscale_validator = ColorscaleValidator("colorscale", "imshow")
        layout["coloraxis1"] = dict(
            colorscale=colorscale_validator.validate_coerce(
                args["color_continuous_scale"]
            ),
            cmid=color_continuous_midpoint,
            cmin=zmin,
            cmax=zmax,
        )
        if labels["color"]:
            layout["coloraxis1"]["colorbar"] = dict(title_text=labels["color"])
    # For 2D+RGB data, use Image trace
    elif (
        img.ndim >= 3
        and (img.shape[-1] in [3, 4] or slice_dimensions and binary_string)
    ) or (img.ndim == 2 and binary_string):
        rescale_image = True  # to check whether image has been modified
        if zmin is not None and zmax is not None:
            zmin, zmax = (
                _vectorize_zvalue(zmin, mode="min"),
                _vectorize_zvalue(zmax, mode="max"),
            )
        x0, y0, dx, dy = (None,) * 4
        error_msg_xarray = (
            "Non-numerical coordinates were passed with xarray `img`, but "
            "the Image trace cannot handle it. Please use `binary_string=False` "
            "for 2D data or pass instead the numpy array `img.values` to `px.imshow`."
        )
        if x is not None:
            x = np.asanyarray(x)
            if np.issubdtype(x.dtype, np.number):
                x0 = x[0]
                dx = x[1] - x[0]
            else:
                error_msg = (
                    error_msg_xarray
                    if img_is_xarray
                    else (
                        "Only numerical values are accepted for the `x` parameter "
                        "when an Image trace is used."
                    )
                )
                raise ValueError(error_msg)
        if y is not None:
            y = np.asanyarray(y)
            if np.issubdtype(y.dtype, np.number):
                y0 = y[0]
                dy = y[1] - y[0]
            else:
                error_msg = (
                    error_msg_xarray
                    if img_is_xarray
                    else (
                        "Only numerical values are accepted for the `y` parameter "
                        "when an Image trace is used."
                    )
                )
                raise ValueError(error_msg)
        if binary_string:
            if zmin is None and zmax is None:  # no rescaling, faster
                img_rescaled = img
                rescale_image = False
            elif img.ndim == 2 + slice_dimensions:  # single-channel image
                img_rescaled = rescale_intensity(
                    img, in_range=(zmin[0], zmax[0]), out_range=np.uint8
                )
            else:
                img_rescaled = np.stack(
                    [
                        rescale_intensity(
                            img[..., ch],
                            in_range=(zmin[ch], zmax[ch]),
                            out_range=np.uint8,
                        )
                        for ch in range(img.shape[-1])
                    ],
                    axis=-1,
                )
            img_str = [
                image_array_to_data_uri(
                    img_rescaled[index_tup],
                    backend=binary_backend,
                    compression=binary_compression_level,
                    ext=binary_format,
                )
                for index_tup in itertools.product(*iterables)
            ]
            traces = [
                go.Image(source=img_str_slice, name=str(i), x0=x0, y0=y0, dx=dx, dy=dy)
                for i, img_str_slice in enumerate(img_str)
            ]
        else:
            colormodel = "rgb" if img.shape[-1] == 3 else "rgba256"
            traces = [
                go.Image(
                    z=img[index_tup],
                    zmin=zmin,
                    zmax=zmax,
                    colormodel=colormodel,
                    x0=x0,
                    y0=y0,
                    dx=dx,
                    dy=dy,
                )
                for index_tup in itertools.product(*iterables)
            ]
        layout = {}
        if origin == "lower" or (dy is not None and dy < 0):
            layout["yaxis"] = dict(autorange=True)
        if dx is not None and dx < 0:
            layout["xaxis"] = dict(autorange="reversed")
    else:
        raise ValueError(
            "px.imshow only accepts 2D single-channel, RGB or RGBA images. "
            "An image of shape %s was provided. "
            "Alternatively, 3- or 4-D single or multichannel datasets can be "
            "visualized using the `facet_col` or/and `animation_frame` arguments."
            % str(img.shape)
        )
    # Now build figure
    col_labels = []
    if facet_col is not None:
        slice_label = (
            "facet_col" if labels.get("facet_col") is None else labels["facet_col"]
        )
        col_labels = [f"{slice_label}={i}" for i in facet_slices]
    fig = init_figure(args, "xy", [], nrows, ncols, col_labels, [])
    for attr_name in ["height", "width"]:
        if args[attr_name]:
            layout[attr_name] = args[attr_name]
    if args["title"]:
        layout["title_text"] = args["title"]
    elif args["template"].layout.margin.t is None:
        layout["margin"] = {"t": 60}
    frame_list = []
    for index, trace in enumerate(traces):
        if (facet_col and index < nrows * ncols) or index == 0:
            fig.add_trace(trace, row=nrows - index // ncols, col=index % ncols + 1)
    if animation_frame is not None:
        for i, index in zip(range(nslices_animation), animation_slices):
            frame_list.append(
                dict(
                    data=traces[nslices_facet * i : nslices_facet * (i + 1)],
                    layout=layout,
                    name=str(index),
                )
            )
    if animation_frame:
        fig.frames = frame_list
    fig.update_layout(layout)
    # Hover name, z or color
    if binary_string and rescale_image and not np.all(img == img_rescaled):
        # we rescaled the image, hence z is not displayed in hover since it does
        # not correspond to img values
        hovertemplate = "%s: %%{x}<br>%s: %%{y}<extra></extra>" % (
            labels["x"] or "x",
            labels["y"] or "y",
        )
    else:
        if trace["type"] == "heatmap":
            hover_name = "%{z}"
        elif img.ndim == 2:
            hover_name = "%{z[0]}"
        elif img.ndim == 3 and img.shape[-1] == 3:
            hover_name = "[%{z[0]}, %{z[1]}, %{z[2]}]"
        else:
            hover_name = "%{z}"
        hovertemplate = "%s: %%{x}<br>%s: %%{y}<br>%s: %s<extra></extra>" % (
            labels["x"] or "x",
            labels["y"] or "y",
            labels["color"] or "color",
            hover_name,
        )
    fig.update_traces(hovertemplate=hovertemplate)
    if labels["x"]:
        fig.update_xaxes(title_text=labels["x"], row=1)
    if labels["y"]:
        fig.update_yaxes(title_text=labels["y"], col=1)
    configure_animation_controls(args, go.Image, fig)
    fig.update_layout(template=args["template"], overwrite=True)
    return fig
 
function_name: _imshow.imshow
context-complexity: Repo-Level
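
For reference, a minimal usage sketch of the `px.imshow` API documented in this row, faceting a small image stack over its first axis; the random data and parameter choices are illustrative only.

import numpy as np
import plotly.express as px

# Illustrative data: a stack of four 64x64 grayscale frames.
img = np.random.randint(0, 255, size=(4, 64, 64), dtype=np.uint8)

# Slice axis 0 into facet columns, two per row, with labelled axes and colorbar.
fig = px.imshow(
    img,
    facet_col=0,
    facet_col_wrap=2,
    labels={"x": "column", "y": "row", "color": "intensity"},
    color_continuous_scale="Greys",
    origin="lower",
)
fig.show()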
					
repository: plotly.py
repo_id: 29
target_module_path: packages/python/plotly/plotly/graph_objs/layout/_legend.py
prompt:
    def __init__(
        self,
        arg=None,
        bgcolor=None,
        bordercolor=None,
        borderwidth=None,
        entrywidth=None,
        entrywidthmode=None,
        font=None,
        groupclick=None,
        grouptitlefont=None,
        indentation=None,
        itemclick=None,
        itemdoubleclick=None,
        itemsizing=None,
        itemwidth=None,
        orientation=None,
        title=None,
        tracegroupgap=None,
        traceorder=None,
        uirevision=None,
        valign=None,
        visible=None,
        x=None,
        xanchor=None,
        xref=None,
        y=None,
        yanchor=None,
        yref=None,
        **kwargs,
    ):
        """
        Construct a new Legend object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.layout.Legend`
        bgcolor
            Sets the legend background color. Defaults to
            `layout.paper_bgcolor`.
        bordercolor
            Sets the color of the border enclosing the legend.
        borderwidth
            Sets the width (in px) of the border enclosing the
            legend.
        entrywidth
            Sets the width (in px or fraction) of the legend. Use 0
            to size the entry based on the text width, when
            `entrywidthmode` is set to "pixels".
        entrywidthmode
            Determines what entrywidth means.
        font
            Sets the font used for the legend item text.
        groupclick
            Determines the behavior on legend group item click.
            "toggleitem" toggles the visibility of the individual
            item clicked on the graph. "togglegroup" toggles the
            visibility of all items in the same legendgroup as the
            item clicked on the graph.
        grouptitlefont
            Sets the font for group titles in legend. Defaults to
            `legend.font` with its size increased about 10%.
        indentation
            Sets the indentation (in px) of the legend entries.
        itemclick
            Determines the behavior on legend item click. "toggle"
            toggles the visibility of the item clicked on the
            graph. "toggleothers" makes the clicked item the sole
            visible item on the graph. False disables legend item
            click interactions.
        itemdoubleclick
            Determines the behavior on legend item double-click.
            "toggle" toggles the visibility of the item clicked on
            the graph. "toggleothers" makes the clicked item the
            sole visible item on the graph. False disables legend
            item double-click interactions.
        itemsizing
            Determines if the legend items symbols scale with their
            corresponding "trace" attributes or remain "constant"
            independent of the symbol size on the graph.
        itemwidth
            Sets the width (in px) of the legend item symbols (the
            part other than the title.text).
        orientation
            Sets the orientation of the legend.
        title
            :class:`plotly.graph_objects.layout.legend.Title`
            instance or dict with compatible properties
        tracegroupgap
            Sets the amount of vertical space (in px) between
            legend groups.
        traceorder
            Determines the order in which the legend items are
            displayed. If "normal", the items are displayed top-to-
            bottom in the same order as the input data. If
            "reversed", the items are displayed in the opposite
            order as "normal". If "grouped", the items are
            displayed in groups (when a trace `legendgroup` is
            provided). If "grouped+reversed", the items are
            displayed in the opposite order as "grouped".
        uirevision
            Controls persistence of legend-driven changes in trace
            and pie label visibility. Defaults to
            `layout.uirevision`.
        valign
            Sets the vertical alignment of the symbols with respect
            to their associated text.
        visible
            Determines whether or not this legend is visible.
        x
            Sets the x position with respect to `xref` (in
            normalized coordinates) of the legend. When `xref` is
            "paper", defaults to 1.02 for vertical legends and
            defaults to 0 for horizontal legends. When `xref` is
            "container", defaults to 1 for vertical legends and
            defaults to 0 for horizontal legends. Must be between 0
            and 1 if `xref` is "container" and between "-2" and 3
            if `xref` is "paper".
        xanchor
            Sets the legend's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the legend. Value "auto" anchors legends
            to the right for `x` values greater than or equal to
            2/3, anchors legends to the left for `x` values less
            than or equal to 1/3 and anchors legends with respect
            to their center otherwise.
        xref
            Sets the container `x` refers to. "container" spans the
            entire `width` of the plot. "paper" refers to the width
            of the plotting area only.
        y
            Sets the y position with respect to `yref` (in
            normalized coordinates) of the legend. When `yref` is
            "paper", defaults to 1 for vertical legends, defaults
            to "-0.1" for horizontal legends on graphs w/o range
            sliders and defaults to 1.1 for horizontal legends on
            graph with one or multiple range sliders. When `yref`
            is "container", defaults to 1. Must be between 0 and 1
            if `yref` is "container" and between "-2" and 3 if
            `yref` is "paper".
        yanchor
            Sets the legend's vertical position anchor. This anchor
            binds the `y` position to the "top", "middle" or
            "bottom" of the legend. Value "auto" anchors legends at
            their bottom for `y` values less than or equal to 1/3,
            anchors legends at their top for `y` values greater
            than or equal to 2/3 and anchors legends with respect
            to their middle otherwise.
        yref
            Sets the container `y` refers to. "container" spans the
            entire `height` of the plot. "paper" refers to the
            height of the plotting area only.
        Returns
        -------
        Legend
        """
 
relavent_test_path: /usr/src/app/target_test_cases/failed_tests__legend.Legend.__init__.txt
full_function:
    def __init__(
        self,
        arg=None,
        bgcolor=None,
        bordercolor=None,
        borderwidth=None,
        entrywidth=None,
        entrywidthmode=None,
        font=None,
        groupclick=None,
        grouptitlefont=None,
        indentation=None,
        itemclick=None,
        itemdoubleclick=None,
        itemsizing=None,
        itemwidth=None,
        orientation=None,
        title=None,
        tracegroupgap=None,
        traceorder=None,
        uirevision=None,
        valign=None,
        visible=None,
        x=None,
        xanchor=None,
        xref=None,
        y=None,
        yanchor=None,
        yref=None,
        **kwargs,
    ):
        """
        Construct a new Legend object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.layout.Legend`
        bgcolor
            Sets the legend background color. Defaults to
            `layout.paper_bgcolor`.
        bordercolor
            Sets the color of the border enclosing the legend.
        borderwidth
            Sets the width (in px) of the border enclosing the
            legend.
        entrywidth
            Sets the width (in px or fraction) of the legend. Use 0
            to size the entry based on the text width, when
            `entrywidthmode` is set to "pixels".
        entrywidthmode
            Determines what entrywidth means.
        font
            Sets the font used for the legend item text.
        groupclick
            Determines the behavior on legend group item click.
            "toggleitem" toggles the visibility of the individual
            item clicked on the graph. "togglegroup" toggles the
            visibility of all items in the same legendgroup as the
            item clicked on the graph.
        grouptitlefont
            Sets the font for group titles in legend. Defaults to
            `legend.font` with its size increased about 10%.
        indentation
            Sets the indentation (in px) of the legend entries.
        itemclick
            Determines the behavior on legend item click. "toggle"
            toggles the visibility of the item clicked on the
            graph. "toggleothers" makes the clicked item the sole
            visible item on the graph. False disables legend item
            click interactions.
        itemdoubleclick
            Determines the behavior on legend item double-click.
            "toggle" toggles the visibility of the item clicked on
            the graph. "toggleothers" makes the clicked item the
            sole visible item on the graph. False disables legend
            item double-click interactions.
        itemsizing
            Determines if the legend items symbols scale with their
            corresponding "trace" attributes or remain "constant"
            independent of the symbol size on the graph.
        itemwidth
            Sets the width (in px) of the legend item symbols (the
            part other than the title.text).
        orientation
            Sets the orientation of the legend.
        title
            :class:`plotly.graph_objects.layout.legend.Title`
            instance or dict with compatible properties
        tracegroupgap
            Sets the amount of vertical space (in px) between
            legend groups.
        traceorder
            Determines the order in which the legend items are
            displayed. If "normal", the items are displayed top-to-
            bottom in the same order as the input data. If
            "reversed", the items are displayed in the opposite
            order as "normal". If "grouped", the items are
            displayed in groups (when a trace `legendgroup` is
            provided). If "grouped+reversed", the items are
            displayed in the opposite order as "grouped".
        uirevision
            Controls persistence of legend-driven changes in trace
            and pie label visibility. Defaults to
            `layout.uirevision`.
        valign
            Sets the vertical alignment of the symbols with respect
            to their associated text.
        visible
            Determines whether or not this legend is visible.
        x
            Sets the x position with respect to `xref` (in
            normalized coordinates) of the legend. When `xref` is
            "paper", defaults to 1.02 for vertical legends and
            defaults to 0 for horizontal legends. When `xref` is
            "container", defaults to 1 for vertical legends and
            defaults to 0 for horizontal legends. Must be between 0
            and 1 if `xref` is "container" and between "-2" and 3
            if `xref` is "paper".
        xanchor
            Sets the legend's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the legend. Value "auto" anchors legends
            to the right for `x` values greater than or equal to
            2/3, anchors legends to the left for `x` values less
            than or equal to 1/3 and anchors legends with respect
            to their center otherwise.
        xref
            Sets the container `x` refers to. "container" spans the
            entire `width` of the plot. "paper" refers to the width
            of the plotting area only.
        y
            Sets the y position with respect to `yref` (in
            normalized coordinates) of the legend. When `yref` is
            "paper", defaults to 1 for vertical legends, defaults
            to "-0.1" for horizontal legends on graphs w/o range
            sliders and defaults to 1.1 for horizontal legends on
            graph with one or multiple range sliders. When `yref`
            is "container", defaults to 1. Must be between 0 and 1
            if `yref` is "container" and between "-2" and 3 if
            `yref` is "paper".
        yanchor
            Sets the legend's vertical position anchor. This anchor
            binds the `y` position to the "top", "middle" or
            "bottom" of the legend. Value "auto" anchors legends at
            their bottom for `y` values less than or equal to 1/3,
            anchors legends at their top for `y` values greater
            than or equal to 2/3 and anchors legends with respect
            to their middle otherwise.
        yref
            Sets the container `y` refers to. "container" spans the
            entire `height` of the plot. "paper" refers to the
            height of the plotting area only.
        Returns
        -------
        Legend
        """
        super(Legend, self).__init__("legend")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.Legend
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Legend`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("bgcolor", None)
        _v = bgcolor if bgcolor is not None else _v
        if _v is not None:
            self["bgcolor"] = _v
        _v = arg.pop("bordercolor", None)
        _v = bordercolor if bordercolor is not None else _v
        if _v is not None:
            self["bordercolor"] = _v
        _v = arg.pop("borderwidth", None)
        _v = borderwidth if borderwidth is not None else _v
        if _v is not None:
            self["borderwidth"] = _v
        _v = arg.pop("entrywidth", None)
        _v = entrywidth if entrywidth is not None else _v
        if _v is not None:
            self["entrywidth"] = _v
        _v = arg.pop("entrywidthmode", None)
        _v = entrywidthmode if entrywidthmode is not None else _v
        if _v is not None:
            self["entrywidthmode"] = _v
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("groupclick", None)
        _v = groupclick if groupclick is not None else _v
        if _v is not None:
            self["groupclick"] = _v
        _v = arg.pop("grouptitlefont", None)
        _v = grouptitlefont if grouptitlefont is not None else _v
        if _v is not None:
            self["grouptitlefont"] = _v
        _v = arg.pop("indentation", None)
        _v = indentation if indentation is not None else _v
        if _v is not None:
            self["indentation"] = _v
        _v = arg.pop("itemclick", None)
        _v = itemclick if itemclick is not None else _v
        if _v is not None:
            self["itemclick"] = _v
        _v = arg.pop("itemdoubleclick", None)
        _v = itemdoubleclick if itemdoubleclick is not None else _v
        if _v is not None:
            self["itemdoubleclick"] = _v
        _v = arg.pop("itemsizing", None)
        _v = itemsizing if itemsizing is not None else _v
        if _v is not None:
            self["itemsizing"] = _v
        _v = arg.pop("itemwidth", None)
        _v = itemwidth if itemwidth is not None else _v
        if _v is not None:
            self["itemwidth"] = _v
        _v = arg.pop("orientation", None)
        _v = orientation if orientation is not None else _v
        if _v is not None:
            self["orientation"] = _v
        _v = arg.pop("title", None)
        _v = title if title is not None else _v
        if _v is not None:
            self["title"] = _v
        _v = arg.pop("tracegroupgap", None)
        _v = tracegroupgap if tracegroupgap is not None else _v
        if _v is not None:
            self["tracegroupgap"] = _v
        _v = arg.pop("traceorder", None)
        _v = traceorder if traceorder is not None else _v
        if _v is not None:
            self["traceorder"] = _v
        _v = arg.pop("uirevision", None)
        _v = uirevision if uirevision is not None else _v
        if _v is not None:
            self["uirevision"] = _v
        _v = arg.pop("valign", None)
        _v = valign if valign is not None else _v
        if _v is not None:
            self["valign"] = _v
        _v = arg.pop("visible", None)
        _v = visible if visible is not None else _v
        if _v is not None:
            self["visible"] = _v
        _v = arg.pop("x", None)
        _v = x if x is not None else _v
        if _v is not None:
            self["x"] = _v
        _v = arg.pop("xanchor", None)
        _v = xanchor if xanchor is not None else _v
        if _v is not None:
            self["xanchor"] = _v
        _v = arg.pop("xref", None)
        _v = xref if xref is not None else _v
        if _v is not None:
            self["xref"] = _v
        _v = arg.pop("y", None)
        _v = y if y is not None else _v
        if _v is not None:
            self["y"] = _v
        _v = arg.pop("yanchor", None)
        _v = yanchor if yanchor is not None else _v
        if _v is not None:
            self["yanchor"] = _v
        _v = arg.pop("yref", None)
        _v = yref if yref is not None else _v
        if _v is not None:
            self["yref"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
 
function_name: _legend.Legend.__init__
context-complexity: Self-Contained
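
For reference, a short sketch of how the `Legend` properties documented in this row are typically supplied through `update_layout`; the scatter traces and property values are placeholders.

import plotly.graph_objects as go

fig = go.Figure(
    data=[
        go.Scatter(x=[0, 1, 2], y=[1, 3, 2], name="series A"),
        go.Scatter(x=[0, 1, 2], y=[2, 1, 3], name="series B"),
    ]
)
# Equivalent to assigning go.layout.Legend(...) to fig.layout.legend.
fig.update_layout(
    legend=dict(
        orientation="h",
        x=1,
        xanchor="right",
        y=1.02,
        yanchor="bottom",
        bordercolor="black",
        borderwidth=1,
    )
)
fig.show()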
					
repository: plotly.py
repo_id: 31
target_module_path: packages/python/plotly/plotly/graph_objs/layout/_modebar.py
prompt:
    def __init__(
        self,
        arg=None,
        activecolor=None,
        add=None,
        addsrc=None,
        bgcolor=None,
        color=None,
        orientation=None,
        remove=None,
        removesrc=None,
        uirevision=None,
        **kwargs,
    ):
        """
        Construct a new Modebar object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.Modebar`
        activecolor
            Sets the color of the active or hovered on icons in the
            modebar.
        add
            Determines which predefined modebar buttons to add.
            Please note that these buttons will only be shown if
            they are compatible with all trace types used in a
            graph. Similar to `config.modeBarButtonsToAdd` option.
            This may include "v1hovermode", "hoverclosest",
            "hovercompare", "togglehover", "togglespikelines",
            "drawline", "drawopenpath", "drawclosedpath",
            "drawcircle", "drawrect", "eraseshape".
        addsrc
            Sets the source reference on Chart Studio Cloud for
            `add`.
        bgcolor
            Sets the background color of the modebar.
        color
            Sets the color of the icons in the modebar.
        orientation
            Sets the orientation of the modebar.
        remove
            Determines which predefined modebar buttons to remove.
            Similar to `config.modeBarButtonsToRemove` option. This
            may include "autoScale2d", "autoscale",
            "editInChartStudio", "editinchartstudio",
            "hoverCompareCartesian", "hovercompare", "lasso",
            "lasso2d", "orbitRotation", "orbitrotation", "pan",
            "pan2d", "pan3d", "reset", "resetCameraDefault3d",
            "resetCameraLastSave3d", "resetGeo",
            "resetSankeyGroup", "resetScale2d", "resetViewMap",
            "resetViewMapbox", "resetViews", "resetcameradefault",
            "resetcameralastsave", "resetsankeygroup",
            "resetscale", "resetview", "resetviews", "select",
            "select2d", "sendDataToCloud", "senddatatocloud",
            "tableRotation", "tablerotation", "toImage",
            "toggleHover", "toggleSpikelines", "togglehover",
            "togglespikelines", "toimage", "zoom", "zoom2d",
            "zoom3d", "zoomIn2d", "zoomInGeo", "zoomInMap",
            "zoomInMapbox", "zoomOut2d", "zoomOutGeo",
            "zoomOutMap", "zoomOutMapbox", "zoomin", "zoomout".
        removesrc
            Sets the source reference on Chart Studio Cloud for
            `remove`.
        uirevision
            Controls persistence of user-driven changes related to
            the modebar, including `hovermode`, `dragmode`, and
            `showspikes` at both the root level and inside
            subplots. Defaults to `layout.uirevision`.
        Returns
        -------
        Modebar
        """
 
relavent_test_path: /usr/src/app/target_test_cases/failed_tests__modebar.Modebar.__init__.txt
full_function:
    def __init__(
        self,
        arg=None,
        activecolor=None,
        add=None,
        addsrc=None,
        bgcolor=None,
        color=None,
        orientation=None,
        remove=None,
        removesrc=None,
        uirevision=None,
        **kwargs,
    ):
        """
        Construct a new Modebar object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.Modebar`
        activecolor
            Sets the color of the active or hovered on icons in the
            modebar.
        add
            Determines which predefined modebar buttons to add.
            Please note that these buttons will only be shown if
            they are compatible with all trace types used in a
            graph. Similar to `config.modeBarButtonsToAdd` option.
            This may include "v1hovermode", "hoverclosest",
            "hovercompare", "togglehover", "togglespikelines",
            "drawline", "drawopenpath", "drawclosedpath",
            "drawcircle", "drawrect", "eraseshape".
        addsrc
            Sets the source reference on Chart Studio Cloud for
            `add`.
        bgcolor
            Sets the background color of the modebar.
        color
            Sets the color of the icons in the modebar.
        orientation
            Sets the orientation of the modebar.
        remove
            Determines which predefined modebar buttons to remove.
            Similar to `config.modeBarButtonsToRemove` option. This
            may include "autoScale2d", "autoscale",
            "editInChartStudio", "editinchartstudio",
            "hoverCompareCartesian", "hovercompare", "lasso",
            "lasso2d", "orbitRotation", "orbitrotation", "pan",
            "pan2d", "pan3d", "reset", "resetCameraDefault3d",
            "resetCameraLastSave3d", "resetGeo",
            "resetSankeyGroup", "resetScale2d", "resetViewMap",
            "resetViewMapbox", "resetViews", "resetcameradefault",
            "resetcameralastsave", "resetsankeygroup",
            "resetscale", "resetview", "resetviews", "select",
            "select2d", "sendDataToCloud", "senddatatocloud",
            "tableRotation", "tablerotation", "toImage",
            "toggleHover", "toggleSpikelines", "togglehover",
            "togglespikelines", "toimage", "zoom", "zoom2d",
            "zoom3d", "zoomIn2d", "zoomInGeo", "zoomInMap",
            "zoomInMapbox", "zoomOut2d", "zoomOutGeo",
            "zoomOutMap", "zoomOutMapbox", "zoomin", "zoomout".
        removesrc
            Sets the source reference on Chart Studio Cloud for
            `remove`.
        uirevision
            Controls persistence of user-driven changes related to
            the modebar, including `hovermode`, `dragmode`, and
            `showspikes` at both the root level and inside
            subplots. Defaults to `layout.uirevision`.
        Returns
        -------
        Modebar
        """
        super(Modebar, self).__init__("modebar")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.Modebar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Modebar`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("activecolor", None)
        _v = activecolor if activecolor is not None else _v
        if _v is not None:
            self["activecolor"] = _v
        _v = arg.pop("add", None)
        _v = add if add is not None else _v
        if _v is not None:
            self["add"] = _v
        _v = arg.pop("addsrc", None)
        _v = addsrc if addsrc is not None else _v
        if _v is not None:
            self["addsrc"] = _v
        _v = arg.pop("bgcolor", None)
        _v = bgcolor if bgcolor is not None else _v
        if _v is not None:
            self["bgcolor"] = _v
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("orientation", None)
        _v = orientation if orientation is not None else _v
        if _v is not None:
            self["orientation"] = _v
        _v = arg.pop("remove", None)
        _v = remove if remove is not None else _v
        if _v is not None:
            self["remove"] = _v
        _v = arg.pop("removesrc", None)
        _v = removesrc if removesrc is not None else _v
        if _v is not None:
            self["removesrc"] = _v
        _v = arg.pop("uirevision", None)
        _v = uirevision if uirevision is not None else _v
        if _v is not None:
            self["uirevision"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
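
A minimal usage sketch, assuming a standard plotly installation (the sample trace and the button choices are illustrative, not part of the generated constructor above): a Modebar built with this constructor is typically attached through `layout.modebar`.

import plotly.graph_objects as go

fig = go.Figure(go.Scatter(x=[1, 2, 3], y=[2, 1, 3]))
fig.update_layout(
    modebar=go.layout.Modebar(
        orientation="v",                 # vertical modebar
        bgcolor="rgba(0, 0, 0, 0)",      # transparent background
        add=["drawline", "eraseshape"],  # extra drawing buttons
        remove=["lasso2d", "select2d"],  # hide selection tools
    )
)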
 
 | 
	_modebar.Modebar.__init__ 
 | 
	Self-Contained 
 | 
					
	plotly.py 
 | 
	32 
 | 
	packages/python/plotly/plotly/graph_objs/layout/_newshape.py 
 | 
	    def __init__(
        self,
        arg=None,
        drawdirection=None,
        fillcolor=None,
        fillrule=None,
        label=None,
        layer=None,
        legend=None,
        legendgroup=None,
        legendgrouptitle=None,
        legendrank=None,
        legendwidth=None,
        line=None,
        name=None,
        opacity=None,
        showlegend=None,
        visible=None,
        **kwargs,
    ):
        """
        Construct a new Newshape object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.Newshape`
        drawdirection
            When `dragmode` is set to "drawrect", "drawline" or
            "drawcircle" this limits the drag to be horizontal,
            vertical or diagonal. With "diagonal" there is no
            limit, e.g. lines can be drawn in any direction. "ortho"
            limits the draw to be either horizontal or vertical.
            "horizontal" allows only horizontal extension. "vertical"
            allows only vertical extension.
        fillcolor
            Sets the color filling new shapes' interior. Please
            note that if using a fillcolor with alpha greater than
            half, drag inside the active shape starts moving the
            shape underneath, otherwise a new shape could be
            started over.
        fillrule
            Determines the path's interior. For more info please
            visit https://developer.mozilla.org/en-
            US/docs/Web/SVG/Attribute/fill-rule
        label
            :class:`plotly.graph_objects.layout.newshape.Label`
            instance or dict with compatible properties
        layer
            Specifies whether new shapes are drawn below gridlines
            ("below"), between gridlines and traces ("between") or
            above traces ("above").
        legend
            Sets the reference to a legend to show new shape in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgroup
            Sets the legend group for new shape. Traces and shapes
            part of the same legend group hide/show at the same
            time when toggling legend items.
        legendgrouptitle
            :class:`plotly.graph_objects.layout.newshape.Legendgrou
            ptitle` instance or dict with compatible properties
        legendrank
            Sets the legend rank for new shape. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            new shape.
        line
            :class:`plotly.graph_objects.layout.newshape.Line`
            instance or dict with compatible properties
        name
            Sets new shape name. The name appears as the legend
            item.
        opacity
            Sets the opacity of new shapes.
        showlegend
            Determines whether or not new shape is shown in the
            legend.
        visible
            Determines whether or not new shape is visible. If
            "legendonly", the shape is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        Returns
        -------
        Newshape
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__newshape.Newshape.__init__.txt 
 | 
	    def __init__(
        self,
        arg=None,
        drawdirection=None,
        fillcolor=None,
        fillrule=None,
        label=None,
        layer=None,
        legend=None,
        legendgroup=None,
        legendgrouptitle=None,
        legendrank=None,
        legendwidth=None,
        line=None,
        name=None,
        opacity=None,
        showlegend=None,
        visible=None,
        **kwargs,
    ):
        """
        Construct a new Newshape object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.Newshape`
        drawdirection
            When `dragmode` is set to "drawrect", "drawline" or
            "drawcircle" this limits the drag to be horizontal,
            vertical or diagonal. With "diagonal" there is no
            limit, e.g. lines can be drawn in any direction. "ortho"
            limits the draw to be either horizontal or vertical.
            "horizontal" allows only horizontal extension. "vertical"
            allows only vertical extension.
        fillcolor
            Sets the color filling new shapes' interior. Please
            note that if using a fillcolor with alpha greater than
            half, drag inside the active shape starts moving the
            shape underneath, otherwise a new shape could be
            started over.
        fillrule
            Determines the path's interior. For more info please
            visit https://developer.mozilla.org/en-
            US/docs/Web/SVG/Attribute/fill-rule
        label
            :class:`plotly.graph_objects.layout.newshape.Label`
            instance or dict with compatible properties
        layer
            Specifies whether new shapes are drawn below gridlines
            ("below"), between gridlines and traces ("between") or
            above traces ("above").
        legend
            Sets the reference to a legend to show new shape in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgroup
            Sets the legend group for new shape. Traces and shapes
            part of the same legend group hide/show at the same
            time when toggling legend items.
        legendgrouptitle
            :class:`plotly.graph_objects.layout.newshape.Legendgrou
            ptitle` instance or dict with compatible properties
        legendrank
            Sets the legend rank for new shape. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            new shape.
        line
            :class:`plotly.graph_objects.layout.newshape.Line`
            instance or dict with compatible properties
        name
            Sets new shape name. The name appears as the legend
            item.
        opacity
            Sets the opacity of new shapes.
        showlegend
            Determines whether or not new shape is shown in the
            legend.
        visible
            Determines whether or not new shape is visible. If
            "legendonly", the shape is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        Returns
        -------
        Newshape
        """
        super(Newshape, self).__init__("newshape")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.Newshape
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Newshape`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("drawdirection", None)
        _v = drawdirection if drawdirection is not None else _v
        if _v is not None:
            self["drawdirection"] = _v
        _v = arg.pop("fillcolor", None)
        _v = fillcolor if fillcolor is not None else _v
        if _v is not None:
            self["fillcolor"] = _v
        _v = arg.pop("fillrule", None)
        _v = fillrule if fillrule is not None else _v
        if _v is not None:
            self["fillrule"] = _v
        _v = arg.pop("label", None)
        _v = label if label is not None else _v
        if _v is not None:
            self["label"] = _v
        _v = arg.pop("layer", None)
        _v = layer if layer is not None else _v
        if _v is not None:
            self["layer"] = _v
        _v = arg.pop("legend", None)
        _v = legend if legend is not None else _v
        if _v is not None:
            self["legend"] = _v
        _v = arg.pop("legendgroup", None)
        _v = legendgroup if legendgroup is not None else _v
        if _v is not None:
            self["legendgroup"] = _v
        _v = arg.pop("legendgrouptitle", None)
        _v = legendgrouptitle if legendgrouptitle is not None else _v
        if _v is not None:
            self["legendgrouptitle"] = _v
        _v = arg.pop("legendrank", None)
        _v = legendrank if legendrank is not None else _v
        if _v is not None:
            self["legendrank"] = _v
        _v = arg.pop("legendwidth", None)
        _v = legendwidth if legendwidth is not None else _v
        if _v is not None:
            self["legendwidth"] = _v
        _v = arg.pop("line", None)
        _v = line if line is not None else _v
        if _v is not None:
            self["line"] = _v
        _v = arg.pop("name", None)
        _v = name if name is not None else _v
        if _v is not None:
            self["name"] = _v
        _v = arg.pop("opacity", None)
        _v = opacity if opacity is not None else _v
        if _v is not None:
            self["opacity"] = _v
        _v = arg.pop("showlegend", None)
        _v = showlegend if showlegend is not None else _v
        if _v is not None:
            self["showlegend"] = _v
        _v = arg.pop("visible", None)
        _v = visible if visible is not None else _v
        if _v is not None:
            self["visible"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
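
A minimal usage sketch, assuming a standard plotly installation (the colors and the dragmode choice are illustrative): Newshape settings normally take effect together with a drawing `dragmode`.

import plotly.graph_objects as go

fig = go.Figure(go.Scatter(x=[0, 1, 2], y=[0, 1, 0]))
fig.update_layout(
    dragmode="drawrect",  # new shapes are created by dragging on the plot
    newshape=go.layout.Newshape(
        fillcolor="rgba(255, 0, 0, 0.2)",     # low alpha keeps drags creating new shapes
        line=dict(color="crimson", width=2),  # outline of newly drawn shapes
        layer="above",                        # draw new shapes above traces
        opacity=0.8,
    ),
)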
 
 | 
	_newshape.Newshape.__init__ 
 | 
	Self-Contained 
 | 
					
	plotly.py 
 | 
	33 
 | 
	packages/python/plotly/plotly/graph_objs/_parcats.py 
 | 
	    def __init__(
        self,
        arg=None,
        arrangement=None,
        bundlecolors=None,
        counts=None,
        countssrc=None,
        dimensions=None,
        dimensiondefaults=None,
        domain=None,
        hoverinfo=None,
        hoveron=None,
        hovertemplate=None,
        labelfont=None,
        legendgrouptitle=None,
        legendwidth=None,
        line=None,
        meta=None,
        metasrc=None,
        name=None,
        sortpaths=None,
        stream=None,
        tickfont=None,
        uid=None,
        uirevision=None,
        visible=None,
        **kwargs,
    ):
        """
        Construct a new Parcats object
        Parallel categories diagram for multidimensional categorical
        data.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.Parcats`
        arrangement
            Sets the drag interaction mode for categories and
            dimensions. If `perpendicular`, the categories can only
            move along a line perpendicular to the paths. If
            `freeform`, the categories can freely move on the
            plane. If `fixed`, the categories and dimensions are
            stationary.
        bundlecolors
            Sort paths so that like colors are bundled together
            within each category.
        counts
            The number of observations represented by each state.
            Defaults to 1 so that each state represents one
            observation
        countssrc
            Sets the source reference on Chart Studio Cloud for
            `counts`.
        dimensions
            The dimensions (variables) of the parallel categories
            diagram.
        dimensiondefaults
            When used in a template (as
            layout.template.data.parcats.dimensiondefaults), sets
            the default property values to use for elements of
            parcats.dimensions
        domain
            :class:`plotly.graph_objects.parcats.Domain` instance
            or dict with compatible properties
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoveron
            Sets the hover interaction mode for the parcats
            diagram. If `category`, hover interactions take place
            per category. If `color`, hover interactions take place
            per color per category. If `dimension`, hover
            interactions take place across all categories per
            dimension.
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available. This value here applies when hovering
            over dimensions. Note that "categorycount",
            "colorcount" and "bandcolorcount" are only available
            when `hoveron` contains the "color" flag. Finally, the
            template string has access to variables `count`,
            `probability`, `category`, `categorycount`,
            `colorcount` and `bandcolorcount`. Anything contained
            in tag `<extra>` is displayed in the secondary box, for
            example "<extra>{fullData.name}</extra>". To hide the
            secondary box completely, use an empty tag
            `<extra></extra>`.
        labelfont
            Sets the font for the `dimension` labels.
        legendgrouptitle
            :class:`plotly.graph_objects.parcats.Legendgrouptitle`
            instance or dict with compatible properties
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        line
            :class:`plotly.graph_objects.parcats.Line` instance or
            dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        sortpaths
            Sets the path sorting algorithm. If `forward`, sort
            paths based on dimension categories from left to right.
            If `backward`, sort paths based on dimensions
            categories from right to left.
        stream
            :class:`plotly.graph_objects.parcats.Stream` instance
            or dict with compatible properties
        tickfont
            Sets the font for the `category` labels.
        uid
            Assign an id to this trace. Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        Returns
        -------
        Parcats
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__parcats.Parcats.__init__.txt 
 | 
	    def __init__(
        self,
        arg=None,
        arrangement=None,
        bundlecolors=None,
        counts=None,
        countssrc=None,
        dimensions=None,
        dimensiondefaults=None,
        domain=None,
        hoverinfo=None,
        hoveron=None,
        hovertemplate=None,
        labelfont=None,
        legendgrouptitle=None,
        legendwidth=None,
        line=None,
        meta=None,
        metasrc=None,
        name=None,
        sortpaths=None,
        stream=None,
        tickfont=None,
        uid=None,
        uirevision=None,
        visible=None,
        **kwargs,
    ):
        """
        Construct a new Parcats object
        Parallel categories diagram for multidimensional categorical
        data.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.Parcats`
        arrangement
            Sets the drag interaction mode for categories and
            dimensions. If `perpendicular`, the categories can only
            move along a line perpendicular to the paths. If
            `freeform`, the categories can freely move on the
            plane. If `fixed`, the categories and dimensions are
            stationary.
        bundlecolors
            Sort paths so that like colors are bundled together
            within each category.
        counts
            The number of observations represented by each state.
            Defaults to 1 so that each state represents one
            observation
        countssrc
            Sets the source reference on Chart Studio Cloud for
            `counts`.
        dimensions
            The dimensions (variables) of the parallel categories
            diagram.
        dimensiondefaults
            When used in a template (as
            layout.template.data.parcats.dimensiondefaults), sets
            the default property values to use for elements of
            parcats.dimensions
        domain
            :class:`plotly.graph_objects.parcats.Domain` instance
            or dict with compatible properties
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoveron
            Sets the hover interaction mode for the parcats
            diagram. If `category`, hover interactions take place
            per category. If `color`, hover interactions take place
            per color per category. If `dimension`, hover
            interactions take place across all categories per
            dimension.
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available. This value here applies when hovering
            over dimensions. Note that "categorycount",
            "colorcount" and "bandcolorcount" are only available
            when `hoveron` contains the "color" flag. Finally, the
            template string has access to variables `count`,
            `probability`, `category`, `categorycount`,
            `colorcount` and `bandcolorcount`. Anything contained
            in tag `<extra>` is displayed in the secondary box, for
            example "<extra>{fullData.name}</extra>". To hide the
            secondary box completely, use an empty tag
            `<extra></extra>`.
        labelfont
            Sets the font for the `dimension` labels.
        legendgrouptitle
            :class:`plotly.graph_objects.parcats.Legendgrouptitle`
            instance or dict with compatible properties
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        line
            :class:`plotly.graph_objects.parcats.Line` instance or
            dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        sortpaths
            Sets the path sorting algorithm. If `forward`, sort
            paths based on dimension categories from left to right.
            If `backward`, sort paths based on dimensions
            categories from right to left.
        stream
            :class:`plotly.graph_objects.parcats.Stream` instance
            or dict with compatible properties
        tickfont
            Sets the font for the `category` labels.
        uid
            Assign an id to this trace. Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        Returns
        -------
        Parcats
        """
        super(Parcats, self).__init__("parcats")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.Parcats
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Parcats`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("arrangement", None)
        _v = arrangement if arrangement is not None else _v
        if _v is not None:
            self["arrangement"] = _v
        _v = arg.pop("bundlecolors", None)
        _v = bundlecolors if bundlecolors is not None else _v
        if _v is not None:
            self["bundlecolors"] = _v
        _v = arg.pop("counts", None)
        _v = counts if counts is not None else _v
        if _v is not None:
            self["counts"] = _v
        _v = arg.pop("countssrc", None)
        _v = countssrc if countssrc is not None else _v
        if _v is not None:
            self["countssrc"] = _v
        _v = arg.pop("dimensions", None)
        _v = dimensions if dimensions is not None else _v
        if _v is not None:
            self["dimensions"] = _v
        _v = arg.pop("dimensiondefaults", None)
        _v = dimensiondefaults if dimensiondefaults is not None else _v
        if _v is not None:
            self["dimensiondefaults"] = _v
        _v = arg.pop("domain", None)
        _v = domain if domain is not None else _v
        if _v is not None:
            self["domain"] = _v
        _v = arg.pop("hoverinfo", None)
        _v = hoverinfo if hoverinfo is not None else _v
        if _v is not None:
            self["hoverinfo"] = _v
        _v = arg.pop("hoveron", None)
        _v = hoveron if hoveron is not None else _v
        if _v is not None:
            self["hoveron"] = _v
        _v = arg.pop("hovertemplate", None)
        _v = hovertemplate if hovertemplate is not None else _v
        if _v is not None:
            self["hovertemplate"] = _v
        _v = arg.pop("labelfont", None)
        _v = labelfont if labelfont is not None else _v
        if _v is not None:
            self["labelfont"] = _v
        _v = arg.pop("legendgrouptitle", None)
        _v = legendgrouptitle if legendgrouptitle is not None else _v
        if _v is not None:
            self["legendgrouptitle"] = _v
        _v = arg.pop("legendwidth", None)
        _v = legendwidth if legendwidth is not None else _v
        if _v is not None:
            self["legendwidth"] = _v
        _v = arg.pop("line", None)
        _v = line if line is not None else _v
        if _v is not None:
            self["line"] = _v
        _v = arg.pop("meta", None)
        _v = meta if meta is not None else _v
        if _v is not None:
            self["meta"] = _v
        _v = arg.pop("metasrc", None)
        _v = metasrc if metasrc is not None else _v
        if _v is not None:
            self["metasrc"] = _v
        _v = arg.pop("name", None)
        _v = name if name is not None else _v
        if _v is not None:
            self["name"] = _v
        _v = arg.pop("sortpaths", None)
        _v = sortpaths if sortpaths is not None else _v
        if _v is not None:
            self["sortpaths"] = _v
        _v = arg.pop("stream", None)
        _v = stream if stream is not None else _v
        if _v is not None:
            self["stream"] = _v
        _v = arg.pop("tickfont", None)
        _v = tickfont if tickfont is not None else _v
        if _v is not None:
            self["tickfont"] = _v
        _v = arg.pop("uid", None)
        _v = uid if uid is not None else _v
        if _v is not None:
            self["uid"] = _v
        _v = arg.pop("uirevision", None)
        _v = uirevision if uirevision is not None else _v
        if _v is not None:
            self["uirevision"] = _v
        _v = arg.pop("visible", None)
        _v = visible if visible is not None else _v
        if _v is not None:
            self["visible"] = _v
        # Read-only literals
        # ------------------
        self._props["type"] = "parcats"
        arg.pop("type", None)
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
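
A minimal usage sketch, assuming a standard plotly installation (the dimension values, counts and colors are illustrative): a Parcats trace is built from per-row category values.

import plotly.graph_objects as go

fig = go.Figure(
    go.Parcats(
        dimensions=[
            dict(label="Class", values=["A", "B", "A", "B"]),
            dict(label="Outcome", values=["yes", "yes", "no", "no"]),
        ],
        counts=[5, 3, 2, 4],       # observations represented by each row
        arrangement="freeform",    # categories can be dragged anywhere on the plane
        line=dict(color=[0, 1, 0, 1], colorscale="Viridis"),
    )
)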
 
 | 
	_parcats.Parcats.__init__ 
 | 
	Self-Contained 
 | 
					
	plotly.py 
 | 
	38 
 | 
	packages/python/plotly/plotly/figure_factory/_quiver.py 
 | 
	def create_quiver(
    x, y, u, v, scale=0.1, arrow_scale=0.3, angle=math.pi / 9, scaleratio=None, **kwargs
):
    """
    Returns data for a quiver plot.
    :param (list|ndarray) x: x coordinates of the arrow locations
    :param (list|ndarray) y: y coordinates of the arrow locations
    :param (list|ndarray) u: x components of the arrow vectors
    :param (list|ndarray) v: y components of the arrow vectors
    :param (float in [0,1]) scale: scales size of the arrows (ideally to
        avoid overlap). Default = .1
    :param (float in [0,1]) arrow_scale: value multiplied to length of barb
        to get length of arrowhead. Default = .3
    :param (angle in radians) angle: angle of arrowhead. Default = pi/9
    :param (positive float) scaleratio: the ratio between the scale of the y-axis
        and the scale of the x-axis (scale_y / scale_x). Default = None, the
        scale ratio is not fixed.
    :param kwargs: kwargs passed through plotly.graph_objs.Scatter
        for more information on valid kwargs call
        help(plotly.graph_objs.Scatter)
    :rtype (dict): returns a representation of quiver figure.
    Example 1: Trivial Quiver
    >>> from plotly.figure_factory import create_quiver
    >>> import math
    >>> # 1 Arrow from (0,0) to (1,1)
    >>> fig = create_quiver(x=[0], y=[0], u=[1], v=[1], scale=1)
    >>> fig.show()
    Example 2: Quiver plot using meshgrid
    >>> from plotly.figure_factory import create_quiver
    >>> import numpy as np
    >>> import math
    >>> # Add data
    >>> x,y = np.meshgrid(np.arange(0, 2, .2), np.arange(0, 2, .2))
    >>> u = np.cos(x)*y
    >>> v = np.sin(x)*y
    >>> #Create quiver
    >>> fig = create_quiver(x, y, u, v)
    >>> fig.show()
    Example 3: Styling the quiver plot
    >>> from plotly.figure_factory import create_quiver
    >>> import numpy as np
    >>> import math
    >>> # Add data
    >>> x, y = np.meshgrid(np.arange(-np.pi, math.pi, .5),
    ...                    np.arange(-math.pi, math.pi, .5))
    >>> u = np.cos(x)*y
    >>> v = np.sin(x)*y
    >>> # Create quiver
    >>> fig = create_quiver(x, y, u, v, scale=.2, arrow_scale=.3, angle=math.pi/6,
    ...                     name='Wind Velocity', line=dict(width=1))
    >>> # Add title to layout
    >>> fig.update_layout(title='Quiver Plot') # doctest: +SKIP
    >>> fig.show()
    Example 4: Forcing a fixed scale ratio to maintain the arrow length
    >>> from plotly.figure_factory import create_quiver
    >>> import numpy as np
    >>> # Add data
    >>> x,y = np.meshgrid(np.arange(0.5, 3.5, .5), np.arange(0.5, 4.5, .5))
    >>> u = x
    >>> v = y
    >>> angle = np.arctan(v / u)
    >>> norm = 0.25
    >>> u = norm * np.cos(angle)
    >>> v = norm * np.sin(angle)
    >>> # Create quiver with a fixed scale ratio
    >>> fig = create_quiver(x, y, u, v, scale = 1, scaleratio = 0.5)
    >>> fig.show()
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__quiver.create_quiver.txt 
 | 
	def create_quiver(
    x, y, u, v, scale=0.1, arrow_scale=0.3, angle=math.pi / 9, scaleratio=None, **kwargs
):
    """
    Returns data for a quiver plot.
    :param (list|ndarray) x: x coordinates of the arrow locations
    :param (list|ndarray) y: y coordinates of the arrow locations
    :param (list|ndarray) u: x components of the arrow vectors
    :param (list|ndarray) v: y components of the arrow vectors
    :param (float in [0,1]) scale: scales size of the arrows (ideally to
        avoid overlap). Default = .1
    :param (float in [0,1]) arrow_scale: value multiplied to length of barb
        to get length of arrowhead. Default = .3
    :param (angle in radians) angle: angle of arrowhead. Default = pi/9
    :param (positive float) scaleratio: the ratio between the scale of the y-axis
        and the scale of the x-axis (scale_y / scale_x). Default = None, the
        scale ratio is not fixed.
    :param kwargs: kwargs passed through plotly.graph_objs.Scatter
        for more information on valid kwargs call
        help(plotly.graph_objs.Scatter)
    :rtype (dict): returns a representation of quiver figure.
    Example 1: Trivial Quiver
    >>> from plotly.figure_factory import create_quiver
    >>> import math
    >>> # 1 Arrow from (0,0) to (1,1)
    >>> fig = create_quiver(x=[0], y=[0], u=[1], v=[1], scale=1)
    >>> fig.show()
    Example 2: Quiver plot using meshgrid
    >>> from plotly.figure_factory import create_quiver
    >>> import numpy as np
    >>> import math
    >>> # Add data
    >>> x,y = np.meshgrid(np.arange(0, 2, .2), np.arange(0, 2, .2))
    >>> u = np.cos(x)*y
    >>> v = np.sin(x)*y
    >>> #Create quiver
    >>> fig = create_quiver(x, y, u, v)
    >>> fig.show()
    Example 3: Styling the quiver plot
    >>> from plotly.figure_factory import create_quiver
    >>> import numpy as np
    >>> import math
    >>> # Add data
    >>> x, y = np.meshgrid(np.arange(-np.pi, math.pi, .5),
    ...                    np.arange(-math.pi, math.pi, .5))
    >>> u = np.cos(x)*y
    >>> v = np.sin(x)*y
    >>> # Create quiver
    >>> fig = create_quiver(x, y, u, v, scale=.2, arrow_scale=.3, angle=math.pi/6,
    ...                     name='Wind Velocity', line=dict(width=1))
    >>> # Add title to layout
    >>> fig.update_layout(title='Quiver Plot') # doctest: +SKIP
    >>> fig.show()
    Example 4: Forcing a fixed scale ratio to maintain the arrow length
    >>> from plotly.figure_factory import create_quiver
    >>> import numpy as np
    >>> # Add data
    >>> x,y = np.meshgrid(np.arange(0.5, 3.5, .5), np.arange(0.5, 4.5, .5))
    >>> u = x
    >>> v = y
    >>> angle = np.arctan(v / u)
    >>> norm = 0.25
    >>> u = norm * np.cos(angle)
    >>> v = norm * np.sin(angle)
    >>> # Create quiver with a fixed scale ratio
    >>> fig = create_quiver(x, y, u, v, scale = 1, scaleratio = 0.5)
    >>> fig.show()
    """
    utils.validate_equal_length(x, y, u, v)
    utils.validate_positive_scalars(arrow_scale=arrow_scale, scale=scale)
    if scaleratio is None:
        quiver_obj = _Quiver(x, y, u, v, scale, arrow_scale, angle)
    else:
        quiver_obj = _Quiver(x, y, u, v, scale, arrow_scale, angle, scaleratio)
    barb_x, barb_y = quiver_obj.get_barbs()
    arrow_x, arrow_y = quiver_obj.get_quiver_arrows()
    quiver_plot = graph_objs.Scatter(
        x=barb_x + arrow_x, y=barb_y + arrow_y, mode="lines", **kwargs
    )
    data = [quiver_plot]
    if scaleratio is None:
        layout = graph_objs.Layout(hovermode="closest")
    else:
        layout = graph_objs.Layout(
            hovermode="closest", yaxis=dict(scaleratio=scaleratio, scaleanchor="x")
        )
    return graph_objs.Figure(data=data, layout=layout)
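
A short sketch of the `scaleratio` branch above, assuming a standard plotly installation (the coordinates are illustrative): when `scaleratio` is given, the returned layout pins `yaxis.scaleanchor` to "x", so arrow lengths are preserved when the axes are rescaled.

from plotly.figure_factory import create_quiver

fig = create_quiver(x=[0, 1], y=[0, 1], u=[1, 0], v=[0, 1], scale=1, scaleratio=1.0)
print(fig.layout.yaxis.scaleanchor)  # "x"
print(fig.layout.yaxis.scaleratio)   # 1.0
print(fig.layout.hovermode)          # "closest"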
 
 | 
	_quiver.create_quiver 
 | 
	Self-Contained 
 | 
					
	plotly.py 
 | 
	39 
 | 
	packages/python/plotly/plotly/graph_objs/_sankey.py 
 | 
	    def __init__(
        self,
        arg=None,
        arrangement=None,
        customdata=None,
        customdatasrc=None,
        domain=None,
        hoverinfo=None,
        hoverlabel=None,
        ids=None,
        idssrc=None,
        legend=None,
        legendgrouptitle=None,
        legendrank=None,
        legendwidth=None,
        link=None,
        meta=None,
        metasrc=None,
        name=None,
        node=None,
        orientation=None,
        selectedpoints=None,
        stream=None,
        textfont=None,
        uid=None,
        uirevision=None,
        valueformat=None,
        valuesuffix=None,
        visible=None,
        **kwargs,
    ):
        """
        Construct a new Sankey object
        Sankey plots for network flow data analysis. The nodes are
        specified in `nodes` and the links between sources and targets
        in `links`. The colors are set in `nodes[i].color` and
        `links[i].color`, otherwise defaults are used.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.Sankey`
        arrangement
            If value is `snap` (the default), the node arrangement
            is assisted by automatic snapping of elements to
            preserve space between nodes specified via `nodepad`.
            If value is `perpendicular`, the nodes can only move
            along a line perpendicular to the flow. If value is
            `freeform`, the nodes can freely move on the plane. If
            value is `fixed`, the nodes are stationary.
        customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items in
            the markers' DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        domain
            :class:`plotly.graph_objects.sankey.Domain` instance or
            dict with compatible properties
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired. Note that this attribute is
            superseded by `node.hoverinfo` and `link.hoverinfo` for
            nodes and links respectively.
        hoverlabel
            :class:`plotly.graph_objects.sankey.Hoverlabel`
            instance or dict with compatible properties
        ids
            Assigns id labels to each datum. These ids provide object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgrouptitle
            :class:`plotly.graph_objects.sankey.Legendgrouptitle`
            instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When there are
            unranked or equal-rank items, shapes are displayed
            after traces, i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        link
            The links of the Sankey plot.
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        node
            The nodes of the Sankey plot.
        orientation
            Sets the orientation of the Sankey diagram.
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas
            any other non-array value means no selection at all, where
            the `selected` and `unselected` styles have no effect.
        stream
            :class:`plotly.graph_objects.sankey.Stream` instance or
            dict with compatible properties
        textfont
            Sets the font for node labels
        uid
            Assign an id to this trace. Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        valueformat
            Sets the value formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
        valuesuffix
            Adds a unit to follow the value in the hover tooltip.
            Add a space if a separation is necessary from the
            value.
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        Returns
        -------
        Sankey
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__sankey.Sankey.__init__.txt 
 | 
	    def __init__(
        self,
        arg=None,
        arrangement=None,
        customdata=None,
        customdatasrc=None,
        domain=None,
        hoverinfo=None,
        hoverlabel=None,
        ids=None,
        idssrc=None,
        legend=None,
        legendgrouptitle=None,
        legendrank=None,
        legendwidth=None,
        link=None,
        meta=None,
        metasrc=None,
        name=None,
        node=None,
        orientation=None,
        selectedpoints=None,
        stream=None,
        textfont=None,
        uid=None,
        uirevision=None,
        valueformat=None,
        valuesuffix=None,
        visible=None,
        **kwargs,
    ):
        """
        Construct a new Sankey object
        Sankey plots for network flow data analysis. The nodes are
        specified in `nodes` and the links between sources and targets
        in `links`. The colors are set in `nodes[i].color` and
        `links[i].color`, otherwise defaults are used.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.Sankey`
        arrangement
            If value is `snap` (the default), the node arrangement
            is assisted by automatic snapping of elements to
            preserve space between nodes specified via `nodepad`.
            If value is `perpendicular`, the nodes can only move
            along a line perpendicular to the flow. If value is
            `freeform`, the nodes can freely move on the plane. If
            value is `fixed`, the nodes are stationary.
        customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items in
            the markers' DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        domain
            :class:`plotly.graph_objects.sankey.Domain` instance or
            dict with compatible properties
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired. Note that this attribute is
            superseded by `node.hoverinfo` and `link.hoverinfo` for
            nodes and links respectively.
        hoverlabel
            :class:`plotly.graph_objects.sankey.Hoverlabel`
            instance or dict with compatible properties
        ids
            Assigns id labels to each datum. These ids provide object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgrouptitle
            :class:`plotly.graph_objects.sankey.Legendgrouptitle`
            instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When there are
            unranked or equal-rank items, shapes are displayed
            after traces, i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        link
            The links of the Sankey plot.
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        node
            The nodes of the Sankey plot.
        orientation
            Sets the orientation of the Sankey diagram.
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas
            any other non-array value means no selection at all, where
            the `selected` and `unselected` styles have no effect.
        stream
            :class:`plotly.graph_objects.sankey.Stream` instance or
            dict with compatible properties
        textfont
            Sets the font for node labels
        uid
            Assign an id to this trace. Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        valueformat
            Sets the value formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
        valuesuffix
            Adds a unit to follow the value in the hover tooltip.
            Add a space if a separation is necessary from the
            value.
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        Returns
        -------
        Sankey
        """
        super(Sankey, self).__init__("sankey")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.Sankey
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Sankey`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("arrangement", None)
        _v = arrangement if arrangement is not None else _v
        if _v is not None:
            self["arrangement"] = _v
        _v = arg.pop("customdata", None)
        _v = customdata if customdata is not None else _v
        if _v is not None:
            self["customdata"] = _v
        _v = arg.pop("customdatasrc", None)
        _v = customdatasrc if customdatasrc is not None else _v
        if _v is not None:
            self["customdatasrc"] = _v
        _v = arg.pop("domain", None)
        _v = domain if domain is not None else _v
        if _v is not None:
            self["domain"] = _v
        _v = arg.pop("hoverinfo", None)
        _v = hoverinfo if hoverinfo is not None else _v
        if _v is not None:
            self["hoverinfo"] = _v
        _v = arg.pop("hoverlabel", None)
        _v = hoverlabel if hoverlabel is not None else _v
        if _v is not None:
            self["hoverlabel"] = _v
        _v = arg.pop("ids", None)
        _v = ids if ids is not None else _v
        if _v is not None:
            self["ids"] = _v
        _v = arg.pop("idssrc", None)
        _v = idssrc if idssrc is not None else _v
        if _v is not None:
            self["idssrc"] = _v
        _v = arg.pop("legend", None)
        _v = legend if legend is not None else _v
        if _v is not None:
            self["legend"] = _v
        _v = arg.pop("legendgrouptitle", None)
        _v = legendgrouptitle if legendgrouptitle is not None else _v
        if _v is not None:
            self["legendgrouptitle"] = _v
        _v = arg.pop("legendrank", None)
        _v = legendrank if legendrank is not None else _v
        if _v is not None:
            self["legendrank"] = _v
        _v = arg.pop("legendwidth", None)
        _v = legendwidth if legendwidth is not None else _v
        if _v is not None:
            self["legendwidth"] = _v
        _v = arg.pop("link", None)
        _v = link if link is not None else _v
        if _v is not None:
            self["link"] = _v
        _v = arg.pop("meta", None)
        _v = meta if meta is not None else _v
        if _v is not None:
            self["meta"] = _v
        _v = arg.pop("metasrc", None)
        _v = metasrc if metasrc is not None else _v
        if _v is not None:
            self["metasrc"] = _v
        _v = arg.pop("name", None)
        _v = name if name is not None else _v
        if _v is not None:
            self["name"] = _v
        _v = arg.pop("node", None)
        _v = node if node is not None else _v
        if _v is not None:
            self["node"] = _v
        _v = arg.pop("orientation", None)
        _v = orientation if orientation is not None else _v
        if _v is not None:
            self["orientation"] = _v
        _v = arg.pop("selectedpoints", None)
        _v = selectedpoints if selectedpoints is not None else _v
        if _v is not None:
            self["selectedpoints"] = _v
        _v = arg.pop("stream", None)
        _v = stream if stream is not None else _v
        if _v is not None:
            self["stream"] = _v
        _v = arg.pop("textfont", None)
        _v = textfont if textfont is not None else _v
        if _v is not None:
            self["textfont"] = _v
        _v = arg.pop("uid", None)
        _v = uid if uid is not None else _v
        if _v is not None:
            self["uid"] = _v
        _v = arg.pop("uirevision", None)
        _v = uirevision if uirevision is not None else _v
        if _v is not None:
            self["uirevision"] = _v
        _v = arg.pop("valueformat", None)
        _v = valueformat if valueformat is not None else _v
        if _v is not None:
            self["valueformat"] = _v
        _v = arg.pop("valuesuffix", None)
        _v = valuesuffix if valuesuffix is not None else _v
        if _v is not None:
            self["valuesuffix"] = _v
        _v = arg.pop("visible", None)
        _v = visible if visible is not None else _v
        if _v is not None:
            self["visible"] = _v
        # Read-only literals
        # ------------------
        self._props["type"] = "sankey"
        arg.pop("type", None)
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
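A minimal usage sketch (not part of the dataset row): building a Sankey trace through this constructor with hypothetical node labels and link indices, assuming plotly is installed and imported as plotly.graph_objects.

import plotly.graph_objects as go

# Three nodes (A, B, C) and two links, A->C and B->C, carrying flow values 8 and 4.
fig = go.Figure(
    go.Sankey(
        node=dict(label=["A", "B", "C"], pad=15, thickness=20),
        link=dict(source=[0, 1], target=[2, 2], value=[8, 4]),
        valueformat=".0f",
        valuesuffix=" units",
    )
)
fig.update_layout(title_text="Sankey constructor sketch")
fig.show()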
 
 | 
	_sankey.Sankey.__init__ 
 | 
	Self-Contained 
 | 
					
	plotly.py 
 | 
	45 
 | 
	packages/python/plotly/plotly/figure_factory/_scatterplot.py 
 | 
	def create_scatterplotmatrix(
    df,
    index=None,
    endpts=None,
    diag="scatter",
    height=500,
    width=500,
    size=6,
    title="Scatterplot Matrix",
    colormap=None,
    colormap_type="cat",
    dataframe=None,
    headers=None,
    index_vals=None,
    **kwargs,
):
    """
    Returns data for a scatterplot matrix;
    **deprecated**,
    use instead the plotly.graph_objects trace
    :class:`plotly.graph_objects.Splom`.
    :param (array) df: array of the data with column headers
    :param (str) index: name of the index column in data array
    :param (list|tuple) endpts: takes an increasing sequence of numbers
        that defines intervals on the real line. They are used to group
        the entries in an index of numbers into their corresponding
        interval and therefore can be treated as categorical data
    :param (str) diag: sets the chart type for the main diagonal plots.
        The options are 'scatter', 'histogram' and 'box'.
    :param (int|float) height: sets the height of the chart
    :param (int|float) width: sets the width of the chart
    :param (float) size: sets the marker size (in px)
    :param (str) title: the title label of the scatterplot matrix
    :param (str|tuple|list|dict) colormap: either a plotly scale name,
        an rgb or hex color, a color tuple, a list of colors or a
        dictionary. An rgb color is of the form 'rgb(x, y, z)' where
        x, y and z belong to the interval [0, 255] and a color tuple is a
        tuple of the form (a, b, c) where a, b and c belong to [0, 1].
        If colormap is a list, it must contain valid color types as its
        members.
        If colormap is a dictionary, all the string entries in
        the index column must be a key in colormap. In this case, the
        colormap_type is forced to 'cat' or categorical
    :param (str) colormap_type: determines how colormap is interpreted.
        Valid choices are 'seq' (sequential) and 'cat' (categorical). If
        'seq' is selected, only the first two colors in colormap will be
        considered (when colormap is a list) and the index values will be
        linearly interpolated between those two colors. This option is
        forced if all index values are numeric.
        If 'cat' is selected, a color from colormap will be assigned to
        each category from index, including the intervals if endpts is
        being used
    :param (dict) **kwargs: a dictionary of scatterplot arguments.
        The only forbidden parameters are 'size', 'color' and
        'colorscale' in 'marker'
    Example 1: Vanilla Scatterplot Matrix
    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix
    >>> import numpy as np
    >>> import pandas as pd
    >>> # Create dataframe
    >>> df = pd.DataFrame(np.random.randn(10, 2),
    ...                 columns=['Column 1', 'Column 2'])
    >>> # Create scatterplot matrix
    >>> fig = create_scatterplotmatrix(df)
    >>> fig.show()
    Example 2: Indexing a Column
    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix
    >>> import numpy as np
    >>> import pandas as pd
    >>> # Create dataframe with index
    >>> df = pd.DataFrame(np.random.randn(10, 2),
    ...                    columns=['A', 'B'])
    >>> # Add another column of strings to the dataframe
    >>> df['Fruit'] = pd.Series(['apple', 'apple', 'grape', 'apple', 'apple',
    ...                          'grape', 'pear', 'pear', 'apple', 'pear'])
    >>> # Create scatterplot matrix
    >>> fig = create_scatterplotmatrix(df, index='Fruit', size=10)
    >>> fig.show()
    Example 3: Styling the Diagonal Subplots
    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix
    >>> import numpy as np
    >>> import pandas as pd
    >>> # Create dataframe with index
    >>> df = pd.DataFrame(np.random.randn(10, 4),
    ...                    columns=['A', 'B', 'C', 'D'])
    >>> # Add another column of strings to the dataframe
    >>> df['Fruit'] = pd.Series(['apple', 'apple', 'grape', 'apple', 'apple',
    ...                          'grape', 'pear', 'pear', 'apple', 'pear'])
    >>> # Create scatterplot matrix
    >>> fig = create_scatterplotmatrix(df, diag='box', index='Fruit', height=1000,
    ...                                width=1000)
    >>> fig.show()
    Example 4: Use a Theme to Style the Subplots
    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix
    >>> import numpy as np
    >>> import pandas as pd
    >>> # Create dataframe with random data
    >>> df = pd.DataFrame(np.random.randn(100, 3),
    ...                    columns=['A', 'B', 'C'])
    >>> # Create scatterplot matrix using a built-in
    >>> # Plotly palette scale and indexing column 'A'
    >>> fig = create_scatterplotmatrix(df, diag='histogram', index='A',
    ...                                colormap='Blues', height=800, width=800)
    >>> fig.show()
    Example 5: Example 4 with Interval Factoring
    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix
    >>> import numpy as np
    >>> import pandas as pd
    >>> # Create dataframe with random data
    >>> df = pd.DataFrame(np.random.randn(100, 3),
    ...                    columns=['A', 'B', 'C'])
    >>> # Create scatterplot matrix using a list of 2 rgb tuples
    >>> # and endpoints at -1, 0 and 1
    >>> fig = create_scatterplotmatrix(df, diag='histogram', index='A',
    ...                                colormap=['rgb(140, 255, 50)',
    ...                                          'rgb(170, 60, 115)', '#6c4774',
    ...                                          (0.5, 0.1, 0.8)],
    ...                                endpts=[-1, 0, 1], height=800, width=800)
    >>> fig.show()
    Example 6: Using the colormap as a Dictionary
    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix
    >>> import numpy as np
    >>> import pandas as pd
    >>> import random
    >>> # Create dataframe with random data
    >>> df = pd.DataFrame(np.random.randn(100, 3),
    ...                    columns=['Column A',
    ...                             'Column B',
    ...                             'Column C'])
    >>> # Add new color column to dataframe
    >>> new_column = []
    >>> strange_colors = ['turquoise', 'limegreen', 'goldenrod']
    >>> for j in range(100):
    ...     new_column.append(random.choice(strange_colors))
    >>> df['Colors'] = pd.Series(new_column, index=df.index)
    >>> # Create scatterplot matrix using a dictionary of hex color values
    >>> # which correspond to actual color names in 'Colors' column
    >>> fig = create_scatterplotmatrix(
    ...     df, diag='box', index='Colors',
    ...     colormap= dict(
    ...         turquoise = '#00F5FF',
    ...         limegreen = '#32CD32',
    ...         goldenrod = '#DAA520'
    ...     ),
    ...     colormap_type='cat',
    ...     height=800, width=800
    ... )
    >>> fig.show()
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__scatterplot.create_scatterplotmatrix.txt 
 | 
	def create_scatterplotmatrix(
    df,
    index=None,
    endpts=None,
    diag="scatter",
    height=500,
    width=500,
    size=6,
    title="Scatterplot Matrix",
    colormap=None,
    colormap_type="cat",
    dataframe=None,
    headers=None,
    index_vals=None,
    **kwargs,
):
    """
    Returns data for a scatterplot matrix;
    **deprecated**,
    use instead the plotly.graph_objects trace
    :class:`plotly.graph_objects.Splom`.
    :param (array) df: array of the data with column headers
    :param (str) index: name of the index column in data array
    :param (list|tuple) endpts: takes an increasing sequence of numbers
        that defines intervals on the real line. They are used to group
        the entries in an index of numbers into their corresponding
        interval and therefore can be treated as categorical data
    :param (str) diag: sets the chart type for the main diagonal plots.
        The options are 'scatter', 'histogram' and 'box'.
    :param (int|float) height: sets the height of the chart
    :param (int|float) width: sets the width of the chart
    :param (float) size: sets the marker size (in px)
    :param (str) title: the title label of the scatterplot matrix
    :param (str|tuple|list|dict) colormap: either a plotly scale name,
        an rgb or hex color, a color tuple, a list of colors or a
        dictionary. An rgb color is of the form 'rgb(x, y, z)' where
        x, y and z belong to the interval [0, 255] and a color tuple is a
        tuple of the form (a, b, c) where a, b and c belong to [0, 1].
        If colormap is a list, it must contain valid color types as its
        members.
        If colormap is a dictionary, all the string entries in
        the index column must be a key in colormap. In this case, the
        colormap_type is forced to 'cat' or categorical
    :param (str) colormap_type: determines how colormap is interpreted.
        Valid choices are 'seq' (sequential) and 'cat' (categorical). If
        'seq' is selected, only the first two colors in colormap will be
        considered (when colormap is a list) and the index values will be
        linearly interpolated between those two colors. This option is
        forced if all index values are numeric.
        If 'cat' is selected, a color from colormap will be assigned to
        each category from index, including the intervals if endpts is
        being used
    :param (dict) **kwargs: a dictionary of scatterplot arguments.
        The only forbidden parameters are 'size', 'color' and
        'colorscale' in 'marker'
    Example 1: Vanilla Scatterplot Matrix
    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix
    >>> import numpy as np
    >>> import pandas as pd
    >>> # Create dataframe
    >>> df = pd.DataFrame(np.random.randn(10, 2),
    ...                 columns=['Column 1', 'Column 2'])
    >>> # Create scatterplot matrix
    >>> fig = create_scatterplotmatrix(df)
    >>> fig.show()
    Example 2: Indexing a Column
    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix
    >>> import numpy as np
    >>> import pandas as pd
    >>> # Create dataframe with index
    >>> df = pd.DataFrame(np.random.randn(10, 2),
    ...                    columns=['A', 'B'])
    >>> # Add another column of strings to the dataframe
    >>> df['Fruit'] = pd.Series(['apple', 'apple', 'grape', 'apple', 'apple',
    ...                          'grape', 'pear', 'pear', 'apple', 'pear'])
    >>> # Create scatterplot matrix
    >>> fig = create_scatterplotmatrix(df, index='Fruit', size=10)
    >>> fig.show()
    Example 3: Styling the Diagonal Subplots
    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix
    >>> import numpy as np
    >>> import pandas as pd
    >>> # Create dataframe with index
    >>> df = pd.DataFrame(np.random.randn(10, 4),
    ...                    columns=['A', 'B', 'C', 'D'])
    >>> # Add another column of strings to the dataframe
    >>> df['Fruit'] = pd.Series(['apple', 'apple', 'grape', 'apple', 'apple',
    ...                          'grape', 'pear', 'pear', 'apple', 'pear'])
    >>> # Create scatterplot matrix
    >>> fig = create_scatterplotmatrix(df, diag='box', index='Fruit', height=1000,
    ...                                width=1000)
    >>> fig.show()
    Example 4: Use a Theme to Style the Subplots
    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix
    >>> import numpy as np
    >>> import pandas as pd
    >>> # Create dataframe with random data
    >>> df = pd.DataFrame(np.random.randn(100, 3),
    ...                    columns=['A', 'B', 'C'])
    >>> # Create scatterplot matrix using a built-in
    >>> # Plotly palette scale and indexing column 'A'
    >>> fig = create_scatterplotmatrix(df, diag='histogram', index='A',
    ...                                colormap='Blues', height=800, width=800)
    >>> fig.show()
    Example 5: Example 4 with Interval Factoring
    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix
    >>> import numpy as np
    >>> import pandas as pd
    >>> # Create dataframe with random data
    >>> df = pd.DataFrame(np.random.randn(100, 3),
    ...                    columns=['A', 'B', 'C'])
    >>> # Create scatterplot matrix using a list of 2 rgb tuples
    >>> # and endpoints at -1, 0 and 1
    >>> fig = create_scatterplotmatrix(df, diag='histogram', index='A',
    ...                                colormap=['rgb(140, 255, 50)',
    ...                                          'rgb(170, 60, 115)', '#6c4774',
    ...                                          (0.5, 0.1, 0.8)],
    ...                                endpts=[-1, 0, 1], height=800, width=800)
    >>> fig.show()
    Example 6: Using the colormap as a Dictionary
    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix
    >>> import numpy as np
    >>> import pandas as pd
    >>> import random
    >>> # Create dataframe with random data
    >>> df = pd.DataFrame(np.random.randn(100, 3),
    ...                    columns=['Column A',
    ...                             'Column B',
    ...                             'Column C'])
    >>> # Add new color column to dataframe
    >>> new_column = []
    >>> strange_colors = ['turquoise', 'limegreen', 'goldenrod']
    >>> for j in range(100):
    ...     new_column.append(random.choice(strange_colors))
    >>> df['Colors'] = pd.Series(new_column, index=df.index)
    >>> # Create scatterplot matrix using a dictionary of hex color values
    >>> # which correspond to actual color names in 'Colors' column
    >>> fig = create_scatterplotmatrix(
    ...     df, diag='box', index='Colors',
    ...     colormap= dict(
    ...         turquoise = '#00F5FF',
    ...         limegreen = '#32CD32',
    ...         goldenrod = '#DAA520'
    ...     ),
    ...     colormap_type='cat',
    ...     height=800, width=800
    ... )
    >>> fig.show()
    """
    # TODO: protected until #282
    if dataframe is None:
        dataframe = []
    if headers is None:
        headers = []
    if index_vals is None:
        index_vals = []
    validate_scatterplotmatrix(df, index, diag, colormap_type, **kwargs)
    # Validate colormap
    if isinstance(colormap, dict):
        colormap = clrs.validate_colors_dict(colormap, "rgb")
    elif isinstance(colormap, str) and "rgb" not in colormap and "#" not in colormap:
        if colormap not in clrs.PLOTLY_SCALES.keys():
            raise exceptions.PlotlyError(
                "If 'colormap' is a string, it must be the name "
                "of a Plotly Colorscale. The available colorscale "
                "names are {}".format(clrs.PLOTLY_SCALES.keys())
            )
        else:
            # TODO change below to allow the correct Plotly colorscale
            colormap = clrs.colorscale_to_colors(clrs.PLOTLY_SCALES[colormap])
            # keep only first and last item - fix later
            colormap = [colormap[0]] + [colormap[-1]]
        colormap = clrs.validate_colors(colormap, "rgb")
    else:
        colormap = clrs.validate_colors(colormap, "rgb")
    if not index:
        for name in df:
            headers.append(name)
        for name in headers:
            dataframe.append(df[name].values.tolist())
        # Check for same data-type in df columns
        utils.validate_dataframe(dataframe)
        figure = scatterplot(
            dataframe, headers, diag, size, height, width, title, **kwargs
        )
        return figure
    else:
        # Validate index selection
        if index not in df:
            raise exceptions.PlotlyError(
                "Make sure you set the index "
                "input variable to one of the "
                "column names of your "
                "dataframe."
            )
        index_vals = df[index].values.tolist()
        for name in df:
            if name != index:
                headers.append(name)
        for name in headers:
            dataframe.append(df[name].values.tolist())
        # check for same data-type in each df column
        utils.validate_dataframe(dataframe)
        utils.validate_index(index_vals)
        # check if all colormap keys are in the index
        # if colormap is a dictionary
        if isinstance(colormap, dict):
            for key in colormap:
                if not all(index in colormap for index in index_vals):
                    raise exceptions.PlotlyError(
                        "If colormap is a "
                        "dictionary, all the "
                        "names in the index "
                        "must be keys."
                    )
            figure = scatterplot_dict(
                dataframe,
                headers,
                diag,
                size,
                height,
                width,
                title,
                index,
                index_vals,
                endpts,
                colormap,
                colormap_type,
                **kwargs,
            )
            return figure
        else:
            figure = scatterplot_theme(
                dataframe,
                headers,
                diag,
                size,
                height,
                width,
                title,
                index,
                index_vals,
                endpts,
                colormap,
                colormap_type,
                **kwargs,
            )
            return figure
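Because the docstring marks this figure factory as deprecated in favor of :class:`plotly.graph_objects.Splom`, here is a minimal sketch of the modern equivalent; the column names and random data are illustrative only, assuming numpy and pandas are available.

import numpy as np
import pandas as pd
import plotly.graph_objects as go

df = pd.DataFrame(np.random.randn(50, 3), columns=["A", "B", "C"])

# One Splom dimension per dataframe column reproduces the scatterplot-matrix layout.
fig = go.Figure(
    go.Splom(
        dimensions=[dict(label=col, values=df[col]) for col in df.columns],
        marker=dict(size=6),
    )
)
fig.update_layout(title="Splom equivalent of create_scatterplotmatrix",
                  height=500, width=500)
fig.show()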
 
 | 
	_scatterplot.create_scatterplotmatrix 
 | 
	File-Level 
 | 
					
	plotly.py 
 | 
	48 
 | 
	packages/python/plotly/plotly/graph_objs/layout/_selection.py 
 | 
	    def __init__(
        self,
        arg=None,
        line=None,
        name=None,
        opacity=None,
        path=None,
        templateitemname=None,
        type=None,
        x0=None,
        x1=None,
        xref=None,
        y0=None,
        y1=None,
        yref=None,
        **kwargs,
    ):
        """
        Construct a new Selection object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.Selection`
        line
            :class:`plotly.graph_objects.layout.selection.Line`
            instance or dict with compatible properties
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        opacity
            Sets the opacity of the selection.
        path
            For `type` "path" - a valid SVG path similar to
            `shapes.path` in data coordinates. Allowed segments
            are: M, L and Z.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        type
            Specifies the selection type to be drawn. If "rect", a
            rectangle is drawn linking (`x0`,`y0`), (`x1`,`y0`),
            (`x1`,`y1`) and (`x0`,`y1`). If "path", draw a custom
            SVG path using `path`.
        x0
            Sets the selection's starting x position.
        x1
            Sets the selection's end x position.
        xref
            Sets the selection's x coordinate axis. If set to a x
            axis id (e.g. "x" or "x2"), the `x` position refers to
            a x coordinate. If set to "paper", the `x` position
            refers to the distance from the left of the plotting
            area in normalized coordinates where 0 (1) corresponds
            to the left (right). If set to a x axis ID followed by
            "domain" (separated by a space), the position behaves
            like for "paper", but refers to the distance in
            fractions of the domain length from the left of the
            domain of that axis: e.g., *x2 domain* refers to the
            domain of the second x  axis and a x position of 0.5
            refers to the point between the left and the right of
            the domain of the second x axis.
        y0
            Sets the selection's starting y position.
        y1
            Sets the selection's end y position.
        yref
            Sets the selection's y coordinate axis. If set to a y
            axis id (e.g. "y" or "y2"), the `y` position refers to
            a y coordinate. If set to "paper", the `y` position
            refers to the distance from the bottom of the plotting
            area in normalized coordinates where 0 (1) corresponds
            to the bottom (top). If set to a y axis ID followed by
            "domain" (separated by a space), the position behaves
            like for "paper", but refers to the distance in
            fractions of the domain length from the bottom of the
            domain of that axis: e.g., *y2 domain* refers to the
            domain of the second y  axis and a y position of 0.5
            refers to the point between the bottom and the top of
            the domain of the second y axis.
        Returns
        -------
        Selection
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__selection.Selection.__init__.txt 
 | 
	    def __init__(
        self,
        arg=None,
        line=None,
        name=None,
        opacity=None,
        path=None,
        templateitemname=None,
        type=None,
        x0=None,
        x1=None,
        xref=None,
        y0=None,
        y1=None,
        yref=None,
        **kwargs,
    ):
        """
        Construct a new Selection object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.Selection`
        line
            :class:`plotly.graph_objects.layout.selection.Line`
            instance or dict with compatible properties
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        opacity
            Sets the opacity of the selection.
        path
            For `type` "path" - a valid SVG path similar to
            `shapes.path` in data coordinates. Allowed segments
            are: M, L and Z.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        type
            Specifies the selection type to be drawn. If "rect", a
            rectangle is drawn linking (`x0`,`y0`), (`x1`,`y0`),
            (`x1`,`y1`) and (`x0`,`y1`). If "path", draw a custom
            SVG path using `path`.
        x0
            Sets the selection's starting x position.
        x1
            Sets the selection's end x position.
        xref
            Sets the selection's x coordinate axis. If set to a x
            axis id (e.g. "x" or "x2"), the `x` position refers to
            a x coordinate. If set to "paper", the `x` position
            refers to the distance from the left of the plotting
            area in normalized coordinates where 0 (1) corresponds
            to the left (right). If set to a x axis ID followed by
            "domain" (separated by a space), the position behaves
            like for "paper", but refers to the distance in
            fractions of the domain length from the left of the
            domain of that axis: e.g., *x2 domain* refers to the
            domain of the second x  axis and a x position of 0.5
            refers to the point between the left and the right of
            the domain of the second x axis.
        y0
            Sets the selection's starting y position.
        y1
            Sets the selection's end y position.
        yref
            Sets the selection's y coordinate axis. If set to a y
            axis id (e.g. "y" or "y2"), the `y` position refers to
            a y coordinate. If set to "paper", the `y` position
            refers to the distance from the bottom of the plotting
            area in normalized coordinates where 0 (1) corresponds
            to the bottom (top). If set to a y axis ID followed by
            "domain" (separated by a space), the position behaves
            like for "paper", but refers to the distance in
            fractions of the domain length from the bottom of the
            domain of that axis: e.g., *y2 domain* refers to the
            domain of the second y  axis and a y position of 0.5
            refers to the point between the bottom and the top of
            the domain of the second y axis.
        Returns
        -------
        Selection
        """
        super(Selection, self).__init__("selections")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.Selection
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Selection`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("line", None)
        _v = line if line is not None else _v
        if _v is not None:
            self["line"] = _v
        _v = arg.pop("name", None)
        _v = name if name is not None else _v
        if _v is not None:
            self["name"] = _v
        _v = arg.pop("opacity", None)
        _v = opacity if opacity is not None else _v
        if _v is not None:
            self["opacity"] = _v
        _v = arg.pop("path", None)
        _v = path if path is not None else _v
        if _v is not None:
            self["path"] = _v
        _v = arg.pop("templateitemname", None)
        _v = templateitemname if templateitemname is not None else _v
        if _v is not None:
            self["templateitemname"] = _v
        _v = arg.pop("type", None)
        _v = type if type is not None else _v
        if _v is not None:
            self["type"] = _v
        _v = arg.pop("x0", None)
        _v = x0 if x0 is not None else _v
        if _v is not None:
            self["x0"] = _v
        _v = arg.pop("x1", None)
        _v = x1 if x1 is not None else _v
        if _v is not None:
            self["x1"] = _v
        _v = arg.pop("xref", None)
        _v = xref if xref is not None else _v
        if _v is not None:
            self["xref"] = _v
        _v = arg.pop("y0", None)
        _v = y0 if y0 is not None else _v
        if _v is not None:
            self["y0"] = _v
        _v = arg.pop("y1", None)
        _v = y1 if y1 is not None else _v
        if _v is not None:
            self["y1"] = _v
        _v = arg.pop("yref", None)
        _v = yref if yref is not None else _v
        if _v is not None:
            self["yref"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
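A short sketch of how this layout Selection object might be used, assuming a plotly version recent enough to expose `layout.selections` and the generated `Figure.add_selection` convenience method; the coordinates are hypothetical.

import plotly.graph_objects as go

fig = go.Figure(go.Scatter(x=[1, 2, 3, 4], y=[2, 1, 3, 2], mode="markers"))

# Explicit construction of the layout object, attached via layout.selections.
sel = go.layout.Selection(type="rect", x0=1.5, x1=3.5, y0=0.5, y1=2.5,
                          line=dict(color="crimson", dash="dot"))
fig.update_layout(selections=[sel])

# The same rectangle can also be added from keyword arguments:
# fig.add_selection(x0=1.5, x1=3.5, y0=0.5, y1=2.5)
fig.show()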
 
 | 
	_selection.Selection.__init__ 
 | 
	Self-Contained 
 | 
					
	plotly.py 
 | 
	49 
 | 
	packages/python/plotly/plotly/graph_objs/layout/_slider.py 
 | 
	    def __init__(
        self,
        arg=None,
        active=None,
        activebgcolor=None,
        bgcolor=None,
        bordercolor=None,
        borderwidth=None,
        currentvalue=None,
        font=None,
        len=None,
        lenmode=None,
        minorticklen=None,
        name=None,
        pad=None,
        steps=None,
        stepdefaults=None,
        templateitemname=None,
        tickcolor=None,
        ticklen=None,
        tickwidth=None,
        transition=None,
        visible=None,
        x=None,
        xanchor=None,
        y=None,
        yanchor=None,
        **kwargs,
    ):
        """
        Construct a new Slider object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.layout.Slider`
        active
            Determines which button (by index starting from 0) is
            considered active.
        activebgcolor
            Sets the background color of the slider grip while
            dragging.
        bgcolor
            Sets the background color of the slider.
        bordercolor
            Sets the color of the border enclosing the slider.
        borderwidth
            Sets the width (in px) of the border enclosing the
            slider.
        currentvalue
            :class:`plotly.graph_objects.layout.slider.Currentvalue
            ` instance or dict with compatible properties
        font
            Sets the font of the slider step labels.
        len
            Sets the length of the slider. This measure excludes the
            padding of both ends. That is, the slider's length is
            this length minus the padding on both ends.
        lenmode
            Determines whether this slider length is set in units
            of plot "fraction" or in *pixels. Use `len` to set the
            value.
        minorticklen
            Sets the length in pixels of minor step tick marks
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        pad
            Set the padding of the slider component along each
            side.
        steps
            A tuple of
            :class:`plotly.graph_objects.layout.slider.Step`
            instances or dicts with compatible properties
        stepdefaults
            When used in a template (as
            layout.template.layout.slider.stepdefaults), sets the
            default property values to use for elements of
            layout.slider.steps
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        tickcolor
            Sets the color of the border enclosing the slider.
        ticklen
            Sets the length in pixels of step tick marks
        tickwidth
            Sets the tick width (in px).
        transition
            :class:`plotly.graph_objects.layout.slider.Transition`
            instance or dict with compatible properties
        visible
            Determines whether or not the slider is visible.
        x
            Sets the x position (in normalized coordinates) of the
            slider.
        xanchor
            Sets the slider's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the range selector.
        y
            Sets the y position (in normalized coordinates) of the
            slider.
        yanchor
            Sets the slider's vertical position anchor. This anchor
            binds the `y` position to the "top", "middle" or
            "bottom" of the range selector.
        Returns
        -------
        Slider
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__slider.Slider.__init__.txt 
 | 
	    def __init__(
        self,
        arg=None,
        active=None,
        activebgcolor=None,
        bgcolor=None,
        bordercolor=None,
        borderwidth=None,
        currentvalue=None,
        font=None,
        len=None,
        lenmode=None,
        minorticklen=None,
        name=None,
        pad=None,
        steps=None,
        stepdefaults=None,
        templateitemname=None,
        tickcolor=None,
        ticklen=None,
        tickwidth=None,
        transition=None,
        visible=None,
        x=None,
        xanchor=None,
        y=None,
        yanchor=None,
        **kwargs,
    ):
        """
        Construct a new Slider object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.layout.Slider`
        active
            Determines which button (by index starting from 0) is
            considered active.
        activebgcolor
            Sets the background color of the slider grip while
            dragging.
        bgcolor
            Sets the background color of the slider.
        bordercolor
            Sets the color of the border enclosing the slider.
        borderwidth
            Sets the width (in px) of the border enclosing the
            slider.
        currentvalue
            :class:`plotly.graph_objects.layout.slider.Currentvalue
            ` instance or dict with compatible properties
        font
            Sets the font of the slider step labels.
        len
            Sets the length of the slider. This measure excludes the
            padding of both ends. That is, the slider's length is
            this length minus the padding on both ends.
        lenmode
            Determines whether this slider length is set in units
            of plot "fraction" or in *pixels. Use `len` to set the
            value.
        minorticklen
            Sets the length in pixels of minor step tick marks
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        pad
            Set the padding of the slider component along each
            side.
        steps
            A tuple of
            :class:`plotly.graph_objects.layout.slider.Step`
            instances or dicts with compatible properties
        stepdefaults
            When used in a template (as
            layout.template.layout.slider.stepdefaults), sets the
            default property values to use for elements of
            layout.slider.steps
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        tickcolor
            Sets the color of the border enclosing the slider.
        ticklen
            Sets the length in pixels of step tick marks
        tickwidth
            Sets the tick width (in px).
        transition
            :class:`plotly.graph_objects.layout.slider.Transition`
            instance or dict with compatible properties
        visible
            Determines whether or not the slider is visible.
        x
            Sets the x position (in normalized coordinates) of the
            slider.
        xanchor
            Sets the slider's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the range selector.
        y
            Sets the y position (in normalized coordinates) of the
            slider.
        yanchor
            Sets the slider's vertical position anchor. This anchor
            binds the `y` position to the "top", "middle" or
            "bottom" of the range selector.
        Returns
        -------
        Slider
        """
        super(Slider, self).__init__("sliders")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.Slider
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Slider`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("active", None)
        _v = active if active is not None else _v
        if _v is not None:
            self["active"] = _v
        _v = arg.pop("activebgcolor", None)
        _v = activebgcolor if activebgcolor is not None else _v
        if _v is not None:
            self["activebgcolor"] = _v
        _v = arg.pop("bgcolor", None)
        _v = bgcolor if bgcolor is not None else _v
        if _v is not None:
            self["bgcolor"] = _v
        _v = arg.pop("bordercolor", None)
        _v = bordercolor if bordercolor is not None else _v
        if _v is not None:
            self["bordercolor"] = _v
        _v = arg.pop("borderwidth", None)
        _v = borderwidth if borderwidth is not None else _v
        if _v is not None:
            self["borderwidth"] = _v
        _v = arg.pop("currentvalue", None)
        _v = currentvalue if currentvalue is not None else _v
        if _v is not None:
            self["currentvalue"] = _v
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("len", None)
        _v = len if len is not None else _v
        if _v is not None:
            self["len"] = _v
        _v = arg.pop("lenmode", None)
        _v = lenmode if lenmode is not None else _v
        if _v is not None:
            self["lenmode"] = _v
        _v = arg.pop("minorticklen", None)
        _v = minorticklen if minorticklen is not None else _v
        if _v is not None:
            self["minorticklen"] = _v
        _v = arg.pop("name", None)
        _v = name if name is not None else _v
        if _v is not None:
            self["name"] = _v
        _v = arg.pop("pad", None)
        _v = pad if pad is not None else _v
        if _v is not None:
            self["pad"] = _v
        _v = arg.pop("steps", None)
        _v = steps if steps is not None else _v
        if _v is not None:
            self["steps"] = _v
        _v = arg.pop("stepdefaults", None)
        _v = stepdefaults if stepdefaults is not None else _v
        if _v is not None:
            self["stepdefaults"] = _v
        _v = arg.pop("templateitemname", None)
        _v = templateitemname if templateitemname is not None else _v
        if _v is not None:
            self["templateitemname"] = _v
        _v = arg.pop("tickcolor", None)
        _v = tickcolor if tickcolor is not None else _v
        if _v is not None:
            self["tickcolor"] = _v
        _v = arg.pop("ticklen", None)
        _v = ticklen if ticklen is not None else _v
        if _v is not None:
            self["ticklen"] = _v
        _v = arg.pop("tickwidth", None)
        _v = tickwidth if tickwidth is not None else _v
        if _v is not None:
            self["tickwidth"] = _v
        _v = arg.pop("transition", None)
        _v = transition if transition is not None else _v
        if _v is not None:
            self["transition"] = _v
        _v = arg.pop("visible", None)
        _v = visible if visible is not None else _v
        if _v is not None:
            self["visible"] = _v
        _v = arg.pop("x", None)
        _v = x if x is not None else _v
        if _v is not None:
            self["x"] = _v
        _v = arg.pop("xanchor", None)
        _v = xanchor if xanchor is not None else _v
        if _v is not None:
            self["xanchor"] = _v
        _v = arg.pop("y", None)
        _v = y if y is not None else _v
        if _v is not None:
            self["y"] = _v
        _v = arg.pop("yanchor", None)
        _v = yanchor if yanchor is not None else _v
        if _v is not None:
            self["yanchor"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
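A brief sketch of attaching a Slider to a figure's layout, with two illustrative steps that retitle the plot through the "relayout" method; the labels and step arguments are hypothetical.

import plotly.graph_objects as go

fig = go.Figure(go.Scatter(y=[1, 3, 2]))

# Each step describes what happens when the slider handle reaches it.
steps = [
    dict(method="relayout", label="first", args=[{"title": "Step 1"}]),
    dict(method="relayout", label="second", args=[{"title": "Step 2"}]),
]
fig.update_layout(
    sliders=[
        go.layout.Slider(
            active=0,
            steps=steps,
            currentvalue=dict(prefix="Showing: "),
            pad=dict(t=30),
        )
    ]
)
fig.show()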
 
 | 
	_slider.Slider.__init__ 
 | 
	Self-Contained 
 | 
					
	plotly.py 
 | 
	50 
 | 
	packages/python/plotly/plotly/graph_objs/_splom.py 
 | 
	    def __init__(
        self,
        arg=None,
        customdata=None,
        customdatasrc=None,
        diagonal=None,
        dimensions=None,
        dimensiondefaults=None,
        hoverinfo=None,
        hoverinfosrc=None,
        hoverlabel=None,
        hovertemplate=None,
        hovertemplatesrc=None,
        hovertext=None,
        hovertextsrc=None,
        ids=None,
        idssrc=None,
        legend=None,
        legendgroup=None,
        legendgrouptitle=None,
        legendrank=None,
        legendwidth=None,
        marker=None,
        meta=None,
        metasrc=None,
        name=None,
        opacity=None,
        selected=None,
        selectedpoints=None,
        showlegend=None,
        showlowerhalf=None,
        showupperhalf=None,
        stream=None,
        text=None,
        textsrc=None,
        uid=None,
        uirevision=None,
        unselected=None,
        visible=None,
        xaxes=None,
        xhoverformat=None,
        yaxes=None,
        yhoverformat=None,
        **kwargs,
    ):
        """
        Construct a new Splom object
        Splom traces generate scatter plot matrix visualizations. Each
        splom `dimensions` item corresponds to a generated axis. Values
        for each of those dimensions are set in `dimensions[i].values`.
        Splom traces support all `scattergl` marker style attributes.
        Specify `layout.grid` attributes and/or layout x-axis and
        y-axis attributes for more control over the axis positioning
        and style.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.Splom`
        customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers' DOM elements.
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        diagonal
            :class:`plotly.graph_objects.splom.Diagonal` instance
            or dict with compatible properties
        dimensions
            A tuple of
            :class:`plotly.graph_objects.splom.Dimension` instances
            or dicts with compatible properties
        dimensiondefaults
            When used in a template (as
            layout.template.data.splom.dimensiondefaults), sets the
            default property values to use for elements of
            splom.dimensions
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.splom.Hoverlabel` instance
            or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available.  Anything contained in tag `<extra>` is
            displayed in the secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Same as `text`.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgroup
            Sets the legend group for this trace. Traces and shapes
            part of the same legend group hide/show at the same
            time when toggling legend items.
        legendgrouptitle
            :class:`plotly.graph_objects.splom.Legendgrouptitle`
            instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When having
            unranked or equal rank items, shapes would be displayed
            after traces, i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        marker
            :class:`plotly.graph_objects.splom.Marker` instance or
            dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        opacity
            Sets the opacity of the trace.
        selected
            :class:`plotly.graph_objects.splom.Selected` instance
            or dict with compatible properties
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas,
            any other non-array value means no selection at all, where
            the `selected` and `unselected` styles have no effect.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        showlowerhalf
            Determines whether or not subplots on the lower half
            from the diagonal are displayed.
        showupperhalf
            Determines whether or not subplots on the upper half
            from the diagonal are displayed.
        stream
            :class:`plotly.graph_objects.splom.Stream` instance or
            dict with compatible properties
        text
            Sets text elements associated with each (x,y) pair to
            appear on hover. If a single string, the same string
            appears over all the data points. If an array of
            strings, the items are mapped in order to this
            trace's (x,y) coordinates.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        uid
            Assign an id to this trace. Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        unselected
            :class:`plotly.graph_objects.splom.Unselected` instance
            or dict with compatible properties
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        xaxes
            Sets the list of x axes corresponding to dimensions of
            this splom trace. By default, a splom will match the
            first N xaxes where N is the number of input
            dimensions. Note that, in case where `diagonal.visible`
            is false and `showupperhalf` or `showlowerhalf` is
            false, this splom trace will generate one less x-axis
            and one less y-axis.
        xhoverformat
            Sets the hover text formatting rule for `x` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*. By default the values are
            formatted using `xaxis.hoverformat`.
        yaxes
            Sets the list of y axes corresponding to dimensions of
            this splom trace. By default, a splom will match the
            first N yaxes where N is the number of input
            dimensions. Note that, in case where `diagonal.visible`
            is false and `showupperhalf` or `showlowerhalf` is
            false, this splom trace will generate one less x-axis
            and one less y-axis.
        yhoverformat
            Sets the hover text formatting rule for `y` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*. By default the values are
            formatted using `yaxis.hoverformat`.
        Returns
        -------
        Splom
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__splom.Splom.__init__.txt 
 | 
	    def __init__(
        self,
        arg=None,
        customdata=None,
        customdatasrc=None,
        diagonal=None,
        dimensions=None,
        dimensiondefaults=None,
        hoverinfo=None,
        hoverinfosrc=None,
        hoverlabel=None,
        hovertemplate=None,
        hovertemplatesrc=None,
        hovertext=None,
        hovertextsrc=None,
        ids=None,
        idssrc=None,
        legend=None,
        legendgroup=None,
        legendgrouptitle=None,
        legendrank=None,
        legendwidth=None,
        marker=None,
        meta=None,
        metasrc=None,
        name=None,
        opacity=None,
        selected=None,
        selectedpoints=None,
        showlegend=None,
        showlowerhalf=None,
        showupperhalf=None,
        stream=None,
        text=None,
        textsrc=None,
        uid=None,
        uirevision=None,
        unselected=None,
        visible=None,
        xaxes=None,
        xhoverformat=None,
        yaxes=None,
        yhoverformat=None,
        **kwargs,
    ):
        """
        Construct a new Splom object
        Splom traces generate scatter plot matrix visualizations. Each
        splom `dimensions` item corresponds to a generated axis. Values
        for each of those dimensions are set in `dimensions[i].values`.
        Splom traces support all `scattergl` marker style attributes.
        Specify `layout.grid` attributes and/or layout x-axis and
        y-axis attributes for more control over the axis positioning
        and style.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.Splom`
        customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        diagonal
            :class:`plotly.graph_objects.splom.Diagonal` instance
            or dict with compatible properties
        dimensions
            A tuple of
            :class:`plotly.graph_objects.splom.Dimension` instances
            or dicts with compatible properties
        dimensiondefaults
            When used in a template (as
            layout.template.data.splom.dimensiondefaults), sets the
            default property values to use for elements of
            splom.dimensions
        hoverinfo
            Determines which trace information appears on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.splom.Hoverlabel` instance
            or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appears in the hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, %{_xother},
            %{_xother_}, %{xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". See https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available.  Anything contained in tag `<extra>` is
            displayed in the secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Same as `text`.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids are used for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgroup
            Sets the legend group for this trace. Traces and shapes
            part of the same legend group hide/show at the same
            time when toggling legend items.
        legendgrouptitle
            :class:`plotly.graph_objects.splom.Legendgrouptitle`
            instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. With unranked or
            equal-rank items, shapes are displayed after traces,
            i.e. according to their order in data and layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        marker
            :class:`plotly.graph_objects.splom.Marker` instance or
            dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        opacity
            Sets the opacity of the trace.
        selected
            :class:`plotly.graph_objects.splom.Selected` instance
            or dict with compatible properties
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas,
            any other non-array value means no selection at all, where
            the `selected` and `unselected` styles have no effect.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        showlowerhalf
            Determines whether or not subplots on the lower half
            from the diagonal are displayed.
        showupperhalf
            Determines whether or not subplots on the upper half
            from the diagonal are displayed.
        stream
            :class:`plotly.graph_objects.splom.Stream` instance or
            dict with compatible properties
        text
            Sets text elements associated with each (x,y) pair to
            appear on hover. If a single string, the same string
            appears over all the data points. If an array of
            strings, the items are mapped in order to this
            trace's (x,y) coordinates.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        uid
            Assign an id to this trace. Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        unselected
            :class:`plotly.graph_objects.splom.Unselected` instance
            or dict with compatible properties
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        xaxes
            Sets the list of x axes corresponding to dimensions of
            this splom trace. By default, a splom will match the
            first N xaxes where N is the number of input
            dimensions. Note that, in case where `diagonal.visible`
            is false and `showupperhalf` or `showlowerhalf` is
            false, this splom trace will generate one less x-axis
            and one less y-axis.
        xhoverformat
            Sets the hover text formatting rule for `x` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*. By default the values are
            formatted using `xaxis.hoverformat`.
        yaxes
            Sets the list of y axes corresponding to dimensions of
            this splom trace. By default, a splom will match the
            first N yaxes where N is the number of input
            dimensions. Note that, in case where `diagonal.visible`
            is false and `showupperhalf` or `showlowerhalf` is
            false, this splom trace will generate one less x-axis
            and one less y-axis.
        yhoverformat
            Sets the hover text formatting rule for `y` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*. By default the values are
            formatted using `yaxis.hoverformat`.
        Returns
        -------
        Splom
        """
        super(Splom, self).__init__("splom")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.Splom
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Splom`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("customdata", None)
        _v = customdata if customdata is not None else _v
        if _v is not None:
            self["customdata"] = _v
        _v = arg.pop("customdatasrc", None)
        _v = customdatasrc if customdatasrc is not None else _v
        if _v is not None:
            self["customdatasrc"] = _v
        _v = arg.pop("diagonal", None)
        _v = diagonal if diagonal is not None else _v
        if _v is not None:
            self["diagonal"] = _v
        _v = arg.pop("dimensions", None)
        _v = dimensions if dimensions is not None else _v
        if _v is not None:
            self["dimensions"] = _v
        _v = arg.pop("dimensiondefaults", None)
        _v = dimensiondefaults if dimensiondefaults is not None else _v
        if _v is not None:
            self["dimensiondefaults"] = _v
        _v = arg.pop("hoverinfo", None)
        _v = hoverinfo if hoverinfo is not None else _v
        if _v is not None:
            self["hoverinfo"] = _v
        _v = arg.pop("hoverinfosrc", None)
        _v = hoverinfosrc if hoverinfosrc is not None else _v
        if _v is not None:
            self["hoverinfosrc"] = _v
        _v = arg.pop("hoverlabel", None)
        _v = hoverlabel if hoverlabel is not None else _v
        if _v is not None:
            self["hoverlabel"] = _v
        _v = arg.pop("hovertemplate", None)
        _v = hovertemplate if hovertemplate is not None else _v
        if _v is not None:
            self["hovertemplate"] = _v
        _v = arg.pop("hovertemplatesrc", None)
        _v = hovertemplatesrc if hovertemplatesrc is not None else _v
        if _v is not None:
            self["hovertemplatesrc"] = _v
        _v = arg.pop("hovertext", None)
        _v = hovertext if hovertext is not None else _v
        if _v is not None:
            self["hovertext"] = _v
        _v = arg.pop("hovertextsrc", None)
        _v = hovertextsrc if hovertextsrc is not None else _v
        if _v is not None:
            self["hovertextsrc"] = _v
        _v = arg.pop("ids", None)
        _v = ids if ids is not None else _v
        if _v is not None:
            self["ids"] = _v
        _v = arg.pop("idssrc", None)
        _v = idssrc if idssrc is not None else _v
        if _v is not None:
            self["idssrc"] = _v
        _v = arg.pop("legend", None)
        _v = legend if legend is not None else _v
        if _v is not None:
            self["legend"] = _v
        _v = arg.pop("legendgroup", None)
        _v = legendgroup if legendgroup is not None else _v
        if _v is not None:
            self["legendgroup"] = _v
        _v = arg.pop("legendgrouptitle", None)
        _v = legendgrouptitle if legendgrouptitle is not None else _v
        if _v is not None:
            self["legendgrouptitle"] = _v
        _v = arg.pop("legendrank", None)
        _v = legendrank if legendrank is not None else _v
        if _v is not None:
            self["legendrank"] = _v
        _v = arg.pop("legendwidth", None)
        _v = legendwidth if legendwidth is not None else _v
        if _v is not None:
            self["legendwidth"] = _v
        _v = arg.pop("marker", None)
        _v = marker if marker is not None else _v
        if _v is not None:
            self["marker"] = _v
        _v = arg.pop("meta", None)
        _v = meta if meta is not None else _v
        if _v is not None:
            self["meta"] = _v
        _v = arg.pop("metasrc", None)
        _v = metasrc if metasrc is not None else _v
        if _v is not None:
            self["metasrc"] = _v
        _v = arg.pop("name", None)
        _v = name if name is not None else _v
        if _v is not None:
            self["name"] = _v
        _v = arg.pop("opacity", None)
        _v = opacity if opacity is not None else _v
        if _v is not None:
            self["opacity"] = _v
        _v = arg.pop("selected", None)
        _v = selected if selected is not None else _v
        if _v is not None:
            self["selected"] = _v
        _v = arg.pop("selectedpoints", None)
        _v = selectedpoints if selectedpoints is not None else _v
        if _v is not None:
            self["selectedpoints"] = _v
        _v = arg.pop("showlegend", None)
        _v = showlegend if showlegend is not None else _v
        if _v is not None:
            self["showlegend"] = _v
        _v = arg.pop("showlowerhalf", None)
        _v = showlowerhalf if showlowerhalf is not None else _v
        if _v is not None:
            self["showlowerhalf"] = _v
        _v = arg.pop("showupperhalf", None)
        _v = showupperhalf if showupperhalf is not None else _v
        if _v is not None:
            self["showupperhalf"] = _v
        _v = arg.pop("stream", None)
        _v = stream if stream is not None else _v
        if _v is not None:
            self["stream"] = _v
        _v = arg.pop("text", None)
        _v = text if text is not None else _v
        if _v is not None:
            self["text"] = _v
        _v = arg.pop("textsrc", None)
        _v = textsrc if textsrc is not None else _v
        if _v is not None:
            self["textsrc"] = _v
        _v = arg.pop("uid", None)
        _v = uid if uid is not None else _v
        if _v is not None:
            self["uid"] = _v
        _v = arg.pop("uirevision", None)
        _v = uirevision if uirevision is not None else _v
        if _v is not None:
            self["uirevision"] = _v
        _v = arg.pop("unselected", None)
        _v = unselected if unselected is not None else _v
        if _v is not None:
            self["unselected"] = _v
        _v = arg.pop("visible", None)
        _v = visible if visible is not None else _v
        if _v is not None:
            self["visible"] = _v
        _v = arg.pop("xaxes", None)
        _v = xaxes if xaxes is not None else _v
        if _v is not None:
            self["xaxes"] = _v
        _v = arg.pop("xhoverformat", None)
        _v = xhoverformat if xhoverformat is not None else _v
        if _v is not None:
            self["xhoverformat"] = _v
        _v = arg.pop("yaxes", None)
        _v = yaxes if yaxes is not None else _v
        if _v is not None:
            self["yaxes"] = _v
        _v = arg.pop("yhoverformat", None)
        _v = yhoverformat if yhoverformat is not None else _v
        if _v is not None:
            self["yhoverformat"] = _v
        # Read-only literals
        # ------------------
        self._props["type"] = "splom"
        arg.pop("type", None)
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
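A brief note on the constructor logic above: properties may be supplied either via the positional `arg` dict or as keyword arguments, and an explicit keyword takes precedence over the same key in `arg`. A minimal sketch of that behavior (the property values are illustrative):

import plotly.graph_objects as go

base = dict(name="from-arg", opacity=0.5)
trace = go.Splom(base, name="from-kwarg")

assert trace.name == "from-kwarg"   # keyword argument overrides arg["name"]
assert trace.opacity == 0.5         # arg value is kept when no keyword is given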
 
 | 
	_splom.Splom.__init__ 
 | 
	Self-Contained 
 | 
					
	plotly.py 
 | 
	52 
 | 
	packages/python/plotly/plotly/_subplots.py 
 | 
	def make_subplots(
    rows=1,
    cols=1,
    shared_xaxes=False,
    shared_yaxes=False,
    start_cell="top-left",
    print_grid=False,
    horizontal_spacing=None,
    vertical_spacing=None,
    subplot_titles=None,
    column_widths=None,
    row_heights=None,
    specs=None,
    insets=None,
    column_titles=None,
    row_titles=None,
    x_title=None,
    y_title=None,
    figure=None,
    **kwargs,
):
    """
    Return an instance of plotly.graph_objs.Figure with predefined subplots
    configured in 'layout'.
    Parameters
    ----------
    rows: int (default 1)
        Number of rows in the subplot grid. Must be greater than zero.
    cols: int (default 1)
        Number of columns in the subplot grid. Must be greater than zero.
    shared_xaxes: boolean or str (default False)
        Assign shared (linked) x-axes for 2D cartesian subplots
          - True or 'columns': Share axes among subplots in the same column
          - 'rows': Share axes among subplots in the same row
          - 'all': Share axes across all subplots in the grid.
    shared_yaxes: boolean or str (default False)
        Assign shared (linked) y-axes for 2D cartesian subplots
          - 'columns': Share axes among subplots in the same column
          - True or 'rows': Share axes among subplots in the same row
          - 'all': Share axes across all subplots in the grid.
    start_cell: 'bottom-left' or 'top-left' (default 'top-left')
        Choose the starting cell in the subplot grid used to set the
        domains_grid of the subplots.
          - 'top-left': Subplots are numbered with (1, 1) in the top
                        left corner
          - 'bottom-left': Subplots are numbered with (1, 1) in the bottom
                           left corner
    print_grid: boolean (default False)
        If True, prints a string representation of the plot grid.  Grid may
        also be printed using the `Figure.print_grid()` method on the
        resulting figure.
    horizontal_spacing: float (default 0.2 / cols)
        Space between subplot columns in normalized plot coordinates. Must be
        a float between 0 and 1.
        Applies to all columns (use 'specs' for subplot-dependent spacing)
    vertical_spacing: float (default 0.3 / rows)
        Space between subplot rows in normalized plot coordinates. Must be
        a float between 0 and 1.
        Applies to all rows (use 'specs' for subplot-dependent spacing)
    subplot_titles: list of str or None (default None)
        Title of each subplot as a list in row-major ordering.
        Empty strings ("") can be included in the list if no subplot title
        is desired in that space so that the titles are properly indexed.
    specs: list of lists of dict or None (default None)
        Per subplot specifications of subplot type, row/column spanning, and
        spacing.
        ex1: specs=[[{}, {}], [{'colspan': 2}, None]]
        ex2: specs=[[{'rowspan': 2}, {}], [None, {}]]
        - Indices of the outer list correspond to subplot grid rows
          starting from the top, if start_cell='top-left',
          or bottom, if start_cell='bottom-left'.
          The number of rows in 'specs' must be equal to 'rows'.
        - Indices of the inner lists correspond to subplot grid columns
          starting from the left. The number of columns in 'specs'
          must be equal to 'cols'.
        - Each item in the 'specs' list corresponds to one subplot
          in a subplot grid. (N.B. The subplot grid has exactly 'rows'
          times 'cols' cells.)
        - Use None for a blank subplot cell (or to move past a col/row span).
        - Note that specs[0][0] has the specs of the 'start_cell' subplot.
        - Each item in 'specs' is a dictionary.
            The available keys are:
            * type (string, default 'xy'): Subplot type. One of
                - 'xy': 2D Cartesian subplot type for scatter, bar, etc.
                - 'scene': 3D Cartesian subplot for scatter3d, cone, etc.
                - 'polar': Polar subplot for scatterpolar, barpolar, etc.
                - 'ternary': Ternary subplot for scatterternary
                - 'map': Map subplot for scattermap, choroplethmap and densitymap
                - 'mapbox': Mapbox subplot for scattermapbox, choroplethmapbox and densitymapbox
                - 'domain': Subplot type for traces that are individually
                            positioned. pie, parcoords, parcats, etc.
                - trace type: A trace type which will be used to determine
                              the appropriate subplot type for that trace
            * secondary_y (bool, default False): If True, create a secondary
                y-axis positioned on the right side of the subplot. Only valid
                if type='xy'.
            * colspan (int, default 1): number of subplot columns
                for this subplot to span.
            * rowspan (int, default 1): number of subplot rows
                for this subplot to span.
            * l (float, default 0.0): padding left of cell
            * r (float, default 0.0): padding right of cell
            * t (float, default 0.0): padding top of cell
            * b (float, default 0.0): padding bottom of cell
        - Note: Use 'horizontal_spacing' and 'vertical_spacing' to adjust
          the spacing in between the subplots.
    insets: list of dict or None (default None):
        Inset specifications.  Insets are subplots that overlay grid subplots
        - Each item in 'insets' is a dictionary.
            The available keys are:
            * cell (tuple, default=(1,1)): (row, col) index of the
                subplot cell to overlay inset axes onto.
            * type (string, default 'xy'): Subplot type
            * l (float, default=0.0): padding left of inset
                  in fraction of cell width
            * w (float or 'to_end', default='to_end') inset width
                  in fraction of cell width ('to_end': to cell right edge)
            * b (float, default=0.0): padding bottom of inset
                  in fraction of cell height
            * h (float or 'to_end', default='to_end') inset height
                  in fraction of cell height ('to_end': to cell top edge)
    column_widths: list of numbers or None (default None)
        list of length `cols` of the relative widths of each column of subplots.
        Values are normalized internally and used to distribute overall width
        of the figure (excluding padding) among the columns.
        For backward compatibility, may also be specified using the
        `column_width` keyword argument.
    row_heights: list of numbers or None (default None)
        list of length `rows` of the relative heights of each row of subplots.
        If start_cell='top-left' then row heights are applied top to bottom.
        Otherwise, if start_cell='bottom-left' then row heights are applied
        bottom to top.
        For backward compatibility, may also be specified using the
        `row_width` kwarg. If specified as `row_width`, then the width values
        are applied from bottom to top regardless of the value of start_cell.
        This matches the legacy behavior of the `row_width` argument.
    column_titles: list of str or None (default None)
        list of length `cols` of titles to place above the top subplot in
        each column.
    row_titles: list of str or None (default None)
        list of length `rows` of titles to place on the right side of each
        row of subplots. If start_cell='top-left' then row titles are
        applied top to bottom. Otherwise, if start_cell='bottom-left' then
        row titles are applied bottom to top.
    x_title: str or None (default None)
        Title to place below the bottom row of subplots,
        centered horizontally
    y_title: str or None (default None)
        Title to place to the left of the left column of subplots,
        centered vertically
    figure: go.Figure or None (default None)
        If None, a new go.Figure instance will be created and its axes will be
        populated with those corresponding to the requested subplot geometry and
        this new figure will be returned.
        If a go.Figure instance, the axes will be added to the
        layout of this figure and this figure will be returned. If the figure
        already contains axes, they will be overwritten.
    Examples
    --------
    Example 1:
    >>> # Stack two subplots vertically, and add a scatter trace to each
    >>> from plotly.subplots import make_subplots
    >>> import plotly.graph_objects as go
    >>> fig = make_subplots(rows=2)
    This is the format of your plot grid:
    [ (1,1) xaxis1,yaxis1 ]
    [ (2,1) xaxis2,yaxis2 ]
    >>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    or see Figure.append_trace
    Example 2:
    >>> # Stack two subplots vertically with a shared x-axis
    >>> fig = make_subplots(rows=2, shared_xaxes=True)
    This is the format of your plot grid:
    [ (1,1) xaxis1,yaxis1 ]
    [ (2,1) xaxis2,yaxis2 ]
    >>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    Example 3:
    >>> # irregular subplot layout (more examples below under 'specs')
    >>> fig = make_subplots(rows=2, cols=2,
    ...                     specs=[[{}, {}],
    ...                     [{'colspan': 2}, None]])
    This is the format of your plot grid:
    [ (1,1) xaxis1,yaxis1 ]  [ (1,2) xaxis2,yaxis2 ]
    [ (2,1) xaxis3,yaxis3           -              ]
    >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=2) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    Example 4:
    >>> # insets
    >>> fig = make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}])
    This is the format of your plot grid:
    [ (1,1) xaxis1,yaxis1 ]
    With insets:
    [ xaxis2,yaxis2 ] over [ (1,1) xaxis1,yaxis1 ]
    >>> fig.add_scatter(x=[1,2,3], y=[2,1,1]) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2') # doctest: +ELLIPSIS
    Figure(...)
    Example 5:
    >>> # include subplot titles
    >>> fig = make_subplots(rows=2, subplot_titles=('Plot 1','Plot 2'))
    This is the format of your plot grid:
    [ (1,1) x1,y1 ]
    [ (2,1) x2,y2 ]
    >>> fig.add_scatter(x=[1,2,3], y=[2,1,2], row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_bar(x=[1,2,3], y=[2,1,2], row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    Example 6:
    Subplot with mixed subplot types
    >>> fig = make_subplots(rows=2, cols=2,
    ...                     specs=[[{'type': 'xy'},    {'type': 'polar'}],
    ...                            [{'type': 'scene'}, {'type': 'ternary'}]])
    >>> fig.add_traces(
    ...     [go.Scatter(y=[2, 3, 1]),
    ...      go.Scatterpolar(r=[1, 3, 2], theta=[0, 45, 90]),
    ...      go.Scatter3d(x=[1, 2, 1], y=[2, 3, 1], z=[0, 3, 5]),
    ...      go.Scatterternary(a=[0.1, 0.2, 0.1],
    ...                        b=[0.2, 0.3, 0.1],
    ...                        c=[0.7, 0.5, 0.8])],
    ...     rows=[1, 1, 2, 2],
    ...     cols=[1, 2, 1, 2]) # doctest: +ELLIPSIS
    Figure(...)
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__subplots.make_subplots.txt 
 | 
	def make_subplots(
    rows=1,
    cols=1,
    shared_xaxes=False,
    shared_yaxes=False,
    start_cell="top-left",
    print_grid=False,
    horizontal_spacing=None,
    vertical_spacing=None,
    subplot_titles=None,
    column_widths=None,
    row_heights=None,
    specs=None,
    insets=None,
    column_titles=None,
    row_titles=None,
    x_title=None,
    y_title=None,
    figure=None,
    **kwargs,
):
    """
    Return an instance of plotly.graph_objs.Figure with predefined subplots
    configured in 'layout'.
    Parameters
    ----------
    rows: int (default 1)
        Number of rows in the subplot grid. Must be greater than zero.
    cols: int (default 1)
        Number of columns in the subplot grid. Must be greater than zero.
    shared_xaxes: boolean or str (default False)
        Assign shared (linked) x-axes for 2D cartesian subplots
          - True or 'columns': Share axes among subplots in the same column
          - 'rows': Share axes among subplots in the same row
          - 'all': Share axes across all subplots in the grid.
    shared_yaxes: boolean or str (default False)
        Assign shared (linked) y-axes for 2D cartesian subplots
          - 'columns': Share axes among subplots in the same column
          - True or 'rows': Share axes among subplots in the same row
          - 'all': Share axes across all subplots in the grid.
    start_cell: 'bottom-left' or 'top-left' (default 'top-left')
        Choose the starting cell in the subplot grid used to set the
        domains_grid of the subplots.
          - 'top-left': Subplots are numbered with (1, 1) in the top
                        left corner
          - 'bottom-left': Subplots are numbered with (1, 1) in the bottom
                           left corner
    print_grid: boolean (default False)
        If True, prints a string representation of the plot grid.  Grid may
        also be printed using the `Figure.print_grid()` method on the
        resulting figure.
    horizontal_spacing: float (default 0.2 / cols)
        Space between subplot columns in normalized plot coordinates. Must be
        a float between 0 and 1.
        Applies to all columns (use 'specs' for subplot-dependent spacing)
    vertical_spacing: float (default 0.3 / rows)
        Space between subplot rows in normalized plot coordinates. Must be
        a float between 0 and 1.
        Applies to all rows (use 'specs' for subplot-dependent spacing)
    subplot_titles: list of str or None (default None)
        Title of each subplot as a list in row-major ordering.
        Empty strings ("") can be included in the list if no subplot title
        is desired in that space so that the titles are properly indexed.
    specs: list of lists of dict or None (default None)
        Per subplot specifications of subplot type, row/column spanning, and
        spacing.
        ex1: specs=[[{}, {}], [{'colspan': 2}, None]]
        ex2: specs=[[{'rowspan': 2}, {}], [None, {}]]
        - Indices of the outer list correspond to subplot grid rows
          starting from the top, if start_cell='top-left',
          or bottom, if start_cell='bottom-left'.
          The number of rows in 'specs' must be equal to 'rows'.
        - Indices of the inner lists correspond to subplot grid columns
          starting from the left. The number of columns in 'specs'
          must be equal to 'cols'.
        - Each item in the 'specs' list corresponds to one subplot
          in a subplot grid. (N.B. The subplot grid has exactly 'rows'
          times 'cols' cells.)
        - Use None for a blank subplot cell (or to move past a col/row span).
        - Note that specs[0][0] has the specs of the 'start_cell' subplot.
        - Each item in 'specs' is a dictionary.
            The available keys are:
            * type (string, default 'xy'): Subplot type. One of
                - 'xy': 2D Cartesian subplot type for scatter, bar, etc.
                - 'scene': 3D Cartesian subplot for scatter3d, cone, etc.
                - 'polar': Polar subplot for scatterpolar, barpolar, etc.
                - 'ternary': Ternary subplot for scatterternary
                - 'map': Map subplot for scattermap, choroplethmap and densitymap
                - 'mapbox': Mapbox subplot for scattermapbox, choroplethmapbox and densitymapbox
                - 'domain': Subplot type for traces that are individually
                            positioned. pie, parcoords, parcats, etc.
                - trace type: A trace type which will be used to determine
                              the appropriate subplot type for that trace
            * secondary_y (bool, default False): If True, create a secondary
                y-axis positioned on the right side of the subplot. Only valid
                if type='xy'.
            * colspan (int, default 1): number of subplot columns
                for this subplot to span.
            * rowspan (int, default 1): number of subplot rows
                for this subplot to span.
            * l (float, default 0.0): padding left of cell
            * r (float, default 0.0): padding right of cell
            * t (float, default 0.0): padding top of cell
            * b (float, default 0.0): padding bottom of cell
        - Note: Use 'horizontal_spacing' and 'vertical_spacing' to adjust
          the spacing in between the subplots.
    insets: list of dict or None (default None):
        Inset specifications.  Insets are subplots that overlay grid subplots
        - Each item in 'insets' is a dictionary.
            The available keys are:
            * cell (tuple, default=(1,1)): (row, col) index of the
                subplot cell to overlay inset axes onto.
            * type (string, default 'xy'): Subplot type
            * l (float, default=0.0): padding left of inset
                  in fraction of cell width
            * w (float or 'to_end', default='to_end') inset width
                  in fraction of cell width ('to_end': to cell right edge)
            * b (float, default=0.0): padding bottom of inset
                  in fraction of cell height
            * h (float or 'to_end', default='to_end') inset height
                  in fraction of cell height ('to_end': to cell top edge)
    column_widths: list of numbers or None (default None)
        list of length `cols` of the relative widths of each column of subplots.
        Values are normalized internally and used to distribute overall width
        of the figure (excluding padding) among the columns.
        For backward compatibility, may also be specified using the
        `column_width` keyword argument.
    row_heights: list of numbers or None (default None)
        list of length `rows` of the relative heights of each row of subplots.
        If start_cell='top-left' then row heights are applied top to bottom.
        Otherwise, if start_cell='bottom-left' then row heights are applied
        bottom to top.
        For backward compatibility, may also be specified using the
        `row_width` kwarg. If specified as `row_width`, then the width values
        are applied from bottom to top regardless of the value of start_cell.
        This matches the legacy behavior of the `row_width` argument.
    column_titles: list of str or None (default None)
        list of length `cols` of titles to place above the top subplot in
        each column.
    row_titles: list of str or None (default None)
        list of length `rows` of titles to place on the right side of each
        row of subplots. If start_cell='top-left' then row titles are
        applied top to bottom. Otherwise, if start_cell='bottom-left' then
        row titles are applied bottom to top.
    x_title: str or None (default None)
        Title to place below the bottom row of subplots,
        centered horizontally
    y_title: str or None (default None)
        Title to place to the left of the left column of subplots,
        centered vertically
    figure: go.Figure or None (default None)
        If None, a new go.Figure instance will be created and its axes will be
        populated with those corresponding to the requested subplot geometry and
        this new figure will be returned.
        If a go.Figure instance, the axes will be added to the
        layout of this figure and this figure will be returned. If the figure
        already contains axes, they will be overwritten.
    Examples
    --------
    Example 1:
    >>> # Stack two subplots vertically, and add a scatter trace to each
    >>> from plotly.subplots import make_subplots
    >>> import plotly.graph_objects as go
    >>> fig = make_subplots(rows=2)
    This is the format of your plot grid:
    [ (1,1) xaxis1,yaxis1 ]
    [ (2,1) xaxis2,yaxis2 ]
    >>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    or see Figure.append_trace
    Example 2:
    >>> # Stack two subplots vertically with a shared x-axis
    >>> fig = make_subplots(rows=2, shared_xaxes=True)
    This is the format of your plot grid:
    [ (1,1) xaxis1,yaxis1 ]
    [ (2,1) xaxis2,yaxis2 ]
    >>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    Example 3:
    >>> # irregular subplot layout (more examples below under 'specs')
    >>> fig = make_subplots(rows=2, cols=2,
    ...                     specs=[[{}, {}],
    ...                     [{'colspan': 2}, None]])
    This is the format of your plot grid:
    [ (1,1) xaxis1,yaxis1 ]  [ (1,2) xaxis2,yaxis2 ]
    [ (2,1) xaxis3,yaxis3           -              ]
    >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=2) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    Example 4:
    >>> # insets
    >>> fig = make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}])
    This is the format of your plot grid:
    [ (1,1) xaxis1,yaxis1 ]
    With insets:
    [ xaxis2,yaxis2 ] over [ (1,1) xaxis1,yaxis1 ]
    >>> fig.add_scatter(x=[1,2,3], y=[2,1,1]) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2') # doctest: +ELLIPSIS
    Figure(...)
    Example 5:
    >>> # include subplot titles
    >>> fig = make_subplots(rows=2, subplot_titles=('Plot 1','Plot 2'))
    This is the format of your plot grid:
    [ (1,1) x1,y1 ]
    [ (2,1) x2,y2 ]
    >>> fig.add_scatter(x=[1,2,3], y=[2,1,2], row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_bar(x=[1,2,3], y=[2,1,2], row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    Example 6:
    Subplot with mixed subplot types
    >>> fig = make_subplots(rows=2, cols=2,
    ...                     specs=[[{'type': 'xy'},    {'type': 'polar'}],
    ...                            [{'type': 'scene'}, {'type': 'ternary'}]])
    >>> fig.add_traces(
    ...     [go.Scatter(y=[2, 3, 1]),
    ...      go.Scatterpolar(r=[1, 3, 2], theta=[0, 45, 90]),
    ...      go.Scatter3d(x=[1, 2, 1], y=[2, 3, 1], z=[0, 3, 5]),
    ...      go.Scatterternary(a=[0.1, 0.2, 0.1],
    ...                        b=[0.2, 0.3, 0.1],
    ...                        c=[0.7, 0.5, 0.8])],
    ...     rows=[1, 1, 2, 2],
    ...     cols=[1, 2, 1, 2]) # doctest: +ELLIPSIS
    Figure(...)
    """
    import plotly.graph_objs as go
    # Handle backward compatibility
    # -----------------------------
    use_legacy_row_heights_order = "row_width" in kwargs
    row_heights = kwargs.pop("row_width", row_heights)
    column_widths = kwargs.pop("column_width", column_widths)
    if kwargs:
        raise TypeError(
            "make_subplots() got unexpected keyword argument(s): {}".format(
                list(kwargs)
            )
        )
    # Validate coerce inputs
    # ----------------------
    #  ### rows ###
    if not isinstance(rows, int) or rows <= 0:
        raise ValueError(
            """
The 'rows' argument to make_subplots must be an int greater than 0.
    Received value of type {typ}: {val}""".format(
                typ=type(rows), val=repr(rows)
            )
        )
    #  ### cols ###
    if not isinstance(cols, int) or cols <= 0:
        raise ValueError(
            """
The 'cols' argument to make_subplots must be an int greater than 0.
    Received value of type {typ}: {val}""".format(
                typ=type(cols), val=repr(cols)
            )
        )
    # ### start_cell ###
    if start_cell == "bottom-left":
        col_dir = 1
        row_dir = 1
    elif start_cell == "top-left":
        col_dir = 1
        row_dir = -1
    else:
        raise ValueError(
            """
The 'start_cell` argument to make_subplots must be one of \
['bottom-left', 'top-left']
    Received value of type {typ}: {val}""".format(
                typ=type(start_cell), val=repr(start_cell)
            )
        )
    # ### Helper to validate coerce elements of lists of dictionaries ###
    def _check_keys_and_fill(name, arg, defaults):
        def _checks(item, defaults):
            if item is None:
                return
            if not isinstance(item, dict):
                raise ValueError(
                    """
Elements of the '{name}' argument to make_subplots must be dictionaries \
or None.
    Received value of type {typ}: {val}""".format(
                        name=name, typ=type(item), val=repr(item)
                    )
                )
            for k in item:
                if k not in defaults:
                    raise ValueError(
                        """
Invalid key specified in an element of the '{name}' argument to \
make_subplots: {k}
    Valid keys include: {valid_keys}""".format(
                            k=repr(k), name=name, valid_keys=repr(list(defaults))
                        )
                    )
            for k, v in defaults.items():
                item.setdefault(k, v)
        for arg_i in arg:
            if isinstance(arg_i, (list, tuple)):
                # 2D list
                for arg_ii in arg_i:
                    _checks(arg_ii, defaults)
            elif isinstance(arg_i, dict):
                # 1D list
                _checks(arg_i, defaults)
    # ### specs ###
    if specs is None:
        specs = [[{} for c in range(cols)] for r in range(rows)]
    elif not (
        isinstance(specs, (list, tuple))
        and specs
        and all(isinstance(row, (list, tuple)) for row in specs)
        and len(specs) == rows
        and all(len(row) == cols for row in specs)
        and all(all(v is None or isinstance(v, dict) for v in row) for row in specs)
    ):
        raise ValueError(
            """
The 'specs' argument to make_subplots must be a 2D list of dictionaries with \
dimensions ({rows} x {cols}).
    Received value of type {typ}: {val}""".format(
                rows=rows, cols=cols, typ=type(specs), val=repr(specs)
            )
        )
    for row in specs:
        for spec in row:
            # For backward compatibility,
            # convert is_3d flag to type='scene' kwarg
            if spec and spec.pop("is_3d", None):
                spec["type"] = "scene"
    spec_defaults = dict(
        type="xy", secondary_y=False, colspan=1, rowspan=1, l=0.0, r=0.0, b=0.0, t=0.0
    )
    _check_keys_and_fill("specs", specs, spec_defaults)
    # Validate secondary_y
    has_secondary_y = False
    for row in specs:
        for spec in row:
            if spec is not None:
                has_secondary_y = has_secondary_y or spec["secondary_y"]
            if spec and spec["type"] != "xy" and spec["secondary_y"]:
                raise ValueError(
                    """
The 'secondary_y' spec property is not supported for subplot of type '{s_typ}'
     'secondary_y' is only supported for subplots of type 'xy'
""".format(
                        s_typ=spec["type"]
                    )
                )
    # ### insets ###
    if insets is None or insets is False:
        insets = []
    elif not (
        isinstance(insets, (list, tuple)) and all(isinstance(v, dict) for v in insets)
    ):
        raise ValueError(
            """
The 'insets' argument to make_subplots must be a list of dictionaries.
    Received value of type {typ}: {val}""".format(
                typ=type(insets), val=repr(insets)
            )
        )
    if insets:
        for inset in insets:
            if inset and inset.pop("is_3d", None):
                inset["type"] = "scene"
        inset_defaults = dict(
            cell=(1, 1), type="xy", l=0.0, w="to_end", b=0.0, h="to_end"
        )
        _check_keys_and_fill("insets", insets, inset_defaults)
    # ### shared_xaxes / shared_yaxes
    valid_shared_vals = [None, True, False, "rows", "columns", "all"]
    shared_err_msg = """
The {arg} argument to make_subplots must be one of: {valid_vals}
    Received value of type {typ}: {val}"""
    if shared_xaxes not in valid_shared_vals:
        val = shared_xaxes
        raise ValueError(
            shared_err_msg.format(
                arg="shared_xaxes",
                valid_vals=valid_shared_vals,
                typ=type(val),
                val=repr(val),
            )
        )
    if shared_yaxes not in valid_shared_vals:
        val = shared_yaxes
        raise ValueError(
            shared_err_msg.format(
                arg="shared_yaxes",
                valid_vals=valid_shared_vals,
                typ=type(val),
                val=repr(val),
            )
        )
    def _check_hv_spacing(dimsize, spacing, name, dimvarname, dimname):
        if spacing < 0 or spacing > 1:
            raise ValueError("%s spacing must be between 0 and 1." % (name,))
        if dimsize <= 1:
            return
        max_spacing = 1.0 / float(dimsize - 1)
        if spacing > max_spacing:
            raise ValueError(
                """{name} spacing cannot be greater than (1 / ({dimvarname} - 1)) = {max_spacing:f}.
The resulting plot would have {dimsize} {dimname} ({dimvarname}={dimsize}).""".format(
                    dimvarname=dimvarname,
                    name=name,
                    dimname=dimname,
                    max_spacing=max_spacing,
                    dimsize=dimsize,
                )
            )
    # ### horizontal_spacing ###
    if horizontal_spacing is None:
        if has_secondary_y:
            horizontal_spacing = 0.4 / cols
        else:
            horizontal_spacing = 0.2 / cols
    # check horizontal_spacing can be satisfied:
    _check_hv_spacing(cols, horizontal_spacing, "Horizontal", "cols", "columns")
    # ### vertical_spacing ###
    if vertical_spacing is None:
        if subplot_titles is not None:
            vertical_spacing = 0.5 / rows
        else:
            vertical_spacing = 0.3 / rows
    # check vertical_spacing can be satisfied:
    _check_hv_spacing(rows, vertical_spacing, "Vertical", "rows", "rows")
    # ### subplot titles ###
    if subplot_titles is None:
        subplot_titles = [""] * rows * cols
    # ### column_widths ###
    if has_secondary_y:
        # Add room for secondary y-axis title
        max_width = 0.94
    elif row_titles:
        # Add a little breathing room between row labels and legend
        max_width = 0.98
    else:
        max_width = 1.0
    if column_widths is None:
        widths = [(max_width - horizontal_spacing * (cols - 1)) / cols] * cols
    elif isinstance(column_widths, (list, tuple)) and len(column_widths) == cols:
        cum_sum = float(sum(column_widths))
        widths = []
        for w in column_widths:
            widths.append((max_width - horizontal_spacing * (cols - 1)) * (w / cum_sum))
    else:
        raise ValueError(
            """
The 'column_widths' argument to make_subplots must be a list of numbers of \
length {cols}.
    Received value of type {typ}: {val}""".format(
                cols=cols, typ=type(column_widths), val=repr(column_widths)
            )
        )
    # ### row_heights ###
    if row_heights is None:
        heights = [(1.0 - vertical_spacing * (rows - 1)) / rows] * rows
    elif isinstance(row_heights, (list, tuple)) and len(row_heights) == rows:
        cum_sum = float(sum(row_heights))
        heights = []
        for h in row_heights:
            heights.append((1.0 - vertical_spacing * (rows - 1)) * (h / cum_sum))
        if row_dir < 0 and not use_legacy_row_heights_order:
            heights = list(reversed(heights))
    else:
        raise ValueError(
            """
The 'row_heights' argument to make_subplots must be a list of numbers of \
length {rows}.
    Received value of type {typ}: {val}""".format(
                rows=rows, typ=type(row_heights), val=repr(row_heights)
            )
        )
    # ### column_titles / row_titles ###
    if column_titles and not isinstance(column_titles, (list, tuple)):
        raise ValueError(
            """
The column_titles argument to make_subplots must be a list or tuple
    Received value of type {typ}: {val}""".format(
                typ=type(column_titles), val=repr(column_titles)
            )
        )
    if row_titles and not isinstance(row_titles, (list, tuple)):
        raise ValueError(
            """
The row_titles argument to make_subplots must be a list or tuple
    Received value of type {typ}: {val}""".format(
                typ=type(row_titles), val=repr(row_titles)
            )
        )
    # Init layout
    # -----------
    layout = go.Layout()
    # Build grid reference
    # --------------------
    # Build row/col sequence using 'row_dir' and 'col_dir'
    col_seq = range(cols)[::col_dir]
    row_seq = range(rows)[::row_dir]
    # Build 2D array of tuples of the start x and start y coordinate of each
    # subplot
    grid = [
        [
            (
                (sum(widths[:c]) + c * horizontal_spacing),
                (sum(heights[:r]) + r * vertical_spacing),
            )
            for c in col_seq
        ]
        for r in row_seq
    ]
    domains_grid = [[None for _ in range(cols)] for _ in range(rows)]
    # Initialize subplot reference lists for the grid and insets
    grid_ref = [[None for c in range(cols)] for r in range(rows)]
    list_of_domains = []  # added for subplot titles
    max_subplot_ids = _get_initial_max_subplot_ids()
    # Loop through specs -- (r, c) <-> (row, col)
    for r, spec_row in enumerate(specs):
        for c, spec in enumerate(spec_row):
            if spec is None:  # skip over None cells
                continue
            # ### Compute x and y domain for subplot ###
            c_spanned = c + spec["colspan"] - 1  # get spanned c
            r_spanned = r + spec["rowspan"] - 1  # get spanned r
            # Throw exception if 'colspan' | 'rowspan' is too large for grid
            if c_spanned >= cols:
                raise Exception(
                    "Some 'colspan' value is too large for " "this subplot grid."
                )
            if r_spanned >= rows:
                raise Exception(
                    "Some 'rowspan' value is too large for " "this subplot grid."
                )
            # Get x domain using grid and colspan
            x_s = grid[r][c][0] + spec["l"]
            x_e = grid[r][c_spanned][0] + widths[c_spanned] - spec["r"]
            x_domain = [x_s, x_e]
            # Get y domain (dep. on row_dir) using grid & r_spanned
            if row_dir > 0:
                y_s = grid[r][c][1] + spec["b"]
                y_e = grid[r_spanned][c][1] + heights[r_spanned] - spec["t"]
            else:
                y_s = grid[r_spanned][c][1] + spec["b"]
                y_e = grid[r][c][1] + heights[-1 - r] - spec["t"]
            if y_s < 0.0:
                # round for values very close to zero
                # handles some floating point errors
                if y_s > -0.01:
                    y_s = 0.0
                else:
                    raise Exception(
                        "A combination of the 'b' values, heights, and "
                        "number of subplots too large for this subplot grid."
                    )
            if y_s > 1.0:
                # round for values very close to one
                # handles some floating point errors
                if y_s < 1.01:
                    y_s = 1.0
                else:
                    raise Exception(
                        "A combination of the 'b' values, heights, and "
                        "number of subplots too large for this subplot grid."
                    )
            if y_e < 0.0:
                if y_e > -0.01:
                    y_e = 0.0
                else:
                    raise Exception(
                        "A combination of the 't' values, heights, and "
                        "number of subplots too large for this subplot grid."
                    )
            if y_e > 1.0:
                if y_e < 1.01:
                    y_e = 1.0
                else:
                    raise Exception(
                        "A combination of the 't' values, heights, and "
                        "number of subplots too large for this subplot grid."
                    )
            y_domain = [y_s, y_e]
            list_of_domains.append(x_domain)
            list_of_domains.append(y_domain)
            domains_grid[r][c] = [x_domain, y_domain]
            # ### construct subplot container ###
            subplot_type = spec["type"]
            secondary_y = spec["secondary_y"]
            subplot_refs = _init_subplot(
                layout, subplot_type, secondary_y, x_domain, y_domain, max_subplot_ids
            )
            grid_ref[r][c] = subplot_refs
    _configure_shared_axes(layout, grid_ref, specs, "x", shared_xaxes, row_dir)
    _configure_shared_axes(layout, grid_ref, specs, "y", shared_yaxes, row_dir)
    # Build inset reference
    # ---------------------
    # Loop through insets
    insets_ref = [None for inset in range(len(insets))] if insets else None
    if insets:
        for i_inset, inset in enumerate(insets):
            r = inset["cell"][0] - 1
            c = inset["cell"][1] - 1
            # Throw exception if r | c is out of range
            if not (0 <= r < rows):
                raise Exception(
                    "Some 'cell' row value is out of range. "
                    "Note: the starting cell is (1, 1)"
                )
            if not (0 <= c < cols):
                raise Exception(
                    "Some 'cell' col value is out of range. "
                    "Note: the starting cell is (1, 1)"
                )
            # Get inset x domain using grid
            x_s = grid[r][c][0] + inset["l"] * widths[c]
            if inset["w"] == "to_end":
                x_e = grid[r][c][0] + widths[c]
            else:
                x_e = x_s + inset["w"] * widths[c]
            x_domain = [x_s, x_e]
            # Get inset y domain using grid
            y_s = grid[r][c][1] + inset["b"] * heights[-1 - r]
            if inset["h"] == "to_end":
                y_e = grid[r][c][1] + heights[-1 - r]
            else:
                y_e = y_s + inset["h"] * heights[-1 - r]
            y_domain = [y_s, y_e]
            list_of_domains.append(x_domain)
            list_of_domains.append(y_domain)
            subplot_type = inset["type"]
            subplot_refs = _init_subplot(
                layout, subplot_type, False, x_domain, y_domain, max_subplot_ids
            )
            insets_ref[i_inset] = subplot_refs
    # Build grid_str
    # This is the message printed when print_grid=True
    grid_str = _build_grid_str(specs, grid_ref, insets, insets_ref, row_seq)
    # Add subplot titles
    plot_title_annotations = _build_subplot_title_annotations(
        subplot_titles, list_of_domains
    )
    layout["annotations"] = plot_title_annotations
    # Add column titles
    if column_titles:
        domains_list = []
        if row_dir > 0:
            for c in range(cols):
                domain_pair = domains_grid[-1][c]
                if domain_pair:
                    domains_list.extend(domain_pair)
        else:
            for c in range(cols):
                domain_pair = domains_grid[0][c]
                if domain_pair:
                    domains_list.extend(domain_pair)
        # Add column title annotations
        column_title_annotations = _build_subplot_title_annotations(
            column_titles, domains_list
        )
        layout["annotations"] += tuple(column_title_annotations)
    if row_titles:
        domains_list = []
        for r in range(rows):
            domain_pair = domains_grid[r][-1]
            if domain_pair:
                domains_list.extend(domain_pair)
        # Add row title annotations
        column_title_annotations = _build_subplot_title_annotations(
            row_titles, domains_list, title_edge="right"
        )
        layout["annotations"] += tuple(column_title_annotations)
    if x_title:
        domains_list = [(0, max_width), (0, 1)]
        # Add x-axis title annotation
        column_title_annotations = _build_subplot_title_annotations(
            [x_title], domains_list, title_edge="bottom", offset=30
        )
        layout["annotations"] += tuple(column_title_annotations)
    if y_title:
        domains_list = [(0, 1), (0, 1)]
        # Add y-axis title annotation
        column_title_annotations = _build_subplot_title_annotations(
            [y_title], domains_list, title_edge="left", offset=40
        )
        layout["annotations"] += tuple(column_title_annotations)
    # Handle displaying grid information
    if print_grid:
        print(grid_str)
    # Build resulting figure
    if figure is None:
        figure = go.Figure()
    figure.update_layout(layout)
    # Attach subplot grid info to the figure
    figure.__dict__["_grid_ref"] = grid_ref
    figure.__dict__["_grid_str"] = grid_str
    return figure
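A minimal usage sketch of the options validated above (a specs grid with a 3D cell, a colspan, a secondary_y axis, and explicit spacing within the checked limits); the trace data is illustrative only and not taken from the source:

import plotly.graph_objects as go
from plotly.subplots import make_subplots

# 2x2 grid: an 'xy' cell, a 3D 'scene' cell, and a bottom cell spanning both
# columns with a secondary y-axis (only allowed for 'xy' cells, as checked above).
fig = make_subplots(
    rows=2,
    cols=2,
    specs=[
        [{}, {"type": "scene"}],
        [{"colspan": 2, "secondary_y": True}, None],
    ],
    horizontal_spacing=0.1,  # must not exceed 1 / (cols - 1)
    vertical_spacing=0.15,   # must not exceed 1 / (rows - 1)
    subplot_titles=("xy cell", "scene cell", "wide cell"),
)
fig.add_trace(go.Scatter(y=[1, 3, 2]), row=1, col=1)
fig.add_trace(go.Scatter3d(x=[1, 2], y=[2, 1], z=[0, 1], mode="markers"), row=1, col=2)
fig.add_trace(go.Bar(y=[10, 20, 30]), row=2, col=1)
fig.add_trace(go.Scatter(y=[0.1, 0.2, 0.15]), row=2, col=1, secondary_y=True)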
 
 | 
	_subplots.make_subplots 
 | 
	File-Level 
 | 
					
	plotly.py 
 | 
	53 
 | 
	packages/python/plotly/plotly/graph_objs/_sunburst.py 
 | 
	    def __init__(
        self,
        arg=None,
        branchvalues=None,
        count=None,
        customdata=None,
        customdatasrc=None,
        domain=None,
        hoverinfo=None,
        hoverinfosrc=None,
        hoverlabel=None,
        hovertemplate=None,
        hovertemplatesrc=None,
        hovertext=None,
        hovertextsrc=None,
        ids=None,
        idssrc=None,
        insidetextfont=None,
        insidetextorientation=None,
        labels=None,
        labelssrc=None,
        leaf=None,
        legend=None,
        legendgrouptitle=None,
        legendrank=None,
        legendwidth=None,
        level=None,
        marker=None,
        maxdepth=None,
        meta=None,
        metasrc=None,
        name=None,
        opacity=None,
        outsidetextfont=None,
        parents=None,
        parentssrc=None,
        root=None,
        rotation=None,
        sort=None,
        stream=None,
        text=None,
        textfont=None,
        textinfo=None,
        textsrc=None,
        texttemplate=None,
        texttemplatesrc=None,
        uid=None,
        uirevision=None,
        values=None,
        valuessrc=None,
        visible=None,
        **kwargs,
    ):
        """
        Construct a new Sunburst object
        Visualize hierarchical data spanning outward radially from root
        to leaves. The sunburst sectors are determined by the entries
        in "labels" or "ids" and in "parents".
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.Sunburst`
        branchvalues
            Determines how the items in `values` are summed. When
            set to "total", items in `values` are taken to be value
            of all its descendants. When set to "remainder", items
            in `values` corresponding to the root and the branches
            sectors are taken to be the extra part not part of the
            sum of the values at their leaves.
        count
            Determines default for `values` when it is not
            provided, by inferring a 1 for each of the "leaves"
            and/or "branches", otherwise 0.
        customdata
            Assigns extra data to each datum. This may be useful
            when listening to hover, click and selection events.
            Note that "scatter" traces also append customdata items
            to the markers' DOM elements.
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        domain
            :class:`plotly.graph_objects.sunburst.Domain` instance
            or dict with compatible properties
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.sunburst.Hoverlabel`
            instance or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Finally, the template string has access
            to variables `currentPath`, `root`, `entry`,
            `percentRoot`, `percentEntry` and `percentParent`.
            Anything contained in tag `<extra>` is displayed in the
            secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Sets hover text elements associated with each sector.
            If a single string, the same string appears for all
            data points. If an array of strings, the items are
            mapped in order of this trace's sectors. To be seen,
            trace `hoverinfo` must contain a "text" flag.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids provide object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        insidetextfont
            Sets the font used for `textinfo` lying inside the
            sector.
        insidetextorientation
            Controls the orientation of the text inside chart
            sectors. When set to "auto", text may be oriented in
            any direction in order to be as big as possible in the
            middle of a sector. The "horizontal" option orients
            text to be parallel with the bottom of the chart, and
            may make text smaller in order to achieve that goal.
            The "radial" option orients text along the radius of
            the sector. The "tangential" option orients text
            perpendicular to the radius of the sector.
        labels
            Sets the labels of each of the sectors.
        labelssrc
            Sets the source reference on Chart Studio Cloud for
            `labels`.
        leaf
            :class:`plotly.graph_objects.sunburst.Leaf` instance or
            dict with compatible properties
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgrouptitle
            :class:`plotly.graph_objects.sunburst.Legendgrouptitle`
            instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When there are
            unranked or equal-rank items, shapes are displayed
            after traces, i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        level
            Sets the level from which this trace hierarchy is
            rendered. Set `level` to `''` to start from the root
            node in the hierarchy. Must be an "id" if `ids` is
            filled in, otherwise plotly attempts to find a matching
            item in `labels`.
        marker
            :class:`plotly.graph_objects.sunburst.Marker` instance
            or dict with compatible properties
        maxdepth
            Sets the number of rendered sectors from any given
            `level`. Set `maxdepth` to "-1" to render all the
            levels in the hierarchy.
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        opacity
            Sets the opacity of the trace.
        outsidetextfont
            Sets the font used for `textinfo` lying outside the
            sector. This option refers to the root of the hierarchy
            presented at the center of a sunburst graph. Please
            note that if a hierarchy has multiple root nodes, this
            option won't have any effect and `insidetextfont` would
            be used.
        parents
            Sets the parent sectors for each of the sectors. Empty
            string items '' are understood to reference the root
            node in the hierarchy. If `ids` is filled, `parents`
            items are understood to be "ids" themselves. When `ids`
            is not set, plotly attempts to find matching items in
            `labels`, but beware they must be unique.
        parentssrc
            Sets the source reference on Chart Studio Cloud for
            `parents`.
        root
            :class:`plotly.graph_objects.sunburst.Root` instance or
            dict with compatible properties
        rotation
            Rotates the whole diagram counterclockwise by some
            angle. By default the first slice starts at 3 o'clock.
        sort
            Determines whether or not the sectors are reordered
            from largest to smallest.
        stream
            :class:`plotly.graph_objects.sunburst.Stream` instance
            or dict with compatible properties
        text
            Sets text elements associated with each sector. If
            trace `textinfo` contains a "text" flag, these elements
            will be seen on the chart. If trace `hoverinfo`
            contains a "text" flag and "hovertext" is not set,
            these elements will be seen in the hover labels.
        textfont
            Sets the font used for `textinfo`.
        textinfo
            Determines which trace information appear on the graph.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        texttemplate
            Template string used for rendering the information text
            that appear on points. Note that this will override
            `textinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. Every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Finally, the template string has access
            to variables `currentPath`, `root`, `entry`,
            `percentRoot`, `percentEntry`, `percentParent`, `label`
            and `value`.
        texttemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `texttemplate`.
        uid
            Assign an id to this trace. Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        values
            Sets the values associated with each of the sectors.
            Use with `branchvalues` to determine how the values are
            summed.
        valuessrc
            Sets the source reference on Chart Studio Cloud for
            `values`.
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        Returns
        -------
        Sunburst
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__sunburst.Sunburst.__init__.txt 
 | 
	    def __init__(
        self,
        arg=None,
        branchvalues=None,
        count=None,
        customdata=None,
        customdatasrc=None,
        domain=None,
        hoverinfo=None,
        hoverinfosrc=None,
        hoverlabel=None,
        hovertemplate=None,
        hovertemplatesrc=None,
        hovertext=None,
        hovertextsrc=None,
        ids=None,
        idssrc=None,
        insidetextfont=None,
        insidetextorientation=None,
        labels=None,
        labelssrc=None,
        leaf=None,
        legend=None,
        legendgrouptitle=None,
        legendrank=None,
        legendwidth=None,
        level=None,
        marker=None,
        maxdepth=None,
        meta=None,
        metasrc=None,
        name=None,
        opacity=None,
        outsidetextfont=None,
        parents=None,
        parentssrc=None,
        root=None,
        rotation=None,
        sort=None,
        stream=None,
        text=None,
        textfont=None,
        textinfo=None,
        textsrc=None,
        texttemplate=None,
        texttemplatesrc=None,
        uid=None,
        uirevision=None,
        values=None,
        valuessrc=None,
        visible=None,
        **kwargs,
    ):
        """
        Construct a new Sunburst object
        Visualize hierarchical data spanning outward radially from root
        to leaves. The sunburst sectors are determined by the entries
        in "labels" or "ids" and in "parents".
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.Sunburst`
        branchvalues
            Determines how the items in `values` are summed. When
            set to "total", items in `values` are taken to be value
            of all its descendants. When set to "remainder", items
            in `values` corresponding to the root and the branches
            sectors are taken to be the extra part not part of the
            sum of the values at their leaves.
        count
            Determines default for `values` when it is not
            provided, by inferring a 1 for each of the "leaves"
            and/or "branches", otherwise 0.
        customdata
            Assigns extra data to each datum. This may be useful
            when listening to hover, click and selection events.
            Note that "scatter" traces also append customdata items
            to the markers' DOM elements.
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        domain
            :class:`plotly.graph_objects.sunburst.Domain` instance
            or dict with compatible properties
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.sunburst.Hoverlabel`
            instance or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Finally, the template string has access
            to variables `currentPath`, `root`, `entry`,
            `percentRoot`, `percentEntry` and `percentParent`.
            Anything contained in tag `<extra>` is displayed in the
            secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Sets hover text elements associated with each sector.
            If a single string, the same string appears for all
            data points. If an array of strings, the items are
            mapped in order of this trace's sectors. To be seen,
            trace `hoverinfo` must contain a "text" flag.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids provide object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        insidetextfont
            Sets the font used for `textinfo` lying inside the
            sector.
        insidetextorientation
            Controls the orientation of the text inside chart
            sectors. When set to "auto", text may be oriented in
            any direction in order to be as big as possible in the
            middle of a sector. The "horizontal" option orients
            text to be parallel with the bottom of the chart, and
            may make text smaller in order to achieve that goal.
            The "radial" option orients text along the radius of
            the sector. The "tangential" option orients text
            perpendicular to the radius of the sector.
        labels
            Sets the labels of each of the sectors.
        labelssrc
            Sets the source reference on Chart Studio Cloud for
            `labels`.
        leaf
            :class:`plotly.graph_objects.sunburst.Leaf` instance or
            dict with compatible properties
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgrouptitle
            :class:`plotly.graph_objects.sunburst.Legendgrouptitle`
            instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When there are
            unranked or equal-rank items, shapes are displayed
            after traces, i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        level
            Sets the level from which this trace hierarchy is
            rendered. Set `level` to `''` to start from the root
            node in the hierarchy. Must be an "id" if `ids` is
            filled in, otherwise plotly attempts to find a matching
            item in `labels`.
        marker
            :class:`plotly.graph_objects.sunburst.Marker` instance
            or dict with compatible properties
        maxdepth
            Sets the number of rendered sectors from any given
            `level`. Set `maxdepth` to "-1" to render all the
            levels in the hierarchy.
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        opacity
            Sets the opacity of the trace.
        outsidetextfont
            Sets the font used for `textinfo` lying outside the
            sector. This option refers to the root of the hierarchy
            presented at the center of a sunburst graph. Please
            note that if a hierarchy has multiple root nodes, this
            option won't have any effect and `insidetextfont` would
            be used.
        parents
            Sets the parent sectors for each of the sectors. Empty
            string items '' are understood to reference the root
            node in the hierarchy. If `ids` is filled, `parents`
            items are understood to be "ids" themselves. When `ids`
            is not set, plotly attempts to find matching items in
            `labels`, but beware they must be unique.
        parentssrc
            Sets the source reference on Chart Studio Cloud for
            `parents`.
        root
            :class:`plotly.graph_objects.sunburst.Root` instance or
            dict with compatible properties
        rotation
            Rotates the whole diagram counterclockwise by some
            angle. By default the first slice starts at 3 o'clock.
        sort
            Determines whether or not the sectors are reordered
            from largest to smallest.
        stream
            :class:`plotly.graph_objects.sunburst.Stream` instance
            or dict with compatible properties
        text
            Sets text elements associated with each sector. If
            trace `textinfo` contains a "text" flag, these elements
            will be seen on the chart. If trace `hoverinfo`
            contains a "text" flag and "hovertext" is not set,
            these elements will be seen in the hover labels.
        textfont
            Sets the font used for `textinfo`.
        textinfo
            Determines which trace information appear on the graph.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        texttemplate
            Template string used for rendering the information text
            that appear on points. Note that this will override
            `textinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. Every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Finally, the template string has access
            to variables `currentPath`, `root`, `entry`,
            `percentRoot`, `percentEntry`, `percentParent`, `label`
            and `value`.
        texttemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `texttemplate`.
        uid
            Assign an id to this trace. Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        values
            Sets the values associated with each of the sectors.
            Use with `branchvalues` to determine how the values are
            summed.
        valuessrc
            Sets the source reference on Chart Studio Cloud for
            `values`.
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        Returns
        -------
        Sunburst
        """
        super(Sunburst, self).__init__("sunburst")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.Sunburst
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Sunburst`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("branchvalues", None)
        _v = branchvalues if branchvalues is not None else _v
        if _v is not None:
            self["branchvalues"] = _v
        _v = arg.pop("count", None)
        _v = count if count is not None else _v
        if _v is not None:
            self["count"] = _v
        _v = arg.pop("customdata", None)
        _v = customdata if customdata is not None else _v
        if _v is not None:
            self["customdata"] = _v
        _v = arg.pop("customdatasrc", None)
        _v = customdatasrc if customdatasrc is not None else _v
        if _v is not None:
            self["customdatasrc"] = _v
        _v = arg.pop("domain", None)
        _v = domain if domain is not None else _v
        if _v is not None:
            self["domain"] = _v
        _v = arg.pop("hoverinfo", None)
        _v = hoverinfo if hoverinfo is not None else _v
        if _v is not None:
            self["hoverinfo"] = _v
        _v = arg.pop("hoverinfosrc", None)
        _v = hoverinfosrc if hoverinfosrc is not None else _v
        if _v is not None:
            self["hoverinfosrc"] = _v
        _v = arg.pop("hoverlabel", None)
        _v = hoverlabel if hoverlabel is not None else _v
        if _v is not None:
            self["hoverlabel"] = _v
        _v = arg.pop("hovertemplate", None)
        _v = hovertemplate if hovertemplate is not None else _v
        if _v is not None:
            self["hovertemplate"] = _v
        _v = arg.pop("hovertemplatesrc", None)
        _v = hovertemplatesrc if hovertemplatesrc is not None else _v
        if _v is not None:
            self["hovertemplatesrc"] = _v
        _v = arg.pop("hovertext", None)
        _v = hovertext if hovertext is not None else _v
        if _v is not None:
            self["hovertext"] = _v
        _v = arg.pop("hovertextsrc", None)
        _v = hovertextsrc if hovertextsrc is not None else _v
        if _v is not None:
            self["hovertextsrc"] = _v
        _v = arg.pop("ids", None)
        _v = ids if ids is not None else _v
        if _v is not None:
            self["ids"] = _v
        _v = arg.pop("idssrc", None)
        _v = idssrc if idssrc is not None else _v
        if _v is not None:
            self["idssrc"] = _v
        _v = arg.pop("insidetextfont", None)
        _v = insidetextfont if insidetextfont is not None else _v
        if _v is not None:
            self["insidetextfont"] = _v
        _v = arg.pop("insidetextorientation", None)
        _v = insidetextorientation if insidetextorientation is not None else _v
        if _v is not None:
            self["insidetextorientation"] = _v
        _v = arg.pop("labels", None)
        _v = labels if labels is not None else _v
        if _v is not None:
            self["labels"] = _v
        _v = arg.pop("labelssrc", None)
        _v = labelssrc if labelssrc is not None else _v
        if _v is not None:
            self["labelssrc"] = _v
        _v = arg.pop("leaf", None)
        _v = leaf if leaf is not None else _v
        if _v is not None:
            self["leaf"] = _v
        _v = arg.pop("legend", None)
        _v = legend if legend is not None else _v
        if _v is not None:
            self["legend"] = _v
        _v = arg.pop("legendgrouptitle", None)
        _v = legendgrouptitle if legendgrouptitle is not None else _v
        if _v is not None:
            self["legendgrouptitle"] = _v
        _v = arg.pop("legendrank", None)
        _v = legendrank if legendrank is not None else _v
        if _v is not None:
            self["legendrank"] = _v
        _v = arg.pop("legendwidth", None)
        _v = legendwidth if legendwidth is not None else _v
        if _v is not None:
            self["legendwidth"] = _v
        _v = arg.pop("level", None)
        _v = level if level is not None else _v
        if _v is not None:
            self["level"] = _v
        _v = arg.pop("marker", None)
        _v = marker if marker is not None else _v
        if _v is not None:
            self["marker"] = _v
        _v = arg.pop("maxdepth", None)
        _v = maxdepth if maxdepth is not None else _v
        if _v is not None:
            self["maxdepth"] = _v
        _v = arg.pop("meta", None)
        _v = meta if meta is not None else _v
        if _v is not None:
            self["meta"] = _v
        _v = arg.pop("metasrc", None)
        _v = metasrc if metasrc is not None else _v
        if _v is not None:
            self["metasrc"] = _v
        _v = arg.pop("name", None)
        _v = name if name is not None else _v
        if _v is not None:
            self["name"] = _v
        _v = arg.pop("opacity", None)
        _v = opacity if opacity is not None else _v
        if _v is not None:
            self["opacity"] = _v
        _v = arg.pop("outsidetextfont", None)
        _v = outsidetextfont if outsidetextfont is not None else _v
        if _v is not None:
            self["outsidetextfont"] = _v
        _v = arg.pop("parents", None)
        _v = parents if parents is not None else _v
        if _v is not None:
            self["parents"] = _v
        _v = arg.pop("parentssrc", None)
        _v = parentssrc if parentssrc is not None else _v
        if _v is not None:
            self["parentssrc"] = _v
        _v = arg.pop("root", None)
        _v = root if root is not None else _v
        if _v is not None:
            self["root"] = _v
        _v = arg.pop("rotation", None)
        _v = rotation if rotation is not None else _v
        if _v is not None:
            self["rotation"] = _v
        _v = arg.pop("sort", None)
        _v = sort if sort is not None else _v
        if _v is not None:
            self["sort"] = _v
        _v = arg.pop("stream", None)
        _v = stream if stream is not None else _v
        if _v is not None:
            self["stream"] = _v
        _v = arg.pop("text", None)
        _v = text if text is not None else _v
        if _v is not None:
            self["text"] = _v
        _v = arg.pop("textfont", None)
        _v = textfont if textfont is not None else _v
        if _v is not None:
            self["textfont"] = _v
        _v = arg.pop("textinfo", None)
        _v = textinfo if textinfo is not None else _v
        if _v is not None:
            self["textinfo"] = _v
        _v = arg.pop("textsrc", None)
        _v = textsrc if textsrc is not None else _v
        if _v is not None:
            self["textsrc"] = _v
        _v = arg.pop("texttemplate", None)
        _v = texttemplate if texttemplate is not None else _v
        if _v is not None:
            self["texttemplate"] = _v
        _v = arg.pop("texttemplatesrc", None)
        _v = texttemplatesrc if texttemplatesrc is not None else _v
        if _v is not None:
            self["texttemplatesrc"] = _v
        _v = arg.pop("uid", None)
        _v = uid if uid is not None else _v
        if _v is not None:
            self["uid"] = _v
        _v = arg.pop("uirevision", None)
        _v = uirevision if uirevision is not None else _v
        if _v is not None:
            self["uirevision"] = _v
        _v = arg.pop("values", None)
        _v = values if values is not None else _v
        if _v is not None:
            self["values"] = _v
        _v = arg.pop("valuessrc", None)
        _v = valuessrc if valuessrc is not None else _v
        if _v is not None:
            self["valuessrc"] = _v
        _v = arg.pop("visible", None)
        _v = visible if visible is not None else _v
        if _v is not None:
            self["visible"] = _v
        # Read-only literals
        # ------------------
        self._props["type"] = "sunburst"
        arg.pop("type", None)
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
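Since the constructor above only copies keyword arguments (or keys of `arg`, with explicit keywords taking precedence) into trace properties, a minimal construction sketch looks like this; the hierarchy data is made up for illustration:

import plotly.graph_objects as go

sun = go.Sunburst(
    labels=["root", "A", "B", "A1", "A2"],
    parents=["", "root", "root", "A", "A"],
    values=[10, 6, 4, 3, 3],
    branchvalues="total",  # each parent's value is the total of its descendants
)
# Equivalent: pass the same properties as a dict via `arg`.
sun_from_dict = go.Sunburst(dict(labels=["root", "A"], parents=["", "root"]))
fig = go.Figure(data=[sun])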
 
 | 
	_sunburst.Sunburst.__init__ 
 | 
	Self-Contained 
 | 
					
	plotly.py 
 | 
	56 
 | 
	packages/python/plotly/plotly/io/_templates.py 
 | 
	def to_templated(fig, skip=("title", "text")):
    """
    Return a copy of a figure where all styling properties have been moved
    into the figure's template.  The template property of the resulting figure
    may then be used to set the default styling of other figures.
    Parameters
    ----------
    fig
        Figure object or dict representing a figure
    skip
        A collection of names of properties to skip when moving properties to
        the template. Defaults to ('title', 'text') so that the text
        of figure titles, axis titles, and annotations does not become part of
        the template
    Examples
    --------
    Imports
    >>> import plotly.graph_objs as go
    >>> import plotly.io as pio
    Construct a figure with large courier text
    >>> fig = go.Figure(layout={'title': 'Figure Title',
    ...                         'font': {'size': 20, 'family': 'Courier'},
    ...                         'template':"none"})
    >>> fig # doctest: +NORMALIZE_WHITESPACE
    Figure({
        'data': [],
        'layout': {'font': {'family': 'Courier', 'size': 20},
                   'template': '...', 'title': {'text': 'Figure Title'}}
    })
    Convert to a figure with a template. Note how the 'font' properties have
    been moved into the template property.
    >>> templated_fig = pio.to_templated(fig)
    >>> templated_fig.layout.template
    layout.Template({
        'layout': {'font': {'family': 'Courier', 'size': 20}}
    })
    >>> templated_fig
    Figure({
        'data': [], 'layout': {'template': '...', 'title': {'text': 'Figure Title'}}
    })
    Next create a new figure with this template
    >>> fig2 = go.Figure(layout={
    ...     'title': 'Figure 2 Title',
    ...     'template': templated_fig.layout.template})
    >>> fig2.layout.template
    layout.Template({
        'layout': {'font': {'family': 'Courier', 'size': 20}}
    })
    The default font in fig2 will now be size 20 Courier.
    Next, register as a named template...
    >>> pio.templates['large_courier'] = templated_fig.layout.template
    and specify this template by name when constructing a figure.
    >>> go.Figure(layout={
    ...     'title': 'Figure 3 Title',
    ...     'template': 'large_courier'}) # doctest: +ELLIPSIS
    Figure(...)
    Finally, set this as the default template to be applied to all new figures
    >>> pio.templates.default = 'large_courier'
    >>> fig = go.Figure(layout={'title': 'Figure 4 Title'})
    >>> fig.layout.template
    layout.Template({
        'layout': {'font': {'family': 'Courier', 'size': 20}}
    })
    Returns
    -------
    go.Figure
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__templates.to_templated.txt 
 | 
	def to_templated(fig, skip=("title", "text")):
    """
    Return a copy of a figure where all styling properties have been moved
    into the figure's template.  The template property of the resulting figure
    may then be used to set the default styling of other figures.
    Parameters
    ----------
    fig
        Figure object or dict representing a figure
    skip
        A collection of names of properties to skip when moving properties to
        the template. Defaults to ('title', 'text') so that the text
        of figure titles, axis titles, and annotations does not become part of
        the template
    Examples
    --------
    Imports
    >>> import plotly.graph_objs as go
    >>> import plotly.io as pio
    Construct a figure with large courier text
    >>> fig = go.Figure(layout={'title': 'Figure Title',
    ...                         'font': {'size': 20, 'family': 'Courier'},
    ...                         'template':"none"})
    >>> fig # doctest: +NORMALIZE_WHITESPACE
    Figure({
        'data': [],
        'layout': {'font': {'family': 'Courier', 'size': 20},
                   'template': '...', 'title': {'text': 'Figure Title'}}
    })
    Convert to a figure with a template. Note how the 'font' properties have
    been moved into the template property.
    >>> templated_fig = pio.to_templated(fig)
    >>> templated_fig.layout.template
    layout.Template({
        'layout': {'font': {'family': 'Courier', 'size': 20}}
    })
    >>> templated_fig
    Figure({
        'data': [], 'layout': {'template': '...', 'title': {'text': 'Figure Title'}}
    })
    Next create a new figure with this template
    >>> fig2 = go.Figure(layout={
    ...     'title': 'Figure 2 Title',
    ...     'template': templated_fig.layout.template})
    >>> fig2.layout.template
    layout.Template({
        'layout': {'font': {'family': 'Courier', 'size': 20}}
    })
    The default font in fig2 will now be size 20 Courier.
    Next, register as a named template...
    >>> pio.templates['large_courier'] = templated_fig.layout.template
    and specify this template by name when constructing a figure.
    >>> go.Figure(layout={
    ...     'title': 'Figure 3 Title',
    ...     'template': 'large_courier'}) # doctest: +ELLIPSIS
    Figure(...)
    Finally, set this as the default template to be applied to all new figures
    >>> pio.templates.default = 'large_courier'
    >>> fig = go.Figure(layout={'title': 'Figure 4 Title'})
    >>> fig.layout.template
    layout.Template({
        'layout': {'font': {'family': 'Courier', 'size': 20}}
    })
    Returns
    -------
    go.Figure
    """
    # process fig
    from plotly.basedatatypes import BaseFigure
    from plotly.graph_objs import Figure
    if not isinstance(fig, BaseFigure):
        fig = Figure(fig)
    # Process skip
    if not skip:
        skip = set()
    else:
        skip = set(skip)
    # Always skip uids
    skip.add("uid")
    # Initialize templated figure with deep copy of input figure
    templated_fig = copy.deepcopy(fig)
    # Handle layout
    walk_push_to_template(
        templated_fig.layout, templated_fig.layout.template.layout, skip=skip
    )
    # Handle traces
    trace_type_indexes = {}
    for trace in list(templated_fig.data):
        template_index = trace_type_indexes.get(trace.type, 0)
        # Extend template traces if necessary
        template_traces = list(templated_fig.layout.template.data[trace.type])
        while len(template_traces) <= template_index:
            # Append empty trace
            template_traces.append(trace.__class__())
        # Get corresponding template trace
        template_trace = template_traces[template_index]
        # Perform push properties to template
        walk_push_to_template(trace, template_trace, skip=skip)
        # Update template traces in templated_fig
        templated_fig.layout.template.data[trace.type] = template_traces
        # Update trace_type_indexes
        trace_type_indexes[trace.type] = template_index + 1
    # Remove useless trace arrays
    any_non_empty = False
    for trace_type in templated_fig.layout.template.data:
        traces = templated_fig.layout.template.data[trace_type]
        is_empty = [trace.to_plotly_json() == {"type": trace_type} for trace in traces]
        if all(is_empty):
            templated_fig.layout.template.data[trace_type] = None
        else:
            any_non_empty = True
    # Check if we can remove the data key altogether
    if not any_non_empty:
        templated_fig.layout.template.data = None
    return templated_fig
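A minimal usage sketch (not part of the source above), assuming only what the implementation shows: `skip` may be any iterable of property names (it is converted with `set(skip)`), and "uid" is always skipped.
>>> import plotly.graph_objects as go
>>> import plotly.io as pio
>>> fig = go.Figure(layout={'title': 'Report',
...                         'font': {'family': 'Courier', 'size': 18}})
>>> templated = pio.to_templated(fig, skip={'title'})  # keep the title text on the figure
>>> pio.templates['report'] = templated.layout.template
>>> pio.templates.default = 'report'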
 
 | 
	_templates.to_templated 
 | 
	File-Level 
 | 
					
	plotly.py 
 | 
	58 
 | 
	packages/python/plotly/plotly/graph_objs/scatter/_textfont.py 
 | 
	    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        lineposition=None,
        linepositionsrc=None,
        shadow=None,
        shadowsrc=None,
        size=None,
        sizesrc=None,
        style=None,
        stylesrc=None,
        textcase=None,
        textcasesrc=None,
        variant=None,
        variantsrc=None,
        weight=None,
        weightsrc=None,
        **kwargs,
    ):
        """
        Construct a new Textfont object
        Sets the text font.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scatter.Textfont`
        color
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system on which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            `family`.
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        linepositionsrc
            Sets the source reference on Chart Studio Cloud for
            `lineposition`.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        shadowsrc
            Sets the source reference on Chart Studio Cloud for
            `shadow`.
        size
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.
        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        stylesrc
            Sets the source reference on Chart Studio Cloud for
            `style`.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        textcasesrc
            Sets the source reference on Chart Studio Cloud for
            `textcase`.
        variant
            Sets the variant of the font.
        variantsrc
            Sets the source reference on Chart Studio Cloud for
            `variant`.
        weight
            Sets the weight (or boldness) of the font.
        weightsrc
            Sets the source reference on Chart Studio Cloud for
            `weight`.
        Returns
        -------
        Textfont
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__textfont.Textfont.__init__.txt 
 | 
	    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        lineposition=None,
        linepositionsrc=None,
        shadow=None,
        shadowsrc=None,
        size=None,
        sizesrc=None,
        style=None,
        stylesrc=None,
        textcase=None,
        textcasesrc=None,
        variant=None,
        variantsrc=None,
        weight=None,
        weightsrc=None,
        **kwargs,
    ):
        """
        Construct a new Textfont object
        Sets the text font.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scatter.Textfont`
        color
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system on which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            `family`.
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        linepositionsrc
            Sets the source reference on Chart Studio Cloud for
            `lineposition`.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        shadowsrc
            Sets the source reference on Chart Studio Cloud for
            `shadow`.
        size
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.
        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        stylesrc
            Sets the source reference on Chart Studio Cloud for
            `style`.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        textcasesrc
            Sets the source reference on Chart Studio Cloud for
            `textcase`.
        variant
            Sets the variant of the font.
        variantsrc
            Sets the source reference on Chart Studio Cloud for
            `variant`.
        weight
            Sets the weight (or boldness) of the font.
        weightsrc
            Sets the source reference on Chart Studio Cloud for
            `weight`.
        Returns
        -------
        Textfont
        """
        super(Textfont, self).__init__("textfont")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatter.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter.Textfont`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("familysrc", None)
        _v = familysrc if familysrc is not None else _v
        if _v is not None:
            self["familysrc"] = _v
        _v = arg.pop("lineposition", None)
        _v = lineposition if lineposition is not None else _v
        if _v is not None:
            self["lineposition"] = _v
        _v = arg.pop("linepositionsrc", None)
        _v = linepositionsrc if linepositionsrc is not None else _v
        if _v is not None:
            self["linepositionsrc"] = _v
        _v = arg.pop("shadow", None)
        _v = shadow if shadow is not None else _v
        if _v is not None:
            self["shadow"] = _v
        _v = arg.pop("shadowsrc", None)
        _v = shadowsrc if shadowsrc is not None else _v
        if _v is not None:
            self["shadowsrc"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("sizesrc", None)
        _v = sizesrc if sizesrc is not None else _v
        if _v is not None:
            self["sizesrc"] = _v
        _v = arg.pop("style", None)
        _v = style if style is not None else _v
        if _v is not None:
            self["style"] = _v
        _v = arg.pop("stylesrc", None)
        _v = stylesrc if stylesrc is not None else _v
        if _v is not None:
            self["stylesrc"] = _v
        _v = arg.pop("textcase", None)
        _v = textcase if textcase is not None else _v
        if _v is not None:
            self["textcase"] = _v
        _v = arg.pop("textcasesrc", None)
        _v = textcasesrc if textcasesrc is not None else _v
        if _v is not None:
            self["textcasesrc"] = _v
        _v = arg.pop("variant", None)
        _v = variant if variant is not None else _v
        if _v is not None:
            self["variant"] = _v
        _v = arg.pop("variantsrc", None)
        _v = variantsrc if variantsrc is not None else _v
        if _v is not None:
            self["variantsrc"] = _v
        _v = arg.pop("weight", None)
        _v = weight if weight is not None else _v
        if _v is not None:
            self["weight"] = _v
        _v = arg.pop("weightsrc", None)
        _v = weightsrc if weightsrc is not None else _v
        if _v is not None:
            self["weightsrc"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
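For orientation only, a hedged sketch (not from the source): a scatter.Textfont is normally supplied through the `textfont` attribute of a Scatter trace; the property values below are illustrative.
>>> import plotly.graph_objects as go
>>> fig = go.Figure(go.Scatter(
...     x=[1, 2, 3], y=[2, 1, 3],
...     mode='markers+text', text=['a', 'b', 'c'],
...     textfont=dict(family='Courier New', size=14, color='crimson')))
>>> fig.data[0].textfont.family
'Courier New'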
 
 | 
	_textfont.Textfont.__init__ 
 | 
	Self-Contained 
 | 
					
	plotly.py 
 | 
	59 
 | 
	packages/python/plotly/plotly/graph_objs/_treemap.py 
 | 
	    def __init__(
        self,
        arg=None,
        branchvalues=None,
        count=None,
        customdata=None,
        customdatasrc=None,
        domain=None,
        hoverinfo=None,
        hoverinfosrc=None,
        hoverlabel=None,
        hovertemplate=None,
        hovertemplatesrc=None,
        hovertext=None,
        hovertextsrc=None,
        ids=None,
        idssrc=None,
        insidetextfont=None,
        labels=None,
        labelssrc=None,
        legend=None,
        legendgrouptitle=None,
        legendrank=None,
        legendwidth=None,
        level=None,
        marker=None,
        maxdepth=None,
        meta=None,
        metasrc=None,
        name=None,
        opacity=None,
        outsidetextfont=None,
        parents=None,
        parentssrc=None,
        pathbar=None,
        root=None,
        sort=None,
        stream=None,
        text=None,
        textfont=None,
        textinfo=None,
        textposition=None,
        textsrc=None,
        texttemplate=None,
        texttemplatesrc=None,
        tiling=None,
        uid=None,
        uirevision=None,
        values=None,
        valuessrc=None,
        visible=None,
        **kwargs,
    ):
        """
        Construct a new Treemap object
        Visualize hierarchical data from leaves (and/or outer branches)
        towards root with rectangles. The treemap sectors are
        determined by the entries in "labels" or "ids" and in
        "parents".
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.Treemap`
        branchvalues
            Determines how the items in `values` are summed. When
            set to "total", items in `values` are taken to be value
            of all its descendants. When set to "remainder", items
            in `values` corresponding to the root and the branches
            sectors are taken to be the extra part not part of the
            sum of the values at their leaves.
        count
            Determines default for `values` when it is not
            provided, by inferring a 1 for each of the "leaves"
            and/or "branches", otherwise 0.
        customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        domain
            :class:`plotly.graph_objects.treemap.Domain` instance
            or dict with compatible properties
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.treemap.Hoverlabel`
            instance or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, all attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Finally, the template string has access
            to variables `currentPath`, `root`, `entry`,
            `percentRoot`, `percentEntry` and `percentParent`.
            Anything contained in tag `<extra>` is displayed in the
            secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Sets hover text elements associated with each sector.
            If a single string, the same string appears for all
            data points. If an array of string, the items are
            mapped in order of this trace's sectors. To be seen,
            trace `hoverinfo` must contain a "text" flag.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        insidetextfont
            Sets the font used for `textinfo` lying inside the
            sector.
        labels
            Sets the labels of each of the sectors.
        labelssrc
            Sets the source reference on Chart Studio Cloud for
            `labels`.
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgrouptitle
            :class:`plotly.graph_objects.treemap.Legendgrouptitle`
            instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When having
            unranked or equal rank items shapes would be displayed
            after traces i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        level
            Sets the level from which this trace hierarchy is
            rendered. Set `level` to `''` to start from the root
            node in the hierarchy. Must be an "id" if `ids` is
            filled in, otherwise plotly attempts to find a matching
            item in `labels`.
        marker
            :class:`plotly.graph_objects.treemap.Marker` instance
            or dict with compatible properties
        maxdepth
            Sets the number of rendered sectors from any given
            `level`. Set `maxdepth` to "-1" to render all the
            levels in the hierarchy.
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        opacity
            Sets the opacity of the trace.
        outsidetextfont
            Sets the font used for `textinfo` lying outside the
            sector. This option refers to the root of the hierarchy
            presented on top left corner of a treemap graph. Please
            note that if a hierarchy has multiple root nodes, this
            option won't have any effect and `insidetextfont` would
            be used.
        parents
            Sets the parent sectors for each of the sectors. Empty
            string items '' are understood to reference the root
            node in the hierarchy. If `ids` is filled, `parents`
            items are understood to be "ids" themselves. When `ids`
            is not set, plotly attempts to find matching items in
            `labels`, but beware they must be unique.
        parentssrc
            Sets the source reference on Chart Studio Cloud for
            `parents`.
        pathbar
            :class:`plotly.graph_objects.treemap.Pathbar` instance
            or dict with compatible properties
        root
            :class:`plotly.graph_objects.treemap.Root` instance or
            dict with compatible properties
        sort
            Determines whether or not the sectors are reordered
            from largest to smallest.
        stream
            :class:`plotly.graph_objects.treemap.Stream` instance
            or dict with compatible properties
        text
            Sets text elements associated with each sector. If
            trace `textinfo` contains a "text" flag, these elements
            will be seen on the chart. If trace `hoverinfo`
            contains a "text" flag and "hovertext" is not set,
            these elements will be seen in the hover labels.
        textfont
            Sets the font used for `textinfo`.
        textinfo
            Determines which trace information appear on the graph.
        textposition
            Sets the positions of the `text` elements.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        texttemplate
            Template string used for rendering the information text
            that appear on points. Note that this will override
            `textinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. All attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Finally, the template string has access
            to variables `currentPath`, `root`, `entry`,
            `percentRoot`, `percentEntry`, `percentParent`, `label`
            and `value`.
        texttemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `texttemplate`.
        tiling
            :class:`plotly.graph_objects.treemap.Tiling` instance
            or dict with compatible properties
        uid
            Assign an id to this trace. Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        values
            Sets the values associated with each of the sectors.
            Use with `branchvalues` to determine how the values are
            summed.
        valuessrc
            Sets the source reference on Chart Studio Cloud for
            `values`.
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        Returns
        -------
        Treemap
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__treemap.Treemap.__init__.txt 
 | 
	    def __init__(
        self,
        arg=None,
        branchvalues=None,
        count=None,
        customdata=None,
        customdatasrc=None,
        domain=None,
        hoverinfo=None,
        hoverinfosrc=None,
        hoverlabel=None,
        hovertemplate=None,
        hovertemplatesrc=None,
        hovertext=None,
        hovertextsrc=None,
        ids=None,
        idssrc=None,
        insidetextfont=None,
        labels=None,
        labelssrc=None,
        legend=None,
        legendgrouptitle=None,
        legendrank=None,
        legendwidth=None,
        level=None,
        marker=None,
        maxdepth=None,
        meta=None,
        metasrc=None,
        name=None,
        opacity=None,
        outsidetextfont=None,
        parents=None,
        parentssrc=None,
        pathbar=None,
        root=None,
        sort=None,
        stream=None,
        text=None,
        textfont=None,
        textinfo=None,
        textposition=None,
        textsrc=None,
        texttemplate=None,
        texttemplatesrc=None,
        tiling=None,
        uid=None,
        uirevision=None,
        values=None,
        valuessrc=None,
        visible=None,
        **kwargs,
    ):
        """
        Construct a new Treemap object
        Visualize hierarchical data from leaves (and/or outer branches)
        towards root with rectangles. The treemap sectors are
        determined by the entries in "labels" or "ids" and in
        "parents".
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.Treemap`
        branchvalues
            Determines how the items in `values` are summed. When
            set to "total", items in `values` are taken to be value
            of all its descendants. When set to "remainder", items
            in `values` corresponding to the root and the branches
            sectors are taken to be the extra part not part of the
            sum of the values at their leaves.
        count
            Determines default for `values` when it is not
            provided, by inferring a 1 for each of the "leaves"
            and/or "branches", otherwise 0.
        customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        domain
            :class:`plotly.graph_objects.treemap.Domain` instance
            or dict with compatible properties
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.treemap.Hoverlabel`
            instance or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, all attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Finally, the template string has access
            to variables `currentPath`, `root`, `entry`,
            `percentRoot`, `percentEntry` and `percentParent`.
            Anything contained in tag `<extra>` is displayed in the
            secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Sets hover text elements associated with each sector.
            If a single string, the same string appears for all
            data points. If an array of string, the items are
            mapped in order of this trace's sectors. To be seen,
            trace `hoverinfo` must contain a "text" flag.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        insidetextfont
            Sets the font used for `textinfo` lying inside the
            sector.
        labels
            Sets the labels of each of the sectors.
        labelssrc
            Sets the source reference on Chart Studio Cloud for
            `labels`.
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgrouptitle
            :class:`plotly.graph_objects.treemap.Legendgrouptitle`
            instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When having
            unranked or equal rank items shapes would be displayed
            after traces i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        level
            Sets the level from which this trace hierarchy is
            rendered. Set `level` to `''` to start from the root
            node in the hierarchy. Must be an "id" if `ids` is
            filled in, otherwise plotly attempts to find a matching
            item in `labels`.
        marker
            :class:`plotly.graph_objects.treemap.Marker` instance
            or dict with compatible properties
        maxdepth
            Sets the number of rendered sectors from any given
            `level`. Set `maxdepth` to "-1" to render all the
            levels in the hierarchy.
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        opacity
            Sets the opacity of the trace.
        outsidetextfont
            Sets the font used for `textinfo` lying outside the
            sector. This option refers to the root of the hierarchy
            presented on top left corner of a treemap graph. Please
            note that if a hierarchy has multiple root nodes, this
            option won't have any effect and `insidetextfont` would
            be used.
        parents
            Sets the parent sectors for each of the sectors. Empty
            string items '' are understood to reference the root
            node in the hierarchy. If `ids` is filled, `parents`
            items are understood to be "ids" themselves. When `ids`
            is not set, plotly attempts to find matching items in
            `labels`, but beware they must be unique.
        parentssrc
            Sets the source reference on Chart Studio Cloud for
            `parents`.
        pathbar
            :class:`plotly.graph_objects.treemap.Pathbar` instance
            or dict with compatible properties
        root
            :class:`plotly.graph_objects.treemap.Root` instance or
            dict with compatible properties
        sort
            Determines whether or not the sectors are reordered
            from largest to smallest.
        stream
            :class:`plotly.graph_objects.treemap.Stream` instance
            or dict with compatible properties
        text
            Sets text elements associated with each sector. If
            trace `textinfo` contains a "text" flag, these elements
            will be seen on the chart. If trace `hoverinfo`
            contains a "text" flag and "hovertext" is not set,
            these elements will be seen in the hover labels.
        textfont
            Sets the font used for `textinfo`.
        textinfo
            Determines which trace information appear on the graph.
        textposition
            Sets the positions of the `text` elements.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        texttemplate
            Template string used for rendering the information text
            that appear on points. Note that this will override
            `textinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. All attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Finally, the template string has access
            to variables `currentPath`, `root`, `entry`,
            `percentRoot`, `percentEntry`, `percentParent`, `label`
            and `value`.
        texttemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `texttemplate`.
        tiling
            :class:`plotly.graph_objects.treemap.Tiling` instance
            or dict with compatible properties
        uid
            Assign an id to this trace. Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        values
            Sets the values associated with each of the sectors.
            Use with `branchvalues` to determine how the values are
            summed.
        valuessrc
            Sets the source reference on Chart Studio Cloud for
            `values`.
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        Returns
        -------
        Treemap
        """
        super(Treemap, self).__init__("treemap")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.Treemap
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Treemap`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("branchvalues", None)
        _v = branchvalues if branchvalues is not None else _v
        if _v is not None:
            self["branchvalues"] = _v
        _v = arg.pop("count", None)
        _v = count if count is not None else _v
        if _v is not None:
            self["count"] = _v
        _v = arg.pop("customdata", None)
        _v = customdata if customdata is not None else _v
        if _v is not None:
            self["customdata"] = _v
        _v = arg.pop("customdatasrc", None)
        _v = customdatasrc if customdatasrc is not None else _v
        if _v is not None:
            self["customdatasrc"] = _v
        _v = arg.pop("domain", None)
        _v = domain if domain is not None else _v
        if _v is not None:
            self["domain"] = _v
        _v = arg.pop("hoverinfo", None)
        _v = hoverinfo if hoverinfo is not None else _v
        if _v is not None:
            self["hoverinfo"] = _v
        _v = arg.pop("hoverinfosrc", None)
        _v = hoverinfosrc if hoverinfosrc is not None else _v
        if _v is not None:
            self["hoverinfosrc"] = _v
        _v = arg.pop("hoverlabel", None)
        _v = hoverlabel if hoverlabel is not None else _v
        if _v is not None:
            self["hoverlabel"] = _v
        _v = arg.pop("hovertemplate", None)
        _v = hovertemplate if hovertemplate is not None else _v
        if _v is not None:
            self["hovertemplate"] = _v
        _v = arg.pop("hovertemplatesrc", None)
        _v = hovertemplatesrc if hovertemplatesrc is not None else _v
        if _v is not None:
            self["hovertemplatesrc"] = _v
        _v = arg.pop("hovertext", None)
        _v = hovertext if hovertext is not None else _v
        if _v is not None:
            self["hovertext"] = _v
        _v = arg.pop("hovertextsrc", None)
        _v = hovertextsrc if hovertextsrc is not None else _v
        if _v is not None:
            self["hovertextsrc"] = _v
        _v = arg.pop("ids", None)
        _v = ids if ids is not None else _v
        if _v is not None:
            self["ids"] = _v
        _v = arg.pop("idssrc", None)
        _v = idssrc if idssrc is not None else _v
        if _v is not None:
            self["idssrc"] = _v
        _v = arg.pop("insidetextfont", None)
        _v = insidetextfont if insidetextfont is not None else _v
        if _v is not None:
            self["insidetextfont"] = _v
        _v = arg.pop("labels", None)
        _v = labels if labels is not None else _v
        if _v is not None:
            self["labels"] = _v
        _v = arg.pop("labelssrc", None)
        _v = labelssrc if labelssrc is not None else _v
        if _v is not None:
            self["labelssrc"] = _v
        _v = arg.pop("legend", None)
        _v = legend if legend is not None else _v
        if _v is not None:
            self["legend"] = _v
        _v = arg.pop("legendgrouptitle", None)
        _v = legendgrouptitle if legendgrouptitle is not None else _v
        if _v is not None:
            self["legendgrouptitle"] = _v
        _v = arg.pop("legendrank", None)
        _v = legendrank if legendrank is not None else _v
        if _v is not None:
            self["legendrank"] = _v
        _v = arg.pop("legendwidth", None)
        _v = legendwidth if legendwidth is not None else _v
        if _v is not None:
            self["legendwidth"] = _v
        _v = arg.pop("level", None)
        _v = level if level is not None else _v
        if _v is not None:
            self["level"] = _v
        _v = arg.pop("marker", None)
        _v = marker if marker is not None else _v
        if _v is not None:
            self["marker"] = _v
        _v = arg.pop("maxdepth", None)
        _v = maxdepth if maxdepth is not None else _v
        if _v is not None:
            self["maxdepth"] = _v
        _v = arg.pop("meta", None)
        _v = meta if meta is not None else _v
        if _v is not None:
            self["meta"] = _v
        _v = arg.pop("metasrc", None)
        _v = metasrc if metasrc is not None else _v
        if _v is not None:
            self["metasrc"] = _v
        _v = arg.pop("name", None)
        _v = name if name is not None else _v
        if _v is not None:
            self["name"] = _v
        _v = arg.pop("opacity", None)
        _v = opacity if opacity is not None else _v
        if _v is not None:
            self["opacity"] = _v
        _v = arg.pop("outsidetextfont", None)
        _v = outsidetextfont if outsidetextfont is not None else _v
        if _v is not None:
            self["outsidetextfont"] = _v
        _v = arg.pop("parents", None)
        _v = parents if parents is not None else _v
        if _v is not None:
            self["parents"] = _v
        _v = arg.pop("parentssrc", None)
        _v = parentssrc if parentssrc is not None else _v
        if _v is not None:
            self["parentssrc"] = _v
        _v = arg.pop("pathbar", None)
        _v = pathbar if pathbar is not None else _v
        if _v is not None:
            self["pathbar"] = _v
        _v = arg.pop("root", None)
        _v = root if root is not None else _v
        if _v is not None:
            self["root"] = _v
        _v = arg.pop("sort", None)
        _v = sort if sort is not None else _v
        if _v is not None:
            self["sort"] = _v
        _v = arg.pop("stream", None)
        _v = stream if stream is not None else _v
        if _v is not None:
            self["stream"] = _v
        _v = arg.pop("text", None)
        _v = text if text is not None else _v
        if _v is not None:
            self["text"] = _v
        _v = arg.pop("textfont", None)
        _v = textfont if textfont is not None else _v
        if _v is not None:
            self["textfont"] = _v
        _v = arg.pop("textinfo", None)
        _v = textinfo if textinfo is not None else _v
        if _v is not None:
            self["textinfo"] = _v
        _v = arg.pop("textposition", None)
        _v = textposition if textposition is not None else _v
        if _v is not None:
            self["textposition"] = _v
        _v = arg.pop("textsrc", None)
        _v = textsrc if textsrc is not None else _v
        if _v is not None:
            self["textsrc"] = _v
        _v = arg.pop("texttemplate", None)
        _v = texttemplate if texttemplate is not None else _v
        if _v is not None:
            self["texttemplate"] = _v
        _v = arg.pop("texttemplatesrc", None)
        _v = texttemplatesrc if texttemplatesrc is not None else _v
        if _v is not None:
            self["texttemplatesrc"] = _v
        _v = arg.pop("tiling", None)
        _v = tiling if tiling is not None else _v
        if _v is not None:
            self["tiling"] = _v
        _v = arg.pop("uid", None)
        _v = uid if uid is not None else _v
        if _v is not None:
            self["uid"] = _v
        _v = arg.pop("uirevision", None)
        _v = uirevision if uirevision is not None else _v
        if _v is not None:
            self["uirevision"] = _v
        _v = arg.pop("values", None)
        _v = values if values is not None else _v
        if _v is not None:
            self["values"] = _v
        _v = arg.pop("valuessrc", None)
        _v = valuessrc if valuessrc is not None else _v
        if _v is not None:
            self["valuessrc"] = _v
        _v = arg.pop("visible", None)
        _v = visible if visible is not None else _v
        if _v is not None:
            self["visible"] = _v
        # Read-only literals
        # ------------------
        self._props["type"] = "treemap"
        arg.pop("type", None)
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
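As a quick sanity check (a hedged sketch, not taken from the source): a treemap needs matching `labels` and `parents`, and with `branchvalues="total"` each branch value must cover the sum of its children; the numbers below are illustrative.
>>> import plotly.graph_objects as go
>>> fig = go.Figure(go.Treemap(
...     labels=["Eve", "Cain", "Seth", "Enos", "Noam"],
...     parents=["", "Eve", "Eve", "Seth", "Seth"],
...     values=[65, 14, 12, 10, 2],
...     branchvalues="total",
...     textinfo="label+value"))
>>> fig.data[0].type
'treemap'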
 
 | 
	_treemap.Treemap.__init__ 
 | 
	Self-Contained 
 | 
					
	plotly.py 
 | 
	61 
 | 
	packages/python/plotly/plotly/figure_factory/_trisurf.py 
 | 
	def create_trisurf(
    x,
    y,
    z,
    simplices,
    colormap=None,
    show_colorbar=True,
    scale=None,
    color_func=None,
    title="Trisurf Plot",
    plot_edges=True,
    showbackground=True,
    backgroundcolor="rgb(230, 230, 230)",
    gridcolor="rgb(255, 255, 255)",
    zerolinecolor="rgb(255, 255, 255)",
    edges_color="rgb(50, 50, 50)",
    height=800,
    width=800,
    aspectratio=None,
):
    """
    Returns figure for a triangulated surface plot
    :param (array) x: data values of x in a 1D array
    :param (array) y: data values of y in a 1D array
    :param (array) z: data values of z in a 1D array
    :param (array) simplices: an array of shape (ntri, 3) where ntri is
        the number of triangles in the triangulation. Each row of the
        array contains the indices of the vertices of each triangle
    :param (str|tuple|list) colormap: either a plotly scale name, an rgb
        or hex color, a color tuple or a list of colors. An rgb color is
        of the form 'rgb(x, y, z)' where x, y, z belong to the interval
        [0, 255] and a color tuple is a tuple of the form (a, b, c) where
        a, b and c belong to [0, 1]. If colormap is a list, it must
        contain the valid color types aforementioned as its members
    :param (bool) show_colorbar: determines if colorbar is visible
    :param (list|array) scale: sets the scale values to be used if a non-
        linearly interpolated colormap is desired. If left as None, a
        linear interpolation between the colors will be executed
    :param (function|list) color_func: The parameter that determines the
        coloring of the surface. Takes either a function with 3 arguments
        x, y, z or a list/array of color values the same length as
        simplices. If None, coloring will only depend on the z axis
    :param (str) title: title of the plot
    :param (bool) plot_edges: determines if the triangles on the trisurf
        are visible
    :param (bool) showbackground: makes background in plot visible
    :param (str) backgroundcolor: color of background. Takes a string of
        the form 'rgb(x,y,z)' x,y,z are between 0 and 255 inclusive
    :param (str) gridcolor: color of the gridlines besides the axes. Takes
        a string of the form 'rgb(x,y,z)' x,y,z are between 0 and 255
        inclusive
    :param (str) zerolinecolor: color of the axes. Takes a string of the
        form 'rgb(x,y,z)' x,y,z are between 0 and 255 inclusive
    :param (str) edges_color: color of the edges, if plot_edges is True
    :param (int|float) height: the height of the plot (in pixels)
    :param (int|float) width: the width of the plot (in pixels)
    :param (dict) aspectratio: a dictionary of the aspect ratio values for
        the x, y and z axes. 'x', 'y' and 'z' take (int|float) values
    Example 1: Sphere
    >>> # Necessary Imports for Trisurf
    >>> import numpy as np
    >>> from scipy.spatial import Delaunay
    >>> from plotly.figure_factory import create_trisurf
    >>> from plotly.graph_objs import graph_objs
    >>> # Make data for plot
    >>> u = np.linspace(0, 2*np.pi, 20)
    >>> v = np.linspace(0, np.pi, 20)
    >>> u,v = np.meshgrid(u,v)
    >>> u = u.flatten()
    >>> v = v.flatten()
    >>> x = np.sin(v)*np.cos(u)
    >>> y = np.sin(v)*np.sin(u)
    >>> z = np.cos(v)
    >>> points2D = np.vstack([u,v]).T
    >>> tri = Delaunay(points2D)
    >>> simplices = tri.simplices
    >>> # Create a figure
    >>> fig1 = create_trisurf(x=x, y=y, z=z, colormap="Rainbow",
    ...                       simplices=simplices)
    Example 2: Torus
    >>> # Necessary Imports for Trisurf
    >>> import numpy as np
    >>> from scipy.spatial import Delaunay
    >>> from plotly.figure_factory import create_trisurf
    >>> from plotly.graph_objs import graph_objs
    >>> # Make data for plot
    >>> u = np.linspace(0, 2*np.pi, 20)
    >>> v = np.linspace(0, 2*np.pi, 20)
    >>> u,v = np.meshgrid(u,v)
    >>> u = u.flatten()
    >>> v = v.flatten()
    >>> x = (3 + (np.cos(v)))*np.cos(u)
    >>> y = (3 + (np.cos(v)))*np.sin(u)
    >>> z = np.sin(v)
    >>> points2D = np.vstack([u,v]).T
    >>> tri = Delaunay(points2D)
    >>> simplices = tri.simplices
    >>> # Create a figure
    >>> fig1 = create_trisurf(x=x, y=y, z=z, colormap="Viridis",
    ...                       simplices=simplices)
    Example 3: Mobius Band
    >>> # Necessary Imports for Trisurf
    >>> import numpy as np
    >>> from scipy.spatial import Delaunay
    >>> from plotly.figure_factory import create_trisurf
    >>> from plotly.graph_objs import graph_objs
    >>> # Make data for plot
    >>> u = np.linspace(0, 2*np.pi, 24)
    >>> v = np.linspace(-1, 1, 8)
    >>> u,v = np.meshgrid(u,v)
    >>> u = u.flatten()
    >>> v = v.flatten()
    >>> tp = 1 + 0.5*v*np.cos(u/2.)
    >>> x = tp*np.cos(u)
    >>> y = tp*np.sin(u)
    >>> z = 0.5*v*np.sin(u/2.)
    >>> points2D = np.vstack([u,v]).T
    >>> tri = Delaunay(points2D)
    >>> simplices = tri.simplices
    >>> # Create a figure
    >>> fig1 = create_trisurf(x=x, y=y, z=z, colormap=[(0.2, 0.4, 0.6), (1, 1, 1)],
    ...                       simplices=simplices)
    Example 4: Using a Custom Colormap Function with Light Cone
    >>> # Necessary Imports for Trisurf
    >>> import numpy as np
    >>> from scipy.spatial import Delaunay
    >>> from plotly.figure_factory import create_trisurf
    >>> from plotly.graph_objs import graph_objs
    >>> # Make data for plot
    >>> u=np.linspace(-np.pi, np.pi, 30)
    >>> v=np.linspace(-np.pi, np.pi, 30)
    >>> u,v=np.meshgrid(u,v)
    >>> u=u.flatten()
    >>> v=v.flatten()
    >>> x = u
    >>> y = u*np.cos(v)
    >>> z = u*np.sin(v)
    >>> points2D = np.vstack([u,v]).T
    >>> tri = Delaunay(points2D)
    >>> simplices = tri.simplices
    >>> # Define distance function
    >>> def dist_origin(x, y, z):
    ...     return np.sqrt((1.0 * x)**2 + (1.0 * y)**2 + (1.0 * z)**2)
    >>> # Create a figure
    >>> fig1 = create_trisurf(x=x, y=y, z=z,
    ...                       colormap=['#FFFFFF', '#E4FFFE',
    ...                                 '#A4F6F9', '#FF99FE',
    ...                                 '#BA52ED'],
    ...                       scale=[0, 0.6, 0.71, 0.89, 1],
    ...                       simplices=simplices,
    ...                       color_func=dist_origin)
    Example 5: Enter color_func as a list of colors
    >>> # Necessary Imports for Trisurf
    >>> import numpy as np
    >>> from scipy.spatial import Delaunay
    >>> import random
    >>> from plotly.figure_factory import create_trisurf
    >>> from plotly.graph_objs import graph_objs
    >>> # Make data for plot
    >>> u=np.linspace(-np.pi, np.pi, 30)
    >>> v=np.linspace(-np.pi, np.pi, 30)
    >>> u,v=np.meshgrid(u,v)
    >>> u=u.flatten()
    >>> v=v.flatten()
    >>> x = u
    >>> y = u*np.cos(v)
    >>> z = u*np.sin(v)
    >>> points2D = np.vstack([u,v]).T
    >>> tri = Delaunay(points2D)
    >>> simplices = tri.simplices
    >>> colors = []
    >>> color_choices = ['rgb(0, 0, 0)', '#6c4774', '#d6c7dd']
    >>> for index in range(len(simplices)):
    ...     colors.append(random.choice(color_choices))
    >>> fig = create_trisurf(
    ...     x, y, z, simplices,
    ...     color_func=colors,
    ...     show_colorbar=True,
    ...     edges_color='rgb(2, 85, 180)',
    ...     title=' Modern Art'
    ... )
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__trisurf.create_trisurf.txt 
 | 
	def create_trisurf(
    x,
    y,
    z,
    simplices,
    colormap=None,
    show_colorbar=True,
    scale=None,
    color_func=None,
    title="Trisurf Plot",
    plot_edges=True,
    showbackground=True,
    backgroundcolor="rgb(230, 230, 230)",
    gridcolor="rgb(255, 255, 255)",
    zerolinecolor="rgb(255, 255, 255)",
    edges_color="rgb(50, 50, 50)",
    height=800,
    width=800,
    aspectratio=None,
):
    """
    Returns figure for a triangulated surface plot
    :param (array) x: data values of x in a 1D array
    :param (array) y: data values of y in a 1D array
    :param (array) z: data values of z in a 1D array
    :param (array) simplices: an array of shape (ntri, 3) where ntri is
        the number of triangles in the triangulation. Each row of the
        array contains the indices of the vertices of each triangle
    :param (str|tuple|list) colormap: either a plotly scale name, an rgb
        or hex color, a color tuple or a list of colors. An rgb color is
        of the form 'rgb(x, y, z)' where x, y, z belong to the interval
        [0, 255] and a color tuple is a tuple of the form (a, b, c) where
        a, b and c belong to [0, 1]. If colormap is a list, it must
        contain the valid color types aforementioned as its members
    :param (bool) show_colorbar: determines if colorbar is visible
    :param (list|array) scale: sets the scale values to be used if a non-
        linearly interpolated colormap is desired. If left as None, a
        linear interpolation between the colors will be executed
    :param (function|list) color_func: The parameter that determines the
        coloring of the surface. Takes either a function with 3 arguments
        x, y, z or a list/array of color values the same length as
        simplices. If None, coloring will only depend on the z axis
    :param (str) title: title of the plot
    :param (bool) plot_edges: determines if the triangles on the trisurf
        are visible
    :param (bool) showbackground: makes background in plot visible
    :param (str) backgroundcolor: color of background. Takes a string of
        the form 'rgb(x,y,z)' x,y,z are between 0 and 255 inclusive
    :param (str) gridcolor: color of the gridlines besides the axes. Takes
        a string of the form 'rgb(x,y,z)' x,y,z are between 0 and 255
        inclusive
    :param (str) zerolinecolor: color of the axes. Takes a string of the
        form 'rgb(x,y,z)' x,y,z are between 0 and 255 inclusive
    :param (str) edges_color: color of the edges, if plot_edges is True
    :param (int|float) height: the height of the plot (in pixels)
    :param (int|float) width: the width of the plot (in pixels)
    :param (dict) aspectratio: a dictionary of the aspect ratio values for
        the x, y and z axes. 'x', 'y' and 'z' take (int|float) values
    Example 1: Sphere
    >>> # Necessary Imports for Trisurf
    >>> import numpy as np
    >>> from scipy.spatial import Delaunay
    >>> from plotly.figure_factory import create_trisurf
    >>> from plotly.graph_objs import graph_objs
    >>> # Make data for plot
    >>> u = np.linspace(0, 2*np.pi, 20)
    >>> v = np.linspace(0, np.pi, 20)
    >>> u,v = np.meshgrid(u,v)
    >>> u = u.flatten()
    >>> v = v.flatten()
    >>> x = np.sin(v)*np.cos(u)
    >>> y = np.sin(v)*np.sin(u)
    >>> z = np.cos(v)
    >>> points2D = np.vstack([u,v]).T
    >>> tri = Delaunay(points2D)
    >>> simplices = tri.simplices
    >>> # Create a figure
    >>> fig1 = create_trisurf(x=x, y=y, z=z, colormap="Rainbow",
    ...                       simplices=simplices)
    Example 2: Torus
    >>> # Necessary Imports for Trisurf
    >>> import numpy as np
    >>> from scipy.spatial import Delaunay
    >>> from plotly.figure_factory import create_trisurf
    >>> from plotly.graph_objs import graph_objs
    >>> # Make data for plot
    >>> u = np.linspace(0, 2*np.pi, 20)
    >>> v = np.linspace(0, 2*np.pi, 20)
    >>> u,v = np.meshgrid(u,v)
    >>> u = u.flatten()
    >>> v = v.flatten()
    >>> x = (3 + (np.cos(v)))*np.cos(u)
    >>> y = (3 + (np.cos(v)))*np.sin(u)
    >>> z = np.sin(v)
    >>> points2D = np.vstack([u,v]).T
    >>> tri = Delaunay(points2D)
    >>> simplices = tri.simplices
    >>> # Create a figure
    >>> fig1 = create_trisurf(x=x, y=y, z=z, colormap="Viridis",
    ...                       simplices=simplices)
    Example 3: Mobius Band
    >>> # Necessary Imports for Trisurf
    >>> import numpy as np
    >>> from scipy.spatial import Delaunay
    >>> from plotly.figure_factory import create_trisurf
    >>> from plotly.graph_objs import graph_objs
    >>> # Make data for plot
    >>> u = np.linspace(0, 2*np.pi, 24)
    >>> v = np.linspace(-1, 1, 8)
    >>> u,v = np.meshgrid(u,v)
    >>> u = u.flatten()
    >>> v = v.flatten()
    >>> tp = 1 + 0.5*v*np.cos(u/2.)
    >>> x = tp*np.cos(u)
    >>> y = tp*np.sin(u)
    >>> z = 0.5*v*np.sin(u/2.)
    >>> points2D = np.vstack([u,v]).T
    >>> tri = Delaunay(points2D)
    >>> simplices = tri.simplices
    >>> # Create a figure
    >>> fig1 = create_trisurf(x=x, y=y, z=z, colormap=[(0.2, 0.4, 0.6), (1, 1, 1)],
    ...                       simplices=simplices)
    Example 4: Using a Custom Colormap Function with Light Cone
    >>> # Necessary Imports for Trisurf
    >>> import numpy as np
    >>> from scipy.spatial import Delaunay
    >>> from plotly.figure_factory import create_trisurf
    >>> from plotly.graph_objs import graph_objs
    >>> # Make data for plot
    >>> u=np.linspace(-np.pi, np.pi, 30)
    >>> v=np.linspace(-np.pi, np.pi, 30)
    >>> u,v=np.meshgrid(u,v)
    >>> u=u.flatten()
    >>> v=v.flatten()
    >>> x = u
    >>> y = u*np.cos(v)
    >>> z = u*np.sin(v)
    >>> points2D = np.vstack([u,v]).T
    >>> tri = Delaunay(points2D)
    >>> simplices = tri.simplices
    >>> # Define distance function
    >>> def dist_origin(x, y, z):
    ...     return np.sqrt((1.0 * x)**2 + (1.0 * y)**2 + (1.0 * z)**2)
    >>> # Create a figure
    >>> fig1 = create_trisurf(x=x, y=y, z=z,
    ...                       colormap=['#FFFFFF', '#E4FFFE',
    ...                                 '#A4F6F9', '#FF99FE',
    ...                                 '#BA52ED'],
    ...                       scale=[0, 0.6, 0.71, 0.89, 1],
    ...                       simplices=simplices,
    ...                       color_func=dist_origin)
    Example 5: Enter color_func as a list of colors
    >>> # Necessary Imports for Trisurf
    >>> import numpy as np
    >>> from scipy.spatial import Delaunay
    >>> import random
    >>> from plotly.figure_factory import create_trisurf
    >>> from plotly.graph_objs import graph_objs
    >>> # Make data for plot
    >>> u=np.linspace(-np.pi, np.pi, 30)
    >>> v=np.linspace(-np.pi, np.pi, 30)
    >>> u,v=np.meshgrid(u,v)
    >>> u=u.flatten()
    >>> v=v.flatten()
    >>> x = u
    >>> y = u*np.cos(v)
    >>> z = u*np.sin(v)
    >>> points2D = np.vstack([u,v]).T
    >>> tri = Delaunay(points2D)
    >>> simplices = tri.simplices
    >>> colors = []
    >>> color_choices = ['rgb(0, 0, 0)', '#6c4774', '#d6c7dd']
    >>> for index in range(len(simplices)):
    ...     colors.append(random.choice(color_choices))
    >>> fig = create_trisurf(
    ...     x, y, z, simplices,
    ...     color_func=colors,
    ...     show_colorbar=True,
    ...     edges_color='rgb(2, 85, 180)',
    ...     title=' Modern Art'
    ... )
    """
    if aspectratio is None:
        aspectratio = {"x": 1, "y": 1, "z": 1}
    # Validate colormap
    clrs.validate_colors(colormap)
    colormap, scale = clrs.convert_colors_to_same_type(
        colormap, colortype="tuple", return_default_colors=True, scale=scale
    )
    data1 = trisurf(
        x,
        y,
        z,
        simplices,
        show_colorbar=show_colorbar,
        color_func=color_func,
        colormap=colormap,
        scale=scale,
        edges_color=edges_color,
        plot_edges=plot_edges,
    )
    axis = dict(
        showbackground=showbackground,
        backgroundcolor=backgroundcolor,
        gridcolor=gridcolor,
        zerolinecolor=zerolinecolor,
    )
    layout = graph_objs.Layout(
        title=title,
        width=width,
        height=height,
        scene=graph_objs.layout.Scene(
            xaxis=graph_objs.layout.scene.XAxis(**axis),
            yaxis=graph_objs.layout.scene.YAxis(**axis),
            zaxis=graph_objs.layout.scene.ZAxis(**axis),
            aspectratio=dict(
                x=aspectratio["x"], y=aspectratio["y"], z=aspectratio["z"]
            ),
        ),
    )
    return graph_objs.Figure(data=data1, layout=layout)
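# --- Illustrative usage sketch (not part of the repository source) ---
# The docstring examples above already cover several surfaces; this short
# sketch only shows that the object returned by create_trisurf is a regular
# plotly Figure, so it can be customized after construction. The paraboloid
# data and the "Portland" colorscale are arbitrary choices for illustration.
import numpy as np
from scipy.spatial import Delaunay
from plotly.figure_factory import create_trisurf

u = np.linspace(-1, 1, 15)
v = np.linspace(-1, 1, 15)
u, v = np.meshgrid(u, v)
u, v = u.flatten(), v.flatten()
x, y, z = u, v, u**2 + v**2  # paraboloid z = x^2 + y^2

simplices = Delaunay(np.vstack([u, v]).T).simplices

fig = create_trisurf(
    x=x,
    y=y,
    z=z,
    simplices=simplices,
    colormap="Portland",
    aspectratio=dict(x=1, y=1, z=0.7),
    title="Paraboloid",
)
fig.update_layout(width=600, height=600)  # returned value is a go.Figure
# fig.show()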
 
 | 
	_trisurf.create_trisurf 
 | 
	File-Level 
 | 
					
	plotly.py 
 | 
	62 
 | 
	packages/python/plotly/plotly/graph_objs/layout/_updatemenu.py 
 | 
	    def __init__(
        self,
        arg=None,
        active=None,
        bgcolor=None,
        bordercolor=None,
        borderwidth=None,
        buttons=None,
        buttondefaults=None,
        direction=None,
        font=None,
        name=None,
        pad=None,
        showactive=None,
        templateitemname=None,
        type=None,
        visible=None,
        x=None,
        xanchor=None,
        y=None,
        yanchor=None,
        **kwargs,
    ):
        """
        Construct a new Updatemenu object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.Updatemenu`
        active
            Determines which button (by index starting from 0) is
            considered active.
        bgcolor
            Sets the background color of the update menu buttons.
        bordercolor
            Sets the color of the border enclosing the update menu.
        borderwidth
            Sets the width (in px) of the border enclosing the
            update menu.
        buttons
            A tuple of
            :class:`plotly.graph_objects.layout.updatemenu.Button`
            instances or dicts with compatible properties
        buttondefaults
            When used in a template (as
            layout.template.layout.updatemenu.buttondefaults), sets
            the default property values to use for elements of
            layout.updatemenu.buttons
        direction
            Determines the direction in which the buttons are laid
            out, whether in a dropdown menu or a row/column of
            buttons. For `left` and `up`, the buttons will still
            appear in left-to-right or top-to-bottom order
            respectively.
        font
            Sets the font of the update menu button text.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        pad
            Sets the padding around the buttons or dropdown menu.
        showactive
            Highlights active dropdown item or active button if
            true.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        type
            Determines whether the buttons are accessible via a
            dropdown menu or whether the buttons are stacked
            horizontally or vertically
        visible
            Determines whether or not the update menu is visible.
        x
            Sets the x position (in normalized coordinates) of the
            update menu.
        xanchor
            Sets the update menu's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the range selector.
        y
            Sets the y position (in normalized coordinates) of the
            update menu.
        yanchor
            Sets the update menu's vertical position anchor. This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the update menu.
        Returns
        -------
        Updatemenu
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__updatemenu.Updatemenu.__init__.txt 
 | 
	    def __init__(
        self,
        arg=None,
        active=None,
        bgcolor=None,
        bordercolor=None,
        borderwidth=None,
        buttons=None,
        buttondefaults=None,
        direction=None,
        font=None,
        name=None,
        pad=None,
        showactive=None,
        templateitemname=None,
        type=None,
        visible=None,
        x=None,
        xanchor=None,
        y=None,
        yanchor=None,
        **kwargs,
    ):
        """
        Construct a new Updatemenu object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.Updatemenu`
        active
            Determines which button (by index starting from 0) is
            considered active.
        bgcolor
            Sets the background color of the update menu buttons.
        bordercolor
            Sets the color of the border enclosing the update menu.
        borderwidth
            Sets the width (in px) of the border enclosing the
            update menu.
        buttons
            A tuple of
            :class:`plotly.graph_objects.layout.updatemenu.Button`
            instances or dicts with compatible properties
        buttondefaults
            When used in a template (as
            layout.template.layout.updatemenu.buttondefaults), sets
            the default property values to use for elements of
            layout.updatemenu.buttons
        direction
            Determines the direction in which the buttons are laid
            out, whether in a dropdown menu or a row/column of
            buttons. For `left` and `up`, the buttons will still
            appear in left-to-right or top-to-bottom order
            respectively.
        font
            Sets the font of the update menu button text.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        pad
            Sets the padding around the buttons or dropdown menu.
        showactive
            Highlights active dropdown item or active button if
            true.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        type
            Determines whether the buttons are accessible via a
            dropdown menu or whether the buttons are stacked
            horizontally or vertically
        visible
            Determines whether or not the update menu is visible.
        x
            Sets the x position (in normalized coordinates) of the
            update menu.
        xanchor
            Sets the update menu's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the range selector.
        y
            Sets the y position (in normalized coordinates) of the
            update menu.
        yanchor
            Sets the update menu's vertical position anchor. This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the update menu.
        Returns
        -------
        Updatemenu
        """
        super(Updatemenu, self).__init__("updatemenus")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.Updatemenu
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Updatemenu`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("active", None)
        _v = active if active is not None else _v
        if _v is not None:
            self["active"] = _v
        _v = arg.pop("bgcolor", None)
        _v = bgcolor if bgcolor is not None else _v
        if _v is not None:
            self["bgcolor"] = _v
        _v = arg.pop("bordercolor", None)
        _v = bordercolor if bordercolor is not None else _v
        if _v is not None:
            self["bordercolor"] = _v
        _v = arg.pop("borderwidth", None)
        _v = borderwidth if borderwidth is not None else _v
        if _v is not None:
            self["borderwidth"] = _v
        _v = arg.pop("buttons", None)
        _v = buttons if buttons is not None else _v
        if _v is not None:
            self["buttons"] = _v
        _v = arg.pop("buttondefaults", None)
        _v = buttondefaults if buttondefaults is not None else _v
        if _v is not None:
            self["buttondefaults"] = _v
        _v = arg.pop("direction", None)
        _v = direction if direction is not None else _v
        if _v is not None:
            self["direction"] = _v
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("name", None)
        _v = name if name is not None else _v
        if _v is not None:
            self["name"] = _v
        _v = arg.pop("pad", None)
        _v = pad if pad is not None else _v
        if _v is not None:
            self["pad"] = _v
        _v = arg.pop("showactive", None)
        _v = showactive if showactive is not None else _v
        if _v is not None:
            self["showactive"] = _v
        _v = arg.pop("templateitemname", None)
        _v = templateitemname if templateitemname is not None else _v
        if _v is not None:
            self["templateitemname"] = _v
        _v = arg.pop("type", None)
        _v = type if type is not None else _v
        if _v is not None:
            self["type"] = _v
        _v = arg.pop("visible", None)
        _v = visible if visible is not None else _v
        if _v is not None:
            self["visible"] = _v
        _v = arg.pop("x", None)
        _v = x if x is not None else _v
        if _v is not None:
            self["x"] = _v
        _v = arg.pop("xanchor", None)
        _v = xanchor if xanchor is not None else _v
        if _v is not None:
            self["xanchor"] = _v
        _v = arg.pop("y", None)
        _v = y if y is not None else _v
        if _v is not None:
            self["y"] = _v
        _v = arg.pop("yanchor", None)
        _v = yanchor if yanchor is not None else _v
        if _v is not None:
            self["yanchor"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
 
 | 
	_updatemenu.Updatemenu.__init__ 
 | 
	Self-Contained 
 | 
					
	plotly.py 
 | 
	64 
 | 
	packages/python/plotly/plotly/graph_objs/_violin.py 
 | 
	    def __init__(
        self,
        arg=None,
        alignmentgroup=None,
        bandwidth=None,
        box=None,
        customdata=None,
        customdatasrc=None,
        fillcolor=None,
        hoverinfo=None,
        hoverinfosrc=None,
        hoverlabel=None,
        hoveron=None,
        hovertemplate=None,
        hovertemplatesrc=None,
        hovertext=None,
        hovertextsrc=None,
        ids=None,
        idssrc=None,
        jitter=None,
        legend=None,
        legendgroup=None,
        legendgrouptitle=None,
        legendrank=None,
        legendwidth=None,
        line=None,
        marker=None,
        meanline=None,
        meta=None,
        metasrc=None,
        name=None,
        offsetgroup=None,
        opacity=None,
        orientation=None,
        pointpos=None,
        points=None,
        quartilemethod=None,
        scalegroup=None,
        scalemode=None,
        selected=None,
        selectedpoints=None,
        showlegend=None,
        side=None,
        span=None,
        spanmode=None,
        stream=None,
        text=None,
        textsrc=None,
        uid=None,
        uirevision=None,
        unselected=None,
        visible=None,
        width=None,
        x=None,
        x0=None,
        xaxis=None,
        xhoverformat=None,
        xsrc=None,
        y=None,
        y0=None,
        yaxis=None,
        yhoverformat=None,
        ysrc=None,
        zorder=None,
        **kwargs,
    ):
        """
        Construct a new Violin object
        In vertical (horizontal) violin plots, statistics are computed
        using `y` (`x`) values. By supplying an `x` (`y`) array, one
        violin per distinct x (y) value is drawn. If no `x` (`y`) list
        is provided, a single violin is drawn. That violin is then
        positioned with `name` or with `x0` (`y0`) if provided.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.Violin`
        alignmentgroup
            Set several traces linked to the same position axis or
            matching axes to the same alignmentgroup. This controls
            whether bars compute their positional range dependently
            or independently.
        bandwidth
            Sets the bandwidth used to compute the kernel density
            estimate. By default, the bandwidth is determined by
            Silverman's rule of thumb.
        box
            :class:`plotly.graph_objects.violin.Box` instance or
            dict with compatible properties
        customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers' DOM elements.
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        fillcolor
            Sets the fill color. Defaults to a half-transparent
            variant of the line color, marker color, or marker line
            color, whichever is available.
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.violin.Hoverlabel`
            instance or dict with compatible properties
        hoveron
            Do the hover effects highlight individual violins or
            sample points or the kernel density estimate or any
            combination of them?
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available.  Anything contained in tag `<extra>` is
            displayed in the secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Same as `text`.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should
            be an array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        jitter
            Sets the amount of jitter in the sample points drawn.
            If 0, the sample points align along the distribution
            axis. If 1, the sample points are drawn in a random
            jitter of width equal to the width of the violins.
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgroup
            Sets the legend group for this trace. Traces and shapes
            part of the same legend group hide/show at the same
            time when toggling legend items.
        legendgrouptitle
            :class:`plotly.graph_objects.violin.Legendgrouptitle`
            instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When having
            unranked or equal rank items shapes would be displayed
            after traces i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        line
            :class:`plotly.graph_objects.violin.Line` instance or
            dict with compatible properties
        marker
            :class:`plotly.graph_objects.violin.Marker` instance or
            dict with compatible properties
        meanline
            :class:`plotly.graph_objects.violin.Meanline` instance
            or dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover. For violin traces, the name
            will also be used for the position coordinate, if `x`
            and `x0` (`y` and `y0` if horizontal) are missing and
            the position axis is categorical. Note that the trace
            name is also used as a default value for attribute
            `scalegroup` (please see its description for details).
        offsetgroup
            Set several traces linked to the same position axis or
            matching axes to the same offsetgroup where bars of the
            same position coordinate will line up.
        opacity
            Sets the opacity of the trace.
        orientation
            Sets the orientation of the violin(s). If "v" ("h"),
            the distribution is visualized along the vertical
            (horizontal).
        pointpos
            Sets the position of the sample points in relation to
            the violins. If 0, the sample points are placed over
            the center of the violins. Positive (negative) values
            correspond to positions to the right (left) for
            vertical violins and above (below) for horizontal
            violins.
        points
            If "outliers", only the sample points lying outside the
            whiskers are shown. If "suspectedoutliers", the outlier
            points are shown and points either less than 4*Q1-3*Q3
            or greater than 4*Q3-3*Q1 are highlighted (see
            `outliercolor`). If "all", all sample points are shown.
            If False, only the violins are shown with no sample
            points. Defaults to "suspectedoutliers" when
            `marker.outliercolor` or `marker.line.outliercolor` is
            set, otherwise defaults to "outliers".
        quartilemethod
            Sets the method used to compute the sample's Q1 and Q3
            quartiles. The "linear" method uses the 25th percentile
            for Q1 and 75th percentile for Q3 as computed using
            method #10 (listed on
            http://jse.amstat.org/v14n3/langford.html). The
            "exclusive" method uses the median to divide the
            ordered dataset into two halves; if the sample is odd,
            it does not include the median in either half - Q1 is
            then the median of the lower half and Q3 the median of
            the upper half. The "inclusive" method also uses the
            median to divide the ordered dataset into two halves
            but if the sample is odd, it includes the median in
            both halves - Q1 is then the median of the lower half
            and Q3 the median of the upper half.
        scalegroup
            If there are multiple violins that should be sized
            according to some metric (see `scalemode`), link
            them by providing a non-empty group id here shared by
            every trace in the same group. If a violin's `width` is
            undefined, `scalegroup` will default to the trace's
            name. In this case, violins with the same names will be
            linked together
        scalemode
            Sets the metric by which the width of each violin is
            determined. "width" means each violin has the same
            (max) width "count" means the violins are scaled by the
            number of sample points making up each violin.
        selected
            :class:`plotly.graph_objects.violin.Selected` instance
            or dict with compatible properties
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas
            any other non-array value means no selection at all, where
            the `selected` and `unselected` styles have no effect.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        side
            Determines on which side of the position value the
            density function making up one half of a violin is
            plotted. Useful when comparing two violin traces under
            "overlay" mode, where one trace has `side` set to
            "positive" and the other to "negative".
        span
            Sets the span in data space for which the density
            function will be computed. Has an effect only when
            `spanmode` is set to "manual".
        spanmode
            Sets the method used to determine the span in data
            space over which the density function will be
            computed. "soft" means the
            span goes from the sample's minimum value minus two
            bandwidths to the sample's maximum value plus two
            bandwidths. "hard" means the span goes from the
            sample's minimum to its maximum value. For custom span
            settings, use mode "manual" and fill in the `span`
            attribute.
        stream
            :class:`plotly.graph_objects.violin.Stream` instance or
            dict with compatible properties
        text
            Sets the text elements associated with each sample
            value. If a single string, the same string appears over
            all the data points. If an array of string, the items
            are mapped in order to this trace's (x,y)
            coordinates. To be seen, trace `hoverinfo` must contain
            a "text" flag.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        uid
            Assign an id to this trace. Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        unselected
            :class:`plotly.graph_objects.violin.Unselected`
            instance or dict with compatible properties
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        width
            Sets the width of the violin in data coordinates. If 0
            (default value) the width is automatically selected
            based on the positions of other violin traces in the
            same subplot.
        x
            Sets the x sample data or coordinates. See overview for
            more info.
        x0
            Sets the x coordinate for single-box traces or the
            starting coordinate for multi-box traces set using
            q1/median/q3. See overview for more info.
        xaxis
            Sets a reference between this trace's x coordinates and
            a 2D cartesian x axis. If "x" (the default value), the
            x coordinates refer to `layout.xaxis`. If "x2", the x
            coordinates refer to `layout.xaxis2`, and so on.
        xhoverformat
            Sets the hover text formatting rule for `x` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*. By default the values are
            formatted using `xaxis.hoverformat`.
        xsrc
            Sets the source reference on Chart Studio Cloud for
            `x`.
        y
            Sets the y sample data or coordinates. See overview for
            more info.
        y0
            Sets the y coordinate for single-box traces or the
            starting coordinate for multi-box traces set using
            q1/median/q3. See overview for more info.
        yaxis
            Sets a reference between this trace's y coordinates and
            a 2D cartesian y axis. If "y" (the default value), the
            y coordinates refer to `layout.yaxis`. If "y2", the y
            coordinates refer to `layout.yaxis2`, and so on.
        yhoverformat
            Sets the hover text formatting rule for `y` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*. By default the values are
            formatted using `yaxis.hoverformat`.
        ysrc
            Sets the source reference on Chart Studio Cloud for
            `y`.
        zorder
            Sets the layer on which this trace is displayed,
            relative to other SVG traces on the same subplot. SVG
            traces with higher `zorder` appear in front of those
            with lower `zorder`.
        Returns
        -------
        Violin
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__violin.Violin.__init__.txt 
 | 
	    def __init__(
        self,
        arg=None,
        alignmentgroup=None,
        bandwidth=None,
        box=None,
        customdata=None,
        customdatasrc=None,
        fillcolor=None,
        hoverinfo=None,
        hoverinfosrc=None,
        hoverlabel=None,
        hoveron=None,
        hovertemplate=None,
        hovertemplatesrc=None,
        hovertext=None,
        hovertextsrc=None,
        ids=None,
        idssrc=None,
        jitter=None,
        legend=None,
        legendgroup=None,
        legendgrouptitle=None,
        legendrank=None,
        legendwidth=None,
        line=None,
        marker=None,
        meanline=None,
        meta=None,
        metasrc=None,
        name=None,
        offsetgroup=None,
        opacity=None,
        orientation=None,
        pointpos=None,
        points=None,
        quartilemethod=None,
        scalegroup=None,
        scalemode=None,
        selected=None,
        selectedpoints=None,
        showlegend=None,
        side=None,
        span=None,
        spanmode=None,
        stream=None,
        text=None,
        textsrc=None,
        uid=None,
        uirevision=None,
        unselected=None,
        visible=None,
        width=None,
        x=None,
        x0=None,
        xaxis=None,
        xhoverformat=None,
        xsrc=None,
        y=None,
        y0=None,
        yaxis=None,
        yhoverformat=None,
        ysrc=None,
        zorder=None,
        **kwargs,
    ):
        """
        Construct a new Violin object
        In vertical (horizontal) violin plots, statistics are computed
        using `y` (`x`) values. By supplying an `x` (`y`) array, one
        violin per distinct x (y) value is drawn. If no `x` (`y`) list
        is provided, a single violin is drawn. That violin is then
        positioned with `name` or with `x0` (`y0`) if provided.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.Violin`
        alignmentgroup
            Set several traces linked to the same position axis or
            matching axes to the same alignmentgroup. This controls
            whether bars compute their positional range dependently
            or independently.
        bandwidth
            Sets the bandwidth used to compute the kernel density
            estimate. By default, the bandwidth is determined by
            Silverman's rule of thumb.
        box
            :class:`plotly.graph_objects.violin.Box` instance or
            dict with compatible properties
        customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers' DOM elements.
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        fillcolor
            Sets the fill color. Defaults to a half-transparent
            variant of the line color, marker color, or marker line
            color, whichever is available.
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.violin.Hoverlabel`
            instance or dict with compatible properties
        hoveron
            Do the hover effects highlight individual violins or
            sample points or the kernel density estimate or any
            combination of them?
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available.  Anything contained in tag `<extra>` is
            displayed in the secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Same as `text`.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should
            be an array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        jitter
            Sets the amount of jitter in the sample points drawn.
            If 0, the sample points align along the distribution
            axis. If 1, the sample points are drawn in a random
            jitter of width equal to the width of the violins.
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgroup
            Sets the legend group for this trace. Traces and shapes
            part of the same legend group hide/show at the same
            time when toggling legend items.
        legendgrouptitle
            :class:`plotly.graph_objects.violin.Legendgrouptitle`
            instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When having
            unranked or equal rank items shapes would be displayed
            after traces i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        line
            :class:`plotly.graph_objects.violin.Line` instance or
            dict with compatible properties
        marker
            :class:`plotly.graph_objects.violin.Marker` instance or
            dict with compatible properties
        meanline
            :class:`plotly.graph_objects.violin.Meanline` instance
            or dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover. For violin traces, the name
            will also be used for the position coordinate, if `x`
            and `x0` (`y` and `y0` if horizontal) are missing and
            the position axis is categorical. Note that the trace
            name is also used as a default value for attribute
            `scalegroup` (please see its description for details).
        offsetgroup
            Set several traces linked to the same position axis or
            matching axes to the same offsetgroup where bars of the
            same position coordinate will line up.
        opacity
            Sets the opacity of the trace.
        orientation
            Sets the orientation of the violin(s). If "v" ("h"),
            the distribution is visualized along the vertical
            (horizontal).
        pointpos
            Sets the position of the sample points in relation to
            the violins. If 0, the sample points are placed over
            the center of the violins. Positive (negative) values
            correspond to positions to the right (left) for
            vertical violins and above (below) for horizontal
            violins.
        points
            If "outliers", only the sample points lying outside the
            whiskers are shown. If "suspectedoutliers", the outlier
            points are shown and points either less than 4*Q1-3*Q3
            or greater than 4*Q3-3*Q1 are highlighted (see
            `outliercolor`). If "all", all sample points are shown.
            If False, only the violins are shown with no sample
            points. Defaults to "suspectedoutliers" when
            `marker.outliercolor` or `marker.line.outliercolor` is
            set, otherwise defaults to "outliers".
        quartilemethod
            Sets the method used to compute the sample's Q1 and Q3
            quartiles. The "linear" method uses the 25th percentile
            for Q1 and 75th percentile for Q3 as computed using
            method #10 (listed on
            http://jse.amstat.org/v14n3/langford.html). The
            "exclusive" method uses the median to divide the
            ordered dataset into two halves; if the sample is odd,
            it does not include the median in either half - Q1 is
            then the median of the lower half and Q3 the median of
            the upper half. The "inclusive" method also uses the
            median to divide the ordered dataset into two halves
            but if the sample is odd, it includes the median in
            both halves - Q1 is then the median of the lower half
            and Q3 the median of the upper half.
        scalegroup
            If there are multiple violins that should be sized
            according to some metric (see `scalemode`), link
            them by providing a non-empty group id here shared by
            every trace in the same group. If a violin's `width` is
            undefined, `scalegroup` will default to the trace's
            name. In this case, violins with the same names will be
            linked together
        scalemode
            Sets the metric by which the width of each violin is
            determined. "width" means each violin has the same
            (max) width "count" means the violins are scaled by the
            number of sample points making up each violin.
        selected
            :class:`plotly.graph_objects.violin.Selected` instance
            or dict with compatible properties
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas
            any other non-array value means no selection at all, where
            the `selected` and `unselected` styles have no effect.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        side
            Determines on which side of the position value the
            density function making up one half of a violin is
            plotted. Useful when comparing two violin traces under
            "overlay" mode, where one trace has `side` set to
            "positive" and the other to "negative".
        span
            Sets the span in data space for which the density
            function will be computed. Has an effect only when
            `spanmode` is set to "manual".
        spanmode
            Sets the method used to determine the span in data
            space over which the density function will be
            computed. "soft" means the
            span goes from the sample's minimum value minus two
            bandwidths to the sample's maximum value plus two
            bandwidths. "hard" means the span goes from the
            sample's minimum to its maximum value. For custom span
            settings, use mode "manual" and fill in the `span`
            attribute.
        stream
            :class:`plotly.graph_objects.violin.Stream` instance or
            dict with compatible properties
        text
            Sets the text elements associated with each sample
            value. If a single string, the same string appears over
            all the data points. If an array of string, the items
            are mapped in order to this trace's (x,y)
            coordinates. To be seen, trace `hoverinfo` must contain
            a "text" flag.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        uid
            Assign an id to this trace. Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        unselected
            :class:`plotly.graph_objects.violin.Unselected`
            instance or dict with compatible properties
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        width
            Sets the width of the violin in data coordinates. If 0
            (default value) the width is automatically selected
            based on the positions of other violin traces in the
            same subplot.
        x
            Sets the x sample data or coordinates. See overview for
            more info.
        x0
            Sets the x coordinate for single-box traces or the
            starting coordinate for multi-box traces set using
            q1/median/q3. See overview for more info.
        xaxis
            Sets a reference between this trace's x coordinates and
            a 2D cartesian x axis. If "x" (the default value), the
            x coordinates refer to `layout.xaxis`. If "x2", the x
            coordinates refer to `layout.xaxis2`, and so on.
        xhoverformat
            Sets the hover text formatting rule for `x` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*. By default the values are
            formatted using `xaxis.hoverformat`.
        xsrc
            Sets the source reference on Chart Studio Cloud for
            `x`.
        y
            Sets the y sample data or coordinates. See overview for
            more info.
        y0
            Sets the y coordinate for single-box traces or the
            starting coordinate for multi-box traces set using
            q1/median/q3. See overview for more info.
        yaxis
            Sets a reference between this trace's y coordinates and
            a 2D cartesian y axis. If "y" (the default value), the
            y coordinates refer to `layout.yaxis`. If "y2", the y
            coordinates refer to `layout.yaxis2`, and so on.
        yhoverformat
            Sets the hover text formatting rule for `y` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*. By default the values are
            formatted using `yaxis.hoverformat`.
        ysrc
            Sets the source reference on Chart Studio Cloud for
            `y`.
        zorder
            Sets the layer on which this trace is displayed,
            relative to other SVG traces on the same subplot. SVG
            traces with higher `zorder` appear in front of those
            with lower `zorder`.
        Returns
        -------
        Violin
        """
        super(Violin, self).__init__("violin")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.Violin
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Violin`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
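        # For each property below, the value popped from `arg` is used unless
        # the matching keyword argument was also supplied, in which case the
        # keyword argument takes precedence.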
        _v = arg.pop("alignmentgroup", None)
        _v = alignmentgroup if alignmentgroup is not None else _v
        if _v is not None:
            self["alignmentgroup"] = _v
        _v = arg.pop("bandwidth", None)
        _v = bandwidth if bandwidth is not None else _v
        if _v is not None:
            self["bandwidth"] = _v
        _v = arg.pop("box", None)
        _v = box if box is not None else _v
        if _v is not None:
            self["box"] = _v
        _v = arg.pop("customdata", None)
        _v = customdata if customdata is not None else _v
        if _v is not None:
            self["customdata"] = _v
        _v = arg.pop("customdatasrc", None)
        _v = customdatasrc if customdatasrc is not None else _v
        if _v is not None:
            self["customdatasrc"] = _v
        _v = arg.pop("fillcolor", None)
        _v = fillcolor if fillcolor is not None else _v
        if _v is not None:
            self["fillcolor"] = _v
        _v = arg.pop("hoverinfo", None)
        _v = hoverinfo if hoverinfo is not None else _v
        if _v is not None:
            self["hoverinfo"] = _v
        _v = arg.pop("hoverinfosrc", None)
        _v = hoverinfosrc if hoverinfosrc is not None else _v
        if _v is not None:
            self["hoverinfosrc"] = _v
        _v = arg.pop("hoverlabel", None)
        _v = hoverlabel if hoverlabel is not None else _v
        if _v is not None:
            self["hoverlabel"] = _v
        _v = arg.pop("hoveron", None)
        _v = hoveron if hoveron is not None else _v
        if _v is not None:
            self["hoveron"] = _v
        _v = arg.pop("hovertemplate", None)
        _v = hovertemplate if hovertemplate is not None else _v
        if _v is not None:
            self["hovertemplate"] = _v
        _v = arg.pop("hovertemplatesrc", None)
        _v = hovertemplatesrc if hovertemplatesrc is not None else _v
        if _v is not None:
            self["hovertemplatesrc"] = _v
        _v = arg.pop("hovertext", None)
        _v = hovertext if hovertext is not None else _v
        if _v is not None:
            self["hovertext"] = _v
        _v = arg.pop("hovertextsrc", None)
        _v = hovertextsrc if hovertextsrc is not None else _v
        if _v is not None:
            self["hovertextsrc"] = _v
        _v = arg.pop("ids", None)
        _v = ids if ids is not None else _v
        if _v is not None:
            self["ids"] = _v
        _v = arg.pop("idssrc", None)
        _v = idssrc if idssrc is not None else _v
        if _v is not None:
            self["idssrc"] = _v
        _v = arg.pop("jitter", None)
        _v = jitter if jitter is not None else _v
        if _v is not None:
            self["jitter"] = _v
        _v = arg.pop("legend", None)
        _v = legend if legend is not None else _v
        if _v is not None:
            self["legend"] = _v
        _v = arg.pop("legendgroup", None)
        _v = legendgroup if legendgroup is not None else _v
        if _v is not None:
            self["legendgroup"] = _v
        _v = arg.pop("legendgrouptitle", None)
        _v = legendgrouptitle if legendgrouptitle is not None else _v
        if _v is not None:
            self["legendgrouptitle"] = _v
        _v = arg.pop("legendrank", None)
        _v = legendrank if legendrank is not None else _v
        if _v is not None:
            self["legendrank"] = _v
        _v = arg.pop("legendwidth", None)
        _v = legendwidth if legendwidth is not None else _v
        if _v is not None:
            self["legendwidth"] = _v
        _v = arg.pop("line", None)
        _v = line if line is not None else _v
        if _v is not None:
            self["line"] = _v
        _v = arg.pop("marker", None)
        _v = marker if marker is not None else _v
        if _v is not None:
            self["marker"] = _v
        _v = arg.pop("meanline", None)
        _v = meanline if meanline is not None else _v
        if _v is not None:
            self["meanline"] = _v
        _v = arg.pop("meta", None)
        _v = meta if meta is not None else _v
        if _v is not None:
            self["meta"] = _v
        _v = arg.pop("metasrc", None)
        _v = metasrc if metasrc is not None else _v
        if _v is not None:
            self["metasrc"] = _v
        _v = arg.pop("name", None)
        _v = name if name is not None else _v
        if _v is not None:
            self["name"] = _v
        _v = arg.pop("offsetgroup", None)
        _v = offsetgroup if offsetgroup is not None else _v
        if _v is not None:
            self["offsetgroup"] = _v
        _v = arg.pop("opacity", None)
        _v = opacity if opacity is not None else _v
        if _v is not None:
            self["opacity"] = _v
        _v = arg.pop("orientation", None)
        _v = orientation if orientation is not None else _v
        if _v is not None:
            self["orientation"] = _v
        _v = arg.pop("pointpos", None)
        _v = pointpos if pointpos is not None else _v
        if _v is not None:
            self["pointpos"] = _v
        _v = arg.pop("points", None)
        _v = points if points is not None else _v
        if _v is not None:
            self["points"] = _v
        _v = arg.pop("quartilemethod", None)
        _v = quartilemethod if quartilemethod is not None else _v
        if _v is not None:
            self["quartilemethod"] = _v
        _v = arg.pop("scalegroup", None)
        _v = scalegroup if scalegroup is not None else _v
        if _v is not None:
            self["scalegroup"] = _v
        _v = arg.pop("scalemode", None)
        _v = scalemode if scalemode is not None else _v
        if _v is not None:
            self["scalemode"] = _v
        _v = arg.pop("selected", None)
        _v = selected if selected is not None else _v
        if _v is not None:
            self["selected"] = _v
        _v = arg.pop("selectedpoints", None)
        _v = selectedpoints if selectedpoints is not None else _v
        if _v is not None:
            self["selectedpoints"] = _v
        _v = arg.pop("showlegend", None)
        _v = showlegend if showlegend is not None else _v
        if _v is not None:
            self["showlegend"] = _v
        _v = arg.pop("side", None)
        _v = side if side is not None else _v
        if _v is not None:
            self["side"] = _v
        _v = arg.pop("span", None)
        _v = span if span is not None else _v
        if _v is not None:
            self["span"] = _v
        _v = arg.pop("spanmode", None)
        _v = spanmode if spanmode is not None else _v
        if _v is not None:
            self["spanmode"] = _v
        _v = arg.pop("stream", None)
        _v = stream if stream is not None else _v
        if _v is not None:
            self["stream"] = _v
        _v = arg.pop("text", None)
        _v = text if text is not None else _v
        if _v is not None:
            self["text"] = _v
        _v = arg.pop("textsrc", None)
        _v = textsrc if textsrc is not None else _v
        if _v is not None:
            self["textsrc"] = _v
        _v = arg.pop("uid", None)
        _v = uid if uid is not None else _v
        if _v is not None:
            self["uid"] = _v
        _v = arg.pop("uirevision", None)
        _v = uirevision if uirevision is not None else _v
        if _v is not None:
            self["uirevision"] = _v
        _v = arg.pop("unselected", None)
        _v = unselected if unselected is not None else _v
        if _v is not None:
            self["unselected"] = _v
        _v = arg.pop("visible", None)
        _v = visible if visible is not None else _v
        if _v is not None:
            self["visible"] = _v
        _v = arg.pop("width", None)
        _v = width if width is not None else _v
        if _v is not None:
            self["width"] = _v
        _v = arg.pop("x", None)
        _v = x if x is not None else _v
        if _v is not None:
            self["x"] = _v
        _v = arg.pop("x0", None)
        _v = x0 if x0 is not None else _v
        if _v is not None:
            self["x0"] = _v
        _v = arg.pop("xaxis", None)
        _v = xaxis if xaxis is not None else _v
        if _v is not None:
            self["xaxis"] = _v
        _v = arg.pop("xhoverformat", None)
        _v = xhoverformat if xhoverformat is not None else _v
        if _v is not None:
            self["xhoverformat"] = _v
        _v = arg.pop("xsrc", None)
        _v = xsrc if xsrc is not None else _v
        if _v is not None:
            self["xsrc"] = _v
        _v = arg.pop("y", None)
        _v = y if y is not None else _v
        if _v is not None:
            self["y"] = _v
        _v = arg.pop("y0", None)
        _v = y0 if y0 is not None else _v
        if _v is not None:
            self["y0"] = _v
        _v = arg.pop("yaxis", None)
        _v = yaxis if yaxis is not None else _v
        if _v is not None:
            self["yaxis"] = _v
        _v = arg.pop("yhoverformat", None)
        _v = yhoverformat if yhoverformat is not None else _v
        if _v is not None:
            self["yhoverformat"] = _v
        _v = arg.pop("ysrc", None)
        _v = ysrc if ysrc is not None else _v
        if _v is not None:
            self["ysrc"] = _v
        _v = arg.pop("zorder", None)
        _v = zorder if zorder is not None else _v
        if _v is not None:
            self["zorder"] = _v
        # Read-only literals
        # ------------------
        self._props["type"] = "violin"
        arg.pop("type", None)
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
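A minimal usage sketch (not part of the generated module; the sample values are hypothetical) showing how keyword arguments documented above translate into a violin trace:

    import plotly.graph_objects as go

    # Hypothetical sample values; any numeric sequence works.
    values = [2.1, 3.4, 2.8, 4.0, 3.3, 2.9]

    # Each keyword below sets the corresponding trace property, exactly as in
    # the constructor body above.
    fig = go.Figure(
        go.Violin(
            y=values,
            box={"visible": True},       # draw the inner box plot
            meanline={"visible": True},  # draw the mean line
            points="all",                # show every sample point
            side="positive",             # half violin, useful in "overlay" mode
        )
    )
    fig.show()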
 
 | 
	_violin.Violin.__init__ 
 | 
	Self-Contained 
 | 
					
	plotly.py 
 | 
	65 
 | 
	packages/python/plotly/plotly/figure_factory/_violin.py 
 | 
	def create_violin(
    data,
    data_header=None,
    group_header=None,
    colors=None,
    use_colorscale=False,
    group_stats=None,
    rugplot=True,
    sort=False,
    height=450,
    width=600,
    title="Violin and Rug Plot",
):
    """
    **deprecated**, use instead the plotly.graph_objects trace
    :class:`plotly.graph_objects.Violin`.
    :param (list|array) data: accepts either a list of numerical values,
        a list of dictionaries all with identical keys and at least one
        column of numeric values, or a pandas dataframe with at least one
        column of numbers.
    :param (str) data_header: the header of the data column to be used
        from an inputted pandas dataframe. Not applicable if 'data' is
        a list of numeric values.
    :param (str) group_header: applicable if grouping data by a variable.
        'group_header' must be set to the name of the grouping variable.
    :param (str|tuple|list|dict) colors: either a plotly scale name,
        an rgb or hex color, a color tuple, a list of colors or a
        dictionary. An rgb color is of the form 'rgb(x, y, z)' where
        x, y and z belong to the interval [0, 255] and a color tuple is a
        tuple of the form (a, b, c) where a, b and c belong to [0, 1].
        If colors is a list, it must contain valid color types as its
        members.
    :param (bool) use_colorscale: only applicable if grouping by another
        variable. Will implement a colorscale based on the first 2 colors
        of param colors. This means colors must be a list with at least 2
        colors in it (Plotly colorscales are accepted since they map to a
        list of two rgb colors). Default = False
    :param (dict) group_stats: a dictionary where each key is a unique
        value from the group_header column in data. Each value must be a
        number and will be used to color the violin plots if a colorscale
        is being used.
    :param (bool) rugplot: determines if a rugplot is drawn on the violin plot.
        Default = True
    :param (bool) sort: determines if violins are sorted
        alphabetically (True) or by input order (False). Default = False
    :param (float) height: the height of the violin plot.
    :param (float) width: the width of the violin plot.
    :param (str) title: the title of the violin plot.
    Example 1: Single Violin Plot
    >>> from plotly.figure_factory import create_violin
    >>> import plotly.graph_objs as graph_objects
    >>> import numpy as np
    >>> from scipy import stats
    >>> # create list of random values
    >>> data_list = np.random.randn(100)
    >>> # create violin fig
    >>> fig = create_violin(data_list, colors='#604d9e')
    >>> # plot
    >>> fig.show()
    Example 2: Multiple Violin Plots with Qualitative Coloring
    >>> from plotly.figure_factory import create_violin
    >>> import plotly.graph_objs as graph_objects
    >>> import numpy as np
    >>> import pandas as pd
    >>> from scipy import stats
    >>> # create dataframe
    >>> np.random.seed(619517)
    >>> Nr = 250
    >>> y = np.random.randn(Nr)
    >>> gr = np.random.choice(list("ABCDE"), Nr)
    >>> norm_params = [(0, 1.2), (0.7, 1), (-0.5, 1.4), (0.3, 1), (0.8, 0.9)]
    >>> for i, letter in enumerate("ABCDE"):
    ...     y[gr == letter] *= norm_params[i][1] + norm_params[i][0]
    >>> df = pd.DataFrame(dict(Score=y, Group=gr))
    >>> # create violin fig
    >>> fig = create_violin(df, data_header='Score', group_header='Group',
    ...                    sort=True, height=600, width=1000)
    >>> # plot
    >>> fig.show()
    Example 3: Violin Plots with Colorscale
    >>> from plotly.figure_factory import create_violin
    >>> import plotly.graph_objs as graph_objects
    >>> import numpy as np
    >>> import pandas as pd
    >>> from scipy import stats
    >>> # create dataframe
    >>> np.random.seed(619517)
    >>> Nr = 250
    >>> y = np.random.randn(Nr)
    >>> gr = np.random.choice(list("ABCDE"), Nr)
    >>> norm_params = [(0, 1.2), (0.7, 1), (-0.5, 1.4), (0.3, 1), (0.8, 0.9)]
    >>> for i, letter in enumerate("ABCDE"):
    ...     y[gr == letter] *= norm_params[i][1] + norm_params[i][0]
    >>> df = pd.DataFrame(dict(Score=y, Group=gr))
    >>> # define header params
    >>> data_header = 'Score'
    >>> group_header = 'Group'
    >>> # make groupby object with pandas
    >>> group_stats = {}
    >>> groupby_data = df.groupby([group_header])
    >>> for group in "ABCDE":
    ...     data_from_group = groupby_data.get_group(group)[data_header]
    ...     # take a stat of the grouped data
    ...     stat = np.median(data_from_group)
    ...     # add to dictionary
    ...     group_stats[group] = stat
    >>> # create violin fig
    >>> fig = create_violin(df, data_header='Score', group_header='Group',
    ...                     height=600, width=1000, use_colorscale=True,
    ...                     group_stats=group_stats)
    >>> # plot
    >>> fig.show()
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__violin.create_violin.txt 
 | 
	def create_violin(
    data,
    data_header=None,
    group_header=None,
    colors=None,
    use_colorscale=False,
    group_stats=None,
    rugplot=True,
    sort=False,
    height=450,
    width=600,
    title="Violin and Rug Plot",
):
    """
    **deprecated**, use instead the plotly.graph_objects trace
    :class:`plotly.graph_objects.Violin`.
    :param (list|array) data: accepts either a list of numerical values,
        a list of dictionaries all with identical keys and at least one
        column of numeric values, or a pandas dataframe with at least one
        column of numbers.
    :param (str) data_header: the header of the data column to be used
        from an inputted pandas dataframe. Not applicable if 'data' is
        a list of numeric values.
    :param (str) group_header: applicable if grouping data by a variable.
        'group_header' must be set to the name of the grouping variable.
    :param (str|tuple|list|dict) colors: either a plotly scale name,
        an rgb or hex color, a color tuple, a list of colors or a
        dictionary. An rgb color is of the form 'rgb(x, y, z)' where
        x, y and z belong to the interval [0, 255] and a color tuple is a
        tuple of the form (a, b, c) where a, b and c belong to [0, 1].
        If colors is a list, it must contain valid color types as its
        members.
    :param (bool) use_colorscale: only applicable if grouping by another
        variable. Will implement a colorscale based on the first 2 colors
        of param colors. This means colors must be a list with at least 2
        colors in it (Plotly colorscales are accepted since they map to a
        list of two rgb colors). Default = False
    :param (dict) group_stats: a dictionary where each key is a unique
        value from the group_header column in data. Each value must be a
        number and will be used to color the violin plots if a colorscale
        is being used.
    :param (bool) rugplot: determines if a rugplot is drawn on the violin plot.
        Default = True
    :param (bool) sort: determines if violins are sorted
        alphabetically (True) or by input order (False). Default = False
    :param (float) height: the height of the violin plot.
    :param (float) width: the width of the violin plot.
    :param (str) title: the title of the violin plot.
    Example 1: Single Violin Plot
    >>> from plotly.figure_factory import create_violin
    >>> import plotly.graph_objs as graph_objects
    >>> import numpy as np
    >>> from scipy import stats
    >>> # create list of random values
    >>> data_list = np.random.randn(100)
    >>> # create violin fig
    >>> fig = create_violin(data_list, colors='#604d9e')
    >>> # plot
    >>> fig.show()
    Example 2: Multiple Violin Plots with Qualitative Coloring
    >>> from plotly.figure_factory import create_violin
    >>> import plotly.graph_objs as graph_objects
    >>> import numpy as np
    >>> import pandas as pd
    >>> from scipy import stats
    >>> # create dataframe
    >>> np.random.seed(619517)
    >>> Nr = 250
    >>> y = np.random.randn(Nr)
    >>> gr = np.random.choice(list("ABCDE"), Nr)
    >>> norm_params = [(0, 1.2), (0.7, 1), (-0.5, 1.4), (0.3, 1), (0.8, 0.9)]
    >>> for i, letter in enumerate("ABCDE"):
    ...     y[gr == letter] *= norm_params[i][1] + norm_params[i][0]
    >>> df = pd.DataFrame(dict(Score=y, Group=gr))
    >>> # create violin fig
    >>> fig = create_violin(df, data_header='Score', group_header='Group',
    ...                    sort=True, height=600, width=1000)
    >>> # plot
    >>> fig.show()
    Example 3: Violin Plots with Colorscale
    >>> from plotly.figure_factory import create_violin
    >>> import plotly.graph_objs as graph_objects
    >>> import numpy as np
    >>> import pandas as pd
    >>> from scipy import stats
    >>> # create dataframe
    >>> np.random.seed(619517)
    >>> Nr = 250
    >>> y = np.random.randn(Nr)
    >>> gr = np.random.choice(list("ABCDE"), Nr)
    >>> norm_params = [(0, 1.2), (0.7, 1), (-0.5, 1.4), (0.3, 1), (0.8, 0.9)]
    >>> for i, letter in enumerate("ABCDE"):
    ...     y[gr == letter] *= norm_params[i][1] + norm_params[i][0]
    >>> df = pd.DataFrame(dict(Score=y, Group=gr))
    >>> # define header params
    >>> data_header = 'Score'
    >>> group_header = 'Group'
    >>> # make groupby object with pandas
    >>> group_stats = {}
    >>> groupby_data = df.groupby([group_header])
    >>> for group in "ABCDE":
    ...     data_from_group = groupby_data.get_group(group)[data_header]
    ...     # take a stat of the grouped data
    ...     stat = np.median(data_from_group)
    ...     # add to dictionary
    ...     group_stats[group] = stat
    >>> # create violin fig
    >>> fig = create_violin(df, data_header='Score', group_header='Group',
    ...                     height=600, width=1000, use_colorscale=True,
    ...                     group_stats=group_stats)
    >>> # plot
    >>> fig.show()
    """
    # Validate colors
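    # A dict maps each group name to its own color; anything else (a Plotly
    # scale name, a single color, or a list of colors) is normalized to a
    # list of rgb strings by validate_colors().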
    if isinstance(colors, dict):
        valid_colors = clrs.validate_colors_dict(colors, "rgb")
    else:
        valid_colors = clrs.validate_colors(colors, "rgb")
    # validate data and choose plot type
    if group_header is None:
        if isinstance(data, list):
            if len(data) <= 0:
                raise exceptions.PlotlyError(
                    "If data is a list, it must be "
                    "nonempty and contain either "
                    "numbers or dictionaries."
                )
            if not all(isinstance(element, Number) for element in data):
                raise exceptions.PlotlyError(
                    "If data is a list, it must " "contain only numbers."
                )
        if pd and isinstance(data, pd.core.frame.DataFrame):
            if data_header is None:
                raise exceptions.PlotlyError(
                    "data_header must be the "
                    "column name with the "
                    "desired numeric data for "
                    "the violin plot."
                )
            data = data[data_header].values.tolist()
        # call the plotting functions
        plot_data, plot_xrange = violinplot(
            data, fillcolor=valid_colors[0], rugplot=rugplot
        )
        layout = graph_objs.Layout(
            title=title,
            autosize=False,
            font=graph_objs.layout.Font(size=11),
            height=height,
            showlegend=False,
            width=width,
            xaxis=make_XAxis("", plot_xrange),
            yaxis=make_YAxis(""),
            hovermode="closest",
        )
        layout["yaxis"].update(dict(showline=False, showticklabels=False, ticks=""))
        fig = graph_objs.Figure(data=plot_data, layout=layout)
        return fig
    else:
        if not isinstance(data, pd.core.frame.DataFrame):
            raise exceptions.PlotlyError(
                "Error. You must use a pandas "
                "DataFrame if you are using a "
                "group header."
            )
        if data_header is None:
            raise exceptions.PlotlyError(
                "data_header must be the column "
                "name with the desired numeric "
                "data for the violin plot."
            )
        if use_colorscale is False:
            if isinstance(valid_colors, dict):
                # validate colors dict choice below
                fig = violin_dict(
                    data,
                    data_header,
                    group_header,
                    valid_colors,
                    use_colorscale,
                    group_stats,
                    rugplot,
                    sort,
                    height,
                    width,
                    title,
                )
                return fig
            else:
                fig = violin_no_colorscale(
                    data,
                    data_header,
                    group_header,
                    valid_colors,
                    use_colorscale,
                    group_stats,
                    rugplot,
                    sort,
                    height,
                    width,
                    title,
                )
                return fig
        else:
            if isinstance(valid_colors, dict):
                raise exceptions.PlotlyError(
                    "The colors param cannot be "
                    "a dictionary if you are "
                    "using a colorscale."
                )
            if len(valid_colors) < 2:
                raise exceptions.PlotlyError(
                    "colors must be a list with "
                    "at least 2 colors. A "
                    "Plotly scale is allowed."
                )
            if not isinstance(group_stats, dict):
                raise exceptions.PlotlyError(
                    "Your group_stats param " "must be a dictionary."
                )
            fig = violin_colorscale(
                data,
                data_header,
                group_header,
                valid_colors,
                use_colorscale,
                group_stats,
                rugplot,
                sort,
                height,
                width,
                title,
            )
            return fig
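A short sketch (hypothetical data; the figure factory needs scipy installed for the kernel density estimate) showing which branch of the dispatch above each call exercises:

    import numpy as np
    import pandas as pd
    from plotly.figure_factory import create_violin

    rng = np.random.default_rng(1)
    df = pd.DataFrame({"Score": rng.normal(size=120),
                       "Group": rng.choice(list("ABC"), 120)})

    # group_header is None -> single violin via violinplot()
    fig_single = create_violin(list(df["Score"]))

    # group_header set, colors is a dict -> violin_dict()
    fig_dict = create_violin(df, data_header="Score", group_header="Group",
                             colors={"A": "#1f77b4", "B": "#ff7f0e", "C": "#2ca02c"})

    # group_header set with use_colorscale=True and group_stats -> violin_colorscale()
    stats = df.groupby("Group")["Score"].median().to_dict()
    fig_scale = create_violin(df, data_header="Score", group_header="Group",
                              use_colorscale=True, colors="Viridis",
                              group_stats=stats)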
 
 | 
	_violin.create_violin 
 | 
	File-Level 
 | 
					
	plotly.py 
 | 
	66 
 | 
	packages/python/plotly/plotly/graph_objs/_waterfall.py 
 | 
	    def __init__(
        self,
        arg=None,
        alignmentgroup=None,
        base=None,
        cliponaxis=None,
        connector=None,
        constraintext=None,
        customdata=None,
        customdatasrc=None,
        decreasing=None,
        dx=None,
        dy=None,
        hoverinfo=None,
        hoverinfosrc=None,
        hoverlabel=None,
        hovertemplate=None,
        hovertemplatesrc=None,
        hovertext=None,
        hovertextsrc=None,
        ids=None,
        idssrc=None,
        increasing=None,
        insidetextanchor=None,
        insidetextfont=None,
        legend=None,
        legendgroup=None,
        legendgrouptitle=None,
        legendrank=None,
        legendwidth=None,
        measure=None,
        measuresrc=None,
        meta=None,
        metasrc=None,
        name=None,
        offset=None,
        offsetgroup=None,
        offsetsrc=None,
        opacity=None,
        orientation=None,
        outsidetextfont=None,
        selectedpoints=None,
        showlegend=None,
        stream=None,
        text=None,
        textangle=None,
        textfont=None,
        textinfo=None,
        textposition=None,
        textpositionsrc=None,
        textsrc=None,
        texttemplate=None,
        texttemplatesrc=None,
        totals=None,
        uid=None,
        uirevision=None,
        visible=None,
        width=None,
        widthsrc=None,
        x=None,
        x0=None,
        xaxis=None,
        xhoverformat=None,
        xperiod=None,
        xperiod0=None,
        xperiodalignment=None,
        xsrc=None,
        y=None,
        y0=None,
        yaxis=None,
        yhoverformat=None,
        yperiod=None,
        yperiod0=None,
        yperiodalignment=None,
        ysrc=None,
        zorder=None,
        **kwargs,
    ):
        """
        Construct a new Waterfall object
        Draws a waterfall trace, which is a useful graph to display the
        contribution of various elements (either positive or negative)
        in a bar chart. The data visualized by the span of the bars is
        set in `y` if `orientation` is set to "v" (the default) and the
        labels are set in `x`. By setting `orientation` to "h", the
        roles are interchanged.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.Waterfall`
        alignmentgroup
            Set several traces linked to the same position axis or
            matching axes to the same alignmentgroup. This controls
            whether bars compute their positional range dependently
            or independently.
        base
            Sets where the bar base is drawn (in position axis
            units).
        cliponaxis
            Determines whether the text nodes are clipped about the
            subplot axes. To show the text nodes above axis lines
            and tick labels, make sure to set `xaxis.layer` and
            `yaxis.layer` to *below traces*.
        connector
            :class:`plotly.graph_objects.waterfall.Connector`
            instance or dict with compatible properties
        constraintext
            Constrain the size of text inside or outside a bar to
            be no larger than the bar itself.
        customdata
            Assigns extra data to each datum. This may be useful
            when listening to hover, click and selection events.
            Note that "scatter" traces also append customdata items
            in the markers DOM elements.
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        decreasing
            :class:`plotly.graph_objects.waterfall.Decreasing`
            instance or dict with compatible properties
        dx
            Sets the x coordinate step. See `x0` for more info.
        dy
            Sets the y coordinate step. See `y0` for more info.
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.waterfall.Hoverlabel`
            instance or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Finally, the template string has access
            to variables `initial`, `delta` and `final`. Anything
            contained in tag `<extra>` is displayed in the
            secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Sets hover text elements associated with each (x,y)
            pair. If a single string, the same string appears over
            all the data points. If an array of strings, the items
            are mapped in order to this trace's (x,y)
            coordinates. To be seen, trace `hoverinfo` must contain
            a "text" flag.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        increasing
            :class:`plotly.graph_objects.waterfall.Increasing`
            instance or dict with compatible properties
        insidetextanchor
            Determines if texts are kept at center or start/end
            points in `textposition` "inside" mode.
        insidetextfont
            Sets the font used for `text` lying inside the bar.
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgroup
            Sets the legend group for this trace. Traces and shapes
            part of the same legend group hide/show at the same
            time when toggling legend items.
        legendgrouptitle
            :class:`plotly.graph_objects.waterfall.Legendgrouptitle
            ` instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When having
            unranked or equal rank items shapes would be displayed
            after traces i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        measure
            An array containing types of values. By default the
            values are considered as 'relative'. However, it is
            possible to use 'total' to compute the sums. Also
            'absolute' could be applied to reset the computed total
            or to declare an initial value where needed.
        measuresrc
            Sets the source reference on Chart Studio Cloud for
            `measure`.
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        offset
            Shifts the position where the bar is drawn (in position
            axis units). In "group" barmode, traces that set
            "offset" will be excluded and drawn in "overlay" mode
            instead.
        offsetgroup
            Set several traces linked to the same position axis or
            matching axes to the same offsetgroup where bars of the
            same position coordinate will line up.
        offsetsrc
            Sets the source reference on Chart Studio Cloud for
            `offset`.
        opacity
            Sets the opacity of the trace.
        orientation
            Sets the orientation of the bars. With "v" ("h"), the
            value of each bar spans along the vertical
            (horizontal).
        outsidetextfont
            Sets the font used for `text` lying outside the bar.
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas,
            any other non-array values means no selection all where
            the `selected` and `unselected` styles have no effect.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        stream
            :class:`plotly.graph_objects.waterfall.Stream` instance
            or dict with compatible properties
        text
            Sets text elements associated with each (x,y) pair. If
            a single string, the same string appears over all the
            data points. If an array of strings, the items are
            mapped in order to this trace's (x,y) coordinates.
            If trace `hoverinfo` contains a "text" flag and
            "hovertext" is not set, these elements will be seen in
            the hover labels.
        textangle
            Sets the angle of the tick labels with respect to the
            bar. For example, a `tickangle` of -90 draws the tick
            labels vertically. With "auto" the texts may
            automatically be rotated to fit with the maximum size
            in bars.
        textfont
            Sets the font used for `text`.
        textinfo
            Determines which trace information appear on the graph.
            In the case of having multiple waterfalls, totals are
            computed separately (per trace).
        textposition
            Specifies the location of the `text`. "inside"
            positions `text` inside, next to the bar end (rotated
            and scaled if needed). "outside" positions `text`
            outside, next to the bar end (scaled if needed), unless
            there is another bar stacked on this one, then the text
            gets pushed inside. "auto" tries to position `text`
            inside the bar, but if the bar is too small and no bar
            is stacked on this one the text is moved outside. If
            "none", no text appears.
        textpositionsrc
            Sets the source reference on Chart Studio Cloud for
            `textposition`.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        texttemplate
            Template string used for rendering the information text
            that appear on points. Note that this will override
            `textinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. Every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Finally, the template string has access
            to variables `initial`, `delta`, `final` and `label`.
        texttemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `texttemplate`.
        totals
            :class:`plotly.graph_objects.waterfall.Totals` instance
            or dict with compatible properties
        uid
            Assign an id to this trace. Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        width
            Sets the bar width (in position axis units).
        widthsrc
            Sets the source reference on Chart Studio Cloud for
            `width`.
        x
            Sets the x coordinates.
        x0
            Alternate to `x`. Builds a linear space of x
            coordinates. Use with `dx` where `x0` is the starting
            coordinate and `dx` the step.
        xaxis
            Sets a reference between this trace's x coordinates and
            a 2D cartesian x axis. If "x" (the default value), the
            x coordinates refer to `layout.xaxis`. If "x2", the x
            coordinates refer to `layout.xaxis2`, and so on.
        xhoverformat
            Sets the hover text formatting rule for `x` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*. By default the values are
            formatted using `xaxis.hoverformat`.
        xperiod
            Only relevant when the axis `type` is "date". Sets the
            period positioning in milliseconds or "M<n>" on the x
            axis. Special values in the form of "M<n>" could be
            used to declare the number of months. In this case `n`
            must be a positive integer.
        xperiod0
            Only relevant when the axis `type` is "date". Sets the
            base for period positioning in milliseconds or date
            string on the x0 axis. When `x0period` is a round
            number of weeks, `x0period0` by default would be on a
            Sunday i.e. 2000-01-02, otherwise it would be at
            2000-01-01.
        xperiodalignment
            Only relevant when the axis `type` is "date". Sets the
            alignment of data points on the x axis.
        xsrc
            Sets the source reference on Chart Studio Cloud for
            `x`.
        y
            Sets the y coordinates.
        y0
            Alternate to `y`. Builds a linear space of y
            coordinates. Use with `dy` where `y0` is the starting
            coordinate and `dy` the step.
        yaxis
            Sets a reference between this trace's y coordinates and
            a 2D cartesian y axis. If "y" (the default value), the
            y coordinates refer to `layout.yaxis`. If "y2", the y
            coordinates refer to `layout.yaxis2`, and so on.
        yhoverformat
            Sets the hover text formatting rule for `y` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*. By default the values are
            formatted using `yaxis.hoverformat`.
        yperiod
            Only relevant when the axis `type` is "date". Sets the
            period positioning in milliseconds or "M<n>" on the y
            axis. Special values in the form of "M<n>" could be
            used to declare the number of months. In this case `n`
            must be a positive integer.
        yperiod0
            Only relevant when the axis `type` is "date". Sets the
            base for period positioning in milliseconds or date
            string on the y0 axis. When `y0period` is a round
            number of weeks, `y0period0` by default would be on a
            Sunday i.e. 2000-01-02, otherwise it would be at
            2000-01-01.
        yperiodalignment
            Only relevant when the axis `type` is "date". Sets the
            alignment of data points on the y axis.
        ysrc
            Sets the source reference on Chart Studio Cloud for
            `y`.
        zorder
            Sets the layer on which this trace is displayed,
            relative to other SVG traces on the same subplot. SVG
            traces with higher `zorder` appear in front of those
            with lower `zorder`.
        Returns
        -------
        Waterfall
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__waterfall.Waterfall.__init__.txt 
 | 
	    def __init__(
        self,
        arg=None,
        alignmentgroup=None,
        base=None,
        cliponaxis=None,
        connector=None,
        constraintext=None,
        customdata=None,
        customdatasrc=None,
        decreasing=None,
        dx=None,
        dy=None,
        hoverinfo=None,
        hoverinfosrc=None,
        hoverlabel=None,
        hovertemplate=None,
        hovertemplatesrc=None,
        hovertext=None,
        hovertextsrc=None,
        ids=None,
        idssrc=None,
        increasing=None,
        insidetextanchor=None,
        insidetextfont=None,
        legend=None,
        legendgroup=None,
        legendgrouptitle=None,
        legendrank=None,
        legendwidth=None,
        measure=None,
        measuresrc=None,
        meta=None,
        metasrc=None,
        name=None,
        offset=None,
        offsetgroup=None,
        offsetsrc=None,
        opacity=None,
        orientation=None,
        outsidetextfont=None,
        selectedpoints=None,
        showlegend=None,
        stream=None,
        text=None,
        textangle=None,
        textfont=None,
        textinfo=None,
        textposition=None,
        textpositionsrc=None,
        textsrc=None,
        texttemplate=None,
        texttemplatesrc=None,
        totals=None,
        uid=None,
        uirevision=None,
        visible=None,
        width=None,
        widthsrc=None,
        x=None,
        x0=None,
        xaxis=None,
        xhoverformat=None,
        xperiod=None,
        xperiod0=None,
        xperiodalignment=None,
        xsrc=None,
        y=None,
        y0=None,
        yaxis=None,
        yhoverformat=None,
        yperiod=None,
        yperiod0=None,
        yperiodalignment=None,
        ysrc=None,
        zorder=None,
        **kwargs,
    ):
        """
        Construct a new Waterfall object
        Draws a waterfall trace, which is a useful graph to display the
        contribution of various elements (either positive or negative)
        in a bar chart. The data visualized by the span of the bars is
        set in `y` if `orientation` is set to "v" (the default) and the
        labels are set in `x`. By setting `orientation` to "h", the
        roles are interchanged.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.Waterfall`
        alignmentgroup
            Set several traces linked to the same position axis or
            matching axes to the same alignmentgroup. This controls
            whether bars compute their positional range dependently
            or independently.
        base
            Sets where the bar base is drawn (in position axis
            units).
        cliponaxis
            Determines whether the text nodes are clipped about the
            subplot axes. To show the text nodes above axis lines
            and tick labels, make sure to set `xaxis.layer` and
            `yaxis.layer` to *below traces*.
        connector
            :class:`plotly.graph_objects.waterfall.Connector`
            instance or dict with compatible properties
        constraintext
            Constrain the size of text inside or outside a bar to
            be no larger than the bar itself.
        customdata
            Assigns extra data to each datum. This may be useful
            when listening to hover, click and selection events.
            Note that "scatter" traces also append customdata items
            in the markers DOM elements.
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        decreasing
            :class:`plotly.graph_objects.waterfall.Decreasing`
            instance or dict with compatible properties
        dx
            Sets the x coordinate step. See `x0` for more info.
        dy
            Sets the y coordinate step. See `y0` for more info.
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.waterfall.Hoverlabel`
            instance or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Finally, the template string has access
            to variables `initial`, `delta` and `final`. Anything
            contained in tag `<extra>` is displayed in the
            secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Sets hover text elements associated with each (x,y)
            pair. If a single string, the same string appears over
            all the data points. If an array of strings, the items
            are mapped in order to this trace's (x,y)
            coordinates. To be seen, trace `hoverinfo` must contain
            a "text" flag.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        increasing
            :class:`plotly.graph_objects.waterfall.Increasing`
            instance or dict with compatible properties
        insidetextanchor
            Determines if texts are kept at center or start/end
            points in `textposition` "inside" mode.
        insidetextfont
            Sets the font used for `text` lying inside the bar.
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgroup
            Sets the legend group for this trace. Traces and shapes
            part of the same legend group hide/show at the same
            time when toggling legend items.
        legendgrouptitle
            :class:`plotly.graph_objects.waterfall.Legendgrouptitle
            ` instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When having
            unranked or equal rank items, shapes would be displayed
            after traces, i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        measure
            An array containing types of values. By default the
            values are considered as 'relative'. However, it is
            possible to use 'total' to compute the sums. Also
            'absolute' could be applied to reset the computed total
            or to declare an initial value where needed.
        measuresrc
            Sets the source reference on Chart Studio Cloud for
            `measure`.
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        offset
            Shifts the position where the bar is drawn (in position
            axis units). In "group" barmode, traces that set
            "offset" will be excluded and drawn in "overlay" mode
            instead.
        offsetgroup
            Set several traces linked to the same position axis or
            matching axes to the same offsetgroup where bars of the
            same position coordinate will line up.
        offsetsrc
            Sets the source reference on Chart Studio Cloud for
            `offset`.
        opacity
            Sets the opacity of the trace.
        orientation
            Sets the orientation of the bars. With "v" ("h"), the
            value of each bar spans along the vertical
            (horizontal).
        outsidetextfont
            Sets the font used for `text` lying outside the bar.
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas
            any other non-array value means no selection at all, where
            the `selected` and `unselected` styles have no effect.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        stream
            :class:`plotly.graph_objects.waterfall.Stream` instance
            or dict with compatible properties
        text
            Sets text elements associated with each (x,y) pair. If
            a single string, the same string appears over all the
            data points. If an array of strings, the items are
            mapped in order to this trace's (x,y) coordinates.
            If trace `hoverinfo` contains a "text" flag and
            "hovertext" is not set, these elements will be seen in
            the hover labels.
        textangle
            Sets the angle of the tick labels with respect to the
            bar. For example, a `tickangle` of -90 draws the tick
            labels vertically. With "auto" the texts may
            automatically be rotated to fit with the maximum size
            in bars.
        textfont
            Sets the font used for `text`.
        textinfo
            Determines which trace information appear on the graph.
            In the case of having multiple waterfalls, totals are
            computed separately (per trace).
        textposition
            Specifies the location of the `text`. "inside"
            positions `text` inside, next to the bar end (rotated
            and scaled if needed). "outside" positions `text`
            outside, next to the bar end (scaled if needed), unless
            there is another bar stacked on this one, then the text
            gets pushed inside. "auto" tries to position `text`
            inside the bar, but if the bar is too small and no bar
            is stacked on this one the text is moved outside. If
            "none", no text appears.
        textpositionsrc
            Sets the source reference on Chart Studio Cloud for
            `textposition`.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        texttemplate
            Template string used for rendering the information text
            that appear on points. Note that this will override
            `textinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. Every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Finally, the template string has access
            to variables `initial`, `delta`, `final` and `label`.
        texttemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `texttemplate`.
        totals
            :class:`plotly.graph_objects.waterfall.Totals` instance
            or dict with compatible properties
        uid
            Assign an id to this trace. Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        width
            Sets the bar width (in position axis units).
        widthsrc
            Sets the source reference on Chart Studio Cloud for
            `width`.
        x
            Sets the x coordinates.
        x0
            Alternate to `x`. Builds a linear space of x
            coordinates. Use with `dx` where `x0` is the starting
            coordinate and `dx` the step.
        xaxis
            Sets a reference between this trace's x coordinates and
            a 2D cartesian x axis. If "x" (the default value), the
            x coordinates refer to `layout.xaxis`. If "x2", the x
            coordinates refer to `layout.xaxis2`, and so on.
        xhoverformat
            Sets the hover text formatting rule for `x` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*. By default the values are
            formatted using `xaxis.hoverformat`.
        xperiod
            Only relevant when the axis `type` is "date". Sets the
            period positioning in milliseconds or "M<n>" on the x
            axis. Special values in the form of "M<n>" could be
            used to declare the number of months. In this case `n`
            must be a positive integer.
        xperiod0
            Only relevant when the axis `type` is "date". Sets the
            base for period positioning in milliseconds or date
            string on the x0 axis. When `x0period` is a round number
            of weeks, the `x0period0` by default would be on a
            Sunday i.e. 2000-01-02, otherwise it would be at
            2000-01-01.
        xperiodalignment
            Only relevant when the axis `type` is "date". Sets the
            alignment of data points on the x axis.
        xsrc
            Sets the source reference on Chart Studio Cloud for
            `x`.
        y
            Sets the y coordinates.
        y0
            Alternate to `y`. Builds a linear space of y
            coordinates. Use with `dy` where `y0` is the starting
            coordinate and `dy` the step.
        yaxis
            Sets a reference between this trace's y coordinates and
            a 2D cartesian y axis. If "y" (the default value), the
            y coordinates refer to `layout.yaxis`. If "y2", the y
            coordinates refer to `layout.yaxis2`, and so on.
        yhoverformat
            Sets the hover text formatting rule for `y` using d3
            formatting mini-languages which are very similar to
            those in Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display *09~15~23.46*. By default the values are
            formatted using `yaxis.hoverformat`.
        yperiod
            Only relevant when the axis `type` is "date". Sets the
            period positioning in milliseconds or "M<n>" on the y
            axis. Special values in the form of "M<n>" could be
            used to declare the number of months. In this case `n`
            must be a positive integer.
        yperiod0
            Only relevant when the axis `type` is "date". Sets the
            base for period positioning in milliseconds or date
            string on the y0 axis. When `y0period` is a round number
            of weeks, the `y0period0` by default would be on a
            Sunday i.e. 2000-01-02, otherwise it would be at
            2000-01-01.
        yperiodalignment
            Only relevant when the axis `type` is "date". Sets the
            alignment of data points on the y axis.
        ysrc
            Sets the source reference on Chart Studio Cloud for
            `y`.
        zorder
            Sets the layer on which this trace is displayed,
            relative to other SVG traces on the same subplot. SVG
            traces with higher `zorder` appear in front of those
            with lower `zorder`.
        Returns
        -------
        Waterfall
        """
        super(Waterfall, self).__init__("waterfall")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.Waterfall
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Waterfall`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("alignmentgroup", None)
        _v = alignmentgroup if alignmentgroup is not None else _v
        if _v is not None:
            self["alignmentgroup"] = _v
        _v = arg.pop("base", None)
        _v = base if base is not None else _v
        if _v is not None:
            self["base"] = _v
        _v = arg.pop("cliponaxis", None)
        _v = cliponaxis if cliponaxis is not None else _v
        if _v is not None:
            self["cliponaxis"] = _v
        _v = arg.pop("connector", None)
        _v = connector if connector is not None else _v
        if _v is not None:
            self["connector"] = _v
        _v = arg.pop("constraintext", None)
        _v = constraintext if constraintext is not None else _v
        if _v is not None:
            self["constraintext"] = _v
        _v = arg.pop("customdata", None)
        _v = customdata if customdata is not None else _v
        if _v is not None:
            self["customdata"] = _v
        _v = arg.pop("customdatasrc", None)
        _v = customdatasrc if customdatasrc is not None else _v
        if _v is not None:
            self["customdatasrc"] = _v
        _v = arg.pop("decreasing", None)
        _v = decreasing if decreasing is not None else _v
        if _v is not None:
            self["decreasing"] = _v
        _v = arg.pop("dx", None)
        _v = dx if dx is not None else _v
        if _v is not None:
            self["dx"] = _v
        _v = arg.pop("dy", None)
        _v = dy if dy is not None else _v
        if _v is not None:
            self["dy"] = _v
        _v = arg.pop("hoverinfo", None)
        _v = hoverinfo if hoverinfo is not None else _v
        if _v is not None:
            self["hoverinfo"] = _v
        _v = arg.pop("hoverinfosrc", None)
        _v = hoverinfosrc if hoverinfosrc is not None else _v
        if _v is not None:
            self["hoverinfosrc"] = _v
        _v = arg.pop("hoverlabel", None)
        _v = hoverlabel if hoverlabel is not None else _v
        if _v is not None:
            self["hoverlabel"] = _v
        _v = arg.pop("hovertemplate", None)
        _v = hovertemplate if hovertemplate is not None else _v
        if _v is not None:
            self["hovertemplate"] = _v
        _v = arg.pop("hovertemplatesrc", None)
        _v = hovertemplatesrc if hovertemplatesrc is not None else _v
        if _v is not None:
            self["hovertemplatesrc"] = _v
        _v = arg.pop("hovertext", None)
        _v = hovertext if hovertext is not None else _v
        if _v is not None:
            self["hovertext"] = _v
        _v = arg.pop("hovertextsrc", None)
        _v = hovertextsrc if hovertextsrc is not None else _v
        if _v is not None:
            self["hovertextsrc"] = _v
        _v = arg.pop("ids", None)
        _v = ids if ids is not None else _v
        if _v is not None:
            self["ids"] = _v
        _v = arg.pop("idssrc", None)
        _v = idssrc if idssrc is not None else _v
        if _v is not None:
            self["idssrc"] = _v
        _v = arg.pop("increasing", None)
        _v = increasing if increasing is not None else _v
        if _v is not None:
            self["increasing"] = _v
        _v = arg.pop("insidetextanchor", None)
        _v = insidetextanchor if insidetextanchor is not None else _v
        if _v is not None:
            self["insidetextanchor"] = _v
        _v = arg.pop("insidetextfont", None)
        _v = insidetextfont if insidetextfont is not None else _v
        if _v is not None:
            self["insidetextfont"] = _v
        _v = arg.pop("legend", None)
        _v = legend if legend is not None else _v
        if _v is not None:
            self["legend"] = _v
        _v = arg.pop("legendgroup", None)
        _v = legendgroup if legendgroup is not None else _v
        if _v is not None:
            self["legendgroup"] = _v
        _v = arg.pop("legendgrouptitle", None)
        _v = legendgrouptitle if legendgrouptitle is not None else _v
        if _v is not None:
            self["legendgrouptitle"] = _v
        _v = arg.pop("legendrank", None)
        _v = legendrank if legendrank is not None else _v
        if _v is not None:
            self["legendrank"] = _v
        _v = arg.pop("legendwidth", None)
        _v = legendwidth if legendwidth is not None else _v
        if _v is not None:
            self["legendwidth"] = _v
        _v = arg.pop("measure", None)
        _v = measure if measure is not None else _v
        if _v is not None:
            self["measure"] = _v
        _v = arg.pop("measuresrc", None)
        _v = measuresrc if measuresrc is not None else _v
        if _v is not None:
            self["measuresrc"] = _v
        _v = arg.pop("meta", None)
        _v = meta if meta is not None else _v
        if _v is not None:
            self["meta"] = _v
        _v = arg.pop("metasrc", None)
        _v = metasrc if metasrc is not None else _v
        if _v is not None:
            self["metasrc"] = _v
        _v = arg.pop("name", None)
        _v = name if name is not None else _v
        if _v is not None:
            self["name"] = _v
        _v = arg.pop("offset", None)
        _v = offset if offset is not None else _v
        if _v is not None:
            self["offset"] = _v
        _v = arg.pop("offsetgroup", None)
        _v = offsetgroup if offsetgroup is not None else _v
        if _v is not None:
            self["offsetgroup"] = _v
        _v = arg.pop("offsetsrc", None)
        _v = offsetsrc if offsetsrc is not None else _v
        if _v is not None:
            self["offsetsrc"] = _v
        _v = arg.pop("opacity", None)
        _v = opacity if opacity is not None else _v
        if _v is not None:
            self["opacity"] = _v
        _v = arg.pop("orientation", None)
        _v = orientation if orientation is not None else _v
        if _v is not None:
            self["orientation"] = _v
        _v = arg.pop("outsidetextfont", None)
        _v = outsidetextfont if outsidetextfont is not None else _v
        if _v is not None:
            self["outsidetextfont"] = _v
        _v = arg.pop("selectedpoints", None)
        _v = selectedpoints if selectedpoints is not None else _v
        if _v is not None:
            self["selectedpoints"] = _v
        _v = arg.pop("showlegend", None)
        _v = showlegend if showlegend is not None else _v
        if _v is not None:
            self["showlegend"] = _v
        _v = arg.pop("stream", None)
        _v = stream if stream is not None else _v
        if _v is not None:
            self["stream"] = _v
        _v = arg.pop("text", None)
        _v = text if text is not None else _v
        if _v is not None:
            self["text"] = _v
        _v = arg.pop("textangle", None)
        _v = textangle if textangle is not None else _v
        if _v is not None:
            self["textangle"] = _v
        _v = arg.pop("textfont", None)
        _v = textfont if textfont is not None else _v
        if _v is not None:
            self["textfont"] = _v
        _v = arg.pop("textinfo", None)
        _v = textinfo if textinfo is not None else _v
        if _v is not None:
            self["textinfo"] = _v
        _v = arg.pop("textposition", None)
        _v = textposition if textposition is not None else _v
        if _v is not None:
            self["textposition"] = _v
        _v = arg.pop("textpositionsrc", None)
        _v = textpositionsrc if textpositionsrc is not None else _v
        if _v is not None:
            self["textpositionsrc"] = _v
        _v = arg.pop("textsrc", None)
        _v = textsrc if textsrc is not None else _v
        if _v is not None:
            self["textsrc"] = _v
        _v = arg.pop("texttemplate", None)
        _v = texttemplate if texttemplate is not None else _v
        if _v is not None:
            self["texttemplate"] = _v
        _v = arg.pop("texttemplatesrc", None)
        _v = texttemplatesrc if texttemplatesrc is not None else _v
        if _v is not None:
            self["texttemplatesrc"] = _v
        _v = arg.pop("totals", None)
        _v = totals if totals is not None else _v
        if _v is not None:
            self["totals"] = _v
        _v = arg.pop("uid", None)
        _v = uid if uid is not None else _v
        if _v is not None:
            self["uid"] = _v
        _v = arg.pop("uirevision", None)
        _v = uirevision if uirevision is not None else _v
        if _v is not None:
            self["uirevision"] = _v
        _v = arg.pop("visible", None)
        _v = visible if visible is not None else _v
        if _v is not None:
            self["visible"] = _v
        _v = arg.pop("width", None)
        _v = width if width is not None else _v
        if _v is not None:
            self["width"] = _v
        _v = arg.pop("widthsrc", None)
        _v = widthsrc if widthsrc is not None else _v
        if _v is not None:
            self["widthsrc"] = _v
        _v = arg.pop("x", None)
        _v = x if x is not None else _v
        if _v is not None:
            self["x"] = _v
        _v = arg.pop("x0", None)
        _v = x0 if x0 is not None else _v
        if _v is not None:
            self["x0"] = _v
        _v = arg.pop("xaxis", None)
        _v = xaxis if xaxis is not None else _v
        if _v is not None:
            self["xaxis"] = _v
        _v = arg.pop("xhoverformat", None)
        _v = xhoverformat if xhoverformat is not None else _v
        if _v is not None:
            self["xhoverformat"] = _v
        _v = arg.pop("xperiod", None)
        _v = xperiod if xperiod is not None else _v
        if _v is not None:
            self["xperiod"] = _v
        _v = arg.pop("xperiod0", None)
        _v = xperiod0 if xperiod0 is not None else _v
        if _v is not None:
            self["xperiod0"] = _v
        _v = arg.pop("xperiodalignment", None)
        _v = xperiodalignment if xperiodalignment is not None else _v
        if _v is not None:
            self["xperiodalignment"] = _v
        _v = arg.pop("xsrc", None)
        _v = xsrc if xsrc is not None else _v
        if _v is not None:
            self["xsrc"] = _v
        _v = arg.pop("y", None)
        _v = y if y is not None else _v
        if _v is not None:
            self["y"] = _v
        _v = arg.pop("y0", None)
        _v = y0 if y0 is not None else _v
        if _v is not None:
            self["y0"] = _v
        _v = arg.pop("yaxis", None)
        _v = yaxis if yaxis is not None else _v
        if _v is not None:
            self["yaxis"] = _v
        _v = arg.pop("yhoverformat", None)
        _v = yhoverformat if yhoverformat is not None else _v
        if _v is not None:
            self["yhoverformat"] = _v
        _v = arg.pop("yperiod", None)
        _v = yperiod if yperiod is not None else _v
        if _v is not None:
            self["yperiod"] = _v
        _v = arg.pop("yperiod0", None)
        _v = yperiod0 if yperiod0 is not None else _v
        if _v is not None:
            self["yperiod0"] = _v
        _v = arg.pop("yperiodalignment", None)
        _v = yperiodalignment if yperiodalignment is not None else _v
        if _v is not None:
            self["yperiodalignment"] = _v
        _v = arg.pop("ysrc", None)
        _v = ysrc if ysrc is not None else _v
        if _v is not None:
            self["ysrc"] = _v
        _v = arg.pop("zorder", None)
        _v = zorder if zorder is not None else _v
        if _v is not None:
            self["zorder"] = _v
        # Read-only literals
        # ------------------
        self._props["type"] = "waterfall"
        arg.pop("type", None)
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
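The constructor above only copies whichever of the documented properties were supplied, so a caller normally passes them as keyword arguments. A minimal usage sketch (the figure values are illustrative, and `plotly.graph_objects` is assumed as the public entry point for this class):
```
import plotly.graph_objects as go

# Hypothetical revenue bridge: 'relative' bars accumulate and 'total'
# reports the running sum, as described under `measure` above.
fig = go.Figure(
    go.Waterfall(
        x=["Sales", "Refunds", "Net"],
        y=[100, -20, 0],
        measure=["relative", "relative", "total"],
        # `delta` and `final` are among the template variables listed above
        hovertemplate="%{x}: %{delta}<extra></extra>",
        texttemplate="%{final:$.0f}",  # d3-format syntax, e.g. "$80"
        textposition="outside",
    )
)
fig.show()
```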
 
 | 
	_waterfall.Waterfall.__init__ 
 | 
	Self-Contained 
 | 
					
	plotly.py 
 | 
	71 
 | 
	packages/python/plotly/plotly/offline/offline.py 
 | 
	def plot(
    figure_or_data,
    show_link=False,
    link_text="Export to plot.ly",
    validate=True,
    output_type="file",
    include_plotlyjs=True,
    filename="temp-plot.html",
    auto_open=True,
    image=None,
    image_filename="plot_image",
    image_width=800,
    image_height=600,
    config=None,
    include_mathjax=False,
    auto_play=True,
    animation_opts=None,
):
    """Create a plotly graph locally as an HTML document or string.
    Example:
    ```
    from plotly.offline import plot
    import plotly.graph_objs as go
    plot([go.Scatter(x=[1, 2, 3], y=[3, 2, 6])], filename='my-graph.html')
    # We can also download an image of the plot by setting the image parameter
    # to the image format we want
    plot([go.Scatter(x=[1, 2, 3], y=[3, 2, 6])], filename='my-graph.html',
         image='jpeg')
    ```
    More examples below.
    figure_or_data -- a plotly.graph_objs.Figure or plotly.graph_objs.Data or
                      dict or list that describes a Plotly graph.
                      See https://plot.ly/python/ for examples of
                      graph descriptions.
    Keyword arguments:
    show_link (default=False) -- display a link in the bottom-right corner
        of the chart that will export the chart to Plotly Cloud or
        Plotly Enterprise
    link_text (default='Export to plot.ly') -- the text of export link
    validate (default=True) -- validate that all of the keys in the figure
        are valid. Set to False if your version of plotly.js has become
        outdated relative to your version of graph_reference.json or if you
        need to include extra, unnecessary keys in your figure.
    output_type ('file' | 'div' - default 'file') -- if 'file', then
        the graph is saved as a standalone HTML file and `plot`
        returns None.
        If 'div', then `plot` returns a string that just contains the
        HTML <div> that contains the graph and the script to generate the
        graph.
        Use 'file' if you want to save and view a single graph at a time
        in a standalone HTML file.
        Use 'div' if you are embedding these graphs in an HTML file with
        other graphs or HTML markup, like an HTML report or a website.
    include_plotlyjs (True | False | 'cdn' | 'directory' | path - default=True)
        Specifies how the plotly.js library is included in the output html
        file or div string.
        If True, a script tag containing the plotly.js source code (~3MB)
        is included in the output.  HTML files generated with this option are
        fully self-contained and can be used offline.
        If 'cdn', a script tag that references the plotly.js CDN is included
        in the output. HTML files generated with this option are about 3MB
        smaller than those generated with include_plotlyjs=True, but they
        require an active internet connection in order to load the plotly.js
        library.
        If 'directory', a script tag is included that references an external
        plotly.min.js bundle that is assumed to reside in the same
        directory as the HTML file.  If output_type='file' then the
        plotly.min.js bundle is copied into the directory of the resulting
        HTML file. If a file named plotly.min.js already exists in the output
        directory then this file is left unmodified and no copy is performed.
        HTML files generated with this option can be used offline, but they
        require a copy of the plotly.min.js bundle in the same directory.
        This option is useful when many figures will be saved as HTML files in
        the same directory because the plotly.js source code will be included
        only once per output directory, rather than once per output file.
        If a string that ends in '.js', a script tag is included that
        references the specified path. This approach can be used to point
        the resulting HTML file to an alternative CDN.
        If False, no script tag referencing plotly.js is included. This is
        useful when output_type='div' and the resulting div string will be
        placed inside an HTML document that already loads plotly.js.  This
        option is not advised when output_type='file' as it will result in
        a non-functional html file.
    filename (default='temp-plot.html') -- The local filename to save the
        outputted chart to. If the filename already exists, it will be
        overwritten. This argument only applies if `output_type` is 'file'.
    auto_open (default=True) -- If True, open the saved file in a
        web browser after saving.
        This argument only applies if `output_type` is 'file'.
    image (default=None |'png' |'jpeg' |'svg' |'webp') -- This parameter sets
        the format of the image to be downloaded, if we choose to download an
        image. This parameter has a default value of None indicating that no
        image should be downloaded. Please note: for higher resolution images
        and more export options, consider making requests to our image servers.
        Type: `help(py.image)` for more details.
    image_filename (default='plot_image') -- Sets the name of the file your
        image will be saved to. The extension should not be included.
    image_height (default=600) -- Specifies the height of the image in `px`.
    image_width (default=800) -- Specifies the width of the image in `px`.
    config (default=None) -- Plot view options dictionary. Keyword arguments
        `show_link` and `link_text` set the associated options in this
        dictionary if it doesn't contain them already.
    include_mathjax (False | 'cdn' | path - default=False) --
        Specifies how the MathJax.js library is included in the output html
        file or div string.  MathJax is required in order to display labels
        with LaTeX typesetting.
        If False, no script tag referencing MathJax.js will be included in the
        output. HTML files generated with this option will not be able to
        display LaTeX typesetting.
        If 'cdn', a script tag that references a MathJax CDN location will be
        included in the output.  HTML files generated with this option will be
        able to display LaTeX typesetting as long as they have internet access.
        If a string that ends in '.js', a script tag is included that
        references the specified path. This approach can be used to point the
        resulting HTML file to an alternative CDN.
    auto_play (default=True) -- Whether to automatically start the animation
        sequence on page load if the figure contains frames. Has no effect if
        the figure does not contain frames.
    animation_opts (default=None) -- Dict of custom animation parameters that
        are used for the automatically started animation on page load. This
        dict is passed to the function Plotly.animate in Plotly.js. See
        https://github.com/plotly/plotly.js/blob/master/src/plots/animation_attributes.js
        for available options. Has no effect if the figure
        does not contain frames, or auto_play is False.
    Example:
    ```
    from plotly.offline import plot
    figure = {'data': [{'x': [0, 1], 'y': [0, 1]}],
              'layout': {'xaxis': {'range': [0, 5], 'autorange': False},
                         'yaxis': {'range': [0, 5], 'autorange': False},
                         'title': 'Start Title'},
              'frames': [{'data': [{'x': [1, 2], 'y': [1, 2]}]},
                         {'data': [{'x': [1, 4], 'y': [1, 4]}]},
                         {'data': [{'x': [3, 4], 'y': [3, 4]}],
                          'layout': {'title': 'End Title'}}]}
    plot(figure, animation_opts={'frame': {'duration': 1}})
    ```
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_offline.plot.txt 
 | 
	def plot(
    figure_or_data,
    show_link=False,
    link_text="Export to plot.ly",
    validate=True,
    output_type="file",
    include_plotlyjs=True,
    filename="temp-plot.html",
    auto_open=True,
    image=None,
    image_filename="plot_image",
    image_width=800,
    image_height=600,
    config=None,
    include_mathjax=False,
    auto_play=True,
    animation_opts=None,
):
    """Create a plotly graph locally as an HTML document or string.
    Example:
    ```
    from plotly.offline import plot
    import plotly.graph_objs as go
    plot([go.Scatter(x=[1, 2, 3], y=[3, 2, 6])], filename='my-graph.html')
    # We can also download an image of the plot by setting the image parameter
    # to the image format we want
    plot([go.Scatter(x=[1, 2, 3], y=[3, 2, 6])], filename='my-graph.html',
         image='jpeg')
    ```
    More examples below.
    figure_or_data -- a plotly.graph_objs.Figure or plotly.graph_objs.Data or
                      dict or list that describes a Plotly graph.
                      See https://plot.ly/python/ for examples of
                      graph descriptions.
    Keyword arguments:
    show_link (default=False) -- display a link in the bottom-right corner
        of the chart that will export the chart to Plotly Cloud or
        Plotly Enterprise
    link_text (default='Export to plot.ly') -- the text of export link
    validate (default=True) -- validate that all of the keys in the figure
        are valid. Set to False if your version of plotly.js has become
        outdated relative to your version of graph_reference.json or if you
        need to include extra, unnecessary keys in your figure.
    output_type ('file' | 'div' - default 'file') -- if 'file', then
        the graph is saved as a standalone HTML file and `plot`
        returns None.
        If 'div', then `plot` returns a string that just contains the
        HTML <div> that contains the graph and the script to generate the
        graph.
        Use 'file' if you want to save and view a single graph at a time
        in a standalone HTML file.
        Use 'div' if you are embedding these graphs in an HTML file with
        other graphs or HTML markup, like an HTML report or a website.
    include_plotlyjs (True | False | 'cdn' | 'directory' | path - default=True)
        Specifies how the plotly.js library is included in the output html
        file or div string.
        If True, a script tag containing the plotly.js source code (~3MB)
        is included in the output.  HTML files generated with this option are
        fully self-contained and can be used offline.
        If 'cdn', a script tag that references the plotly.js CDN is included
        in the output. HTML files generated with this option are about 3MB
        smaller than those generated with include_plotlyjs=True, but they
        require an active internet connection in order to load the plotly.js
        library.
        If 'directory', a script tag is included that references an external
        plotly.min.js bundle that is assumed to reside in the same
        directory as the HTML file.  If output_type='file' then the
        plotly.min.js bundle is copied into the directory of the resulting
        HTML file. If a file named plotly.min.js already exists in the output
        directory then this file is left unmodified and no copy is performed.
        HTML files generated with this option can be used offline, but they
        require a copy of the plotly.min.js bundle in the same directory.
        This option is useful when many figures will be saved as HTML files in
        the same directory because the plotly.js source code will be included
        only once per output directory, rather than once per output file.
        If a string that ends in '.js', a script tag is included that
        references the specified path. This approach can be used to point
        the resulting HTML file to an alternative CDN.
        If False, no script tag referencing plotly.js is included. This is
        useful when output_type='div' and the resulting div string will be
        placed inside an HTML document that already loads plotly.js.  This
        option is not advised when output_type='file' as it will result in
        a non-functional html file.
    filename (default='temp-plot.html') -- The local filename to save the
        outputted chart to. If the filename already exists, it will be
        overwritten. This argument only applies if `output_type` is 'file'.
    auto_open (default=True) -- If True, open the saved file in a
        web browser after saving.
        This argument only applies if `output_type` is 'file'.
    image (default=None |'png' |'jpeg' |'svg' |'webp') -- This parameter sets
        the format of the image to be downloaded, if we choose to download an
        image. This parameter has a default value of None indicating that no
        image should be downloaded. Please note: for higher resolution images
        and more export options, consider making requests to our image servers.
        Type: `help(py.image)` for more details.
    image_filename (default='plot_image') -- Sets the name of the file your
        image will be saved to. The extension should not be included.
    image_height (default=600) -- Specifies the height of the image in `px`.
    image_width (default=800) -- Specifies the width of the image in `px`.
    config (default=None) -- Plot view options dictionary. Keyword arguments
        `show_link` and `link_text` set the associated options in this
        dictionary if it doesn't contain them already.
    include_mathjax (False | 'cdn' | path - default=False) --
        Specifies how the MathJax.js library is included in the output html
        file or div string.  MathJax is required in order to display labels
        with LaTeX typesetting.
        If False, no script tag referencing MathJax.js will be included in the
        output. HTML files generated with this option will not be able to
        display LaTeX typesetting.
        If 'cdn', a script tag that references a MathJax CDN location will be
        included in the output.  HTML files generated with this option will be
        able to display LaTeX typesetting as long as they have internet access.
        If a string that ends in '.js', a script tag is included that
        references the specified path. This approach can be used to point the
        resulting HTML file to an alternative CDN.
    auto_play (default=True) -- Whether to automatically start the animation
        sequence on page load if the figure contains frames. Has no effect if
        the figure does not contain frames.
    animation_opts (default=None) -- Dict of custom animation parameters that
        are used for the automatically started animation on page load. This
        dict is passed to the function Plotly.animate in Plotly.js. See
        https://github.com/plotly/plotly.js/blob/master/src/plots/animation_attributes.js
        for available options. Has no effect if the figure
        does not contain frames, or auto_play is False.
    Example:
    ```
    from plotly.offline import plot
    figure = {'data': [{'x': [0, 1], 'y': [0, 1]}],
              'layout': {'xaxis': {'range': [0, 5], 'autorange': False},
                         'yaxis': {'range': [0, 5], 'autorange': False},
                         'title': 'Start Title'},
              'frames': [{'data': [{'x': [1, 2], 'y': [1, 2]}]},
                         {'data': [{'x': [1, 4], 'y': [1, 4]}]},
                         {'data': [{'x': [3, 4], 'y': [3, 4]}],
                          'layout': {'title': 'End Title'}}]}
    plot(figure, animation_opts={'frame': {'duration': 1}})
    ```
    """
    import plotly.io as pio
    # Output type
    if output_type not in ["div", "file"]:
        raise ValueError(
            "`output_type` argument must be 'div' or 'file'. "
            "You supplied `" + output_type + "``"
        )
    if not filename.endswith(".html") and output_type == "file":
        warnings.warn(
            "Your filename `" + filename + "` didn't end with .html. "
            "Adding .html to the end of your file."
        )
        filename += ".html"
    # Config
    config = dict(config) if config else {}
    config.setdefault("showLink", show_link)
    config.setdefault("linkText", link_text)
    figure = tools.return_figure_from_figure_or_data(figure_or_data, validate)
    width = figure.get("layout", {}).get("width", "100%")
    height = figure.get("layout", {}).get("height", "100%")
    if width == "100%" or height == "100%":
        config.setdefault("responsive", True)
    # Handle image request
    post_script = build_save_image_post_script(
        image, image_filename, image_height, image_width, "plot"
    )
    if output_type == "file":
        pio.write_html(
            figure,
            filename,
            config=config,
            auto_play=auto_play,
            include_plotlyjs=include_plotlyjs,
            include_mathjax=include_mathjax,
            post_script=post_script,
            full_html=True,
            validate=validate,
            animation_opts=animation_opts,
            auto_open=auto_open,
        )
        return filename
    else:
        return pio.to_html(
            figure,
            config=config,
            auto_play=auto_play,
            include_plotlyjs=include_plotlyjs,
            include_mathjax=include_mathjax,
            post_script=post_script,
            full_html=False,
            validate=validate,
            animation_opts=animation_opts,
        )
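Since `output_type='div'` makes `plot` return an HTML fragment instead of writing a file, a common pattern is to embed that fragment in a larger page. A short sketch of that usage (the file name and surrounding markup are illustrative only):
```
from plotly.offline import plot
import plotly.graph_objs as go

# Build the <div> string without writing a file; include_plotlyjs='cdn'
# keeps it small but requires internet access to load plotly.js.
div = plot(
    [go.Scatter(x=[1, 2, 3], y=[3, 2, 6])],
    output_type="div",
    include_plotlyjs="cdn",
)

with open("report.html", "w") as f:
    f.write("<html><body><h1>Report</h1>%s</body></html>" % div)
```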
 
 | 
	offline.plot 
 | 
	File-Level 
 | 
					
	plotly.py 
 | 
	72 
 | 
	packages/python/plotly/_plotly_utils/png.py 
 | 
	    def __init__(
        self,
        width=None,
        height=None,
        size=None,
        greyscale=Default,
        alpha=False,
        bitdepth=8,
        palette=None,
        transparent=None,
        background=None,
        gamma=None,
        compression=None,
        interlace=False,
        planes=None,
        colormap=None,
        maxval=None,
        chunk_limit=2**20,
        x_pixels_per_unit=None,
        y_pixels_per_unit=None,
        unit_is_meter=False,
    ):
        """
        Create a PNG encoder object.
        Arguments:
        width, height
          Image size in pixels, as two separate arguments.
        size
          Image size (w,h) in pixels, as single argument.
        greyscale
          Pixels are greyscale, not RGB.
        alpha
          Input data has alpha channel (RGBA or LA).
        bitdepth
          Bit depth: from 1 to 16 (for each channel).
        palette
          Create a palette for a colour mapped image (colour type 3).
        transparent
          Specify a transparent colour (create a ``tRNS`` chunk).
        background
          Specify a default background colour (create a ``bKGD`` chunk).
        gamma
          Specify a gamma value (create a ``gAMA`` chunk).
        compression
          zlib compression level: 0 (none) to 9 (more compressed);
          default: -1 or None.
        interlace
          Create an interlaced image.
        chunk_limit
          Write multiple ``IDAT`` chunks to save memory.
        x_pixels_per_unit
          Number of pixels per unit along the x axis (write a
          `pHYs` chunk).
        y_pixels_per_unit
          Number of pixels per unit along the y axis (write a
          `pHYs` chunk). Along with `x_pixels_per_unit`, this gives
          the pixel size ratio.
        unit_is_meter
          `True` to indicate that the unit (for the `pHYs`
          chunk) is metre.
        The image size (in pixels) can be specified either by using the
        `width` and `height` arguments, or with the single `size`
        argument.
        If `size` is used it should be a pair (*width*, *height*).
        The `greyscale` argument indicates whether input pixels
        are greyscale (when true), or colour (when false).
        The default is true unless `palette=` is used.
        The `alpha` argument (a boolean) specifies
        whether input pixels have an alpha channel (or not).
        `bitdepth` specifies the bit depth of the source pixel values.
        Each channel may have a different bit depth.
        Each source pixel must have values that are
        an integer between 0 and ``2**bitdepth-1``, where
        `bitdepth` is the bit depth for the corresponding channel.
        For example, 8-bit images have values between 0 and 255.
        PNG only stores images with bit depths of
        1,2,4,8, or 16 (the same for all channels).
        When `bitdepth` is not one of these values or where
        channels have different bit depths,
        the next highest valid bit depth is selected,
        and an ``sBIT`` (significant bits) chunk is generated
        that specifies the original precision of the source image.
        In this case the supplied pixel values will be rescaled to
        fit the range of the selected bit depth.
        The PNG file format supports many bit depth / colour model
        combinations, but not all.
        The details are somewhat arcane
        (refer to the PNG specification for full details).
        Briefly:
        Bit depths < 8 (1,2,4) are only allowed with greyscale and
        colour mapped images;
        colour mapped images cannot have bit depth 16.
        For colour mapped images
        (in other words, when the `palette` argument is specified)
        the `bitdepth` argument must match one of
        the valid PNG bit depths: 1, 2, 4, or 8.
        (It is valid to have a PNG image with a palette and
        an ``sBIT`` chunk, but the meaning is slightly different;
        it would be awkward to use the `bitdepth` argument for this.)
        The `palette` option, when specified,
        causes a colour mapped image to be created:
        the PNG colour type is set to 3;
        `greyscale` must not be true; `alpha` must not be true;
        `transparent` must not be set.
        The bit depth must be 1,2,4, or 8.
        When a colour mapped image is created,
        the pixel values are palette indexes and
        the `bitdepth` argument specifies the size of these indexes
        (not the size of the colour values in the palette).
        The palette argument value should be a sequence of 3- or
        4-tuples.
        3-tuples specify RGB palette entries;
        4-tuples specify RGBA palette entries.
        All the 4-tuples (if present) must come before all the 3-tuples.
        A ``PLTE`` chunk is created;
        if there are 4-tuples then a ``tRNS`` chunk is created as well.
        The ``PLTE`` chunk will contain all the RGB triples in the same
        sequence;
        the ``tRNS`` chunk will contain the alpha channel for
        all the 4-tuples, in the same sequence.
        Palette entries are always 8-bit.
        If specified, the `transparent` and `background` parameters must be
        a tuple with one element for each channel in the image.
        Either a 3-tuple of integer (RGB) values for a colour image, or
        a 1-tuple of a single integer for a greyscale image.
        If specified, the `gamma` parameter must be a positive number
        (generally, a `float`).
        A ``gAMA`` chunk will be created.
        Note that this will not change the values of the pixels as
        they appear in the PNG file,
        they are assumed to have already
        been converted appropriately for the gamma specified.
        The `compression` argument specifies the compression level to
        be used by the ``zlib`` module.
        Values from 1 to 9 (highest) specify compression.
        0 means no compression.
        -1 and ``None`` both mean that the ``zlib`` module uses
        the default level of compression (which is generally acceptable).
        If `interlace` is true then an interlaced image is created
        (using PNG's so far only interlace method, *Adam7*).
        This does not affect how the pixels should be passed in,
        rather it changes how they are arranged into the PNG file.
        On slow connexions interlaced images can be
        partially decoded by the browser to give
        a rough view of the image that is
        successively refined as more image data appears.
        .. note ::
          Enabling the `interlace` option requires the entire image
          to be processed in working memory.
        `chunk_limit` is used to limit the amount of memory used whilst
        compressing the image.
        In order to avoid using large amounts of memory,
        multiple ``IDAT`` chunks may be created.
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_png.Writer.__init__.txt 
 | 
	    def __init__(
        self,
        width=None,
        height=None,
        size=None,
        greyscale=Default,
        alpha=False,
        bitdepth=8,
        palette=None,
        transparent=None,
        background=None,
        gamma=None,
        compression=None,
        interlace=False,
        planes=None,
        colormap=None,
        maxval=None,
        chunk_limit=2**20,
        x_pixels_per_unit=None,
        y_pixels_per_unit=None,
        unit_is_meter=False,
    ):
        """
        Create a PNG encoder object.
        Arguments:
        width, height
          Image size in pixels, as two separate arguments.
        size
          Image size (w,h) in pixels, as single argument.
        greyscale
          Pixels are greyscale, not RGB.
        alpha
          Input data has alpha channel (RGBA or LA).
        bitdepth
          Bit depth: from 1 to 16 (for each channel).
        palette
          Create a palette for a colour mapped image (colour type 3).
        transparent
          Specify a transparent colour (create a ``tRNS`` chunk).
        background
          Specify a default background colour (create a ``bKGD`` chunk).
        gamma
          Specify a gamma value (create a ``gAMA`` chunk).
        compression
          zlib compression level: 0 (none) to 9 (more compressed);
          default: -1 or None.
        interlace
          Create an interlaced image.
        chunk_limit
          Write multiple ``IDAT`` chunks to save memory.
        x_pixels_per_unit
          Number of pixels per unit along the x axis (write a
          `pHYs` chunk).
        y_pixels_per_unit
          Number of pixels per unit along the y axis (write a
          `pHYs` chunk). Along with `x_pixels_per_unit`, this gives
          the pixel size ratio.
        unit_is_meter
          `True` to indicate that the unit (for the `pHYs`
          chunk) is metre.
        The image size (in pixels) can be specified either by using the
        `width` and `height` arguments, or with the single `size`
        argument.
        If `size` is used it should be a pair (*width*, *height*).
        The `greyscale` argument indicates whether input pixels
        are greyscale (when true), or colour (when false).
        The default is true unless `palette=` is used.
        The `alpha` argument (a boolean) specifies
        whether input pixels have an alpha channel (or not).
        `bitdepth` specifies the bit depth of the source pixel values.
        Each channel may have a different bit depth.
        Each source pixel must have values that are
        an integer between 0 and ``2**bitdepth-1``, where
        `bitdepth` is the bit depth for the corresponding channel.
        For example, 8-bit images have values between 0 and 255.
        PNG only stores images with bit depths of
        1,2,4,8, or 16 (the same for all channels).
        When `bitdepth` is not one of these values or where
        channels have different bit depths,
        the next highest valid bit depth is selected,
        and an ``sBIT`` (significant bits) chunk is generated
        that specifies the original precision of the source image.
        In this case the supplied pixel values will be rescaled to
        fit the range of the selected bit depth.
        The PNG file format supports many bit depth / colour model
        combinations, but not all.
        The details are somewhat arcane
        (refer to the PNG specification for full details).
        Briefly:
        Bit depths < 8 (1,2,4) are only allowed with greyscale and
        colour mapped images;
        colour mapped images cannot have bit depth 16.
        For colour mapped images
        (in other words, when the `palette` argument is specified)
        the `bitdepth` argument must match one of
        the valid PNG bit depths: 1, 2, 4, or 8.
        (It is valid to have a PNG image with a palette and
        an ``sBIT`` chunk, but the meaning is slightly different;
        it would be awkward to use the `bitdepth` argument for this.)
        The `palette` option, when specified,
        causes a colour mapped image to be created:
        the PNG colour type is set to 3;
        `greyscale` must not be true; `alpha` must not be true;
        `transparent` must not be set.
        The bit depth must be 1,2,4, or 8.
        When a colour mapped image is created,
        the pixel values are palette indexes and
        the `bitdepth` argument specifies the size of these indexes
        (not the size of the colour values in the palette).
        The palette argument value should be a sequence of 3- or
        4-tuples.
        3-tuples specify RGB palette entries;
        4-tuples specify RGBA palette entries.
        All the 4-tuples (if present) must come before all the 3-tuples.
        A ``PLTE`` chunk is created;
        if there are 4-tuples then a ``tRNS`` chunk is created as well.
        The ``PLTE`` chunk will contain all the RGB triples in the same
        sequence;
        the ``tRNS`` chunk will contain the alpha channel for
        all the 4-tuples, in the same sequence.
        Palette entries are always 8-bit.
        If specified, the `transparent` and `background` parameters must be
        a tuple with one element for each channel in the image.
        Either a 3-tuple of integer (RGB) values for a colour image, or
        a 1-tuple of a single integer for a greyscale image.
        If specified, the `gamma` parameter must be a positive number
        (generally, a `float`).
        A ``gAMA`` chunk will be created.
        Note that this will not change the values of the pixels as
        they appear in the PNG file,
        they are assumed to have already
        been converted appropriately for the gamma specified.
        The `compression` argument specifies the compression level to
        be used by the ``zlib`` module.
        Values from 1 to 9 (highest) specify compression.
        0 means no compression.
        -1 and ``None`` both mean that the ``zlib`` module uses
        the default level of compression (which is generally acceptable).
        If `interlace` is true then an interlaced image is created
        (using PNG's so far only interlace method, *Adam7*).
        This does not affect how the pixels should be passed in,
        rather it changes how they are arranged into the PNG file.
        On slow connections interlaced images can be
        partially decoded by the browser to give
        a rough view of the image that is
        successively refined as more image data appears.
        .. note::
          Enabling the `interlace` option requires the entire image
          to be processed in working memory.
        `chunk_limit` is used to limit the amount of memory used whilst
        compressing the image.
        In order to avoid using large amounts of memory,
        multiple ``IDAT`` chunks may be created.
        """
        # At the moment the `planes` argument is ignored;
        # its purpose is to act as a dummy so that
        # ``Writer(x, y, **info)`` works, where `info` is a dictionary
        # returned by Reader.read and friends.
        # Ditto for `colormap`.
        width, height = check_sizes(size, width, height)
        del size
        if not is_natural(width) or not is_natural(height):
            raise ProtocolError("width and height must be integers")
        if width <= 0 or height <= 0:
            raise ProtocolError("width and height must be greater than zero")
        # http://www.w3.org/TR/PNG/#7Integers-and-byte-order
        if width > 2**31 - 1 or height > 2**31 - 1:
            raise ProtocolError("width and height cannot exceed 2**31-1")
        if alpha and transparent is not None:
            raise ProtocolError("transparent colour not allowed with alpha channel")
        # bitdepth is either single integer, or tuple of integers.
        # Convert to tuple.
        try:
            len(bitdepth)
        except TypeError:
            bitdepth = (bitdepth,)
        for b in bitdepth:
            valid = is_natural(b) and 1 <= b <= 16
            if not valid:
                raise ProtocolError(
                    "each bitdepth %r must be a positive integer <= 16" % (bitdepth,)
                )
        # Calculate channels, and
        # expand bitdepth to be one element per channel.
        palette = check_palette(palette)
        alpha = bool(alpha)
        colormap = bool(palette)
        if greyscale is Default and palette:
            greyscale = False
        greyscale = bool(greyscale)
        if colormap:
            color_planes = 1
            planes = 1
        else:
            color_planes = (3, 1)[greyscale]
            planes = color_planes + alpha
        if len(bitdepth) == 1:
            bitdepth *= planes
        bitdepth, self.rescale = check_bitdepth_rescale(
            palette, bitdepth, transparent, alpha, greyscale
        )
        # These are assertions, because the above logic should have
        # corrected or raised all problematic cases.
        if bitdepth < 8:
            assert greyscale or palette
            assert not alpha
        if bitdepth > 8:
            assert not palette
        transparent = check_color(transparent, greyscale, "transparent")
        background = check_color(background, greyscale, "background")
        # It's important that the true boolean values
        # (greyscale, alpha, colormap, interlace) are converted
        # to bool because Iverson's convention is relied upon later on.
        self.width = width
        self.height = height
        self.transparent = transparent
        self.background = background
        self.gamma = gamma
        self.greyscale = greyscale
        self.alpha = alpha
        self.colormap = colormap
        self.bitdepth = int(bitdepth)
        self.compression = compression
        self.chunk_limit = chunk_limit
        self.interlace = bool(interlace)
        self.palette = palette
        self.x_pixels_per_unit = x_pixels_per_unit
        self.y_pixels_per_unit = y_pixels_per_unit
        self.unit_is_meter = bool(unit_is_meter)
        self.color_type = 4 * self.alpha + 2 * (not greyscale) + 1 * self.colormap
        assert self.color_type in (0, 2, 3, 4, 6)
        self.color_planes = color_planes
        self.planes = planes
        # :todo: fix for bitdepth < 8
        self.psize = (self.bitdepth / 8) * self.planes
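A minimal usage sketch of the colour-mapped path described in the docstring above. It assumes the standard pypng-style ``Writer.write(outfile, rows)`` method (not shown in this excerpt) and that the module is importable as ``png``; both are assumptions for illustration, not part of the original source.

import png  # assumed import name for the module documented above

# Palette with one RGBA entry (4-tuples must precede the 3-tuples), so the
# writer emits a PLTE chunk plus a tRNS chunk holding the alpha value.
palette = [
    (255, 0, 0, 128),  # RGBA -> alpha goes into tRNS
    (0, 255, 0),       # RGB  -> PLTE only
    (0, 0, 255),
]
writer = png.Writer(width=2, height=2, palette=palette, bitdepth=8)
rows = [[0, 1], [2, 0]]  # pixel values are palette indexes, not colours
with open("palette_demo.png", "wb") as f:
    writer.write(f, rows)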
 
 | 
	png.Writer.__init__ 
 | 
	File-Level 
 | 
					
	plotly.py 
 | 
	73 
 | 
	packages/python/plotly/_plotly_utils/png.py 
 | 
	def from_array(a, mode=None, info={}):
    """
    Create a PNG :class:`Image` object from a 2-dimensional array.
    One application of this function is easy PIL-style saving:
    ``png.from_array(pixels, 'L').save('foo.png')``.
    Unless they are specified using the *info* parameter,
    the PNG's height and width are taken from the array size.
    The first axis is the height; the second axis is the
    ravelled width and channel index.
    The array is treated as a sequence of rows,
    each row being a sequence of values (``width*channels`` in number).
    So an RGB image that is 16 pixels high and 8 wide will
    occupy a 2-dimensional array that is 16x24
    (each row will be 8*3 = 24 sample values).
    *mode* is a string that specifies the image colour format in a
    PIL-style mode.  It can be:
    ``'L'``
      greyscale (1 channel)
    ``'LA'``
      greyscale with alpha (2 channel)
    ``'RGB'``
      colour image (3 channel)
    ``'RGBA'``
      colour image with alpha (4 channel)
    The mode string can also specify the bit depth
    (overriding how this function normally derives the bit depth,
    see below).
    Appending ``';16'`` to the mode will cause the PNG to be
    16 bits per channel;
    any decimal from 1 to 16 can be used to specify the bit depth.
    When a 2-dimensional array is used *mode* determines how many
    channels the image has, and so allows the width to be derived from
    the second array dimension.
    The array is expected to be a ``numpy`` array,
    but it can be any suitable Python sequence.
    For example, a list of lists can be used:
    ``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``.
    The exact rules are: ``len(a)`` gives the first dimension, height;
    ``len(a[0])`` gives the second dimension.
    It's slightly more complicated than that because
    an iterator of rows can be used, and it all still works.
    Using an iterator allows data to be streamed efficiently.
    The bit depth of the PNG is normally taken from
    the array element's datatype
    (but if *mode* specifies a bitdepth then that is used instead).
    The array element's datatype is determined in a way which
    is supposed to work both for ``numpy`` arrays and for Python
    ``array.array`` objects.
    A 1 byte datatype will give a bit depth of 8,
    a 2 byte datatype will give a bit depth of 16.
    If the datatype does not have an implicit size,
    like the above example where it is a plain Python list of lists,
    then a default of 8 is used.
    The *info* parameter is a dictionary that can
    be used to specify metadata (in the same style as
    the arguments to the :class:`png.Writer` class).
    For this function the keys that are useful are:
    height
      overrides the height derived from the array dimensions and
      allows *a* to be an iterable.
    width
      overrides the width derived from the array dimensions.
    bitdepth
      overrides the bit depth derived from the element datatype
      (but must match *mode* if that also specifies a bit depth).
    Generally anything specified in the *info* dictionary will
    override any implicit choices that this function would otherwise make,
    but must match any explicit ones.
    For example, if the *info* dictionary has a ``greyscale`` key then
    this must be true when mode is ``'L'`` or ``'LA'`` and
    false when mode is ``'RGB'`` or ``'RGBA'``.
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_png.from_array.txt 
 | 
	def from_array(a, mode=None, info={}):
    """
    Create a PNG :class:`Image` object from a 2-dimensional array.
    One application of this function is easy PIL-style saving:
    ``png.from_array(pixels, 'L').save('foo.png')``.
    Unless they are specified using the *info* parameter,
    the PNG's height and width are taken from the array size.
    The first axis is the height; the second axis is the
    ravelled width and channel index.
    The array is treated as a sequence of rows,
    each row being a sequence of values (``width*channels`` in number).
    So an RGB image that is 16 pixels high and 8 wide will
    occupy a 2-dimensional array that is 16x24
    (each row will be 8*3 = 24 sample values).
    *mode* is a string that specifies the image colour format in a
    PIL-style mode.  It can be:
    ``'L'``
      greyscale (1 channel)
    ``'LA'``
      greyscale with alpha (2 channel)
    ``'RGB'``
      colour image (3 channel)
    ``'RGBA'``
      colour image with alpha (4 channel)
    The mode string can also specify the bit depth
    (overriding how this function normally derives the bit depth,
    see below).
    Appending ``';16'`` to the mode will cause the PNG to be
    16 bits per channel;
    any decimal from 1 to 16 can be used to specify the bit depth.
    When a 2-dimensional array is used *mode* determines how many
    channels the image has, and so allows the width to be derived from
    the second array dimension.
    The array is expected to be a ``numpy`` array,
    but it can be any suitable Python sequence.
    For example, a list of lists can be used:
    ``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``.
    The exact rules are: ``len(a)`` gives the first dimension, height;
    ``len(a[0])`` gives the second dimension.
    It's slightly more complicated than that because
    an iterator of rows can be used, and it all still works.
    Using an iterator allows data to be streamed efficiently.
    The bit depth of the PNG is normally taken from
    the array element's datatype
    (but if *mode* specifies a bitdepth then that is used instead).
    The array element's datatype is determined in a way which
    is supposed to work both for ``numpy`` arrays and for Python
    ``array.array`` objects.
    A 1 byte datatype will give a bit depth of 8,
    a 2 byte datatype will give a bit depth of 16.
    If the datatype does not have an implicit size,
    like the above example where it is a plain Python list of lists,
    then a default of 8 is used.
    The *info* parameter is a dictionary that can
    be used to specify metadata (in the same style as
    the arguments to the :class:`png.Writer` class).
    For this function the keys that are useful are:
    height
      overrides the height derived from the array dimensions and
      allows *a* to be an iterable.
    width
      overrides the width derived from the array dimensions.
    bitdepth
      overrides the bit depth derived from the element datatype
      (but must match *mode* if that also specifies a bit depth).
    Generally anything specified in the *info* dictionary will
    override any implicit choices that this function would otherwise make,
    but must match any explicit ones.
    For example, if the *info* dictionary has a ``greyscale`` key then
    this must be true when mode is ``'L'`` or ``'LA'`` and
    false when mode is ``'RGB'`` or ``'RGBA'``.
    """
    # We abuse the *info* parameter by modifying it.  Take a copy here.
    # (Also typechecks *info* to some extent).
    info = dict(info)
    # Syntax check mode string.
    match = RegexModeDecode.match(mode)
    if not match:
        raise Error("mode string should be 'RGB' or 'L;16' or similar.")
    mode, bitdepth = match.groups()
    if bitdepth:
        bitdepth = int(bitdepth)
    # Colour format.
    if "greyscale" in info:
        if bool(info["greyscale"]) != ("L" in mode):
            raise ProtocolError("info['greyscale'] should match mode.")
    info["greyscale"] = "L" in mode
    alpha = "A" in mode
    if "alpha" in info:
        if bool(info["alpha"]) != alpha:
            raise ProtocolError("info['alpha'] should match mode.")
    info["alpha"] = alpha
    # Get bitdepth from *mode* if possible.
    if bitdepth:
        if info.get("bitdepth") and bitdepth != info["bitdepth"]:
            raise ProtocolError(
                "bitdepth (%d) should match bitdepth of info (%d)."
                % (bitdepth, info["bitdepth"])
            )
        info["bitdepth"] = bitdepth
    # Fill in and/or check entries in *info*.
    # Dimensions.
    width, height = check_sizes(info.get("size"), info.get("width"), info.get("height"))
    if width:
        info["width"] = width
    if height:
        info["height"] = height
    if "height" not in info:
        try:
            info["height"] = len(a)
        except TypeError:
            raise ProtocolError("len(a) does not work, supply info['height'] instead.")
    planes = len(mode)
    if "planes" in info:
        if info["planes"] != planes:
            raise Error("info['planes'] should match mode.")
    # In order to work out whether the array is 2D or 3D we need its
    # first row, which requires that we take a copy of its iterator.
    # We may also need the first row to derive width and bitdepth.
    a, t = itertools.tee(a)
    row = next(t)
    del t
    testelement = row
    if "width" not in info:
        width = len(row) // planes
        info["width"] = width
    if "bitdepth" not in info:
        try:
            dtype = testelement.dtype
            # goto the "else:" clause.  Sorry.
        except AttributeError:
            try:
                # Try a Python array.array.
                bitdepth = 8 * testelement.itemsize
            except AttributeError:
                # We can't determine it from the array element's datatype,
                # use a default of 8.
                bitdepth = 8
        else:
            # If we got here without exception,
            # we now assume that the array is a numpy array.
            if dtype.kind == "b":
                bitdepth = 1
            else:
                bitdepth = 8 * dtype.itemsize
        info["bitdepth"] = bitdepth
    for thing in ["width", "height", "bitdepth", "greyscale", "alpha"]:
        assert thing in info
    return Image(a, info)
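As a quick illustration of the rules above (not part of the original source), the following sketch builds an 8-bit greyscale image from a plain list of lists and a 16-bit one by overriding the bit depth through the mode string; ``save`` is the PIL-style helper mentioned in the docstring.

import png  # assumed import name for the module documented above

# Plain list of lists: the bit depth defaults to 8 because the elements
# carry no datatype information.
png.from_array([[0, 128, 255],
                [255, 128, 0]], mode='L').save('grey8.png')

# ';16' in the mode string overrides the derived bit depth.
png.from_array([[0, 32768, 65535],
                [65535, 32768, 0]], mode='L;16').save('grey16.png')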
 
 | 
	png.from_array 
 | 
	File-Level 
 | 
					
	plotly.py 
 | 
	74 
 | 
	packages/python/plotly/plotly/subplots.py 
 | 
	def make_subplots(
    rows=1,
    cols=1,
    shared_xaxes=False,
    shared_yaxes=False,
    start_cell="top-left",
    print_grid=False,
    horizontal_spacing=None,
    vertical_spacing=None,
    subplot_titles=None,
    column_widths=None,
    row_heights=None,
    specs=None,
    insets=None,
    column_titles=None,
    row_titles=None,
    x_title=None,
    y_title=None,
    figure=None,
    **kwargs,
) -> go.Figure:
    """
    Return an instance of plotly.graph_objs.Figure with predefined subplots
    configured in 'layout'.
    Parameters
    ----------
    rows: int (default 1)
        Number of rows in the subplot grid. Must be greater than zero.
    cols: int (default 1)
        Number of columns in the subplot grid. Must be greater than zero.
    shared_xaxes: boolean or str (default False)
        Assign shared (linked) x-axes for 2D cartesian subplots
          - True or 'columns': Share axes among subplots in the same column
          - 'rows': Share axes among subplots in the same row
          - 'all': Share axes across all subplots in the grid.
    shared_yaxes: boolean or str (default False)
        Assign shared (linked) y-axes for 2D cartesian subplots
          - 'columns': Share axes among subplots in the same column
          - True or 'rows': Share axes among subplots in the same row
          - 'all': Share axes across all subplots in the grid.
    start_cell: 'bottom-left' or 'top-left' (default 'top-left')
        Choose the starting cell in the subplot grid used to set the
        domains_grid of the subplots.
          - 'top-left': Subplots are numbered with (1, 1) in the top
                        left corner
          - 'bottom-left': Subplots are numbered with (1, 1) in the bottom
                           left corner
    print_grid: boolean (default False):
        If True, prints a string representation of the plot grid.  Grid may
        also be printed using the `Figure.print_grid()` method on the
        resulting figure.
    horizontal_spacing: float (default 0.2 / cols)
        Space between subplot columns in normalized plot coordinates. Must be
        a float between 0 and 1.
        Applies to all columns (use 'specs' for subplot-dependent spacing)
    vertical_spacing: float (default 0.3 / rows)
        Space between subplot rows in normalized plot coordinates. Must be
        a float between 0 and 1.
        Applies to all rows (use 'specs' for subplot-dependent spacing)
    subplot_titles: list of str or None (default None)
        Title of each subplot as a list in row-major ordering.
        Empty strings ("") can be included in the list if no subplot title
        is desired in that space so that the titles are properly indexed.
    specs: list of lists of dict or None (default None)
        Per subplot specifications of subplot type, row/column spanning, and
        spacing.
        ex1: specs=[[{}, {}], [{'colspan': 2}, None]]
        ex2: specs=[[{'rowspan': 2}, {}], [None, {}]]
        - Indices of the outer list correspond to subplot grid rows
          starting from the top, if start_cell='top-left',
          or bottom, if start_cell='bottom-left'.
          The number of rows in 'specs' must be equal to 'rows'.
        - Indices of the inner lists correspond to subplot grid columns
          starting from the left. The number of columns in 'specs'
          must be equal to 'cols'.
        - Each item in the 'specs' list corresponds to one subplot
          in a subplot grid. (N.B. The subplot grid has exactly 'rows'
          times 'cols' cells.)
        - Use None for a blank subplot cell (or to move past a col/row span).
        - Note that specs[0][0] has the specs of the 'start_cell' subplot.
        - Each item in 'specs' is a dictionary.
            The available keys are:
            * type (string, default 'xy'): Subplot type. One of
                - 'xy': 2D Cartesian subplot type for scatter, bar, etc.
                - 'scene': 3D Cartesian subplot for scatter3d, cone, etc.
                - 'polar': Polar subplot for scatterpolar, barpolar, etc.
                - 'ternary': Ternary subplot for scatterternary
                - 'map': Map subplot for scattermap
                - 'mapbox': Mapbox subplot for scattermapbox
                - 'domain': Subplot type for traces that are individually
                            positioned. pie, parcoords, parcats, etc.
                - trace type: A trace type which will be used to determine
                              the appropriate subplot type for that trace
            * secondary_y (bool, default False): If True, create a secondary
                y-axis positioned on the right side of the subplot. Only valid
                if type='xy'.
            * colspan (int, default 1): number of subplot columns
                for this subplot to span.
            * rowspan (int, default 1): number of subplot rows
                for this subplot to span.
            * l (float, default 0.0): padding left of cell
            * r (float, default 0.0): padding right of cell
            * t (float, default 0.0): padding top of cell
            * b (float, default 0.0): padding bottom of cell
        - Note: Use 'horizontal_spacing' and 'vertical_spacing' to adjust
          the spacing in between the subplots.
    insets: list of dict or None (default None):
        Inset specifications.  Insets are subplots that overlay grid subplots
        - Each item in 'insets' is a dictionary.
            The available keys are:
            * cell (tuple, default=(1,1)): (row, col) index of the
                subplot cell to overlay inset axes onto.
            * type (string, default 'xy'): Subplot type
            * l (float, default=0.0): padding left of inset
                  in fraction of cell width
            * w (float or 'to_end', default='to_end') inset width
                  in fraction of cell width ('to_end': to cell right edge)
            * b (float, default=0.0): padding bottom of inset
                  in fraction of cell height
            * h (float or 'to_end', default='to_end') inset height
                  in fraction of cell height ('to_end': to cell top edge)
    column_widths: list of numbers or None (default None)
        list of length `cols` of the relative widths of each column of subplots.
        Values are normalized internally and used to distribute overall width
        of the figure (excluding padding) among the columns.
        For backward compatibility, may also be specified using the
        `column_width` keyword argument.
    row_heights: list of numbers or None (default None)
        list of length `rows` of the relative heights of each row of subplots.
        If start_cell='top-left' then row heights are applied top to bottom.
        Otherwise, if start_cell='bottom-left' then row heights are applied
        bottom to top.
        For backward compatibility, may also be specified using the
        `row_width` kwarg. If specified as `row_width`, then the width values
        are applied from bottom to top regardless of the value of start_cell.
        This matches the legacy behavior of the `row_width` argument.
    column_titles: list of str or None (default None)
        list of length `cols` of titles to place above the top subplot in
        each column.
    row_titles: list of str or None (default None)
        list of length `rows` of titles to place on the right side of each
        row of subplots. If start_cell='top-left' then row titles are
        applied top to bottom. Otherwise, if start_cell='bottom-left' then
        row titles are applied bottom to top.
    x_title: str or None (default None)
        Title to place below the bottom row of subplots,
        centered horizontally
    y_title: str or None (default None)
        Title to place to the left of the left column of subplots,
        centered vertically
    figure: go.Figure or None (default None)
        If None, a new go.Figure instance will be created and its axes will be
        populated with those corresponding to the requested subplot geometry and
        this new figure will be returned.
        If a go.Figure instance, the axes will be added to the
        layout of this figure and this figure will be returned. If the figure
        already contains axes, they will be overwritten.
    Examples
    --------
    Example 1:
    >>> # Stack two subplots vertically, and add a scatter trace to each
    >>> from plotly.subplots import make_subplots
    >>> import plotly.graph_objects as go
    >>> fig = make_subplots(rows=2)
    This is the format of your plot grid:
    [ (1,1) xaxis1,yaxis1 ]
    [ (2,1) xaxis2,yaxis2 ]
    >>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    or see Figure.append_trace
    Example 2:
    >>> # Stack two subplots vertically with shared x-axes
    >>> fig = make_subplots(rows=2, shared_xaxes=True)
    This is the format of your plot grid:
    [ (1,1) xaxis1,yaxis1 ]
    [ (2,1) xaxis2,yaxis2 ]
    >>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    Example 3:
    >>> # irregular subplot layout (more examples below under 'specs')
    >>> fig = make_subplots(rows=2, cols=2,
    ...                     specs=[[{}, {}],
    ...                     [{'colspan': 2}, None]])
    This is the format of your plot grid:
    [ (1,1) xaxis1,yaxis1 ]  [ (1,2) xaxis2,yaxis2 ]
    [ (2,1) xaxis3,yaxis3           -              ]
    >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=2) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    Example 4:
    >>> # insets
    >>> fig = make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}])
    This is the format of your plot grid:
    [ (1,1) xaxis1,yaxis1 ]
    With insets:
    [ xaxis2,yaxis2 ] over [ (1,1) xaxis1,yaxis1 ]
    >>> fig.add_scatter(x=[1,2,3], y=[2,1,1]) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2') # doctest: +ELLIPSIS
    Figure(...)
    Example 5:
    >>> # include subplot titles
    >>> fig = make_subplots(rows=2, subplot_titles=('Plot 1','Plot 2'))
    This is the format of your plot grid:
    [ (1,1) x1,y1 ]
    [ (2,1) x2,y2 ]
    >>> fig.add_scatter(x=[1,2,3], y=[2,1,2], row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_bar(x=[1,2,3], y=[2,1,2], row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    Example 6:
    Subplot with mixed subplot types
    >>> fig = make_subplots(rows=2, cols=2,
    ...                     specs=[[{'type': 'xy'},    {'type': 'polar'}],
    ...                            [{'type': 'scene'}, {'type': 'ternary'}]])
    >>> fig.add_traces(
    ...     [go.Scatter(y=[2, 3, 1]),
    ...      go.Scatterpolar(r=[1, 3, 2], theta=[0, 45, 90]),
    ...      go.Scatter3d(x=[1, 2, 1], y=[2, 3, 1], z=[0, 3, 5]),
    ...      go.Scatterternary(a=[0.1, 0.2, 0.1],
    ...                        b=[0.2, 0.3, 0.1],
    ...                        c=[0.7, 0.5, 0.8])],
    ...     rows=[1, 1, 2, 2],
    ...     cols=[1, 2, 1, 2]) # doctest: +ELLIPSIS
    Figure(...)
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_subplots.make_subplots.txt 
 | 
	def make_subplots(
    rows=1,
    cols=1,
    shared_xaxes=False,
    shared_yaxes=False,
    start_cell="top-left",
    print_grid=False,
    horizontal_spacing=None,
    vertical_spacing=None,
    subplot_titles=None,
    column_widths=None,
    row_heights=None,
    specs=None,
    insets=None,
    column_titles=None,
    row_titles=None,
    x_title=None,
    y_title=None,
    figure=None,
    **kwargs,
) -> go.Figure:
    """
    Return an instance of plotly.graph_objs.Figure with predefined subplots
    configured in 'layout'.
    Parameters
    ----------
    rows: int (default 1)
        Number of rows in the subplot grid. Must be greater than zero.
    cols: int (default 1)
        Number of columns in the subplot grid. Must be greater than zero.
    shared_xaxes: boolean or str (default False)
        Assign shared (linked) x-axes for 2D cartesian subplots
          - True or 'columns': Share axes among subplots in the same column
          - 'rows': Share axes among subplots in the same row
          - 'all': Share axes across all subplots in the grid.
    shared_yaxes: boolean or str (default False)
        Assign shared (linked) y-axes for 2D cartesian subplots
          - 'columns': Share axes among subplots in the same column
          - True or 'rows': Share axes among subplots in the same row
          - 'all': Share axes across all subplots in the grid.
    start_cell: 'bottom-left' or 'top-left' (default 'top-left')
        Choose the starting cell in the subplot grid used to set the
        domains_grid of the subplots.
          - 'top-left': Subplots are numbered with (1, 1) in the top
                        left corner
          - 'bottom-left': Subplots are numbered with (1, 1) in the bottom
                           left corner
    print_grid: boolean (default False):
        If True, prints a string representation of the plot grid.  Grid may
        also be printed using the `Figure.print_grid()` method on the
        resulting figure.
    horizontal_spacing: float (default 0.2 / cols)
        Space between subplot columns in normalized plot coordinates. Must be
        a float between 0 and 1.
        Applies to all columns (use 'specs' for subplot-dependent spacing)
    vertical_spacing: float (default 0.3 / rows)
        Space between subplot rows in normalized plot coordinates. Must be
        a float between 0 and 1.
        Applies to all rows (use 'specs' for subplot-dependent spacing)
    subplot_titles: list of str or None (default None)
        Title of each subplot as a list in row-major ordering.
        Empty strings ("") can be included in the list if no subplot title
        is desired in that space so that the titles are properly indexed.
    specs: list of lists of dict or None (default None)
        Per subplot specifications of subplot type, row/column spanning, and
        spacing.
        ex1: specs=[[{}, {}], [{'colspan': 2}, None]]
        ex2: specs=[[{'rowspan': 2}, {}], [None, {}]]
        - Indices of the outer list correspond to subplot grid rows
          starting from the top, if start_cell='top-left',
          or bottom, if start_cell='bottom-left'.
          The number of rows in 'specs' must be equal to 'rows'.
        - Indices of the inner lists correspond to subplot grid columns
          starting from the left. The number of columns in 'specs'
          must be equal to 'cols'.
        - Each item in the 'specs' list corresponds to one subplot
          in a subplot grid. (N.B. The subplot grid has exactly 'rows'
          times 'cols' cells.)
        - Use None for a blank subplot cell (or to move past a col/row span).
        - Note that specs[0][0] has the specs of the 'start_cell' subplot.
        - Each item in 'specs' is a dictionary.
            The available keys are:
            * type (string, default 'xy'): Subplot type. One of
                - 'xy': 2D Cartesian subplot type for scatter, bar, etc.
                - 'scene': 3D Cartesian subplot for scatter3d, cone, etc.
                - 'polar': Polar subplot for scatterpolar, barpolar, etc.
                - 'ternary': Ternary subplot for scatterternary
                - 'map': Map subplot for scattermap
                - 'mapbox': Mapbox subplot for scattermapbox
                - 'domain': Subplot type for traces that are individually
                            positioned. pie, parcoords, parcats, etc.
                - trace type: A trace type which will be used to determine
                              the appropriate subplot type for that trace
            * secondary_y (bool, default False): If True, create a secondary
                y-axis positioned on the right side of the subplot. Only valid
                if type='xy'.
            * colspan (int, default 1): number of subplot columns
                for this subplot to span.
            * rowspan (int, default 1): number of subplot rows
                for this subplot to span.
            * l (float, default 0.0): padding left of cell
            * r (float, default 0.0): padding right of cell
            * t (float, default 0.0): padding top of cell
            * b (float, default 0.0): padding bottom of cell
        - Note: Use 'horizontal_spacing' and 'vertical_spacing' to adjust
          the spacing in between the subplots.
    insets: list of dict or None (default None):
        Inset specifications.  Insets are subplots that overlay grid subplots
        - Each item in 'insets' is a dictionary.
            The available keys are:
            * cell (tuple, default=(1,1)): (row, col) index of the
                subplot cell to overlay inset axes onto.
            * type (string, default 'xy'): Subplot type
            * l (float, default=0.0): padding left of inset
                  in fraction of cell width
            * w (float or 'to_end', default='to_end') inset width
                  in fraction of cell width ('to_end': to cell right edge)
            * b (float, default=0.0): padding bottom of inset
                  in fraction of cell height
            * h (float or 'to_end', default='to_end') inset height
                  in fraction of cell height ('to_end': to cell top edge)
    column_widths: list of numbers or None (default None)
        list of length `cols` of the relative widths of each column of subplots.
        Values are normalized internally and used to distribute overall width
        of the figure (excluding padding) among the columns.
        For backward compatibility, may also be specified using the
        `column_width` keyword argument.
    row_heights: list of numbers or None (default None)
        list of length `rows` of the relative heights of each row of subplots.
        If start_cell='top-left' then row heights are applied top to bottom.
        Otherwise, if start_cell='bottom-left' then row heights are applied
        bottom to top.
        For backward compatibility, may also be specified using the
        `row_width` kwarg. If specified as `row_width`, then the width values
        are applied from bottom to top regardless of the value of start_cell.
        This matches the legacy behavior of the `row_width` argument.
    column_titles: list of str or None (default None)
        list of length `cols` of titles to place above the top subplot in
        each column.
    row_titles: list of str or None (default None)
        list of length `rows` of titles to place on the right side of each
        row of subplots. If start_cell='top-left' then row titles are
        applied top to bottom. Otherwise, if start_cell='bottom-left' then
        row titles are applied bottom to top.
    x_title: str or None (default None)
        Title to place below the bottom row of subplots,
        centered horizontally
    y_title: str or None (default None)
        Title to place to the left of the left column of subplots,
        centered vertically
    figure: go.Figure or None (default None)
        If None, a new go.Figure instance will be created and its axes will be
        populated with those corresponding to the requested subplot geometry and
        this new figure will be returned.
        If a go.Figure instance, the axes will be added to the
        layout of this figure and this figure will be returned. If the figure
        already contains axes, they will be overwritten.
    Examples
    --------
    Example 1:
    >>> # Stack two subplots vertically, and add a scatter trace to each
    >>> from plotly.subplots import make_subplots
    >>> import plotly.graph_objects as go
    >>> fig = make_subplots(rows=2)
    This is the format of your plot grid:
    [ (1,1) xaxis1,yaxis1 ]
    [ (2,1) xaxis2,yaxis2 ]
    >>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    or see Figure.append_trace
    Example 2:
    >>> # Stack two subplots vertically with shared x-axes
    >>> fig = make_subplots(rows=2, shared_xaxes=True)
    This is the format of your plot grid:
    [ (1,1) xaxis1,yaxis1 ]
    [ (2,1) xaxis2,yaxis2 ]
    >>> fig.add_scatter(y=[2, 1, 3], row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_scatter(y=[1, 3, 2], row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    Example 3:
    >>> # irregular subplot layout (more examples below under 'specs')
    >>> fig = make_subplots(rows=2, cols=2,
    ...                     specs=[[{}, {}],
    ...                     [{'colspan': 2}, None]])
    This is the format of your plot grid:
    [ (1,1) xaxis1,yaxis1 ]  [ (1,2) xaxis2,yaxis2 ]
    [ (2,1) xaxis3,yaxis3           -              ]
    >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=2) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    Example 4:
    >>> # insets
    >>> fig = make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}])
    This is the format of your plot grid:
    [ (1,1) xaxis1,yaxis1 ]
    With insets:
    [ xaxis2,yaxis2 ] over [ (1,1) xaxis1,yaxis1 ]
    >>> fig.add_scatter(x=[1,2,3], y=[2,1,1]) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2') # doctest: +ELLIPSIS
    Figure(...)
    Example 5:
    >>> # include subplot titles
    >>> fig = make_subplots(rows=2, subplot_titles=('Plot 1','Plot 2'))
    This is the format of your plot grid:
    [ (1,1) x1,y1 ]
    [ (2,1) x2,y2 ]
    >>> fig.add_scatter(x=[1,2,3], y=[2,1,2], row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_bar(x=[1,2,3], y=[2,1,2], row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    Example 6:
    Subplot with mixed subplot types
    >>> fig = make_subplots(rows=2, cols=2,
    ...                     specs=[[{'type': 'xy'},    {'type': 'polar'}],
    ...                            [{'type': 'scene'}, {'type': 'ternary'}]])
    >>> fig.add_traces(
    ...     [go.Scatter(y=[2, 3, 1]),
    ...      go.Scatterpolar(r=[1, 3, 2], theta=[0, 45, 90]),
    ...      go.Scatter3d(x=[1, 2, 1], y=[2, 3, 1], z=[0, 3, 5]),
    ...      go.Scatterternary(a=[0.1, 0.2, 0.1],
    ...                        b=[0.2, 0.3, 0.1],
    ...                        c=[0.7, 0.5, 0.8])],
    ...     rows=[1, 1, 2, 2],
    ...     cols=[1, 2, 1, 2]) # doctest: +ELLIPSIS
    Figure(...)
    """
    return _sub.make_subplots(
        rows,
        cols,
        shared_xaxes,
        shared_yaxes,
        start_cell,
        print_grid,
        horizontal_spacing,
        vertical_spacing,
        subplot_titles,
        column_widths,
        row_heights,
        specs,
        insets,
        column_titles,
        row_titles,
        x_title,
        y_title,
        figure,
        **kwargs,
    )
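The examples in the docstring do not exercise the ``secondary_y`` spec key described above; the short sketch below shows it on a single-cell grid. The data values are placeholders, but the calls follow the documented API.

from plotly.subplots import make_subplots
import plotly.graph_objects as go

# One cell whose spec requests a secondary y-axis on the right-hand side.
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(go.Scatter(y=[1, 3, 2], name="left axis"), secondary_y=False)
fig.add_trace(go.Bar(y=[100, 50, 80], name="right axis"), secondary_y=True)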
 
 | 
	subplots.make_subplots 
 | 
	Self-Contained 
 | 
					
	plotly.py 
 | 
	75 
 | 
	packages/python/plotly/plotly/tools.py 
 | 
	def make_subplots(
    rows=1,
    cols=1,
    shared_xaxes=False,
    shared_yaxes=False,
    start_cell="top-left",
    print_grid=None,
    **kwargs,
):
    """Return an instance of plotly.graph_objs.Figure
    with the subplots domain set in 'layout'.
    Example 1:
    # stack two subplots vertically
    fig = tools.make_subplots(rows=2)
    This is the format of your plot grid:
    [ (1,1) x1,y1 ]
    [ (2,1) x2,y2 ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]
    # or see Figure.append_trace
    Example 2:
    # subplots with shared x axes
    fig = tools.make_subplots(rows=2, shared_xaxes=True)
    This is the format of your plot grid:
    [ (1,1) x1,y1 ]
    [ (2,1) x1,y2 ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], yaxis='y2')]
    Example 3:
    # irregular subplot layout (more examples below under 'specs')
    fig = tools.make_subplots(rows=2, cols=2,
                              specs=[[{}, {}],
                                     [{'colspan': 2}, None]])
    This is the format of your plot grid!
    [ (1,1) x1,y1 ]  [ (1,2) x2,y2 ]
    [ (2,1) x3,y3           -      ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x3', yaxis='y3')]
    Example 4:
    # insets
    fig = tools.make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}])
    This is the format of your plot grid!
    [ (1,1) x1,y1 ]
    With insets:
    [ x2,y2 ] over [ (1,1) x1,y1 ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]
    Example 5:
    # include subplot titles
    fig = tools.make_subplots(rows=2, subplot_titles=('Plot 1','Plot 2'))
    This is the format of your plot grid:
    [ (1,1) x1,y1 ]
    [ (2,1) x2,y2 ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]
    Example 6:
    # Include subplot title on one plot (but not all)
    fig = tools.make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}],
                              subplot_titles=('','Inset'))
    This is the format of your plot grid!
    [ (1,1) x1,y1 ]
    With insets:
    [ x2,y2 ] over [ (1,1) x1,y1 ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]
    Keyword arguments with constant defaults:
    rows (kwarg, int greater than 0, default=1):
        Number of rows in the subplot grid.
    cols (kwarg, int greater than 0, default=1):
        Number of columns in the subplot grid.
    shared_xaxes (kwarg, boolean or list, default=False)
        Assign shared x axes.
        If True, subplots in the same grid column have one common
        shared x-axis at the bottom of the grid.
        To assign shared x axes per subplot grid cell (see 'specs'),
        send list (or list of lists, one list per shared x axis)
        of cell index tuples.
    shared_yaxes (kwarg, boolean or list, default=False)
        Assign shared y axes.
        If True, subplots in the same grid row have one common
        shared y-axis on the left-hand side of the grid.
        To assign shared y axes per subplot grid cell (see 'specs'),
        send list (or list of lists, one list per shared y axis)
        of cell index tuples.
    start_cell (kwarg, 'bottom-left' or 'top-left', default='top-left')
        Choose the starting cell in the subplot grid used to set the
        domains of the subplots.
    print_grid (kwarg, boolean, default=True):
        If True, prints a tab-delimited string representation of
        your plot grid.
    Keyword arguments with variable defaults:
    horizontal_spacing (kwarg, float in [0,1], default=0.2 / cols):
        Space between subplot columns.
        Applies to all columns (use 'specs' subplot-dependents spacing)
    vertical_spacing (kwarg, float in [0,1], default=0.3 / rows):
        Space between subplot rows.
        Applies to all rows (use 'specs' subplot-dependents spacing)
    subplot_titles (kwarg, list of strings, default=empty list):
        Title of each subplot.
        "" can be included in the list if no subplot title is desired in
        that space so that the titles are properly indexed.
    specs (kwarg, list of lists of dictionaries):
        Subplot specifications.
        ex1: specs=[[{}, {}], [{'colspan': 2}, None]]
        ex2: specs=[[{'rowspan': 2}, {}], [None, {}]]
        - Indices of the outer list correspond to subplot grid rows
          starting from the bottom. The number of rows in 'specs'
          must be equal to 'rows'.
        - Indices of the inner lists correspond to subplot grid columns
          starting from the left. The number of columns in 'specs'
          must be equal to 'cols'.
        - Each item in the 'specs' list corresponds to one subplot
          in a subplot grid. (N.B. The subplot grid has exactly 'rows'
          times 'cols' cells.)
        - Use None for a blank subplot cell (or to move past a col/row span).
        - Note that specs[0][0] has the specs of the 'start_cell' subplot.
        - Each item in 'specs' is a dictionary.
            The available keys are:
            * is_3d (boolean, default=False): flag for 3d scenes
            * colspan (int, default=1): number of subplot columns
                for this subplot to span.
            * rowspan (int, default=1): number of subplot rows
                for this subplot to span.
            * l (float, default=0.0): padding left of cell
            * r (float, default=0.0): padding right of cell
            * t (float, default=0.0): padding top of cell
            * b (float, default=0.0): padding bottom of cell
        - Use 'horizontal_spacing' and 'vertical_spacing' to adjust
          the spacing in between the subplots.
    insets (kwarg, list of dictionaries):
        Inset specifications.
        - Each item in 'insets' is a dictionary.
            The available keys are:
            * cell (tuple, default=(1,1)): (row, col) index of the
                subplot cell to overlay inset axes onto.
            * is_3d (boolean, default=False): flag for 3d scenes
            * l (float, default=0.0): padding left of inset
                  in fraction of cell width
            * w (float or 'to_end', default='to_end') inset width
                  in fraction of cell width ('to_end': to cell right edge)
            * b (float, default=0.0): padding bottom of inset
                  in fraction of cell height
            * h (float or 'to_end', default='to_end') inset height
                  in fraction of cell height ('to_end': to cell top edge)
    column_width (kwarg, list of numbers)
        Column_width specifications
        - Functions similarly to `column_width` of `plotly.graph_objs.Table`.
          Specify a list that contains numbers where the amount of numbers in
          the list is equal to `cols`.
        - The numbers in the list indicate the proportions that each column
          domains take across the full horizontal domain excluding padding.
        - For example, if column_width=[3, 1], horizontal_spacing=0, and
          cols=2, the domains for each column would be [0, 0.75] and [0.75, 1]
    row_width (kwargs, list of numbers)
        Row_width specifications
        - Functions similarly to `column_width`. Specify a list that contains
          numbers where the amount of numbers in the list is equal to `rows`.
        - The numbers in the list indicate the proportions that each row
          domains take along the full vertical domain excluding padding.
        - For example, if row_width=[3, 1], vertical_spacing=0, and
          rows=2, the domains for each row from top to bottom would be
          [0, 0.75] and [0.75, 1]
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_tools.make_subplots.txt 
 | 
	def make_subplots(
    rows=1,
    cols=1,
    shared_xaxes=False,
    shared_yaxes=False,
    start_cell="top-left",
    print_grid=None,
    **kwargs,
):
    """Return an instance of plotly.graph_objs.Figure
    with the subplots domain set in 'layout'.
    Example 1:
    # stack two subplots vertically
    fig = tools.make_subplots(rows=2)
    This is the format of your plot grid:
    [ (1,1) x1,y1 ]
    [ (2,1) x2,y2 ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]
    # or see Figure.append_trace
    Example 2:
    # subplots with shared x axes
    fig = tools.make_subplots(rows=2, shared_xaxes=True)
    This is the format of your plot grid:
    [ (1,1) x1,y1 ]
    [ (2,1) x1,y2 ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], yaxis='y2')]
    Example 3:
    # irregular subplot layout (more examples below under 'specs')
    fig = tools.make_subplots(rows=2, cols=2,
                              specs=[[{}, {}],
                                     [{'colspan': 2}, None]])
    This is the format of your plot grid!
    [ (1,1) x1,y1 ]  [ (1,2) x2,y2 ]
    [ (2,1) x3,y3           -      ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x3', yaxis='y3')]
    Example 4:
    # insets
    fig = tools.make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}])
    This is the format of your plot grid!
    [ (1,1) x1,y1 ]
    With insets:
    [ x2,y2 ] over [ (1,1) x1,y1 ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]
    Example 5:
    # include subplot titles
    fig = tools.make_subplots(rows=2, subplot_titles=('Plot 1','Plot 2'))
    This is the format of your plot grid:
    [ (1,1) x1,y1 ]
    [ (2,1) x2,y2 ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]
    Example 6:
    # Include subplot title on one plot (but not all)
    fig = tools.make_subplots(insets=[{'cell': (1,1), 'l': 0.7, 'b': 0.3}],
                              subplot_titles=('','Inset'))
    This is the format of your plot grid!
    [ (1,1) x1,y1 ]
    With insets:
    [ x2,y2 ] over [ (1,1) x1,y1 ]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2])]
    fig['data'] += [Scatter(x=[1,2,3], y=[2,1,2], xaxis='x2', yaxis='y2')]
    Keyword arguments with constant defaults:
    rows (kwarg, int greater than 0, default=1):
        Number of rows in the subplot grid.
    cols (kwarg, int greater than 0, default=1):
        Number of columns in the subplot grid.
    shared_xaxes (kwarg, boolean or list, default=False)
        Assign shared x axes.
        If True, subplots in the same grid column have one common
        shared x-axis at the bottom of the grid.
        To assign shared x axes per subplot grid cell (see 'specs'),
        send list (or list of lists, one list per shared x axis)
        of cell index tuples.
    shared_yaxes (kwarg, boolean or list, default=False)
        Assign shared y axes.
        If True, subplots in the same grid row have one common
        shared y-axis on the left-hand side of the grid.
        To assign shared y axes per subplot grid cell (see 'specs'),
        send list (or list of lists, one list per shared y axis)
        of cell index tuples.
    start_cell (kwarg, 'bottom-left' or 'top-left', default='top-left')
        Choose the starting cell in the subplot grid used to set the
        domains of the subplots.
    print_grid (kwarg, boolean, default=True):
        If True, prints a tab-delimited string representation of
        your plot grid.
    Keyword arguments with variable defaults:
    horizontal_spacing (kwarg, float in [0,1], default=0.2 / cols):
        Space between subplot columns.
        Applies to all columns (use 'specs' subplot-dependents spacing)
    vertical_spacing (kwarg, float in [0,1], default=0.3 / rows):
        Space between subplot rows.
        Applies to all rows (use 'specs' subplot-dependents spacing)
    subplot_titles (kwarg, list of strings, default=empty list):
        Title of each subplot.
        "" can be included in the list if no subplot title is desired in
        that space so that the titles are properly indexed.
    specs (kwarg, list of lists of dictionaries):
        Subplot specifications.
        ex1: specs=[[{}, {}], [{'colspan': 2}, None]]
        ex2: specs=[[{'rowspan': 2}, {}], [None, {}]]
        - Indices of the outer list correspond to subplot grid rows
          starting from the bottom. The number of rows in 'specs'
          must be equal to 'rows'.
        - Indices of the inner lists correspond to subplot grid columns
          starting from the left. The number of columns in 'specs'
          must be equal to 'cols'.
        - Each item in the 'specs' list corresponds to one subplot
          in a subplot grid. (N.B. The subplot grid has exactly 'rows'
          times 'cols' cells.)
        - Use None for a blank subplot cell (or to move past a col/row span).
        - Note that specs[0][0] has the specs of the 'start_cell' subplot.
        - Each item in 'specs' is a dictionary.
            The available keys are:
            * is_3d (boolean, default=False): flag for 3d scenes
            * colspan (int, default=1): number of subplot columns
                for this subplot to span.
            * rowspan (int, default=1): number of subplot rows
                for this subplot to span.
            * l (float, default=0.0): padding left of cell
            * r (float, default=0.0): padding right of cell
            * t (float, default=0.0): padding top of cell
            * b (float, default=0.0): padding bottom of cell
        - Use 'horizontal_spacing' and 'vertical_spacing' to adjust
          the spacing in between the subplots.
    insets (kwarg, list of dictionaries):
        Inset specifications.
        - Each item in 'insets' is a dictionary.
            The available keys are:
            * cell (tuple, default=(1,1)): (row, col) index of the
                subplot cell to overlay inset axes onto.
            * is_3d (boolean, default=False): flag for 3d scenes
            * l (float, default=0.0): padding left of inset
                  in fraction of cell width
            * w (float or 'to_end', default='to_end') inset width
                  in fraction of cell width ('to_end': to cell right edge)
            * b (float, default=0.0): padding bottom of inset
                  in fraction of cell height
            * h (float or 'to_end', default='to_end') inset height
                  in fraction of cell height ('to_end': to cell top edge)
    column_width (kwarg, list of numbers)
        Column_width specifications
        - Functions similarly to `column_width` of `plotly.graph_objs.Table`.
          Specify a list that contains numbers where the amount of numbers in
          the list is equal to `cols`.
        - The numbers in the list indicate the proportions that each column
          domains take across the full horizontal domain excluding padding.
        - For example, if column_width=[3, 1], horizontal_spacing=0, and
          cols=2, the domains for each column would be [0, 0.75] and [0.75, 1]
    row_width (kwargs, list of numbers)
        Row_width specifications
        - Functions similarly to `column_width`. Specify a list that contains
          numbers where the amount of numbers in the list is equal to `rows`.
        - The numbers in the list indicate the proportions that each row
          domains take along the full vertical domain excluding padding.
        - For example, if row_width=[3, 1], vertical_spacing=0, and
          rows=2, the domains for each row from top to bottom would be
          [0, 0.75] and [0.75, 1]
    """
    import plotly.subplots
    warnings.warn(
        "plotly.tools.make_subplots is deprecated, "
        "please use plotly.subplots.make_subplots instead",
        DeprecationWarning,
        stacklevel=1,
    )
    return plotly.subplots.make_subplots(
        rows=rows,
        cols=cols,
        shared_xaxes=shared_xaxes,
        shared_yaxes=shared_yaxes,
        start_cell=start_cell,
        print_grid=print_grid,
        **kwargs,
    )
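Since the body above only warns and then delegates, migrating is a one-line import change. The sketch below (illustrative only) shows the deprecation warning being raised and the preferred replacement call.

import warnings
from plotly import subplots, tools

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    tools.make_subplots(rows=2, cols=1)  # emits DeprecationWarning
assert any(issubclass(w.category, DeprecationWarning) for w in caught)

fig = subplots.make_subplots(rows=2, cols=1)  # preferred spelling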
 
 | 
	tools.make_subplots 
 | 
	Self-Contained 
 | 
					
	sphinx 
 | 
	0 
 | 
	sphinx/application.py 
 | 
	    def __init__(self, srcdir: str | os.PathLike[str], confdir: str | os.PathLike[str] | None,
                 outdir: str | os.PathLike[str], doctreedir: str | os.PathLike[str],
                 buildername: str, confoverrides: dict | None = None,
                 status: IO[str] | None = sys.stdout, warning: IO[str] | None = sys.stderr,
                 freshenv: bool = False, warningiserror: bool = False,
                 tags: Sequence[str] = (),
                 verbosity: int = 0, parallel: int = 0, keep_going: bool = False,
                 pdb: bool = False, exception_on_warning: bool = False) -> None:
        """Initialize the Sphinx application.
        :param srcdir: The path to the source directory.
        :param confdir: The path to the configuration directory.
            If not given, it is assumed to be the same as ``srcdir``.
        :param outdir: Directory for storing build documents.
        :param doctreedir: Directory for caching pickled doctrees.
        :param buildername: The name of the builder to use.
        :param confoverrides: A dictionary of configuration settings that override the
            settings in the configuration file.
        :param status: A file-like object to write status messages to.
        :param warning: A file-like object to write warnings to.
        :param freshenv: If true, clear the cached environment.
        :param warningiserror: If true, warnings become errors.
        :param tags: A list of tags to apply.
        :param verbosity: The verbosity level.
        :param parallel: The maximum number of parallel jobs to use
            when reading/writing documents.
        :param keep_going: Unused.
        :param pdb: If true, enable the Python debugger on an exception.
        :param exception_on_warning: If true, raise an exception on warnings.
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests___init__.txt 
 | 
	    def __init__(self, srcdir: str | os.PathLike[str], confdir: str | os.PathLike[str] | None,
                 outdir: str | os.PathLike[str], doctreedir: str | os.PathLike[str],
                 buildername: str, confoverrides: dict | None = None,
                 status: IO[str] | None = sys.stdout, warning: IO[str] | None = sys.stderr,
                 freshenv: bool = False, warningiserror: bool = False,
                 tags: Sequence[str] = (),
                 verbosity: int = 0, parallel: int = 0, keep_going: bool = False,
                 pdb: bool = False, exception_on_warning: bool = False) -> None:
        """Initialize the Sphinx application.
        :param srcdir: The path to the source directory.
        :param confdir: The path to the configuration directory.
            If not given, it is assumed to be the same as ``srcdir``.
        :param outdir: Directory for storing build documents.
        :param doctreedir: Directory for caching pickled doctrees.
        :param buildername: The name of the builder to use.
        :param confoverrides: A dictionary of configuration settings that override the
            settings in the configuration file.
        :param status: A file-like object to write status messages to.
        :param warning: A file-like object to write warnings to.
        :param freshenv: If true, clear the cached environment.
        :param warningiserror: If true, warnings become errors.
        :param tags: A list of tags to apply.
        :param verbosity: The verbosity level.
        :param parallel: The maximum number of parallel jobs to use
            when reading/writing documents.
        :param keep_going: Unused.
        :param pdb: If true, enable the Python debugger on an exception.
        :param exception_on_warning: If true, raise an exception on warnings.
        """
        self.phase = BuildPhase.INITIALIZATION
        self.verbosity = verbosity
        self._fresh_env_used: bool | None = None
        self.extensions: dict[str, Extension] = {}
        self.registry = SphinxComponentRegistry()
        # validate provided directories
        self.srcdir = _StrPath(srcdir).resolve()
        self.outdir = _StrPath(outdir).resolve()
        self.doctreedir = _StrPath(doctreedir).resolve()
        if not path.isdir(self.srcdir):
            raise ApplicationError(__('Cannot find source directory (%s)') %
                                   self.srcdir)
        if path.exists(self.outdir) and not path.isdir(self.outdir):
            raise ApplicationError(__('Output directory (%s) is not a directory') %
                                   self.outdir)
        if self.srcdir == self.outdir:
            raise ApplicationError(__('Source directory and destination '
                                      'directory cannot be identical'))
        self.parallel = parallel
        if status is None:
            self._status: IO[str] = StringIO()
            self.quiet: bool = True
        else:
            self._status = status
            self.quiet = False
        if warning is None:
            self._warning: IO[str] = StringIO()
        else:
            self._warning = warning
        self._warncount = 0
        self.keep_going = bool(warningiserror)  # Unused
        self._fail_on_warnings = bool(warningiserror)
        self.pdb = pdb
        self._exception_on_warning = exception_on_warning
        logging.setup(self, self._status, self._warning)
        self.events = EventManager(self)
        # keep last few messages for traceback
        # This will be filled by sphinx.util.logging.LastMessagesWriter
        self.messagelog: deque[str] = deque(maxlen=10)
        # say hello to the world
        logger.info(bold(__('Running Sphinx v%s')), sphinx.__display_version__)
        # status code for command-line application
        self.statuscode = 0
        # read config
        self.tags = Tags(tags)
        if confdir is None:
            # set confdir to srcdir if -C given (!= no confdir); a few pieces
            # of code expect a confdir to be set
            self.confdir = self.srcdir
            self.config = Config({}, confoverrides or {})
        else:
            self.confdir = _StrPath(confdir).resolve()
            self.config = Config.read(self.confdir, confoverrides or {}, self.tags)
        # set up translation infrastructure
        self._init_i18n()
        # check the Sphinx version if requested
        if self.config.needs_sphinx and self.config.needs_sphinx > sphinx.__display_version__:
            raise VersionRequirementError(
                __('This project needs at least Sphinx v%s and therefore cannot '
                   'be built with this version.') % self.config.needs_sphinx)
        # load all built-in extension modules, first-party extension modules,
        # and first-party themes
        for extension in builtin_extensions:
            self.setup_extension(extension)
        # load all user-given extension modules
        for extension in self.config.extensions:
            self.setup_extension(extension)
        # preload builder module (before init config values)
        self.preload_builder(buildername)
        if not path.isdir(outdir):
            with progress_message(__('making output directory')):
                ensuredir(outdir)
        # the config file itself can be an extension
        if self.config.setup:
            prefix = __('while setting up extension %s:') % "conf.py"
            with prefixed_warnings(prefix):
                if callable(self.config.setup):
                    self.config.setup(self)
                else:
                    raise ConfigError(
                        __("'setup' as currently defined in conf.py isn't a Python callable. "
                           "Please modify its definition to make it a callable function. "
                           "This is needed for conf.py to behave as a Sphinx extension."),
                    )
        # Report any warnings for overrides.
        self.config._report_override_warnings()
        self.events.emit('config-inited', self.config)
        # create the project
        self.project = Project(self.srcdir, self.config.source_suffix)
        # set up the build environment
        self.env = self._init_env(freshenv)
        # create the builder
        self.builder = self.create_builder(buildername)
        # build environment post-initialisation, after creating the builder
        self._post_init_env()
        # set up the builder
        self._init_builder()
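# --- Hedged usage sketch (not part of the source above): constructing a Sphinx
# application with the signature documented in __init__ and running a build.
# The directory layout and the minimal conf.py/index.rst contents are assumptions
# made only for illustration.
import os
from sphinx.application import Sphinx

srcdir = "docs_demo/source"
outdir = "docs_demo/_build/html"
doctreedir = "docs_demo/_build/doctrees"
os.makedirs(srcdir, exist_ok=True)
with open(os.path.join(srcdir, "conf.py"), "w") as f:
    f.write("project = 'demo'\n")
with open(os.path.join(srcdir, "index.rst"), "w") as f:
    f.write("Demo\n====\n")

app = Sphinx(srcdir=srcdir, confdir=srcdir, outdir=outdir,
             doctreedir=doctreedir, buildername="html", freshenv=True)
app.build()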
 
 | 
	__init__ 
 | 
	Self-Contained 
 | 
					
	xarray 
 | 
	7 
 | 
	xarray/core/indexing.py 
 | 
	def _decompose_outer_indexer(
    indexer: BasicIndexer | OuterIndexer,
    shape: _Shape,
    indexing_support: IndexingSupport,
) -> tuple[ExplicitIndexer, ExplicitIndexer]:
    """
    Decompose outer indexer to the successive two indexers, where the
    first indexer will be used to index backend arrays, while the second one
    is used to index the loaded on-memory np.ndarray.
    Parameters
    ----------
    indexer : OuterIndexer or BasicIndexer
    indexing_support : One of the entries of IndexingSupport
    Returns
    -------
    backend_indexer: OuterIndexer or BasicIndexer
    np_indexers: an ExplicitIndexer (OuterIndexer / BasicIndexer)
    Notes
    -----
    This function is used to realize the vectorized indexing for the backend
    arrays that only support basic or outer indexing.
    As an example, let us consider indexing a few elements from a backend array
    with an orthogonal indexer ([0, 3, 1], [2, 3, 2]).
    Even if the backend array only supports basic indexing, it is more
    efficient to load a subslice of the array than to load the entire array:
    >>> array = np.arange(36).reshape(6, 6)
    >>> backend_indexer = BasicIndexer((slice(0, 3), slice(2, 4)))
    >>> # load subslice of the array
    ... array = NumpyIndexingAdapter(array)[backend_indexer]
    >>> np_indexer = OuterIndexer((np.array([0, 2, 1]), np.array([0, 1, 0])))
    >>> # outer indexing for on-memory np.ndarray.
    ... NumpyIndexingAdapter(array).oindex[np_indexer]
    array([[ 2,  3,  2],
           [14, 15, 14],
           [ 8,  9,  8]])
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__decompose_outer_indexer.txt 
 | 
	def _decompose_outer_indexer(
    indexer: BasicIndexer | OuterIndexer,
    shape: _Shape,
    indexing_support: IndexingSupport,
) -> tuple[ExplicitIndexer, ExplicitIndexer]:
    """
    Decompose outer indexer to the successive two indexers, where the
    first indexer will be used to index backend arrays, while the second one
    is used to index the loaded on-memory np.ndarray.
    Parameters
    ----------
    indexer : OuterIndexer or BasicIndexer
    indexing_support : One of the entries of IndexingSupport
    Returns
    -------
    backend_indexer: OuterIndexer or BasicIndexer
    np_indexers: an ExplicitIndexer (OuterIndexer / BasicIndexer)
    Notes
    -----
    This function is used to realize the vectorized indexing for the backend
    arrays that only support basic or outer indexing.
    As an example, let us consider indexing a few elements from a backend array
    with an orthogonal indexer ([0, 3, 1], [2, 3, 2]).
    Even if the backend array only supports basic indexing, it is more
    efficient to load a subslice of the array than to load the entire array:
    >>> array = np.arange(36).reshape(6, 6)
    >>> backend_indexer = BasicIndexer((slice(0, 3), slice(2, 4)))
    >>> # load subslice of the array
    ... array = NumpyIndexingAdapter(array)[backend_indexer]
    >>> np_indexer = OuterIndexer((np.array([0, 2, 1]), np.array([0, 1, 0])))
    >>> # outer indexing for on-memory np.ndarray.
    ... NumpyIndexingAdapter(array).oindex[np_indexer]
    array([[ 2,  3,  2],
           [14, 15, 14],
           [ 8,  9,  8]])
    """
    backend_indexer: list[Any] = []
    np_indexer: list[Any] = []
    assert isinstance(indexer, OuterIndexer | BasicIndexer)
    if indexing_support == IndexingSupport.VECTORIZED:
        for k, s in zip(indexer.tuple, shape, strict=False):
            if isinstance(k, slice):
                # If it is a slice, then we will slice it as-is
                # (but make its step positive) in the backend,
                bk_slice, np_slice = _decompose_slice(k, s)
                backend_indexer.append(bk_slice)
                np_indexer.append(np_slice)
            else:
                backend_indexer.append(k)
                if not is_scalar(k):
                    np_indexer.append(slice(None))
        return type(indexer)(tuple(backend_indexer)), BasicIndexer(tuple(np_indexer))
    # make indexer positive
    pos_indexer: list[np.ndarray | int | np.number] = []
    for k, s in zip(indexer.tuple, shape, strict=False):
        if isinstance(k, np.ndarray):
            pos_indexer.append(np.where(k < 0, k + s, k))
        elif isinstance(k, integer_types) and k < 0:
            pos_indexer.append(k + s)
        else:
            pos_indexer.append(k)
    indexer_elems = pos_indexer
    if indexing_support is IndexingSupport.OUTER_1VECTOR:
        # some backends such as h5py supports only 1 vector in indexers
        # We choose the most efficient axis
        gains = [
            (
                (np.max(k) - np.min(k) + 1.0) / len(np.unique(k))
                if isinstance(k, np.ndarray)
                else 0
            )
            for k in indexer_elems
        ]
        array_index = np.argmax(np.array(gains)) if len(gains) > 0 else None
        for i, (k, s) in enumerate(zip(indexer_elems, shape, strict=False)):
            if isinstance(k, np.ndarray) and i != array_index:
                # np.ndarray key is converted to slice that covers the entire
                # entries of this key.
                backend_indexer.append(slice(np.min(k), np.max(k) + 1))
                np_indexer.append(k - np.min(k))
            elif isinstance(k, np.ndarray):
                # Remove duplicates and sort them in the increasing order
                pkey, ekey = np.unique(k, return_inverse=True)
                backend_indexer.append(pkey)
                np_indexer.append(ekey)
            elif isinstance(k, integer_types):
                backend_indexer.append(k)
            else:  # slice:  convert positive step slice for backend
                bk_slice, np_slice = _decompose_slice(k, s)
                backend_indexer.append(bk_slice)
                np_indexer.append(np_slice)
        return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))
    if indexing_support == IndexingSupport.OUTER:
        for k, s in zip(indexer_elems, shape, strict=False):
            if isinstance(k, slice):
                # slice:  convert positive step slice for backend
                bk_slice, np_slice = _decompose_slice(k, s)
                backend_indexer.append(bk_slice)
                np_indexer.append(np_slice)
            elif isinstance(k, integer_types):
                backend_indexer.append(k)
            elif isinstance(k, np.ndarray) and (np.diff(k) >= 0).all():
                backend_indexer.append(k)
                np_indexer.append(slice(None))
            else:
                # Remove duplicates and sort them in the increasing order
                oind, vind = np.unique(k, return_inverse=True)
                backend_indexer.append(oind)
                np_indexer.append(vind.reshape(*k.shape))
        return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))
    # basic indexer
    assert indexing_support == IndexingSupport.BASIC
    for k, s in zip(indexer_elems, shape, strict=False):
        if isinstance(k, np.ndarray):
            # np.ndarray key is converted to slice that covers the entire
            # entries of this key.
            backend_indexer.append(slice(np.min(k), np.max(k) + 1))
            np_indexer.append(k - np.min(k))
        elif isinstance(k, integer_types):
            backend_indexer.append(k)
        else:  # slice:  convert positive step slice for backend
            bk_slice, np_slice = _decompose_slice(k, s)
            backend_indexer.append(bk_slice)
            np_indexer.append(np_slice)
    return (BasicIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))
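# --- Hedged usage sketch (not part of the source above): splitting an outer
# indexer for a backend that only supports basic indexing, then finishing the
# indexing in memory. These helpers live in the private xarray.core.indexing
# module and may change between versions.
import numpy as np
from xarray.core.indexing import (
    IndexingSupport,
    NumpyIndexingAdapter,
    OuterIndexer,
    _decompose_outer_indexer,
)

array = np.arange(36).reshape(6, 6)
indexer = OuterIndexer((np.array([0, 3, 1]), np.array([2, 3, 2])))

backend_indexer, np_indexer = _decompose_outer_indexer(
    indexer, array.shape, IndexingSupport.BASIC
)
# Step 1: load only the covering subslice from the "backend" array.
loaded = NumpyIndexingAdapter(array)[backend_indexer]
# Step 2: apply the remaining outer indexer to the in-memory result.
result = NumpyIndexingAdapter(loaded).oindex[np_indexer]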
 
 | 
	_decompose_outer_indexer 
 | 
	File-Level 
 | 
					
	xarray 
 | 
	9 
 | 
	xarray/plot/utils.py 
 | 
	def _determine_cmap_params(
    plot_data,
    vmin=None,
    vmax=None,
    cmap=None,
    center=None,
    robust=False,
    extend=None,
    levels=None,
    filled=True,
    norm=None,
    _is_facetgrid=False,
):
    """
    Use some heuristics to set good defaults for colorbar and range.
    Parameters
    ----------
    plot_data : Numpy array
        Doesn't handle xarray objects
    Returns
    -------
    cmap_params : dict
        Its use depends on the type of the plotting function
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__determine_cmap_params.txt 
 | 
	def _determine_cmap_params(
    plot_data,
    vmin=None,
    vmax=None,
    cmap=None,
    center=None,
    robust=False,
    extend=None,
    levels=None,
    filled=True,
    norm=None,
    _is_facetgrid=False,
):
    """
    Use some heuristics to set good defaults for colorbar and range.
    Parameters
    ----------
    plot_data : Numpy array
        Doesn't handle xarray objects
    Returns
    -------
    cmap_params : dict
        Its use depends on the type of the plotting function
    """
    import matplotlib as mpl
    if isinstance(levels, Iterable):
        levels = sorted(levels)
    calc_data = np.ravel(plot_data[np.isfinite(plot_data)])
    # Handle all-NaN input data gracefully
    if calc_data.size == 0:
        # Arbitrary default for when all values are NaN
        calc_data = np.array(0.0)
    # Setting center=False prevents a divergent cmap
    possibly_divergent = center is not False
    # Set center to 0 so math below makes sense but remember its state
    center_is_none = False
    if center is None:
        center = 0
        center_is_none = True
    # Setting both vmin and vmax prevents a divergent cmap
    if (vmin is not None) and (vmax is not None):
        possibly_divergent = False
    # Setting vmin or vmax implies linspaced levels
    user_minmax = (vmin is not None) or (vmax is not None)
    # vlim might be computed below
    vlim = None
    # save state; needed later
    vmin_was_none = vmin is None
    vmax_was_none = vmax is None
    if vmin is None:
        if robust:
            vmin = np.percentile(calc_data, ROBUST_PERCENTILE)
        else:
            vmin = calc_data.min()
    elif possibly_divergent:
        vlim = abs(vmin - center)
    if vmax is None:
        if robust:
            vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE)
        else:
            vmax = calc_data.max()
    elif possibly_divergent:
        vlim = abs(vmax - center)
    if possibly_divergent:
        levels_are_divergent = (
            isinstance(levels, Iterable) and levels[0] * levels[-1] < 0
        )
        # kwargs not specific about divergent or not: infer defaults from data
        divergent = (
            ((vmin < 0) and (vmax > 0)) or not center_is_none or levels_are_divergent
        )
    else:
        divergent = False
    # A divergent map should be symmetric around the center value
    if divergent:
        if vlim is None:
            vlim = max(abs(vmin - center), abs(vmax - center))
        vmin, vmax = -vlim, vlim
    # Now add in the centering value and set the limits
    vmin += center
    vmax += center
    # now check norm and harmonize with vmin, vmax
    if norm is not None:
        if norm.vmin is None:
            norm.vmin = vmin
        else:
            if not vmin_was_none and vmin != norm.vmin:
                raise ValueError("Cannot supply vmin and a norm with a different vmin.")
            vmin = norm.vmin
        if norm.vmax is None:
            norm.vmax = vmax
        else:
            if not vmax_was_none and vmax != norm.vmax:
                raise ValueError("Cannot supply vmax and a norm with a different vmax.")
            vmax = norm.vmax
    # if BoundaryNorm, then set levels
    if isinstance(norm, mpl.colors.BoundaryNorm):
        levels = norm.boundaries
    # Choose default colormaps if not provided
    if cmap is None:
        if divergent:
            cmap = OPTIONS["cmap_divergent"]
        else:
            cmap = OPTIONS["cmap_sequential"]
    # Handle discrete levels
    if levels is not None:
        if is_scalar(levels):
            if user_minmax:
                levels = np.linspace(vmin, vmax, levels)
            elif levels == 1:
                levels = np.asarray([(vmin + vmax) / 2])
            else:
                # N in MaxNLocator refers to bins, not ticks
                ticker = mpl.ticker.MaxNLocator(levels - 1)
                levels = ticker.tick_values(vmin, vmax)
        vmin, vmax = levels[0], levels[-1]
    # GH3734
    if vmin == vmax:
        vmin, vmax = mpl.ticker.LinearLocator(2).tick_values(vmin, vmax)
    if extend is None:
        extend = _determine_extend(calc_data, vmin, vmax)
    if (levels is not None) and (not isinstance(norm, mpl.colors.BoundaryNorm)):
        cmap, newnorm = _build_discrete_cmap(cmap, levels, extend, filled)
        norm = newnorm if norm is None else norm
    # vmin & vmax needs to be None if norm is passed
    # TODO: always return a norm with vmin and vmax
    if norm is not None:
        vmin = None
        vmax = None
    return dict(
        vmin=vmin, vmax=vmax, cmap=cmap, extend=extend, levels=levels, norm=norm
    )
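# --- Hedged usage sketch (not part of the source above): calling the private
# helper directly. _determine_cmap_params is internal xarray plotting machinery
# (it requires matplotlib); the sample data below is an illustrative assumption.
import numpy as np
from xarray.plot.utils import _determine_cmap_params

data = np.linspace(-1.0, 3.0, 25).reshape(5, 5)

# Data straddling zero with no explicit center -> divergent defaults:
# a divergent colormap and vmin/vmax made symmetric about 0.
params = _determine_cmap_params(data, robust=True)
print(params["cmap"], params["vmin"], params["vmax"], params["extend"])

# With discrete levels a boundary-style norm is built and vmin/vmax are
# returned as None (the norm carries the limits instead).
params = _determine_cmap_params(data, levels=5)
print(params["levels"], params["norm"])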
 
 | 
	_determine_cmap_params 
 | 
	File-Level 
 | 
					
	xarray 
 | 
	18 
 | 
	xarray/core/computation.py 
 | 
	def apply_ufunc(
    func: Callable,
    *args: Any,
    input_core_dims: Sequence[Sequence] | None = None,
    output_core_dims: Sequence[Sequence] | None = ((),),
    exclude_dims: Set = frozenset(),
    vectorize: bool = False,
    join: JoinOptions = "exact",
    dataset_join: str = "exact",
    dataset_fill_value: object = _NO_FILL_VALUE,
    keep_attrs: bool | str | None = None,
    kwargs: Mapping | None = None,
    dask: Literal["forbidden", "allowed", "parallelized"] = "forbidden",
    output_dtypes: Sequence | None = None,
    output_sizes: Mapping[Any, int] | None = None,
    meta: Any = None,
    dask_gufunc_kwargs: dict[str, Any] | None = None,
    on_missing_core_dim: MissingCoreDimOptions = "raise",
) -> Any:
    """Apply a vectorized function for unlabeled arrays on xarray objects.
    The function will be mapped over the data variable(s) of the input
    arguments using xarray's standard rules for labeled computation, including
    alignment, broadcasting, looping over GroupBy/Dataset variables, and
    merging of coordinates.
    Parameters
    ----------
    func : callable
        Function to call like ``func(*args, **kwargs)`` on unlabeled arrays
        (``.data``) that returns an array or tuple of arrays. If multiple
        arguments with non-matching dimensions are supplied, this function is
        expected to vectorize (broadcast) over axes of positional arguments in
        the style of NumPy universal functions [1]_ (if this is not the case,
        set ``vectorize=True``). If this function returns multiple outputs, you
        must set ``output_core_dims`` as well.
    *args : Dataset, DataArray, DataArrayGroupBy, DatasetGroupBy, Variable, \
        numpy.ndarray, dask.array.Array or scalar
        Mix of labeled and/or unlabeled arrays to which to apply the function.
    input_core_dims : sequence of sequence, optional
        List of the same length as ``args`` giving the list of core dimensions
        on each input argument that should not be broadcast. By default, we
        assume there are no core dimensions on any input arguments.
        For example, ``input_core_dims=[[], ['time']]`` indicates that all
        dimensions on the first argument and all dimensions other than 'time'
        on the second argument should be broadcast.
        Core dimensions are automatically moved to the last axes of input
        variables before applying ``func``, which facilitates using NumPy style
        generalized ufuncs [2]_.
    output_core_dims : list of tuple, optional
        List of the same length as the number of output arguments from
        ``func``, giving the list of core dimensions on each output that were
        not broadcast on the inputs. By default, we assume that ``func``
        outputs exactly one array, with axes corresponding to each broadcast
        dimension.
        Core dimensions are assumed to appear as the last dimensions of each
        output in the provided order.
    exclude_dims : set, optional
        Core dimensions on the inputs to exclude from alignment and
        broadcasting entirely. Any input coordinates along these dimensions
        will be dropped. Each excluded dimension must also appear in
        ``input_core_dims`` for at least one argument. Only dimensions listed
        here are allowed to change size between input and output objects.
    vectorize : bool, optional
        If True, then assume ``func`` only takes arrays defined over core
        dimensions as input and vectorize it automatically with
        :py:func:`numpy.vectorize`. This option exists for convenience, but is
        almost always slower than supplying a pre-vectorized function.
    join : {"outer", "inner", "left", "right", "exact"}, default: "exact"
        Method for joining the indexes of the passed objects along each
        dimension, and the variables of Dataset objects with mismatched
        data variables:
        - 'outer': use the union of object indexes
        - 'inner': use the intersection of object indexes
        - 'left': use indexes from the first object with each dimension
        - 'right': use indexes from the last object with each dimension
        - 'exact': raise `ValueError` instead of aligning when indexes to be
          aligned are not equal
    dataset_join : {"outer", "inner", "left", "right", "exact"}, default: "exact"
        Method for joining variables of Dataset objects with mismatched
        data variables.
        - 'outer': take variables from both Dataset objects
        - 'inner': take only overlapped variables
        - 'left': take only variables from the first object
        - 'right': take only variables from the last object
        - 'exact': data variables on all Dataset objects must match exactly
    dataset_fill_value : optional
        Value used in place of missing variables on Dataset inputs when the
        datasets do not share the exact same ``data_vars``. Required if
        ``dataset_join not in {'inner', 'exact'}``, otherwise ignored.
    keep_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", "override"} or bool, optional
        - 'drop' or False: empty attrs on returned xarray object.
        - 'identical': all attrs must be the same on every object.
        - 'no_conflicts': attrs from all objects are combined, any that have the same name must also have the same value.
        - 'drop_conflicts': attrs from all objects are combined, any that have the same name but different values are dropped.
        - 'override' or True: skip comparing and copy attrs from the first object to the result.
    kwargs : dict, optional
        Optional keyword arguments passed directly on to call ``func``.
    dask : {"forbidden", "allowed", "parallelized"}, default: "forbidden"
        How to handle applying to objects containing lazy data in the form of
        dask arrays:
        - 'forbidden' (default): raise an error if a dask array is encountered.
        - 'allowed': pass dask arrays directly on to ``func``. Prefer this option if
          ``func`` natively supports dask arrays.
        - 'parallelized': automatically parallelize ``func`` if any of the
          inputs are a dask array by using :py:func:`dask.array.apply_gufunc`. Multiple output
          arguments are supported. Only use this option if ``func`` does not natively
          support dask arrays (e.g. converts them to numpy arrays).
    dask_gufunc_kwargs : dict, optional
        Optional keyword arguments passed to :py:func:`dask.array.apply_gufunc` if
        dask='parallelized'. Possible keywords are ``output_sizes``, ``allow_rechunk``
        and ``meta``.
    output_dtypes : list of dtype, optional
        Optional list of output dtypes. Only used if ``dask='parallelized'`` or
        ``vectorize=True``.
    output_sizes : dict, optional
        Optional mapping from dimension names to sizes for outputs. Only used
        if dask='parallelized' and new dimensions (not found on inputs) appear
        on outputs. ``output_sizes`` should be given in the ``dask_gufunc_kwargs``
        parameter. It will be removed as a direct parameter in a future version.
    meta : optional
        Size-0 object representing the type of array wrapped by dask array. Passed on to
        :py:func:`dask.array.apply_gufunc`. ``meta`` should be given in the
        ``dask_gufunc_kwargs`` parameter. It will be removed as a direct parameter
        in a future version.
    on_missing_core_dim : {"raise", "copy", "drop"}, default: "raise"
        How to handle missing core dimensions on input variables.
    Returns
    -------
    Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
    numpy.ndarray, the first type on that list to appear on an input.
    Notes
    -----
    This function is designed for the more common case where ``func`` can work on numpy
    arrays. If ``func`` needs to manipulate a whole xarray object subset to each block
    it is possible to use :py:func:`xarray.map_blocks`.
    Note that due to the overhead :py:func:`xarray.map_blocks` is considerably slower than ``apply_ufunc``.
    Examples
    --------
    Calculate the vector magnitude of two arguments:
    >>> def magnitude(a, b):
    ...     func = lambda x, y: np.sqrt(x**2 + y**2)
    ...     return xr.apply_ufunc(func, a, b)
    ...
    You can now apply ``magnitude()`` to :py:class:`DataArray` and :py:class:`Dataset`
    objects, with automatically preserved dimensions and coordinates, e.g.,
    >>> array = xr.DataArray([1, 2, 3], coords=[("x", [0.1, 0.2, 0.3])])
    >>> magnitude(array, -array)
    <xarray.DataArray (x: 3)> Size: 24B
    array([1.41421356, 2.82842712, 4.24264069])
    Coordinates:
      * x        (x) float64 24B 0.1 0.2 0.3
    Plain scalars, numpy arrays and a mix of these with xarray objects are also
    supported:
    >>> magnitude(3, 4)
    np.float64(5.0)
    >>> magnitude(3, np.array([0, 4]))
    array([3., 5.])
    >>> magnitude(array, 0)
    <xarray.DataArray (x: 3)> Size: 24B
    array([1., 2., 3.])
    Coordinates:
      * x        (x) float64 24B 0.1 0.2 0.3
    Other examples of how you could use ``apply_ufunc`` to write functions to
    (very nearly) replicate existing xarray functionality:
    Compute the mean (``.mean``) over one dimension:
    >>> def mean(obj, dim):
    ...     # note: apply always moves core dimensions to the end
    ...     return apply_ufunc(
    ...         np.mean, obj, input_core_dims=[[dim]], kwargs={"axis": -1}
    ...     )
    ...
    Inner product over a specific dimension (like :py:func:`dot`):
    >>> def _inner(x, y):
    ...     result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])
    ...     return result[..., 0, 0]
    ...
    >>> def inner_product(a, b, dim):
    ...     return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])
    ...
    Stack objects along a new dimension (like :py:func:`concat`):
    >>> def stack(objects, dim, new_coord):
    ...     # note: this version does not stack coordinates
    ...     func = lambda *x: np.stack(x, axis=-1)
    ...     result = apply_ufunc(
    ...         func,
    ...         *objects,
    ...         output_core_dims=[[dim]],
    ...         join="outer",
    ...         dataset_fill_value=np.nan
    ...     )
    ...     result[dim] = new_coord
    ...     return result
    ...
    If your function is not vectorized but can be applied only to core
    dimensions, you can use ``vectorize=True`` to turn it into a vectorized
    function. This wraps :py:func:`numpy.vectorize`, so the operation isn't
    terribly fast. Here we'll use it to calculate the distance between
    empirical samples from two probability distributions, using a scipy
    function that needs to be applied to vectors:
    >>> import scipy.stats
    >>> def earth_mover_distance(first_samples, second_samples, dim="ensemble"):
    ...     return apply_ufunc(
    ...         scipy.stats.wasserstein_distance,
    ...         first_samples,
    ...         second_samples,
    ...         input_core_dims=[[dim], [dim]],
    ...         vectorize=True,
    ...     )
    ...
    Most of NumPy's builtin functions already broadcast their inputs
    appropriately for use in ``apply_ufunc``. You may find helper functions such as
    :py:func:`numpy.broadcast_arrays` helpful in writing your function. ``apply_ufunc`` also
    works well with :py:func:`numba.vectorize` and :py:func:`numba.guvectorize`.
    See Also
    --------
    numpy.broadcast_arrays
    numba.vectorize
    numba.guvectorize
    dask.array.apply_gufunc
    xarray.map_blocks
    Notes
    -----
    :ref:`dask.automatic-parallelization`
        User guide describing :py:func:`apply_ufunc` and :py:func:`map_blocks`.
    :doc:`xarray-tutorial:advanced/apply_ufunc/apply_ufunc`
        Advanced tutorial on applying numpy functions using :py:func:`apply_ufunc`
    References
    ----------
    .. [1] https://numpy.org/doc/stable/reference/ufuncs.html
    .. [2] https://numpy.org/doc/stable/reference/c-api/generalized-ufuncs.html
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_apply_ufunc.txt 
 | 
	def apply_ufunc(
    func: Callable,
    *args: Any,
    input_core_dims: Sequence[Sequence] | None = None,
    output_core_dims: Sequence[Sequence] | None = ((),),
    exclude_dims: Set = frozenset(),
    vectorize: bool = False,
    join: JoinOptions = "exact",
    dataset_join: str = "exact",
    dataset_fill_value: object = _NO_FILL_VALUE,
    keep_attrs: bool | str | None = None,
    kwargs: Mapping | None = None,
    dask: Literal["forbidden", "allowed", "parallelized"] = "forbidden",
    output_dtypes: Sequence | None = None,
    output_sizes: Mapping[Any, int] | None = None,
    meta: Any = None,
    dask_gufunc_kwargs: dict[str, Any] | None = None,
    on_missing_core_dim: MissingCoreDimOptions = "raise",
) -> Any:
    """Apply a vectorized function for unlabeled arrays on xarray objects.
    The function will be mapped over the data variable(s) of the input
    arguments using xarray's standard rules for labeled computation, including
    alignment, broadcasting, looping over GroupBy/Dataset variables, and
    merging of coordinates.
    Parameters
    ----------
    func : callable
        Function to call like ``func(*args, **kwargs)`` on unlabeled arrays
        (``.data``) that returns an array or tuple of arrays. If multiple
        arguments with non-matching dimensions are supplied, this function is
        expected to vectorize (broadcast) over axes of positional arguments in
        the style of NumPy universal functions [1]_ (if this is not the case,
        set ``vectorize=True``). If this function returns multiple outputs, you
        must set ``output_core_dims`` as well.
    *args : Dataset, DataArray, DataArrayGroupBy, DatasetGroupBy, Variable, \
        numpy.ndarray, dask.array.Array or scalar
        Mix of labeled and/or unlabeled arrays to which to apply the function.
    input_core_dims : sequence of sequence, optional
        List of the same length as ``args`` giving the list of core dimensions
        on each input argument that should not be broadcast. By default, we
        assume there are no core dimensions on any input arguments.
        For example, ``input_core_dims=[[], ['time']]`` indicates that all
        dimensions on the first argument and all dimensions other than 'time'
        on the second argument should be broadcast.
        Core dimensions are automatically moved to the last axes of input
        variables before applying ``func``, which facilitates using NumPy style
        generalized ufuncs [2]_.
    output_core_dims : list of tuple, optional
        List of the same length as the number of output arguments from
        ``func``, giving the list of core dimensions on each output that were
        not broadcast on the inputs. By default, we assume that ``func``
        outputs exactly one array, with axes corresponding to each broadcast
        dimension.
        Core dimensions are assumed to appear as the last dimensions of each
        output in the provided order.
    exclude_dims : set, optional
        Core dimensions on the inputs to exclude from alignment and
        broadcasting entirely. Any input coordinates along these dimensions
        will be dropped. Each excluded dimension must also appear in
        ``input_core_dims`` for at least one argument. Only dimensions listed
        here are allowed to change size between input and output objects.
    vectorize : bool, optional
        If True, then assume ``func`` only takes arrays defined over core
        dimensions as input and vectorize it automatically with
        :py:func:`numpy.vectorize`. This option exists for convenience, but is
        almost always slower than supplying a pre-vectorized function.
    join : {"outer", "inner", "left", "right", "exact"}, default: "exact"
        Method for joining the indexes of the passed objects along each
        dimension, and the variables of Dataset objects with mismatched
        data variables:
        - 'outer': use the union of object indexes
        - 'inner': use the intersection of object indexes
        - 'left': use indexes from the first object with each dimension
        - 'right': use indexes from the last object with each dimension
        - 'exact': raise `ValueError` instead of aligning when indexes to be
          aligned are not equal
    dataset_join : {"outer", "inner", "left", "right", "exact"}, default: "exact"
        Method for joining variables of Dataset objects with mismatched
        data variables.
        - 'outer': take variables from both Dataset objects
        - 'inner': take only overlapped variables
        - 'left': take only variables from the first object
        - 'right': take only variables from the last object
        - 'exact': data variables on all Dataset objects must match exactly
    dataset_fill_value : optional
        Value used in place of missing variables on Dataset inputs when the
        datasets do not share the exact same ``data_vars``. Required if
        ``dataset_join not in {'inner', 'exact'}``, otherwise ignored.
    keep_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", "override"} or bool, optional
        - 'drop' or False: empty attrs on returned xarray object.
        - 'identical': all attrs must be the same on every object.
        - 'no_conflicts': attrs from all objects are combined, any that have the same name must also have the same value.
        - 'drop_conflicts': attrs from all objects are combined, any that have the same name but different values are dropped.
        - 'override' or True: skip comparing and copy attrs from the first object to the result.
    kwargs : dict, optional
        Optional keyword arguments passed directly on to call ``func``.
    dask : {"forbidden", "allowed", "parallelized"}, default: "forbidden"
        How to handle applying to objects containing lazy data in the form of
        dask arrays:
        - 'forbidden' (default): raise an error if a dask array is encountered.
        - 'allowed': pass dask arrays directly on to ``func``. Prefer this option if
          ``func`` natively supports dask arrays.
        - 'parallelized': automatically parallelize ``func`` if any of the
          inputs are a dask array by using :py:func:`dask.array.apply_gufunc`. Multiple output
          arguments are supported. Only use this option if ``func`` does not natively
          support dask arrays (e.g. converts them to numpy arrays).
    dask_gufunc_kwargs : dict, optional
        Optional keyword arguments passed to :py:func:`dask.array.apply_gufunc` if
        dask='parallelized'. Possible keywords are ``output_sizes``, ``allow_rechunk``
        and ``meta``.
    output_dtypes : list of dtype, optional
        Optional list of output dtypes. Only used if ``dask='parallelized'`` or
        ``vectorize=True``.
    output_sizes : dict, optional
        Optional mapping from dimension names to sizes for outputs. Only used
        if dask='parallelized' and new dimensions (not found on inputs) appear
        on outputs. ``output_sizes`` should be given in the ``dask_gufunc_kwargs``
        parameter. It will be removed as a direct parameter in a future version.
    meta : optional
        Size-0 object representing the type of array wrapped by dask array. Passed on to
        :py:func:`dask.array.apply_gufunc`. ``meta`` should be given in the
        ``dask_gufunc_kwargs`` parameter. It will be removed as a direct parameter
        in a future version.
    on_missing_core_dim : {"raise", "copy", "drop"}, default: "raise"
        How to handle missing core dimensions on input variables.
    Returns
    -------
    Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
    numpy.ndarray, the first type on that list to appear on an input.
    Notes
    -----
    This function is designed for the more common case where ``func`` can work on numpy
    arrays. If ``func`` needs to manipulate a whole xarray object subset to each block
    it is possible to use :py:func:`xarray.map_blocks`.
    Note that due to the overhead :py:func:`xarray.map_blocks` is considerably slower than ``apply_ufunc``.
    Examples
    --------
    Calculate the vector magnitude of two arguments:
    >>> def magnitude(a, b):
    ...     func = lambda x, y: np.sqrt(x**2 + y**2)
    ...     return xr.apply_ufunc(func, a, b)
    ...
    You can now apply ``magnitude()`` to :py:class:`DataArray` and :py:class:`Dataset`
    objects, with automatically preserved dimensions and coordinates, e.g.,
    >>> array = xr.DataArray([1, 2, 3], coords=[("x", [0.1, 0.2, 0.3])])
    >>> magnitude(array, -array)
    <xarray.DataArray (x: 3)> Size: 24B
    array([1.41421356, 2.82842712, 4.24264069])
    Coordinates:
      * x        (x) float64 24B 0.1 0.2 0.3
    Plain scalars, numpy arrays and a mix of these with xarray objects are also
    supported:
    >>> magnitude(3, 4)
    np.float64(5.0)
    >>> magnitude(3, np.array([0, 4]))
    array([3., 5.])
    >>> magnitude(array, 0)
    <xarray.DataArray (x: 3)> Size: 24B
    array([1., 2., 3.])
    Coordinates:
      * x        (x) float64 24B 0.1 0.2 0.3
    Other examples of how you could use ``apply_ufunc`` to write functions to
    (very nearly) replicate existing xarray functionality:
    Compute the mean (``.mean``) over one dimension:
    >>> def mean(obj, dim):
    ...     # note: apply always moves core dimensions to the end
    ...     return apply_ufunc(
    ...         np.mean, obj, input_core_dims=[[dim]], kwargs={"axis": -1}
    ...     )
    ...
    Inner product over a specific dimension (like :py:func:`dot`):
    >>> def _inner(x, y):
    ...     result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])
    ...     return result[..., 0, 0]
    ...
    >>> def inner_product(a, b, dim):
    ...     return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])
    ...
    Stack objects along a new dimension (like :py:func:`concat`):
    >>> def stack(objects, dim, new_coord):
    ...     # note: this version does not stack coordinates
    ...     func = lambda *x: np.stack(x, axis=-1)
    ...     result = apply_ufunc(
    ...         func,
    ...         *objects,
    ...         output_core_dims=[[dim]],
    ...         join="outer",
    ...         dataset_fill_value=np.nan
    ...     )
    ...     result[dim] = new_coord
    ...     return result
    ...
    If your function is not vectorized but can be applied only to core
    dimensions, you can use ``vectorize=True`` to turn it into a vectorized
    function. This wraps :py:func:`numpy.vectorize`, so the operation isn't
    terribly fast. Here we'll use it to calculate the distance between
    empirical samples from two probability distributions, using a scipy
    function that needs to be applied to vectors:
    >>> import scipy.stats
    >>> def earth_mover_distance(first_samples, second_samples, dim="ensemble"):
    ...     return apply_ufunc(
    ...         scipy.stats.wasserstein_distance,
    ...         first_samples,
    ...         second_samples,
    ...         input_core_dims=[[dim], [dim]],
    ...         vectorize=True,
    ...     )
    ...
    Most of NumPy's builtin functions already broadcast their inputs
    appropriately for use in ``apply_ufunc``. You may find helper functions such as
    :py:func:`numpy.broadcast_arrays` helpful in writing your function. ``apply_ufunc`` also
    works well with :py:func:`numba.vectorize` and :py:func:`numba.guvectorize`.
    See Also
    --------
    numpy.broadcast_arrays
    numba.vectorize
    numba.guvectorize
    dask.array.apply_gufunc
    xarray.map_blocks
    Notes
    -----
    :ref:`dask.automatic-parallelization`
        User guide describing :py:func:`apply_ufunc` and :py:func:`map_blocks`.
    :doc:`xarray-tutorial:advanced/apply_ufunc/apply_ufunc`
        Advanced tutorial on applying numpy functions using :py:func:`apply_ufunc`
    References
    ----------
    .. [1] https://numpy.org/doc/stable/reference/ufuncs.html
    .. [2] https://numpy.org/doc/stable/reference/c-api/generalized-ufuncs.html
    """
    from xarray.core.dataarray import DataArray
    from xarray.core.groupby import GroupBy
    from xarray.core.variable import Variable
    if input_core_dims is None:
        input_core_dims = ((),) * (len(args))
    elif len(input_core_dims) != len(args):
        raise ValueError(
            f"input_core_dims must be None or a tuple with the length same to "
            f"the number of arguments. "
            f"Given {len(input_core_dims)} input_core_dims: {input_core_dims}, "
            f" but number of args is {len(args)}."
        )
    if kwargs is None:
        kwargs = {}
    signature = _UFuncSignature(input_core_dims, output_core_dims)
    if exclude_dims:
        if not isinstance(exclude_dims, set):
            raise TypeError(
                f"Expected exclude_dims to be a 'set'. Received '{type(exclude_dims).__name__}' instead."
            )
        if not exclude_dims <= signature.all_core_dims:
            raise ValueError(
                f"each dimension in `exclude_dims` must also be a "
                f"core dimension in the function signature. "
                f"Please make {(exclude_dims - signature.all_core_dims)} a core dimension"
            )
    # handle dask_gufunc_kwargs
    if dask == "parallelized":
        if dask_gufunc_kwargs is None:
            dask_gufunc_kwargs = {}
        else:
            dask_gufunc_kwargs = dask_gufunc_kwargs.copy()
        # todo: remove warnings after deprecation cycle
        if meta is not None:
            warnings.warn(
                "``meta`` should be given in the ``dask_gufunc_kwargs`` parameter."
                " It will be removed as direct parameter in a future version.",
                FutureWarning,
                stacklevel=2,
            )
            dask_gufunc_kwargs.setdefault("meta", meta)
        if output_sizes is not None:
            warnings.warn(
                "``output_sizes`` should be given in the ``dask_gufunc_kwargs`` "
                "parameter. It will be removed as direct parameter in a future "
                "version.",
                FutureWarning,
                stacklevel=2,
            )
            dask_gufunc_kwargs.setdefault("output_sizes", output_sizes)
    if kwargs:
        func = functools.partial(func, **kwargs)
    if keep_attrs is None:
        keep_attrs = _get_keep_attrs(default=False)
    if isinstance(keep_attrs, bool):
        keep_attrs = "override" if keep_attrs else "drop"
    variables_vfunc = functools.partial(
        apply_variable_ufunc,
        func,
        signature=signature,
        exclude_dims=exclude_dims,
        keep_attrs=keep_attrs,
        dask=dask,
        vectorize=vectorize,
        output_dtypes=output_dtypes,
        dask_gufunc_kwargs=dask_gufunc_kwargs,
    )
    # feed groupby-apply_ufunc through apply_groupby_func
    if any(isinstance(a, GroupBy) for a in args):
        this_apply = functools.partial(
            apply_ufunc,
            func,
            input_core_dims=input_core_dims,
            output_core_dims=output_core_dims,
            exclude_dims=exclude_dims,
            join=join,
            dataset_join=dataset_join,
            dataset_fill_value=dataset_fill_value,
            keep_attrs=keep_attrs,
            dask=dask,
            vectorize=vectorize,
            output_dtypes=output_dtypes,
            dask_gufunc_kwargs=dask_gufunc_kwargs,
        )
        return apply_groupby_func(this_apply, *args)
    # feed datasets apply_variable_ufunc through apply_dataset_vfunc
    elif any(is_dict_like(a) for a in args):
        return apply_dataset_vfunc(
            variables_vfunc,
            *args,
            signature=signature,
            join=join,
            exclude_dims=exclude_dims,
            dataset_join=dataset_join,
            fill_value=dataset_fill_value,
            keep_attrs=keep_attrs,
            on_missing_core_dim=on_missing_core_dim,
        )
    # feed DataArray apply_variable_ufunc through apply_dataarray_vfunc
    elif any(isinstance(a, DataArray) for a in args):
        return apply_dataarray_vfunc(
            variables_vfunc,
            *args,
            signature=signature,
            join=join,
            exclude_dims=exclude_dims,
            keep_attrs=keep_attrs,
        )
    # feed Variables directly through apply_variable_ufunc
    elif any(isinstance(a, Variable) for a in args):
        return variables_vfunc(*args)
    else:
        # feed anything else through apply_array_ufunc
        return apply_array_ufunc(func, *args, dask=dask)
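# --- Hedged usage sketch (not part of the source above): the dask="parallelized"
# path described in the docstring. Chunk sizes and array contents are
# illustrative assumptions; requires dask to be installed.
import numpy as np
import xarray as xr

da = xr.DataArray(
    np.random.default_rng(0).random((4, 6)), dims=("y", "x")
).chunk({"y": 2})

def demean(arr):
    # plain numpy function; the core dimension "x" arrives as the last axis
    return arr - arr.mean(axis=-1, keepdims=True)

out = xr.apply_ufunc(
    demean,
    da,
    input_core_dims=[["x"]],
    output_core_dims=[["x"]],
    dask="parallelized",
    output_dtypes=[da.dtype],
)
print(out.compute().dims)  # ('y', 'x')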
 
 | 
	apply_ufunc 
 | 
	File-Level 
 | 
					
	xarray 
 | 
	23 
 | 
	xarray/coding/cftime_offsets.py 
 | 
	def cftime_range(
    start=None,
    end=None,
    periods=None,
    freq=None,
    normalize=False,
    name=None,
    closed: NoDefault | SideOptions = no_default,
    inclusive: None | InclusiveOptions = None,
    calendar="standard",
) -> CFTimeIndex:
    """Return a fixed frequency CFTimeIndex.
    Parameters
    ----------
    start : str or cftime.datetime, optional
        Left bound for generating dates.
    end : str or cftime.datetime, optional
        Right bound for generating dates.
    periods : int, optional
        Number of periods to generate.
    freq : str or None, default: "D"
        Frequency strings can have multiples, e.g. "5h" and negative values, e.g. "-1D".
    normalize : bool, default: False
        Normalize start/end dates to midnight before generating date range.
    name : str, default: None
        Name of the resulting index
    closed : {None, "left", "right"}, default: "NO_DEFAULT"
        Make the interval closed with respect to the given frequency to the
        "left", "right", or both sides (None).
        .. deprecated:: 2023.02.0
            Following pandas, the ``closed`` parameter is deprecated in favor
            of the ``inclusive`` parameter, and will be removed in a future
            version of xarray.
    inclusive : {None, "both", "neither", "left", "right"}, default None
        Include boundaries; whether to set each bound as closed or open.
        .. versionadded:: 2023.02.0
    calendar : str, default: "standard"
        Calendar type for the datetimes.
    Returns
    -------
    CFTimeIndex
    Notes
    -----
    This function is an analog of ``pandas.date_range`` for use in generating
    sequences of ``cftime.datetime`` objects.  It supports most of the
    features of ``pandas.date_range`` (e.g. specifying how the index is
    ``closed`` on either side, or whether or not to ``normalize`` the start and
    end bounds); however, there are some notable exceptions:
    - You cannot specify a ``tz`` (time zone) argument.
    - Start or end dates specified as partial-datetime strings must use the
      `ISO-8601 format <https://en.wikipedia.org/wiki/ISO_8601>`_.
    - It supports many, but not all, frequencies supported by
      ``pandas.date_range``.  For example it does not currently support any of
      the business-related or semi-monthly frequencies.
    - Compound sub-monthly frequencies are not supported, e.g. '1H1min', as
      these can easily be written in terms of the finest common resolution,
      e.g. '61min'.
    Valid simple frequency strings for use with ``cftime``-calendars include
    any multiples of the following.
    +--------+--------------------------+
    | Alias  | Description              |
    +========+==========================+
    | YE     | Year-end frequency       |
    +--------+--------------------------+
    | YS     | Year-start frequency     |
    +--------+--------------------------+
    | QE     | Quarter-end frequency    |
    +--------+--------------------------+
    | QS     | Quarter-start frequency  |
    +--------+--------------------------+
    | ME     | Month-end frequency      |
    +--------+--------------------------+
    | MS     | Month-start frequency    |
    +--------+--------------------------+
    | D      | Day frequency            |
    +--------+--------------------------+
    | h      | Hour frequency           |
    +--------+--------------------------+
    | min    | Minute frequency         |
    +--------+--------------------------+
    | s      | Second frequency         |
    +--------+--------------------------+
    | ms     | Millisecond frequency    |
    +--------+--------------------------+
    | us     | Microsecond frequency    |
    +--------+--------------------------+
    Any multiples of the following anchored offsets are also supported.
    +------------+--------------------------------------------------------------------+
    | Alias      | Description                                                        |
    +============+====================================================================+
    | Y(E,S)-JAN | Annual frequency, anchored at the (end, beginning) of January      |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-FEB | Annual frequency, anchored at the (end, beginning) of February     |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-MAR | Annual frequency, anchored at the (end, beginning) of March        |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-APR | Annual frequency, anchored at the (end, beginning) of April        |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-MAY | Annual frequency, anchored at the (end, beginning) of May          |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-JUN | Annual frequency, anchored at the (end, beginning) of June         |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-JUL | Annual frequency, anchored at the (end, beginning) of July         |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-AUG | Annual frequency, anchored at the (end, beginning) of August       |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-SEP | Annual frequency, anchored at the (end, beginning) of September    |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-OCT | Annual frequency, anchored at the (end, beginning) of October      |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-NOV | Annual frequency, anchored at the (end, beginning) of November     |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-DEC | Annual frequency, anchored at the (end, beginning) of December     |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-JAN | Quarter frequency, anchored at the (end, beginning) of January     |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-FEB | Quarter frequency, anchored at the (end, beginning) of February    |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-MAR | Quarter frequency, anchored at the (end, beginning) of March       |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-APR | Quarter frequency, anchored at the (end, beginning) of April       |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-MAY | Quarter frequency, anchored at the (end, beginning) of May         |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-JUN | Quarter frequency, anchored at the (end, beginning) of June        |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-JUL | Quarter frequency, anchored at the (end, beginning) of July        |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-AUG | Quarter frequency, anchored at the (end, beginning) of August      |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-SEP | Quarter frequency, anchored at the (end, beginning) of September   |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-OCT | Quarter frequency, anchored at the (end, beginning) of October     |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-NOV | Quarter frequency, anchored at the (end, beginning) of November    |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-DEC | Quarter frequency, anchored at the (end, beginning) of December    |
    +------------+--------------------------------------------------------------------+
    Finally, the following calendar aliases are supported.
    +--------------------------------+---------------------------------------+
    | Alias                          | Date type                             |
    +================================+=======================================+
    | standard, gregorian            | ``cftime.DatetimeGregorian``          |
    +--------------------------------+---------------------------------------+
    | proleptic_gregorian            | ``cftime.DatetimeProlepticGregorian`` |
    +--------------------------------+---------------------------------------+
    | noleap, 365_day                | ``cftime.DatetimeNoLeap``             |
    +--------------------------------+---------------------------------------+
    | all_leap, 366_day              | ``cftime.DatetimeAllLeap``            |
    +--------------------------------+---------------------------------------+
    | 360_day                        | ``cftime.Datetime360Day``             |
    +--------------------------------+---------------------------------------+
    | julian                         | ``cftime.DatetimeJulian``             |
    +--------------------------------+---------------------------------------+
    Examples
    --------
    This function returns a ``CFTimeIndex``, populated with ``cftime.datetime``
    objects associated with the specified calendar type, e.g.
    >>> xr.cftime_range(start="2000", periods=6, freq="2MS", calendar="noleap")
    CFTimeIndex([2000-01-01 00:00:00, 2000-03-01 00:00:00, 2000-05-01 00:00:00,
                 2000-07-01 00:00:00, 2000-09-01 00:00:00, 2000-11-01 00:00:00],
                dtype='object', length=6, calendar='noleap', freq='2MS')
    As in the standard pandas function, three of the ``start``, ``end``,
    ``periods``, or ``freq`` arguments must be specified at a given time, with
    the other set to ``None``.  See the `pandas documentation
    <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.date_range.html>`_
    for more examples of the behavior of ``date_range`` with each of the
    parameters.
    See Also
    --------
    pandas.date_range
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_cftime_range.txt 
 | 
	def cftime_range(
    start=None,
    end=None,
    periods=None,
    freq=None,
    normalize=False,
    name=None,
    closed: NoDefault | SideOptions = no_default,
    inclusive: None | InclusiveOptions = None,
    calendar="standard",
) -> CFTimeIndex:
    """Return a fixed frequency CFTimeIndex.
    Parameters
    ----------
    start : str or cftime.datetime, optional
        Left bound for generating dates.
    end : str or cftime.datetime, optional
        Right bound for generating dates.
    periods : int, optional
        Number of periods to generate.
    freq : str or None, default: "D"
        Frequency strings can have multiples, e.g. "5h", and negative values, e.g. "-1D".
    normalize : bool, default: False
        Normalize start/end dates to midnight before generating date range.
    name : str, default: None
        Name of the resulting index
    closed : {None, "left", "right"}, default: "NO_DEFAULT"
        Make the interval closed with respect to the given frequency to the
        "left", "right", or both sides (None).
        .. deprecated:: 2023.02.0
            Following pandas, the ``closed`` parameter is deprecated in favor
            of the ``inclusive`` parameter, and will be removed in a future
            version of xarray.
    inclusive : {None, "both", "neither", "left", "right"}, default None
        Include boundaries; whether to set each bound as closed or open.
        .. versionadded:: 2023.02.0
    calendar : str, default: "standard"
        Calendar type for the datetimes.
    Returns
    -------
    CFTimeIndex
    Notes
    -----
    This function is an analog of ``pandas.date_range`` for use in generating
    sequences of ``cftime.datetime`` objects.  It supports most of the
    features of ``pandas.date_range`` (e.g. specifying how the index is
    ``closed`` on either side, or whether or not to ``normalize`` the start and
    end bounds); however, there are some notable exceptions:
    - You cannot specify a ``tz`` (time zone) argument.
    - Start or end dates specified as partial-datetime strings must use the
      `ISO-8601 format <https://en.wikipedia.org/wiki/ISO_8601>`_.
    - It supports many, but not all, frequencies supported by
      ``pandas.date_range``.  For example it does not currently support any of
      the business-related or semi-monthly frequencies.
    - Compound sub-monthly frequencies are not supported, e.g. '1H1min', as
      these can easily be written in terms of the finest common resolution,
      e.g. '61min'.
    Valid simple frequency strings for use with ``cftime``-calendars include
    any multiples of the following.
    +--------+--------------------------+
    | Alias  | Description              |
    +========+==========================+
    | YE     | Year-end frequency       |
    +--------+--------------------------+
    | YS     | Year-start frequency     |
    +--------+--------------------------+
    | QE     | Quarter-end frequency    |
    +--------+--------------------------+
    | QS     | Quarter-start frequency  |
    +--------+--------------------------+
    | ME     | Month-end frequency      |
    +--------+--------------------------+
    | MS     | Month-start frequency    |
    +--------+--------------------------+
    | D      | Day frequency            |
    +--------+--------------------------+
    | h      | Hour frequency           |
    +--------+--------------------------+
    | min    | Minute frequency         |
    +--------+--------------------------+
    | s      | Second frequency         |
    +--------+--------------------------+
    | ms     | Millisecond frequency    |
    +--------+--------------------------+
    | us     | Microsecond frequency    |
    +--------+--------------------------+
    Any multiples of the following anchored offsets are also supported.
    +------------+--------------------------------------------------------------------+
    | Alias      | Description                                                        |
    +============+====================================================================+
    | Y(E,S)-JAN | Annual frequency, anchored at the (end, beginning) of January      |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-FEB | Annual frequency, anchored at the (end, beginning) of February     |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-MAR | Annual frequency, anchored at the (end, beginning) of March        |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-APR | Annual frequency, anchored at the (end, beginning) of April        |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-MAY | Annual frequency, anchored at the (end, beginning) of May          |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-JUN | Annual frequency, anchored at the (end, beginning) of June         |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-JUL | Annual frequency, anchored at the (end, beginning) of July         |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-AUG | Annual frequency, anchored at the (end, beginning) of August       |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-SEP | Annual frequency, anchored at the (end, beginning) of September    |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-OCT | Annual frequency, anchored at the (end, beginning) of October      |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-NOV | Annual frequency, anchored at the (end, beginning) of November     |
    +------------+--------------------------------------------------------------------+
    | Y(E,S)-DEC | Annual frequency, anchored at the (end, beginning) of December     |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-JAN | Quarter frequency, anchored at the (end, beginning) of January     |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-FEB | Quarter frequency, anchored at the (end, beginning) of February    |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-MAR | Quarter frequency, anchored at the (end, beginning) of March       |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-APR | Quarter frequency, anchored at the (end, beginning) of April       |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-MAY | Quarter frequency, anchored at the (end, beginning) of May         |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-JUN | Quarter frequency, anchored at the (end, beginning) of June        |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-JUL | Quarter frequency, anchored at the (end, beginning) of July        |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-AUG | Quarter frequency, anchored at the (end, beginning) of August      |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-SEP | Quarter frequency, anchored at the (end, beginning) of September   |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-OCT | Quarter frequency, anchored at the (end, beginning) of October     |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-NOV | Quarter frequency, anchored at the (end, beginning) of November    |
    +------------+--------------------------------------------------------------------+
    | Q(E,S)-DEC | Quarter frequency, anchored at the (end, beginning) of December    |
    +------------+--------------------------------------------------------------------+
    Finally, the following calendar aliases are supported.
    +--------------------------------+---------------------------------------+
    | Alias                          | Date type                             |
    +================================+=======================================+
    | standard, gregorian            | ``cftime.DatetimeGregorian``          |
    +--------------------------------+---------------------------------------+
    | proleptic_gregorian            | ``cftime.DatetimeProlepticGregorian`` |
    +--------------------------------+---------------------------------------+
    | noleap, 365_day                | ``cftime.DatetimeNoLeap``             |
    +--------------------------------+---------------------------------------+
    | all_leap, 366_day              | ``cftime.DatetimeAllLeap``            |
    +--------------------------------+---------------------------------------+
    | 360_day                        | ``cftime.Datetime360Day``             |
    +--------------------------------+---------------------------------------+
    | julian                         | ``cftime.DatetimeJulian``             |
    +--------------------------------+---------------------------------------+
    Examples
    --------
    This function returns a ``CFTimeIndex``, populated with ``cftime.datetime``
    objects associated with the specified calendar type, e.g.
    >>> xr.cftime_range(start="2000", periods=6, freq="2MS", calendar="noleap")
    CFTimeIndex([2000-01-01 00:00:00, 2000-03-01 00:00:00, 2000-05-01 00:00:00,
                 2000-07-01 00:00:00, 2000-09-01 00:00:00, 2000-11-01 00:00:00],
                dtype='object', length=6, calendar='noleap', freq='2MS')
    As in the standard pandas function, three of the ``start``, ``end``,
    ``periods``, or ``freq`` arguments must be specified at a given time, with
    the other set to ``None``.  See the `pandas documentation
    <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.date_range.html>`_
    for more examples of the behavior of ``date_range`` with each of the
    parameters.
    See Also
    --------
    pandas.date_range
    """
    if freq is None and any(arg is None for arg in [periods, start, end]):
        freq = "D"
    # Adapted from pandas.core.indexes.datetimes._generate_range.
    if count_not_none(start, end, periods, freq) != 3:
        raise ValueError(
            "Of the arguments 'start', 'end', 'periods', and 'freq', three "
            "must be specified at a time."
        )
    if start is not None:
        start = to_cftime_datetime(start, calendar)
        start = _maybe_normalize_date(start, normalize)
    if end is not None:
        end = to_cftime_datetime(end, calendar)
        end = _maybe_normalize_date(end, normalize)
    if freq is None:
        dates = _generate_linear_range(start, end, periods)
    else:
        offset = to_offset(freq)
        dates = np.array(list(_generate_range(start, end, periods, offset)))
    inclusive = _infer_inclusive(closed, inclusive)
    if inclusive == "neither":
        left_closed = False
        right_closed = False
    elif inclusive == "left":
        left_closed = True
        right_closed = False
    elif inclusive == "right":
        left_closed = False
        right_closed = True
    elif inclusive == "both":
        left_closed = True
        right_closed = True
    else:
        raise ValueError(
            f"Argument `inclusive` must be either 'both', 'neither', "
            f"'left', 'right', or None.  Got {inclusive}."
        )
    if not left_closed and len(dates) and start is not None and dates[0] == start:
        dates = dates[1:]
    if not right_closed and len(dates) and end is not None and dates[-1] == end:
        dates = dates[:-1]
    return CFTimeIndex(dates, name=name)
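
A minimal sketch of the ``inclusive`` behaviour documented above, assuming an
environment with xarray and cftime installed; it is illustrative only and not part
of the library source:

import xarray as xr

# Three of start/end/periods/freq are given; the fourth stays None.
# With the defaults both endpoints are kept (equivalent to inclusive="both").
both = xr.cftime_range(start="2000-01-01", end="2000-01-05", freq="D")

# inclusive="left" drops the right endpoint (2000-01-05).
left = xr.cftime_range(
    start="2000-01-01", end="2000-01-05", freq="D", inclusive="left"
)

print(len(both), len(left))  # expected: 5 4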
 
 | 
	cftime_range 
 | 
	File-Level 
 | 
					
	xarray 
 | 
	25 
 | 
	xarray/core/combine.py 
 | 
	def combine_by_coords(
    data_objects: Iterable[Dataset | DataArray] = [],
    compat: CompatOptions = "no_conflicts",
    data_vars: Literal["all", "minimal", "different"] | list[str] = "all",
    coords: str = "different",
    fill_value: object = dtypes.NA,
    join: JoinOptions = "outer",
    combine_attrs: CombineAttrsOptions = "no_conflicts",
) -> Dataset | DataArray:
    """
    Attempt to auto-magically combine the given datasets (or data arrays)
    into one by using dimension coordinates.
    This function attempts to combine a group of datasets along any number of
    dimensions into a single entity by inspecting coords and metadata and using
    a combination of concat and merge.
    Will attempt to order the datasets such that the values in their dimension
    coordinates are monotonic along all dimensions. If it cannot determine the
    order in which to concatenate the datasets, it will raise a ValueError.
    Non-coordinate dimensions will be ignored, as will any coordinate
    dimensions which do not vary between each dataset.
    Aligns coordinates, but different variables on datasets can cause it
    to fail under some scenarios. In complex cases, you may need to clean up
    your data and use concat/merge explicitly (also see `combine_nested`).
    Works well if, for example, you have N years of data and M data variables,
    and each combination of a distinct time period and set of data variables is
    saved as its own dataset. Also useful if you have a simulation which is
    parallelized in multiple dimensions, but has global coordinates saved in
    each file specifying the positions of points within the global domain.
    Parameters
    ----------
    data_objects : Iterable of Datasets or DataArrays
        Data objects to combine.
    compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional
        String indicating how to compare variables of the same name for
        potential conflicts:
        - "broadcast_equals": all values must be equal when variables are
          broadcast against each other to ensure common dimensions.
        - "equals": all values and dimensions must be the same.
        - "identical": all values, dimensions and attributes must be the
          same.
        - "no_conflicts": only values which are not null in both datasets
          must be equal. The returned dataset then contains the combination
          of all non-null values.
        - "override": skip comparing and pick variable from first dataset
    data_vars : {"minimal", "different", "all" or list of str}, optional
        These data variables will be concatenated together:
        - "minimal": Only data variables in which the dimension already
          appears are included.
        - "different": Data variables which are not equal (ignoring
          attributes) across all datasets are also concatenated (as well as
          all for which dimension already appears). Beware: this option may
          load the data payload of data variables into memory if they are not
          already loaded.
        - "all": All data variables will be concatenated.
        - list of str: The listed data variables will be concatenated, in
          addition to the "minimal" data variables.
        If objects are DataArrays, `data_vars` must be "all".
    coords : {"minimal", "different", "all"} or list of str, optional
        As per the "data_vars" kwarg, but for coordinate variables.
    fill_value : scalar or dict-like, optional
        Value to use for newly missing values. If a dict-like, maps
        variable names to fill values. Use a data array's name to
        refer to its values. If None, raises a ValueError if
        the passed Datasets do not create a complete hypercube.
    join : {"outer", "inner", "left", "right", "exact"}, optional
        String indicating how to combine differing indexes in objects
        - "outer": use the union of object indexes
        - "inner": use the intersection of object indexes
        - "left": use indexes from the first object with each dimension
        - "right": use indexes from the last object with each dimension
        - "exact": instead of aligning, raise `ValueError` when indexes to be
          aligned are not equal
        - "override": if indexes are of same size, rewrite indexes to be
          those of the first object with that dimension. Indexes for the same
          dimension must have the same size in all objects.
    combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
                     "override"} or callable, default: "no_conflicts"
        A callable or a string indicating how to combine attrs of the objects being
        merged:
        - "drop": empty attrs on returned Dataset.
        - "identical": all attrs must be the same on every object.
        - "no_conflicts": attrs from all objects are combined, any that have
          the same name must also have the same value.
        - "drop_conflicts": attrs from all objects are combined, any that have
          the same name but different values are dropped.
        - "override": skip comparing and copy attrs from the first dataset to
          the result.
        If a callable, it must expect a sequence of ``attrs`` dicts and a context object
        as its only parameters.
    Returns
    -------
    combined : xarray.Dataset or xarray.DataArray
        Will return a Dataset unless all the inputs are unnamed DataArrays, in which case a
        DataArray will be returned.
    See also
    --------
    concat
    merge
    combine_nested
    Examples
    --------
    Combining two datasets using their common dimension coordinates. Notice
    they are concatenated based on the values in their dimension coordinates,
    not on their position in the list passed to `combine_by_coords`.
    >>> x1 = xr.Dataset(
    ...     {
    ...         "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
    ...         "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
    ...     },
    ...     coords={"y": [0, 1], "x": [10, 20, 30]},
    ... )
    >>> x2 = xr.Dataset(
    ...     {
    ...         "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
    ...         "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
    ...     },
    ...     coords={"y": [2, 3], "x": [10, 20, 30]},
    ... )
    >>> x3 = xr.Dataset(
    ...     {
    ...         "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
    ...         "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
    ...     },
    ...     coords={"y": [2, 3], "x": [40, 50, 60]},
    ... )
    >>> x1
    <xarray.Dataset> Size: 136B
    Dimensions:        (y: 2, x: 3)
    Coordinates:
      * y              (y) int64 16B 0 1
      * x              (x) int64 24B 10 20 30
    Data variables:
        temperature    (y, x) float64 48B 10.98 14.3 12.06 10.9 8.473 12.92
        precipitation  (y, x) float64 48B 0.4376 0.8918 0.9637 0.3834 0.7917 0.5289
    >>> x2
    <xarray.Dataset> Size: 136B
    Dimensions:        (y: 2, x: 3)
    Coordinates:
      * y              (y) int64 16B 2 3
      * x              (x) int64 24B 10 20 30
    Data variables:
        temperature    (y, x) float64 48B 11.36 18.51 1.421 1.743 0.4044 16.65
        precipitation  (y, x) float64 48B 0.7782 0.87 0.9786 0.7992 0.4615 0.7805
    >>> x3
    <xarray.Dataset> Size: 136B
    Dimensions:        (y: 2, x: 3)
    Coordinates:
      * y              (y) int64 16B 2 3
      * x              (x) int64 24B 40 50 60
    Data variables:
        temperature    (y, x) float64 48B 2.365 12.8 2.867 18.89 10.44 8.293
        precipitation  (y, x) float64 48B 0.2646 0.7742 0.4562 0.5684 0.01879 0.6176
    >>> xr.combine_by_coords([x2, x1])
    <xarray.Dataset> Size: 248B
    Dimensions:        (y: 4, x: 3)
    Coordinates:
      * y              (y) int64 32B 0 1 2 3
      * x              (x) int64 24B 10 20 30
    Data variables:
        temperature    (y, x) float64 96B 10.98 14.3 12.06 ... 1.743 0.4044 16.65
        precipitation  (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.4615 0.7805
    >>> xr.combine_by_coords([x3, x1])
    <xarray.Dataset> Size: 464B
    Dimensions:        (y: 4, x: 6)
    Coordinates:
      * y              (y) int64 32B 0 1 2 3
      * x              (x) int64 48B 10 20 30 40 50 60
    Data variables:
        temperature    (y, x) float64 192B 10.98 14.3 12.06 ... 18.89 10.44 8.293
        precipitation  (y, x) float64 192B 0.4376 0.8918 0.9637 ... 0.01879 0.6176
    >>> xr.combine_by_coords([x3, x1], join="override")
    <xarray.Dataset> Size: 256B
    Dimensions:        (y: 2, x: 6)
    Coordinates:
      * y              (y) int64 16B 0 1
      * x              (x) int64 48B 10 20 30 40 50 60
    Data variables:
        temperature    (y, x) float64 96B 10.98 14.3 12.06 ... 18.89 10.44 8.293
        precipitation  (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.01879 0.6176
    >>> xr.combine_by_coords([x1, x2, x3])
    <xarray.Dataset> Size: 464B
    Dimensions:        (y: 4, x: 6)
    Coordinates:
      * y              (y) int64 32B 0 1 2 3
      * x              (x) int64 48B 10 20 30 40 50 60
    Data variables:
        temperature    (y, x) float64 192B 10.98 14.3 12.06 ... 18.89 10.44 8.293
        precipitation  (y, x) float64 192B 0.4376 0.8918 0.9637 ... 0.01879 0.6176
    You can also combine DataArray objects, but the behaviour will differ depending on
    whether or not the DataArrays are named. If all DataArrays are named then they will
    be promoted to Datasets before combining, and then the resultant Dataset will be
    returned, e.g.
    >>> named_da1 = xr.DataArray(
    ...     name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x"
    ... )
    >>> named_da1
    <xarray.DataArray 'a' (x: 2)> Size: 16B
    array([1., 2.])
    Coordinates:
      * x        (x) int64 16B 0 1
    >>> named_da2 = xr.DataArray(
    ...     name="a", data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x"
    ... )
    >>> named_da2
    <xarray.DataArray 'a' (x: 2)> Size: 16B
    array([3., 4.])
    Coordinates:
      * x        (x) int64 16B 2 3
    >>> xr.combine_by_coords([named_da1, named_da2])
    <xarray.Dataset> Size: 64B
    Dimensions:  (x: 4)
    Coordinates:
      * x        (x) int64 32B 0 1 2 3
    Data variables:
        a        (x) float64 32B 1.0 2.0 3.0 4.0
    If all the DataArrays are unnamed, a single DataArray will be returned, e.g.
    >>> unnamed_da1 = xr.DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x")
    >>> unnamed_da2 = xr.DataArray(data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x")
    >>> xr.combine_by_coords([unnamed_da1, unnamed_da2])
    <xarray.DataArray (x: 4)> Size: 32B
    array([1., 2., 3., 4.])
    Coordinates:
      * x        (x) int64 32B 0 1 2 3
    Finally, if you attempt to combine a mix of unnamed DataArrays with either named
    DataArrays or Datasets, a ValueError will be raised (as this is an ambiguous operation).
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_combine_by_coords.txt 
 | 
	def combine_by_coords(
    data_objects: Iterable[Dataset | DataArray] = [],
    compat: CompatOptions = "no_conflicts",
    data_vars: Literal["all", "minimal", "different"] | list[str] = "all",
    coords: str = "different",
    fill_value: object = dtypes.NA,
    join: JoinOptions = "outer",
    combine_attrs: CombineAttrsOptions = "no_conflicts",
) -> Dataset | DataArray:
    """
    Attempt to auto-magically combine the given datasets (or data arrays)
    into one by using dimension coordinates.
    This function attempts to combine a group of datasets along any number of
    dimensions into a single entity by inspecting coords and metadata and using
    a combination of concat and merge.
    Will attempt to order the datasets such that the values in their dimension
    coordinates are monotonic along all dimensions. If it cannot determine the
    order in which to concatenate the datasets, it will raise a ValueError.
    Non-coordinate dimensions will be ignored, as will any coordinate
    dimensions which do not vary between each dataset.
    Aligns coordinates, but different variables on datasets can cause it
    to fail under some scenarios. In complex cases, you may need to clean up
    your data and use concat/merge explicitly (also see `combine_nested`).
    Works well if, for example, you have N years of data and M data variables,
    and each combination of a distinct time period and set of data variables is
    saved as its own dataset. Also useful if you have a simulation which is
    parallelized in multiple dimensions, but has global coordinates saved in
    each file specifying the positions of points within the global domain.
    Parameters
    ----------
    data_objects : Iterable of Datasets or DataArrays
        Data objects to combine.
    compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional
        String indicating how to compare variables of the same name for
        potential conflicts:
        - "broadcast_equals": all values must be equal when variables are
          broadcast against each other to ensure common dimensions.
        - "equals": all values and dimensions must be the same.
        - "identical": all values, dimensions and attributes must be the
          same.
        - "no_conflicts": only values which are not null in both datasets
          must be equal. The returned dataset then contains the combination
          of all non-null values.
        - "override": skip comparing and pick variable from first dataset
    data_vars : {"minimal", "different", "all" or list of str}, optional
        These data variables will be concatenated together:
        - "minimal": Only data variables in which the dimension already
          appears are included.
        - "different": Data variables which are not equal (ignoring
          attributes) across all datasets are also concatenated (as well as
          all for which dimension already appears). Beware: this option may
          load the data payload of data variables into memory if they are not
          already loaded.
        - "all": All data variables will be concatenated.
        - list of str: The listed data variables will be concatenated, in
          addition to the "minimal" data variables.
        If objects are DataArrays, `data_vars` must be "all".
    coords : {"minimal", "different", "all"} or list of str, optional
        As per the "data_vars" kwarg, but for coordinate variables.
    fill_value : scalar or dict-like, optional
        Value to use for newly missing values. If a dict-like, maps
        variable names to fill values. Use a data array's name to
        refer to its values. If None, raises a ValueError if
        the passed Datasets do not create a complete hypercube.
    join : {"outer", "inner", "left", "right", "exact"}, optional
        String indicating how to combine differing indexes in objects
        - "outer": use the union of object indexes
        - "inner": use the intersection of object indexes
        - "left": use indexes from the first object with each dimension
        - "right": use indexes from the last object with each dimension
        - "exact": instead of aligning, raise `ValueError` when indexes to be
          aligned are not equal
        - "override": if indexes are of same size, rewrite indexes to be
          those of the first object with that dimension. Indexes for the same
          dimension must have the same size in all objects.
    combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
                     "override"} or callable, default: "no_conflicts"
        A callable or a string indicating how to combine attrs of the objects being
        merged:
        - "drop": empty attrs on returned Dataset.
        - "identical": all attrs must be the same on every object.
        - "no_conflicts": attrs from all objects are combined, any that have
          the same name must also have the same value.
        - "drop_conflicts": attrs from all objects are combined, any that have
          the same name but different values are dropped.
        - "override": skip comparing and copy attrs from the first dataset to
          the result.
        If a callable, it must expect a sequence of ``attrs`` dicts and a context object
        as its only parameters.
    Returns
    -------
    combined : xarray.Dataset or xarray.DataArray
        Will return a Dataset unless all the inputs are unnamed DataArrays, in which case a
        DataArray will be returned.
    See also
    --------
    concat
    merge
    combine_nested
    Examples
    --------
    Combining two datasets using their common dimension coordinates. Notice
    they are concatenated based on the values in their dimension coordinates,
    not on their position in the list passed to `combine_by_coords`.
    >>> x1 = xr.Dataset(
    ...     {
    ...         "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
    ...         "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
    ...     },
    ...     coords={"y": [0, 1], "x": [10, 20, 30]},
    ... )
    >>> x2 = xr.Dataset(
    ...     {
    ...         "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
    ...         "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
    ...     },
    ...     coords={"y": [2, 3], "x": [10, 20, 30]},
    ... )
    >>> x3 = xr.Dataset(
    ...     {
    ...         "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
    ...         "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
    ...     },
    ...     coords={"y": [2, 3], "x": [40, 50, 60]},
    ... )
    >>> x1
    <xarray.Dataset> Size: 136B
    Dimensions:        (y: 2, x: 3)
    Coordinates:
      * y              (y) int64 16B 0 1
      * x              (x) int64 24B 10 20 30
    Data variables:
        temperature    (y, x) float64 48B 10.98 14.3 12.06 10.9 8.473 12.92
        precipitation  (y, x) float64 48B 0.4376 0.8918 0.9637 0.3834 0.7917 0.5289
    >>> x2
    <xarray.Dataset> Size: 136B
    Dimensions:        (y: 2, x: 3)
    Coordinates:
      * y              (y) int64 16B 2 3
      * x              (x) int64 24B 10 20 30
    Data variables:
        temperature    (y, x) float64 48B 11.36 18.51 1.421 1.743 0.4044 16.65
        precipitation  (y, x) float64 48B 0.7782 0.87 0.9786 0.7992 0.4615 0.7805
    >>> x3
    <xarray.Dataset> Size: 136B
    Dimensions:        (y: 2, x: 3)
    Coordinates:
      * y              (y) int64 16B 2 3
      * x              (x) int64 24B 40 50 60
    Data variables:
        temperature    (y, x) float64 48B 2.365 12.8 2.867 18.89 10.44 8.293
        precipitation  (y, x) float64 48B 0.2646 0.7742 0.4562 0.5684 0.01879 0.6176
    >>> xr.combine_by_coords([x2, x1])
    <xarray.Dataset> Size: 248B
    Dimensions:        (y: 4, x: 3)
    Coordinates:
      * y              (y) int64 32B 0 1 2 3
      * x              (x) int64 24B 10 20 30
    Data variables:
        temperature    (y, x) float64 96B 10.98 14.3 12.06 ... 1.743 0.4044 16.65
        precipitation  (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.4615 0.7805
    >>> xr.combine_by_coords([x3, x1])
    <xarray.Dataset> Size: 464B
    Dimensions:        (y: 4, x: 6)
    Coordinates:
      * y              (y) int64 32B 0 1 2 3
      * x              (x) int64 48B 10 20 30 40 50 60
    Data variables:
        temperature    (y, x) float64 192B 10.98 14.3 12.06 ... 18.89 10.44 8.293
        precipitation  (y, x) float64 192B 0.4376 0.8918 0.9637 ... 0.01879 0.6176
    >>> xr.combine_by_coords([x3, x1], join="override")
    <xarray.Dataset> Size: 256B
    Dimensions:        (y: 2, x: 6)
    Coordinates:
      * y              (y) int64 16B 0 1
      * x              (x) int64 48B 10 20 30 40 50 60
    Data variables:
        temperature    (y, x) float64 96B 10.98 14.3 12.06 ... 18.89 10.44 8.293
        precipitation  (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.01879 0.6176
    >>> xr.combine_by_coords([x1, x2, x3])
    <xarray.Dataset> Size: 464B
    Dimensions:        (y: 4, x: 6)
    Coordinates:
      * y              (y) int64 32B 0 1 2 3
      * x              (x) int64 48B 10 20 30 40 50 60
    Data variables:
        temperature    (y, x) float64 192B 10.98 14.3 12.06 ... 18.89 10.44 8.293
        precipitation  (y, x) float64 192B 0.4376 0.8918 0.9637 ... 0.01879 0.6176
    You can also combine DataArray objects, but the behaviour will differ depending on
    whether or not the DataArrays are named. If all DataArrays are named then they will
    be promoted to Datasets before combining, and then the resultant Dataset will be
    returned, e.g.
    >>> named_da1 = xr.DataArray(
    ...     name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x"
    ... )
    >>> named_da1
    <xarray.DataArray 'a' (x: 2)> Size: 16B
    array([1., 2.])
    Coordinates:
      * x        (x) int64 16B 0 1
    >>> named_da2 = xr.DataArray(
    ...     name="a", data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x"
    ... )
    >>> named_da2
    <xarray.DataArray 'a' (x: 2)> Size: 16B
    array([3., 4.])
    Coordinates:
      * x        (x) int64 16B 2 3
    >>> xr.combine_by_coords([named_da1, named_da2])
    <xarray.Dataset> Size: 64B
    Dimensions:  (x: 4)
    Coordinates:
      * x        (x) int64 32B 0 1 2 3
    Data variables:
        a        (x) float64 32B 1.0 2.0 3.0 4.0
    If all the DataArrays are unnamed, a single DataArray will be returned, e.g.
    >>> unnamed_da1 = xr.DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x")
    >>> unnamed_da2 = xr.DataArray(data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x")
    >>> xr.combine_by_coords([unnamed_da1, unnamed_da2])
    <xarray.DataArray (x: 4)> Size: 32B
    array([1., 2., 3., 4.])
    Coordinates:
      * x        (x) int64 32B 0 1 2 3
    Finally, if you attempt to combine a mix of unnamed DataArrays with either named
    DataArrays or Datasets, a ValueError will be raised (as this is an ambiguous operation).
    """
    if not data_objects:
        return Dataset()
    objs_are_unnamed_dataarrays = [
        isinstance(data_object, DataArray) and data_object.name is None
        for data_object in data_objects
    ]
    if any(objs_are_unnamed_dataarrays):
        if all(objs_are_unnamed_dataarrays):
            # Combine into a single larger DataArray
            temp_datasets = [
                unnamed_dataarray._to_temp_dataset()
                for unnamed_dataarray in data_objects
            ]
            combined_temp_dataset = _combine_single_variable_hypercube(
                temp_datasets,
                fill_value=fill_value,
                data_vars=data_vars,
                coords=coords,
                compat=compat,
                join=join,
                combine_attrs=combine_attrs,
            )
            return DataArray()._from_temp_dataset(combined_temp_dataset)
        else:
            # Must be a mix of unnamed dataarrays with either named dataarrays or with datasets
            # Can't combine these as we wouldn't know whether to merge or concatenate the arrays
            raise ValueError(
                "Can't automatically combine unnamed DataArrays with either named DataArrays or Datasets."
            )
    else:
        # Promote any named DataArrays to single-variable Datasets to simplify combining
        data_objects = [
            obj.to_dataset() if isinstance(obj, DataArray) else obj
            for obj in data_objects
        ]
        # Group by data vars
        sorted_datasets = sorted(data_objects, key=vars_as_keys)
        grouped_by_vars = itertools.groupby(sorted_datasets, key=vars_as_keys)
        # Perform the multidimensional combine on each group of data variables
        # before merging back together
        concatenated_grouped_by_data_vars = tuple(
            _combine_single_variable_hypercube(
                tuple(datasets_with_same_vars),
                fill_value=fill_value,
                data_vars=data_vars,
                coords=coords,
                compat=compat,
                join=join,
                combine_attrs=combine_attrs,
            )
            for vars, datasets_with_same_vars in grouped_by_vars
        )
    return merge(
        concatenated_grouped_by_data_vars,
        compat=compat,
        fill_value=fill_value,
        join=join,
        combine_attrs=combine_attrs,
    )
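
A minimal sketch, assuming numpy and xarray are importable, of the incomplete-hypercube
case mentioned above: two datasets covering only diagonal corners of the grid are padded
with ``fill_value`` (NaN by default). Illustrative only, not part of the library source:

import numpy as np
import xarray as xr

lower_left = xr.Dataset(
    {"temperature": (("y", "x"), np.zeros((1, 2)))},
    coords={"y": [0], "x": [10, 20]},
)
upper_right = xr.Dataset(
    {"temperature": (("y", "x"), np.ones((1, 2)))},
    coords={"y": [1], "x": [30, 40]},
)

combined = xr.combine_by_coords([lower_left, upper_right])
print(combined.temperature.shape)  # expected: (2, 4)
# The two untouched corners are filled with NaN (4 values in total).
print(int(combined.temperature.isnull().sum()))  # expected: 4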
 
 | 
	combine_by_coords 
 | 
	File-Level 
 | 
					
	xarray 
 | 
	26 
 | 
	xarray/core/combine.py 
 | 
	def combine_nested(
    datasets: DATASET_HYPERCUBE,
    concat_dim: str | DataArray | None | Sequence[str | DataArray | pd.Index | None],
    compat: str = "no_conflicts",
    data_vars: str = "all",
    coords: str = "different",
    fill_value: object = dtypes.NA,
    join: JoinOptions = "outer",
    combine_attrs: CombineAttrsOptions = "drop",
) -> Dataset:
    """
    Explicitly combine an N-dimensional grid of datasets into one by using a
    succession of concat and merge operations along each dimension of the grid.
    Does not sort the supplied datasets under any circumstances, so the
    datasets must be passed in the order you wish them to be concatenated. It
    does align coordinates, but different variables on datasets can cause it to
    fail under some scenarios. In complex cases, you may need to clean up your
    data and use concat/merge explicitly.
    To concatenate along multiple dimensions the datasets must be passed as a
    nested list-of-lists, with a depth equal to the length of ``concat_dims``.
    ``combine_nested`` will concatenate along the top-level list first.
    Useful for combining datasets from a set of nested directories, or for
    collecting the output of a simulation parallelized along multiple
    dimensions.
    Parameters
    ----------
    datasets : list or nested list of Dataset
        Dataset objects to combine.
        If concatenation or merging along more than one dimension is desired,
        then datasets must be supplied in a nested list-of-lists.
    concat_dim : str, or list of str, DataArray, Index or None
        Dimensions along which to concatenate variables, as used by
        :py:func:`xarray.concat`.
        Set ``concat_dim=[..., None, ...]`` explicitly to disable concatenation
        and merge instead along a particular dimension.
        The position of ``None`` in the list specifies the dimension of the
        nested-list input along which to merge.
        Must be the same length as the depth of the list passed to
        ``datasets``.
    compat : {"identical", "equals", "broadcast_equals", \
              "no_conflicts", "override"}, optional
        String indicating how to compare variables of the same name for
        potential merge conflicts:
        - "broadcast_equals": all values must be equal when variables are
          broadcast against each other to ensure common dimensions.
        - "equals": all values and dimensions must be the same.
        - "identical": all values, dimensions and attributes must be the
          same.
        - "no_conflicts": only values which are not null in both datasets
          must be equal. The returned dataset then contains the combination
          of all non-null values.
        - "override": skip comparing and pick variable from first dataset
    data_vars : {"minimal", "different", "all" or list of str}, optional
        Details are in the documentation of concat
    coords : {"minimal", "different", "all" or list of str}, optional
        Details are in the documentation of concat
    fill_value : scalar or dict-like, optional
        Value to use for newly missing values. If a dict-like, maps
        variable names to fill values. Use a data array's name to
        refer to its values.
    join : {"outer", "inner", "left", "right", "exact"}, optional
        String indicating how to combine differing indexes
        (excluding concat_dim) in objects
        - "outer": use the union of object indexes
        - "inner": use the intersection of object indexes
        - "left": use indexes from the first object with each dimension
        - "right": use indexes from the last object with each dimension
        - "exact": instead of aligning, raise `ValueError` when indexes to be
          aligned are not equal
        - "override": if indexes are of same size, rewrite indexes to be
          those of the first object with that dimension. Indexes for the same
          dimension must have the same size in all objects.
    combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
                     "override"} or callable, default: "drop"
        A callable or a string indicating how to combine attrs of the objects being
        merged:
        - "drop": empty attrs on returned Dataset.
        - "identical": all attrs must be the same on every object.
        - "no_conflicts": attrs from all objects are combined, any that have
          the same name must also have the same value.
        - "drop_conflicts": attrs from all objects are combined, any that have
          the same name but different values are dropped.
        - "override": skip comparing and copy attrs from the first dataset to
          the result.
        If a callable, it must expect a sequence of ``attrs`` dicts and a context object
        as its only parameters.
    Returns
    -------
    combined : xarray.Dataset
    Examples
    --------
    A common task is collecting data from a parallelized simulation in which
    each process wrote out to a separate file. A domain which was decomposed
    into 4 parts, 2 each along both the x and y axes, requires organising the
    datasets into a doubly-nested list, e.g.:
    >>> x1y1 = xr.Dataset(
    ...     {
    ...         "temperature": (("x", "y"), np.random.randn(2, 2)),
    ...         "precipitation": (("x", "y"), np.random.randn(2, 2)),
    ...     }
    ... )
    >>> x1y1
    <xarray.Dataset> Size: 64B
    Dimensions:        (x: 2, y: 2)
    Dimensions without coordinates: x, y
    Data variables:
        temperature    (x, y) float64 32B 1.764 0.4002 0.9787 2.241
        precipitation  (x, y) float64 32B 1.868 -0.9773 0.9501 -0.1514
    >>> x1y2 = xr.Dataset(
    ...     {
    ...         "temperature": (("x", "y"), np.random.randn(2, 2)),
    ...         "precipitation": (("x", "y"), np.random.randn(2, 2)),
    ...     }
    ... )
    >>> x2y1 = xr.Dataset(
    ...     {
    ...         "temperature": (("x", "y"), np.random.randn(2, 2)),
    ...         "precipitation": (("x", "y"), np.random.randn(2, 2)),
    ...     }
    ... )
    >>> x2y2 = xr.Dataset(
    ...     {
    ...         "temperature": (("x", "y"), np.random.randn(2, 2)),
    ...         "precipitation": (("x", "y"), np.random.randn(2, 2)),
    ...     }
    ... )
    >>> ds_grid = [[x1y1, x1y2], [x2y1, x2y2]]
    >>> combined = xr.combine_nested(ds_grid, concat_dim=["x", "y"])
    >>> combined
    <xarray.Dataset> Size: 256B
    Dimensions:        (x: 4, y: 4)
    Dimensions without coordinates: x, y
    Data variables:
        temperature    (x, y) float64 128B 1.764 0.4002 -0.1032 ... 0.04576 -0.1872
        precipitation  (x, y) float64 128B 1.868 -0.9773 0.761 ... 0.1549 0.3782
    ``combine_nested`` can also be used to explicitly merge datasets with
    different variables. For example if we have 4 datasets, which are divided
    along two times, and contain two different variables, we can pass ``None``
    to ``concat_dim`` to specify the dimension of the nested list over which
    we wish to use ``merge`` instead of ``concat``:
    >>> t1temp = xr.Dataset({"temperature": ("t", np.random.randn(5))})
    >>> t1temp
    <xarray.Dataset> Size: 40B
    Dimensions:      (t: 5)
    Dimensions without coordinates: t
    Data variables:
        temperature  (t) float64 40B -0.8878 -1.981 -0.3479 0.1563 1.23
    >>> t1precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))})
    >>> t1precip
    <xarray.Dataset> Size: 40B
    Dimensions:        (t: 5)
    Dimensions without coordinates: t
    Data variables:
        precipitation  (t) float64 40B 1.202 -0.3873 -0.3023 -1.049 -1.42
    >>> t2temp = xr.Dataset({"temperature": ("t", np.random.randn(5))})
    >>> t2precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))})
    >>> ds_grid = [[t1temp, t1precip], [t2temp, t2precip]]
    >>> combined = xr.combine_nested(ds_grid, concat_dim=["t", None])
    >>> combined
    <xarray.Dataset> Size: 160B
    Dimensions:        (t: 10)
    Dimensions without coordinates: t
    Data variables:
        temperature    (t) float64 80B -0.8878 -1.981 -0.3479 ... -0.4381 -1.253
        precipitation  (t) float64 80B 1.202 -0.3873 -0.3023 ... -0.8955 0.3869
    See also
    --------
    concat
    merge
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_combine_nested.txt 
 | 
	def combine_nested(
    datasets: DATASET_HYPERCUBE,
    concat_dim: str | DataArray | None | Sequence[str | DataArray | pd.Index | None],
    compat: str = "no_conflicts",
    data_vars: str = "all",
    coords: str = "different",
    fill_value: object = dtypes.NA,
    join: JoinOptions = "outer",
    combine_attrs: CombineAttrsOptions = "drop",
) -> Dataset:
    """
    Explicitly combine an N-dimensional grid of datasets into one by using a
    succession of concat and merge operations along each dimension of the grid.
    Does not sort the supplied datasets under any circumstances, so the
    datasets must be passed in the order you wish them to be concatenated. It
    does align coordinates, but different variables on datasets can cause it to
    fail under some scenarios. In complex cases, you may need to clean up your
    data and use concat/merge explicitly.
    To concatenate along multiple dimensions the datasets must be passed as a
    nested list-of-lists, with a depth equal to the length of ``concat_dims``.
    ``combine_nested`` will concatenate along the top-level list first.
    Useful for combining datasets from a set of nested directories, or for
    collecting the output of a simulation parallelized along multiple
    dimensions.
    Parameters
    ----------
    datasets : list or nested list of Dataset
        Dataset objects to combine.
        If concatenation or merging along more than one dimension is desired,
        then datasets must be supplied in a nested list-of-lists.
    concat_dim : str, or list of str, DataArray, Index or None
        Dimensions along which to concatenate variables, as used by
        :py:func:`xarray.concat`.
        Set ``concat_dim=[..., None, ...]`` explicitly to disable concatenation
        and merge instead along a particular dimension.
        The position of ``None`` in the list specifies the dimension of the
        nested-list input along which to merge.
        Must be the same length as the depth of the list passed to
        ``datasets``.
    compat : {"identical", "equals", "broadcast_equals", \
              "no_conflicts", "override"}, optional
        String indicating how to compare variables of the same name for
        potential merge conflicts:
        - "broadcast_equals": all values must be equal when variables are
          broadcast against each other to ensure common dimensions.
        - "equals": all values and dimensions must be the same.
        - "identical": all values, dimensions and attributes must be the
          same.
        - "no_conflicts": only values which are not null in both datasets
          must be equal. The returned dataset then contains the combination
          of all non-null values.
        - "override": skip comparing and pick variable from first dataset
    data_vars : {"minimal", "different", "all" or list of str}, optional
        Details are in the documentation of concat
    coords : {"minimal", "different", "all" or list of str}, optional
        Details are in the documentation of concat
    fill_value : scalar or dict-like, optional
        Value to use for newly missing values. If a dict-like, maps
        variable names to fill values. Use a data array's name to
        refer to its values.
    join : {"outer", "inner", "left", "right", "exact"}, optional
        String indicating how to combine differing indexes
        (excluding concat_dim) in objects
        - "outer": use the union of object indexes
        - "inner": use the intersection of object indexes
        - "left": use indexes from the first object with each dimension
        - "right": use indexes from the last object with each dimension
        - "exact": instead of aligning, raise `ValueError` when indexes to be
          aligned are not equal
        - "override": if indexes are of same size, rewrite indexes to be
          those of the first object with that dimension. Indexes for the same
          dimension must have the same size in all objects.
    combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
                     "override"} or callable, default: "drop"
        A callable or a string indicating how to combine attrs of the objects being
        merged:
        - "drop": empty attrs on returned Dataset.
        - "identical": all attrs must be the same on every object.
        - "no_conflicts": attrs from all objects are combined, any that have
          the same name must also have the same value.
        - "drop_conflicts": attrs from all objects are combined, any that have
          the same name but different values are dropped.
        - "override": skip comparing and copy attrs from the first dataset to
          the result.
        If a callable, it must expect a sequence of ``attrs`` dicts and a context object
        as its only parameters.
    Returns
    -------
    combined : xarray.Dataset
    Examples
    --------
    A common task is collecting data from a parallelized simulation in which
    each process wrote out to a separate file. A domain which was decomposed
    into 4 parts, 2 each along both the x and y axes, requires organising the
    datasets into a doubly-nested list, e.g.:
    >>> x1y1 = xr.Dataset(
    ...     {
    ...         "temperature": (("x", "y"), np.random.randn(2, 2)),
    ...         "precipitation": (("x", "y"), np.random.randn(2, 2)),
    ...     }
    ... )
    >>> x1y1
    <xarray.Dataset> Size: 64B
    Dimensions:        (x: 2, y: 2)
    Dimensions without coordinates: x, y
    Data variables:
        temperature    (x, y) float64 32B 1.764 0.4002 0.9787 2.241
        precipitation  (x, y) float64 32B 1.868 -0.9773 0.9501 -0.1514
    >>> x1y2 = xr.Dataset(
    ...     {
    ...         "temperature": (("x", "y"), np.random.randn(2, 2)),
    ...         "precipitation": (("x", "y"), np.random.randn(2, 2)),
    ...     }
    ... )
    >>> x2y1 = xr.Dataset(
    ...     {
    ...         "temperature": (("x", "y"), np.random.randn(2, 2)),
    ...         "precipitation": (("x", "y"), np.random.randn(2, 2)),
    ...     }
    ... )
    >>> x2y2 = xr.Dataset(
    ...     {
    ...         "temperature": (("x", "y"), np.random.randn(2, 2)),
    ...         "precipitation": (("x", "y"), np.random.randn(2, 2)),
    ...     }
    ... )
    >>> ds_grid = [[x1y1, x1y2], [x2y1, x2y2]]
    >>> combined = xr.combine_nested(ds_grid, concat_dim=["x", "y"])
    >>> combined
    <xarray.Dataset> Size: 256B
    Dimensions:        (x: 4, y: 4)
    Dimensions without coordinates: x, y
    Data variables:
        temperature    (x, y) float64 128B 1.764 0.4002 -0.1032 ... 0.04576 -0.1872
        precipitation  (x, y) float64 128B 1.868 -0.9773 0.761 ... 0.1549 0.3782
    ``combine_nested`` can also be used to explicitly merge datasets with
    different variables. For example if we have 4 datasets, which are divided
    along two times, and contain two different variables, we can pass ``None``
    to ``concat_dim`` to specify the dimension of the nested list over which
    we wish to use ``merge`` instead of ``concat``:
    >>> t1temp = xr.Dataset({"temperature": ("t", np.random.randn(5))})
    >>> t1temp
    <xarray.Dataset> Size: 40B
    Dimensions:      (t: 5)
    Dimensions without coordinates: t
    Data variables:
        temperature  (t) float64 40B -0.8878 -1.981 -0.3479 0.1563 1.23
    >>> t1precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))})
    >>> t1precip
    <xarray.Dataset> Size: 40B
    Dimensions:        (t: 5)
    Dimensions without coordinates: t
    Data variables:
        precipitation  (t) float64 40B 1.202 -0.3873 -0.3023 -1.049 -1.42
    >>> t2temp = xr.Dataset({"temperature": ("t", np.random.randn(5))})
    >>> t2precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))})
    >>> ds_grid = [[t1temp, t1precip], [t2temp, t2precip]]
    >>> combined = xr.combine_nested(ds_grid, concat_dim=["t", None])
    >>> combined
    <xarray.Dataset> Size: 160B
    Dimensions:        (t: 10)
    Dimensions without coordinates: t
    Data variables:
        temperature    (t) float64 80B -0.8878 -1.981 -0.3479 ... -0.4381 -1.253
        precipitation  (t) float64 80B 1.202 -0.3873 -0.3023 ... -0.8955 0.3869
    See also
    --------
    concat
    merge
    """
    mixed_datasets_and_arrays = any(
        isinstance(obj, Dataset) for obj in iterate_nested(datasets)
    ) and any(
        isinstance(obj, DataArray) and obj.name is None
        for obj in iterate_nested(datasets)
    )
    if mixed_datasets_and_arrays:
        raise ValueError("Can't combine datasets with unnamed arrays.")
    if isinstance(concat_dim, str | DataArray) or concat_dim is None:
        concat_dim = [concat_dim]
    # The IDs argument tells _nested_combine that datasets aren't yet sorted
    return _nested_combine(
        datasets,
        concat_dims=concat_dim,
        compat=compat,
        data_vars=data_vars,
        coords=coords,
        ids=False,
        fill_value=fill_value,
        join=join,
        combine_attrs=combine_attrs,
    )
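
A small sketch, assuming numpy and xarray are importable, of the simplest call pattern
implied above: a flat list of datasets and a single string ``concat_dim``, which the
function wraps into a one-element list before combining. Illustrative only:

import numpy as np
import xarray as xr

part1 = xr.Dataset({"temperature": ("t", np.arange(3.0))})
part2 = xr.Dataset({"temperature": ("t", np.arange(3.0, 6.0))})

# Datasets are concatenated in the order given; nothing is sorted by coordinate values.
combined = xr.combine_nested([part1, part2], concat_dim="t")
print(combined.sizes["t"])  # expected: 6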
 
 | 
	combine_nested 
 | 
	File-Level 
 | 
					
	xarray 
 | 
	28 
 | 
	xarray/coding/calendar_ops.py 
 | 
	def convert_calendar(
    obj,
    calendar,
    dim="time",
    align_on=None,
    missing=None,
    use_cftime=None,
):
    """Transform a time-indexed Dataset or DataArray to one that uses another calendar.
    This function only converts the individual timestamps; it does not modify any
    data except in dropping invalid/surplus dates, or inserting values for missing dates.
    If the source and target calendars are both from a standard type, only the
    type of the time array is modified. When converting from a calendar with a
    leap year to a calendar without a leap year, the 29th of February will
    be removed from the array. In the other direction, the 29th of February will
    be missing in the output, unless `missing` is specified, in which case that
    value is inserted. For conversions involving the `360_day` calendar, see Notes.
    This method is safe to use with sub-daily data as it doesn't touch the time
    part of the timestamps.
    Parameters
    ----------
    obj : DataArray or Dataset
      Input DataArray or Dataset with a time coordinate of a valid dtype
      (:py:class:`numpy.datetime64`  or :py:class:`cftime.datetime`).
    calendar : str
      The target calendar name.
    dim : str
      Name of the time coordinate in the input DataArray or Dataset.
    align_on : {None, 'date', 'year', 'random'}
      Must be specified when either the source or target is a `"360_day"`
      calendar; ignored otherwise. See Notes.
    missing : any, optional
      By default, i.e. if the value is None, this method will simply attempt
      to convert the dates in the source calendar to the same dates in the
      target calendar, and drop any of those that are not possible to
      represent.  If a value is provided, a new time coordinate will be
      created in the target calendar with the same frequency as the original
      time coordinate; for any dates that are not present in the source, the
      data will be filled with this value.  Note that using this mode requires
      that the source data have an inferable frequency; for more information
      see :py:func:`xarray.infer_freq`.  For certain frequency, source, and
      target calendar combinations, this could result in many missing values, see notes.
    use_cftime : bool, optional
      Whether to use cftime objects in the output, only used if `calendar` is
      one of {"proleptic_gregorian", "gregorian" or "standard"}.
      If True, the new time axis uses cftime objects.
      If None (default), it uses :py:class:`numpy.datetime64` values if the date
          range permits it, and :py:class:`cftime.datetime` objects if not.
      If False, it uses :py:class:`numpy.datetime64`  or fails.
    Returns
    -------
      Copy of source with the time coordinate converted to the target calendar.
      If `missing` was None (default), invalid dates in the new calendar are
      dropped, but missing dates are not inserted.
      If `missing` was given, the new data is reindexed to have a time axis
      with the same frequency as the source, but in the new calendar; any
      missing datapoints are filled with `missing`.
    Notes
    -----
    Passing a value to `missing` is only usable if the source's time coordinate has an
    inferable frequency (see :py:func:`~xarray.infer_freq`) and is only appropriate
    if the target coordinate, generated from this frequency, has dates equivalent to the
    source. It is usually **not** appropriate to use this mode with:
    - Period-end frequencies: 'A', 'Y', 'Q' or 'M', as opposed to 'AS', 'YS', 'QS' and 'MS'
    - Sub-monthly frequencies that do not divide a day evenly: 'W', 'nD' where `n != 1`,
      or 'mH' where `24 % m != 0`.
    If one of the source or target calendars is `"360_day"`, `align_on` must
    be specified and two options are offered.
    "year"
      The dates are translated according to their relative position in the year,
      ignoring their original month and day information, meaning that the
      missing/surplus days are added/removed at regular intervals.
      From a `360_day` to a standard calendar, the output will be missing the
      following dates (day of year in parentheses):
        To a leap year:
          January 31st (31), March 31st (91), June 1st (153), July 31st (213),
          September 31st (275) and November 30th (335).
        To a non-leap year:
          February 6th (36), April 19th (109), July 2nd (183),
          September 12th (255), November 25th (329).
      From a standard calendar to a `"360_day"`, the following dates in the
      source array will be dropped:
        From a leap year:
          January 31st (31), April 1st (92), June 1st (153), August 1st (214),
          September 31st (275), December 1st (336)
        From a non-leap year:
          February 6th (37), April 20th (110), July 2nd (183),
          September 13th (256), November 25th (329)
      This option is best used on daily and subdaily data.
    "date"
      The month/day information is conserved and invalid dates are dropped
      from the output. This means that when converting from a `"360_day"` to a
      standard calendar, all 31sts (Jan, March, May, July, August, October and
      December) will be missing as there is no equivalent dates in the
      `"360_day"` calendar and the 29th (on non-leap years) and 30th of February
      will be dropped as there are no equivalent dates in a standard calendar.
      This option is best used with data on a frequency coarser than daily.
    "random"
      Similar to "year", each day of year of the source is mapped to another day of year
      of the target. However, instead of always having the same missing days according to
      the source and target years, here 5 days are chosen randomly, one for each fifth
      of the year. However, February 29th is always missing when converting to a leap year,
      or its value is dropped when converting from a leap year. This is similar to the method
      used in the LOCA dataset (see Pierce, Cayan, and Thrasher (2014). doi:10.1175/JHM-D-14-0082.1).
      This option is best used on daily data.
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_convert_calendar.txt 
 | 
	def convert_calendar(
    obj,
    calendar,
    dim="time",
    align_on=None,
    missing=None,
    use_cftime=None,
):
    """Transform a time-indexed Dataset or DataArray to one that uses another calendar.
    This function only converts the individual timestamps; it does not modify any
    data except in dropping invalid/surplus dates, or inserting values for missing dates.
    If the source and target calendars are both from a standard type, only the
    type of the time array is modified. When converting from a calendar with a
    leap year to a calendar without a leap year, the 29th of February will
    be removed from the array. In the other direction, the 29th of February will
    be missing in the output, unless `missing` is specified, in which case that
    value is inserted. For conversions involving the `360_day` calendar, see Notes.
    This method is safe to use with sub-daily data as it doesn't touch the time
    part of the timestamps.
    Parameters
    ----------
    obj : DataArray or Dataset
      Input DataArray or Dataset with a time coordinate of a valid dtype
      (:py:class:`numpy.datetime64`  or :py:class:`cftime.datetime`).
    calendar : str
      The target calendar name.
    dim : str
      Name of the time coordinate in the input DataArray or Dataset.
    align_on : {None, 'date', 'year', 'random'}
      Must be specified when either the source or target is a `"360_day"`
      calendar; ignored otherwise. See Notes.
    missing : any, optional
      By default, i.e. if the value is None, this method will simply attempt
      to convert the dates in the source calendar to the same dates in the
      target calendar, and drop any of those that are not possible to
      represent.  If a value is provided, a new time coordinate will be
      created in the target calendar with the same frequency as the original
      time coordinate; for any dates that are not present in the source, the
      data will be filled with this value.  Note that using this mode requires
      that the source data have an inferable frequency; for more information
      see :py:func:`xarray.infer_freq`.  For certain frequency, source, and
      target calendar combinations, this could result in many missing values, see notes.
    use_cftime : bool, optional
      Whether to use cftime objects in the output, only used if `calendar` is
      one of {"proleptic_gregorian", "gregorian" or "standard"}.
      If True, the new time axis uses cftime objects.
      If None (default), it uses :py:class:`numpy.datetime64` values if the date
          range permits it, and :py:class:`cftime.datetime` objects if not.
      If False, it uses :py:class:`numpy.datetime64`  or fails.
    Returns
    -------
      Copy of source with the time coordinate converted to the target calendar.
      If `missing` was None (default), invalid dates in the new calendar are
      dropped, but missing dates are not inserted.
      If `missing` was given, the new data is reindexed to have a time axis
      with the same frequency as the source, but in the new calendar; any
      missing datapoints are filled with `missing`.
    Notes
    -----
    Passing a value to `missing` is only usable if the source's time coordinate has an
    inferable frequency (see :py:func:`~xarray.infer_freq`) and is only appropriate
    if the target coordinate, generated from this frequency, has dates equivalent to the
    source. It is usually **not** appropriate to use this mode with:
    - Period-end frequencies: 'A', 'Y', 'Q' or 'M', as opposed to 'AS', 'YS', 'QS' and 'MS'
    - Sub-monthly frequencies that do not divide a day evenly: 'W', 'nD' where `n != 1`,
      or 'mH' where `24 % m != 0`.
    If one of the source or target calendars is `"360_day"`, `align_on` must
    be specified and two options are offered.
    "year"
      The dates are translated according to their relative position in the year,
      ignoring their original month and day information, meaning that the
      missing/surplus days are added/removed at regular intervals.
      From a `360_day` to a standard calendar, the output will be missing the
      following dates (day of year in parentheses):
        To a leap year:
          January 31st (31), March 31st (91), June 1st (153), July 31st (213),
          September 31st (275) and November 30th (335).
        To a non-leap year:
          February 6th (36), April 19th (109), July 2nd (183),
          September 12th (255), November 25th (329).
      From a standard calendar to a `"360_day"`, the following dates in the
      source array will be dropped:
        From a leap year:
          January 31st (31), April 1st (92), June 1st (153), August 1st (214),
          September 31st (275), December 1st (336)
        From a non-leap year:
          February 6th (37), April 20th (110), July 2nd (183),
          September 13th (256), November 25th (329)
      This option is best used on daily and subdaily data.
    "date"
      The month/day information is conserved and invalid dates are dropped
      from the output. This means that when converting from a `"360_day"` to a
      standard calendar, all 31sts (Jan, March, May, July, August, October and
      December) will be missing as there is no equivalent dates in the
      `"360_day"` calendar and the 29th (on non-leap years) and 30th of February
      will be dropped as there are no equivalent dates in a standard calendar.
      This option is best used with data on a frequency coarser than daily.
    "random"
      Similar to "year", each day of year of the source is mapped to another day of year
      of the target. However, instead of always having the same missing days according to
      the source and target years, here 5 days are chosen randomly, one for each fifth
      of the year. However, February 29th is always missing when converting to a leap year,
      or its value is dropped when converting from a leap year. This is similar to the method
      used in the LOCA dataset (see Pierce, Cayan, and Thrasher (2014). doi:10.1175/JHM-D-14-0082.1).
      This option is best used on daily data.
    """
    from xarray.core.dataarray import DataArray
    time = obj[dim]
    if not _contains_datetime_like_objects(time.variable):
        raise ValueError(f"Coordinate {dim} must contain datetime objects.")
    use_cftime = _should_cftime_be_used(time, calendar, use_cftime)
    source_calendar = time.dt.calendar
    # Do nothing if the requested calendar is the same as the source
    # AND source is np XOR use_cftime
    if source_calendar == calendar and is_np_datetime_like(time.dtype) ^ use_cftime:
        return obj
    if (time.dt.year == 0).any() and calendar in _CALENDARS_WITHOUT_YEAR_ZERO:
        raise ValueError(
            f"Source time coordinate contains dates with year 0, which is not supported by target calendar {calendar}."
        )
    if (source_calendar == "360_day" or calendar == "360_day") and align_on is None:
        raise ValueError(
            "Argument `align_on` must be specified with either 'date' or "
            "'year' when converting to or from a '360_day' calendar."
        )
    if source_calendar != "360_day" and calendar != "360_day":
        align_on = "date"
    out = obj.copy()
    if align_on in ["year", "random"]:
        # Special case for conversion involving 360_day calendar
        if align_on == "year":
            # Instead of translating dates directly, this tries to keep the position within a year similar.
            new_doy = _interpolate_day_of_year(time, target_calendar=calendar)
        elif align_on == "random":
            # The 5 days to remove are randomly chosen, one for each of the five 72-day periods of the year.
            new_doy = time.groupby(f"{dim}.year").map(
                _random_day_of_year, target_calendar=calendar, use_cftime=use_cftime
            )
        # Convert the source datetimes, but override the day of year with our new day of years.
        out[dim] = DataArray(
            [
                _convert_to_new_calendar_with_new_day_of_year(
                    date, newdoy, calendar, use_cftime
                )
                for date, newdoy in zip(time.variable._data.array, new_doy, strict=True)
            ],
            dims=(dim,),
            name=dim,
        )
        # Remove duplicate timestamps, happens when reducing the number of days
        out = out.isel({dim: np.unique(out[dim], return_index=True)[1]})
    elif align_on == "date":
        new_times = convert_times(
            time.data,
            get_date_type(calendar, use_cftime=use_cftime),
            raise_on_invalid=False,
        )
        out[dim] = new_times
        # Remove NaNs that were put on invalid dates in the target calendar
        out = out.where(out[dim].notnull(), drop=True)
        if use_cftime:
            # Reassign times to ensure time index of output is a CFTimeIndex
            # (previously it was an Index due to the presence of NaN values).
            # Note this is not needed in the case that the output time index is
            # a DatetimeIndex, since DatetimeIndexes can handle NaN values.
            out[dim] = CFTimeIndex(out[dim].data)
    if missing is not None:
        time_target = date_range_like(time, calendar=calendar, use_cftime=use_cftime)
        out = out.reindex({dim: time_target}, fill_value=missing)
    # Copy attrs but remove `calendar` if still present.
    out[dim].attrs.update(time.attrs)
    out[dim].attrs.pop("calendar", None)
    return out
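
A minimal sketch of the standard-to-"noleap" path handled above (assumes `cftime` is installed, since the non-standard target calendar cannot be represented with `numpy.datetime64`; variable names are hypothetical):

import xarray as xr

# A daily series spanning a leap day in the standard calendar.
times = xr.date_range("2000-02-27", periods=5, freq="D")
da = xr.DataArray(list(range(5)), dims="time", coords={"time": times})

# "noleap" has no Feb 29, so that timestamp is dropped; `align_on` is not
# required because neither calendar is "360_day".
converted = xr.convert_calendar(da, "noleap")
assert converted.sizes["time"] == 4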
 
 | 
	convert_calendar 
 | 
	File-Level 
 | 
					
	xarray 
 | 
	33 
 | 
	xarray/core/computation.py 
 | 
	def cross(
    a: DataArray | Variable, b: DataArray | Variable, *, dim: Hashable
) -> DataArray | Variable:
    """
    Compute the cross product of two (arrays of) vectors.
    The cross product of `a` and `b` in :math:`R^3` is a vector
    perpendicular to both `a` and `b`. The vectors in `a` and `b` are
    defined by the values along the dimension `dim` and can have sizes
    1, 2 or 3. Where the size of either `a` or `b` is
    1 or 2, the remaining components of the input vector are assumed to
    be zero and the cross product is calculated accordingly. In cases where
    both input vectors have dimension 2, the z-component of the cross
    product is returned.
    Parameters
    ----------
    a, b : DataArray or Variable
        Components of the first and second vector(s).
    dim : hashable
        The dimension along which the cross product will be computed.
        Must be available in both vectors.
    Examples
    --------
    Vector cross-product with 3 dimensions:
    >>> a = xr.DataArray([1, 2, 3])
    >>> b = xr.DataArray([4, 5, 6])
    >>> xr.cross(a, b, dim="dim_0")
    <xarray.DataArray (dim_0: 3)> Size: 24B
    array([-3,  6, -3])
    Dimensions without coordinates: dim_0
    Vector cross-product with 3 dimensions but zeros at the last axis
    yields the same results as with 2 dimensions:
    >>> a = xr.DataArray([1, 2, 0])
    >>> b = xr.DataArray([4, 5, 0])
    >>> xr.cross(a, b, dim="dim_0")
    <xarray.DataArray (dim_0: 3)> Size: 24B
    array([ 0,  0, -3])
    Dimensions without coordinates: dim_0
    Multiple vector cross-products. Note that the direction of the
    cross product vector is defined by the right-hand rule:
    >>> a = xr.DataArray(
    ...     [[1, 2, 3], [4, 5, 6]],
    ...     dims=("time", "cartesian"),
    ...     coords=dict(
    ...         time=(["time"], [0, 1]),
    ...         cartesian=(["cartesian"], ["x", "y", "z"]),
    ...     ),
    ... )
    >>> b = xr.DataArray(
    ...     [[4, 5, 6], [1, 2, 3]],
    ...     dims=("time", "cartesian"),
    ...     coords=dict(
    ...         time=(["time"], [0, 1]),
    ...         cartesian=(["cartesian"], ["x", "y", "z"]),
    ...     ),
    ... )
    >>> xr.cross(a, b, dim="cartesian")
    <xarray.DataArray (time: 2, cartesian: 3)> Size: 48B
    array([[-3,  6, -3],
           [ 3, -6,  3]])
    Coordinates:
      * time       (time) int64 16B 0 1
      * cartesian  (cartesian) <U1 12B 'x' 'y' 'z'
    Cross can be called on Datasets by converting to DataArrays and later
    back to a Dataset:
    >>> ds_a = xr.Dataset(dict(x=("dim_0", [1]), y=("dim_0", [2]), z=("dim_0", [3])))
    >>> ds_b = xr.Dataset(dict(x=("dim_0", [4]), y=("dim_0", [5]), z=("dim_0", [6])))
    >>> c = xr.cross(
    ...     ds_a.to_dataarray("cartesian"),
    ...     ds_b.to_dataarray("cartesian"),
    ...     dim="cartesian",
    ... )
    >>> c.to_dataset(dim="cartesian")
    <xarray.Dataset> Size: 24B
    Dimensions:  (dim_0: 1)
    Dimensions without coordinates: dim_0
    Data variables:
        x        (dim_0) int64 8B -3
        y        (dim_0) int64 8B 6
        z        (dim_0) int64 8B -3
    See Also
    --------
    numpy.cross : Corresponding numpy function
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_cross.txt 
 | 
	def cross(
    a: DataArray | Variable, b: DataArray | Variable, *, dim: Hashable
) -> DataArray | Variable:
    """
    Compute the cross product of two (arrays of) vectors.
    The cross product of `a` and `b` in :math:`R^3` is a vector
    perpendicular to both `a` and `b`. The vectors in `a` and `b` are
    defined by the values along the dimension `dim` and can have sizes
    1, 2 or 3. Where the size of either `a` or `b` is
    1 or 2, the remaining components of the input vector are assumed to
    be zero and the cross product is calculated accordingly. In cases where
    both input vectors have dimension 2, the z-component of the cross
    product is returned.
    Parameters
    ----------
    a, b : DataArray or Variable
        Components of the first and second vector(s).
    dim : hashable
        The dimension along which the cross product will be computed.
        Must be available in both vectors.
    Examples
    --------
    Vector cross-product with 3 dimensions:
    >>> a = xr.DataArray([1, 2, 3])
    >>> b = xr.DataArray([4, 5, 6])
    >>> xr.cross(a, b, dim="dim_0")
    <xarray.DataArray (dim_0: 3)> Size: 24B
    array([-3,  6, -3])
    Dimensions without coordinates: dim_0
    Vector cross-product with 3 dimensions but zeros at the last axis
    yields the same results as with 2 dimensions:
    >>> a = xr.DataArray([1, 2, 0])
    >>> b = xr.DataArray([4, 5, 0])
    >>> xr.cross(a, b, dim="dim_0")
    <xarray.DataArray (dim_0: 3)> Size: 24B
    array([ 0,  0, -3])
    Dimensions without coordinates: dim_0
    Multiple vector cross-products. Note that the direction of the
    cross product vector is defined by the right-hand rule:
    >>> a = xr.DataArray(
    ...     [[1, 2, 3], [4, 5, 6]],
    ...     dims=("time", "cartesian"),
    ...     coords=dict(
    ...         time=(["time"], [0, 1]),
    ...         cartesian=(["cartesian"], ["x", "y", "z"]),
    ...     ),
    ... )
    >>> b = xr.DataArray(
    ...     [[4, 5, 6], [1, 2, 3]],
    ...     dims=("time", "cartesian"),
    ...     coords=dict(
    ...         time=(["time"], [0, 1]),
    ...         cartesian=(["cartesian"], ["x", "y", "z"]),
    ...     ),
    ... )
    >>> xr.cross(a, b, dim="cartesian")
    <xarray.DataArray (time: 2, cartesian: 3)> Size: 48B
    array([[-3,  6, -3],
           [ 3, -6,  3]])
    Coordinates:
      * time       (time) int64 16B 0 1
      * cartesian  (cartesian) <U1 12B 'x' 'y' 'z'
    Cross can be called on Datasets by converting to DataArrays and later
    back to a Dataset:
    >>> ds_a = xr.Dataset(dict(x=("dim_0", [1]), y=("dim_0", [2]), z=("dim_0", [3])))
    >>> ds_b = xr.Dataset(dict(x=("dim_0", [4]), y=("dim_0", [5]), z=("dim_0", [6])))
    >>> c = xr.cross(
    ...     ds_a.to_dataarray("cartesian"),
    ...     ds_b.to_dataarray("cartesian"),
    ...     dim="cartesian",
    ... )
    >>> c.to_dataset(dim="cartesian")
    <xarray.Dataset> Size: 24B
    Dimensions:  (dim_0: 1)
    Dimensions without coordinates: dim_0
    Data variables:
        x        (dim_0) int64 8B -3
        y        (dim_0) int64 8B 6
        z        (dim_0) int64 8B -3
    See Also
    --------
    numpy.cross : Corresponding numpy function
    """
    if dim not in a.dims:
        raise ValueError(f"Dimension {dim!r} not on a")
    elif dim not in b.dims:
        raise ValueError(f"Dimension {dim!r} not on b")
    if not 1 <= a.sizes[dim] <= 3:
        raise ValueError(
            f"The size of {dim!r} on a must be 1, 2, or 3 to be "
            f"compatible with a cross product but is {a.sizes[dim]}"
        )
    elif not 1 <= b.sizes[dim] <= 3:
        raise ValueError(
            f"The size of {dim!r} on b must be 1, 2, or 3 to be "
            f"compatible with a cross product but is {b.sizes[dim]}"
        )
    all_dims = list(dict.fromkeys(a.dims + b.dims))
    if a.sizes[dim] != b.sizes[dim]:
        # Arrays have different sizes. Append zeros where the smaller
        # array is missing a value, zeros will not affect np.cross:
        if (
            not isinstance(a, Variable)  # Only used to make mypy happy.
            and dim in getattr(a, "coords", {})
            and not isinstance(b, Variable)  # Only used to make mypy happy.
            and dim in getattr(b, "coords", {})
        ):
            # If the arrays have coords we know which indexes to fill
            # with zeros:
            a, b = align(
                a,
                b,
                fill_value=0,
                join="outer",
                exclude=set(all_dims) - {dim},
            )
        elif min(a.sizes[dim], b.sizes[dim]) == 2:
            # If the array doesn't have coords we can only infer
            # that it has composite values if the size is at least 2.
            # Once padded, rechunk the padded array because apply_ufunc
            # requires core dimensions not to be chunked:
            if a.sizes[dim] < b.sizes[dim]:
                a = a.pad({dim: (0, 1)}, constant_values=0)
                # TODO: Should pad or apply_ufunc handle correct chunking?
                a = a.chunk({dim: -1}) if is_chunked_array(a.data) else a
            else:
                b = b.pad({dim: (0, 1)}, constant_values=0)
                # TODO: Should pad or apply_ufunc handle correct chunking?
                b = b.chunk({dim: -1}) if is_chunked_array(b.data) else b
        else:
            raise ValueError(
                f"{dim!r} on {'a' if a.sizes[dim] == 1 else 'b'} is incompatible:"
                " dimensions without coordinates must have have a length of 2 or 3"
            )
    c = apply_ufunc(
        np.cross,
        a,
        b,
        input_core_dims=[[dim], [dim]],
        output_core_dims=[[dim] if a.sizes[dim] == 3 else []],
        dask="parallelized",
        output_dtypes=[np.result_type(a, b)],
    )
    c = c.transpose(*all_dims, missing_dims="ignore")
    return c
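
A short sketch of the zero-padding branch above, where the two inputs have different sizes along `dim` and carry no coordinates (variable names are hypothetical):

import xarray as xr

# `a` has only 2 components along "cartesian", so it is padded with a zero
# before np.cross is applied: (1, 2, 0) x (4, 5, 6).
a = xr.DataArray([1, 2], dims="cartesian")
b = xr.DataArray([4, 5, 6], dims="cartesian")
c = xr.cross(a, b, dim="cartesian")
assert c.values.tolist() == [12, -6, -3]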
 
 | 
	cross 
 | 
	File-Level 
 | 
					
	xarray 
 | 
	37 
 | 
	xarray/conventions.py 
 | 
	def decode_cf_variable(
    name: Hashable,
    var: Variable,
    concat_characters: bool = True,
    mask_and_scale: bool = True,
    decode_times: bool = True,
    decode_endianness: bool = True,
    stack_char_dim: bool = True,
    use_cftime: bool | None = None,
    decode_timedelta: bool | None = None,
) -> Variable:
    """
    Decodes a variable which may hold CF encoded information.
    This includes variables that have been masked and scaled, which
    hold CF style time variables (this is almost always the case if
    the dataset has been serialized) and which have strings encoded
    as character arrays.
    Parameters
    ----------
    name : str
        Name of the variable. Used for better error messages.
    var : Variable
        A variable holding potentially CF encoded information.
    concat_characters : bool
        Should character arrays be concatenated to strings, for
        example: ["h", "e", "l", "l", "o"] -> "hello"
    mask_and_scale : bool
        Lazily scale (using scale_factor and add_offset) and mask
        (using _FillValue). If the _Unsigned attribute is present
        treat integer arrays as unsigned.
    decode_times : bool
        Decode cf times ("hours since 2000-01-01") to np.datetime64.
    decode_endianness : bool
        Decode arrays from non-native to native endianness.
    stack_char_dim : bool
        Whether to stack characters into bytes along the last dimension of this
        array. Passed as an argument because we need to look at the full
        dataset to figure out if this is appropriate.
    use_cftime : bool, optional
        Only relevant if encoded dates come from a standard calendar
        (e.g. "gregorian", "proleptic_gregorian", "standard", or not
        specified).  If None (default), attempt to decode times to
        ``np.datetime64[ns]`` objects; if this is not possible, decode times to
        ``cftime.datetime`` objects. If True, always decode times to
        ``cftime.datetime`` objects, regardless of whether or not they can be
        represented using ``np.datetime64[ns]`` objects.  If False, always
        decode times to ``np.datetime64[ns]`` objects; if this is not possible
        raise an error.
    decode_timedelta : bool, optional
        If True, decode variables whose units are time-delta-like (e.g. "seconds")
        into timedelta values. If None (default), this follows ``decode_times``.
    Returns
    -------
    out : Variable
        A variable holding the decoded equivalent of var.
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_decode_cf_variable.txt 
 | 
	def decode_cf_variable(
    name: Hashable,
    var: Variable,
    concat_characters: bool = True,
    mask_and_scale: bool = True,
    decode_times: bool = True,
    decode_endianness: bool = True,
    stack_char_dim: bool = True,
    use_cftime: bool | None = None,
    decode_timedelta: bool | None = None,
) -> Variable:
    """
    Decodes a variable which may hold CF encoded information.
    This includes variables that have been masked and scaled, which
    hold CF style time variables (this is almost always the case if
    the dataset has been serialized) and which have strings encoded
    as character arrays.
    Parameters
    ----------
    name : str
        Name of the variable. Used for better error messages.
    var : Variable
        A variable holding potentially CF encoded information.
    concat_characters : bool
        Should character arrays be concatenated to strings, for
        example: ["h", "e", "l", "l", "o"] -> "hello"
    mask_and_scale : bool
        Lazily scale (using scale_factor and add_offset) and mask
        (using _FillValue). If the _Unsigned attribute is present
        treat integer arrays as unsigned.
    decode_times : bool
        Decode cf times ("hours since 2000-01-01") to np.datetime64.
    decode_endianness : bool
        Decode arrays from non-native to native endianness.
    stack_char_dim : bool
        Whether to stack characters into bytes along the last dimension of this
        array. Passed as an argument because we need to look at the full
        dataset to figure out if this is appropriate.
    use_cftime : bool, optional
        Only relevant if encoded dates come from a standard calendar
        (e.g. "gregorian", "proleptic_gregorian", "standard", or not
        specified).  If None (default), attempt to decode times to
        ``np.datetime64[ns]`` objects; if this is not possible, decode times to
        ``cftime.datetime`` objects. If True, always decode times to
        ``cftime.datetime`` objects, regardless of whether or not they can be
        represented using ``np.datetime64[ns]`` objects.  If False, always
        decode times to ``np.datetime64[ns]`` objects; if this is not possible
        raise an error.
    decode_timedelta : bool, optional
        If True, decode variables whose units are time-delta-like (e.g. "seconds")
        into timedelta values. If None (default), this follows ``decode_times``.
    Returns
    -------
    out : Variable
        A variable holding the decoded equivalent of var.
    """
    # Ensure datetime-like Variables are passed through unmodified (GH 6453)
    if _contains_datetime_like_objects(var):
        return var
    original_dtype = var.dtype
    if decode_timedelta is None:
        decode_timedelta = decode_times
    if concat_characters:
        if stack_char_dim:
            var = strings.CharacterArrayCoder().decode(var, name=name)
        var = strings.EncodedStringCoder().decode(var)
    if original_dtype.kind == "O":
        var = variables.ObjectVLenStringCoder().decode(var)
        original_dtype = var.dtype
    if mask_and_scale:
        for coder in [
            variables.CFMaskCoder(),
            variables.CFScaleOffsetCoder(),
        ]:
            var = coder.decode(var, name=name)
    if decode_timedelta:
        var = times.CFTimedeltaCoder().decode(var, name=name)
    if decode_times:
        var = times.CFDatetimeCoder(use_cftime=use_cftime).decode(var, name=name)
    if decode_endianness and not var.dtype.isnative:
        var = variables.EndianCoder().decode(var)
        original_dtype = var.dtype
    var = variables.BooleanCoder().decode(var)
    dimensions, data, attributes, encoding = variables.unpack_for_decoding(var)
    encoding.setdefault("dtype", original_dtype)
    if not is_duck_dask_array(data):
        data = indexing.LazilyIndexedArray(data)
    return Variable(dimensions, data, attributes, encoding=encoding, fastpath=True)
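
A minimal sketch of the mask-and-scale path above (attribute values are hypothetical; only the `xarray.conventions.decode_cf_variable` entry point shown here is used):

import numpy as np
from xarray import Variable
from xarray.conventions import decode_cf_variable

# Packed uint8 data with CF packing attributes: 255 is masked to NaN and the
# remaining values are unpacked as raw * scale_factor + add_offset.
raw = Variable(
    ("x",),
    np.array([0, 10, 255], dtype="uint8"),
    attrs={"scale_factor": 0.5, "add_offset": 1.0, "_FillValue": 255},
)
decoded = decode_cf_variable("packed", raw)
print(decoded.values)  # approximately [1.0, 6.0, nan]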
 
 | 
	decode_cf_variable 
 | 
	Self-Contained 
 | 
					
	xarray 
 | 
	38 
 | 
	xarray/core/computation.py 
 | 
	def dot(
    *arrays,
    dim: Dims = None,
    **kwargs: Any,
):
    """Generalized dot product for xarray objects. Like ``np.einsum``, but
    provides a simpler interface based on array dimension names.
    Parameters
    ----------
    *arrays : DataArray or Variable
        Arrays to compute.
    dim : str, iterable of hashable, "..." or None, optional
        Which dimensions to sum over. Ellipsis ('...') sums over all dimensions.
        If not specified, then all the common dimensions are summed over.
    **kwargs : dict
        Additional keyword arguments passed to ``numpy.einsum`` or
        ``dask.array.einsum``
    Returns
    -------
    DataArray
    See Also
    --------
    numpy.einsum
    dask.array.einsum
    opt_einsum.contract
    Notes
    -----
    We recommend installing the optional ``opt_einsum`` package, or alternatively passing ``optimize=True``,
    which is passed through to ``np.einsum``, and works for most array backends.
    Examples
    --------
    >>> da_a = xr.DataArray(np.arange(3 * 2).reshape(3, 2), dims=["a", "b"])
    >>> da_b = xr.DataArray(np.arange(3 * 2 * 2).reshape(3, 2, 2), dims=["a", "b", "c"])
    >>> da_c = xr.DataArray(np.arange(2 * 3).reshape(2, 3), dims=["c", "d"])
    >>> da_a
    <xarray.DataArray (a: 3, b: 2)> Size: 48B
    array([[0, 1],
           [2, 3],
           [4, 5]])
    Dimensions without coordinates: a, b
    >>> da_b
    <xarray.DataArray (a: 3, b: 2, c: 2)> Size: 96B
    array([[[ 0,  1],
            [ 2,  3]],
    <BLANKLINE>
           [[ 4,  5],
            [ 6,  7]],
    <BLANKLINE>
           [[ 8,  9],
            [10, 11]]])
    Dimensions without coordinates: a, b, c
    >>> da_c
    <xarray.DataArray (c: 2, d: 3)> Size: 48B
    array([[0, 1, 2],
           [3, 4, 5]])
    Dimensions without coordinates: c, d
    >>> xr.dot(da_a, da_b, dim=["a", "b"])
    <xarray.DataArray (c: 2)> Size: 16B
    array([110, 125])
    Dimensions without coordinates: c
    >>> xr.dot(da_a, da_b, dim=["a"])
    <xarray.DataArray (b: 2, c: 2)> Size: 32B
    array([[40, 46],
           [70, 79]])
    Dimensions without coordinates: b, c
    >>> xr.dot(da_a, da_b, da_c, dim=["b", "c"])
    <xarray.DataArray (a: 3, d: 3)> Size: 72B
    array([[  9,  14,  19],
           [ 93, 150, 207],
           [273, 446, 619]])
    Dimensions without coordinates: a, d
    >>> xr.dot(da_a, da_b)
    <xarray.DataArray (c: 2)> Size: 16B
    array([110, 125])
    Dimensions without coordinates: c
    >>> xr.dot(da_a, da_b, dim=...)
    <xarray.DataArray ()> Size: 8B
    array(235)
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_dot.txt 
 | 
	def dot(
    *arrays,
    dim: Dims = None,
    **kwargs: Any,
):
    """Generalized dot product for xarray objects. Like ``np.einsum``, but
    provides a simpler interface based on array dimension names.
    Parameters
    ----------
    *arrays : DataArray or Variable
        Arrays to compute.
    dim : str, iterable of hashable, "..." or None, optional
        Which dimensions to sum over. Ellipsis ('...') sums over all dimensions.
        If not specified, then all the common dimensions are summed over.
    **kwargs : dict
        Additional keyword arguments passed to ``numpy.einsum`` or
        ``dask.array.einsum``
    Returns
    -------
    DataArray
    See Also
    --------
    numpy.einsum
    dask.array.einsum
    opt_einsum.contract
    Notes
    -----
    We recommend installing the optional ``opt_einsum`` package, or alternatively passing ``optimize=True``,
    which is passed through to ``np.einsum``, and works for most array backends.
    Examples
    --------
    >>> da_a = xr.DataArray(np.arange(3 * 2).reshape(3, 2), dims=["a", "b"])
    >>> da_b = xr.DataArray(np.arange(3 * 2 * 2).reshape(3, 2, 2), dims=["a", "b", "c"])
    >>> da_c = xr.DataArray(np.arange(2 * 3).reshape(2, 3), dims=["c", "d"])
    >>> da_a
    <xarray.DataArray (a: 3, b: 2)> Size: 48B
    array([[0, 1],
           [2, 3],
           [4, 5]])
    Dimensions without coordinates: a, b
    >>> da_b
    <xarray.DataArray (a: 3, b: 2, c: 2)> Size: 96B
    array([[[ 0,  1],
            [ 2,  3]],
    <BLANKLINE>
           [[ 4,  5],
            [ 6,  7]],
    <BLANKLINE>
           [[ 8,  9],
            [10, 11]]])
    Dimensions without coordinates: a, b, c
    >>> da_c
    <xarray.DataArray (c: 2, d: 3)> Size: 48B
    array([[0, 1, 2],
           [3, 4, 5]])
    Dimensions without coordinates: c, d
    >>> xr.dot(da_a, da_b, dim=["a", "b"])
    <xarray.DataArray (c: 2)> Size: 16B
    array([110, 125])
    Dimensions without coordinates: c
    >>> xr.dot(da_a, da_b, dim=["a"])
    <xarray.DataArray (b: 2, c: 2)> Size: 32B
    array([[40, 46],
           [70, 79]])
    Dimensions without coordinates: b, c
    >>> xr.dot(da_a, da_b, da_c, dim=["b", "c"])
    <xarray.DataArray (a: 3, d: 3)> Size: 72B
    array([[  9,  14,  19],
           [ 93, 150, 207],
           [273, 446, 619]])
    Dimensions without coordinates: a, d
    >>> xr.dot(da_a, da_b)
    <xarray.DataArray (c: 2)> Size: 16B
    array([110, 125])
    Dimensions without coordinates: c
    >>> xr.dot(da_a, da_b, dim=...)
    <xarray.DataArray ()> Size: 8B
    array(235)
    """
    from xarray.core.dataarray import DataArray
    from xarray.core.variable import Variable
    if any(not isinstance(arr, Variable | DataArray) for arr in arrays):
        raise TypeError(
            "Only xr.DataArray and xr.Variable are supported."
            f"Given {[type(arr) for arr in arrays]}."
        )
    if len(arrays) == 0:
        raise TypeError("At least one array should be given.")
    common_dims: set[Hashable] = set.intersection(*(set(arr.dims) for arr in arrays))
    all_dims = []
    for arr in arrays:
        all_dims += [d for d in arr.dims if d not in all_dims]
    einsum_axes = "abcdefghijklmnopqrstuvwxyz"
    dim_map = {d: einsum_axes[i] for i, d in enumerate(all_dims)}
    if dim is None:
        # find dimensions that occur more than once
        dim_counts: Counter = Counter()
        for arr in arrays:
            dim_counts.update(arr.dims)
        dim = tuple(d for d, c in dim_counts.items() if c > 1)
    else:
        dim = parse_dims(dim, all_dims=tuple(all_dims))
    dot_dims: set[Hashable] = set(dim)
    # dimensions to be parallelized
    broadcast_dims = common_dims - dot_dims
    input_core_dims = [
        [d for d in arr.dims if d not in broadcast_dims] for arr in arrays
    ]
    output_core_dims = [
        [d for d in all_dims if d not in dot_dims and d not in broadcast_dims]
    ]
    # construct einsum subscripts, such as '...abc,...ab->...c'
    # Note: input_core_dims are always moved to the last position
    subscripts_list = [
        "..." + "".join(dim_map[d] for d in ds) for ds in input_core_dims
    ]
    subscripts = ",".join(subscripts_list)
    subscripts += "->..." + "".join(dim_map[d] for d in output_core_dims[0])
    join = OPTIONS["arithmetic_join"]
    # using "inner" emulates `(a * b).sum()` for all joins (except "exact")
    if join != "exact":
        join = "inner"
    # subscripts should be passed to np.einsum as arg, not as kwargs. We need
    # to construct a partial function for apply_ufunc to work.
    func = functools.partial(duck_array_ops.einsum, subscripts, **kwargs)
    result = apply_ufunc(
        func,
        *arrays,
        input_core_dims=input_core_dims,
        output_core_dims=output_core_dims,
        join=join,
        dask="allowed",
    )
    return result.transpose(*all_dims, missing_dims="ignore")
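
As a small illustration of the contraction implemented above (array shapes are hypothetical), `xr.dot` over a shared dimension matches the explicit broadcast-multiply-and-sum, but is dispatched through `einsum` and never materializes the broadcast product:

import numpy as np
import xarray as xr

a = xr.DataArray(np.random.rand(3, 4), dims=("x", "y"))
b = xr.DataArray(np.random.rand(4, 5), dims=("y", "z"))

# Contract over the shared "y" dimension.
via_dot = xr.dot(a, b, dim="y")
via_sum = (a * b).sum("y")
np.testing.assert_allclose(via_dot, via_sum)
assert via_dot.dims == ("x", "z")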
 
 | 
	dot 
 | 
	File-Level 
 | 
					
	xarray 
 | 
	50 
 | 
	xarray/core/dataset.py 
 | 
	    def isel(
        self,
        indexers: Mapping[Any, Any] | None = None,
        drop: bool = False,
        missing_dims: ErrorOptionsWithWarn = "raise",
        **indexers_kwargs: Any,
    ) -> Self:
        """Returns a new dataset with each array indexed along the specified
        dimension(s).
        This method selects values from each array using its `__getitem__`
        method, except this method does not require knowing the order of
        each array's dimensions.
        Parameters
        ----------
        indexers : dict, optional
            A dict with keys matching dimensions and values given
            by integers, slice objects or arrays.
            An indexer can be an integer, slice, array-like or DataArray.
            If DataArrays are passed as indexers, xarray-style indexing will be
            carried out. See :ref:`indexing` for the details.
            One of indexers or indexers_kwargs must be provided.
        drop : bool, default: False
            If ``drop=True``, drop coordinate variables indexed by integers
            instead of making them scalar.
        missing_dims : {"raise", "warn", "ignore"}, default: "raise"
            What to do if dimensions that should be selected from are not present in the
            Dataset:
            - "raise": raise an exception
            - "warn": raise a warning, and ignore the missing dimensions
            - "ignore": ignore the missing dimensions
        **indexers_kwargs : {dim: indexer, ...}, optional
            The keyword arguments form of ``indexers``.
            One of indexers or indexers_kwargs must be provided.
        Returns
        -------
        obj : Dataset
            A new Dataset with the same contents as this dataset, except each
            array and dimension is indexed by the appropriate indexers.
            If indexer DataArrays have coordinates that do not conflict with
            this object, then these coordinates will be attached.
            In general, each array's data will be a view of the array's data
            in this dataset, unless vectorized indexing was triggered by using
            an array indexer, in which case the data will be a copy.
        Examples
        --------
        >>> dataset = xr.Dataset(
        ...     {
        ...         "math_scores": (
        ...             ["student", "test"],
        ...             [[90, 85, 92], [78, 80, 85], [95, 92, 98]],
        ...         ),
        ...         "english_scores": (
        ...             ["student", "test"],
        ...             [[88, 90, 92], [75, 82, 79], [93, 96, 91]],
        ...         ),
        ...     },
        ...     coords={
        ...         "student": ["Alice", "Bob", "Charlie"],
        ...         "test": ["Test 1", "Test 2", "Test 3"],
        ...     },
        ... )
        # A specific element from the dataset is selected
        >>> dataset.isel(student=1, test=0)
        <xarray.Dataset> Size: 68B
        Dimensions:         ()
        Coordinates:
            student         <U7 28B 'Bob'
            test            <U6 24B 'Test 1'
        Data variables:
            math_scores     int64 8B 78
            english_scores  int64 8B 75
        # Indexing with a slice using isel
        >>> slice_of_data = dataset.isel(student=slice(0, 2), test=slice(0, 2))
        >>> slice_of_data
        <xarray.Dataset> Size: 168B
        Dimensions:         (student: 2, test: 2)
        Coordinates:
          * student         (student) <U7 56B 'Alice' 'Bob'
          * test            (test) <U6 48B 'Test 1' 'Test 2'
        Data variables:
            math_scores     (student, test) int64 32B 90 85 78 80
            english_scores  (student, test) int64 32B 88 90 75 82
        >>> index_array = xr.DataArray([0, 2], dims="student")
        >>> indexed_data = dataset.isel(student=index_array)
        >>> indexed_data
        <xarray.Dataset> Size: 224B
        Dimensions:         (student: 2, test: 3)
        Coordinates:
          * student         (student) <U7 56B 'Alice' 'Charlie'
          * test            (test) <U6 72B 'Test 1' 'Test 2' 'Test 3'
        Data variables:
            math_scores     (student, test) int64 48B 90 85 92 95 92 98
            english_scores  (student, test) int64 48B 88 90 92 93 96 91
        See Also
        --------
        Dataset.sel
        DataArray.isel
        :doc:`xarray-tutorial:intermediate/indexing/indexing`
            Tutorial material on indexing with Xarray objects
        :doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic`
            Tutorial material on basics of indexing
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_isel.txt 
 | 
	    def isel(
        self,
        indexers: Mapping[Any, Any] | None = None,
        drop: bool = False,
        missing_dims: ErrorOptionsWithWarn = "raise",
        **indexers_kwargs: Any,
    ) -> Self:
        """Returns a new dataset with each array indexed along the specified
        dimension(s).
        This method selects values from each array using its `__getitem__`
        method, except this method does not require knowing the order of
        each array's dimensions.
        Parameters
        ----------
        indexers : dict, optional
            A dict with keys matching dimensions and values given
            by integers, slice objects or arrays.
            An indexer can be an integer, slice, array-like or DataArray.
            If DataArrays are passed as indexers, xarray-style indexing will be
            carried out. See :ref:`indexing` for the details.
            One of indexers or indexers_kwargs must be provided.
        drop : bool, default: False
            If ``drop=True``, drop coordinate variables indexed by integers
            instead of making them scalar.
        missing_dims : {"raise", "warn", "ignore"}, default: "raise"
            What to do if dimensions that should be selected from are not present in the
            Dataset:
            - "raise": raise an exception
            - "warn": raise a warning, and ignore the missing dimensions
            - "ignore": ignore the missing dimensions
        **indexers_kwargs : {dim: indexer, ...}, optional
            The keyword arguments form of ``indexers``.
            One of indexers or indexers_kwargs must be provided.
        Returns
        -------
        obj : Dataset
            A new Dataset with the same contents as this dataset, except each
            array and dimension is indexed by the appropriate indexers.
            If indexer DataArrays have coordinates that do not conflict with
            this object, then these coordinates will be attached.
            In general, each array's data will be a view of the array's data
            in this dataset, unless vectorized indexing was triggered by using
            an array indexer, in which case the data will be a copy.
        Examples
        --------
        >>> dataset = xr.Dataset(
        ...     {
        ...         "math_scores": (
        ...             ["student", "test"],
        ...             [[90, 85, 92], [78, 80, 85], [95, 92, 98]],
        ...         ),
        ...         "english_scores": (
        ...             ["student", "test"],
        ...             [[88, 90, 92], [75, 82, 79], [93, 96, 91]],
        ...         ),
        ...     },
        ...     coords={
        ...         "student": ["Alice", "Bob", "Charlie"],
        ...         "test": ["Test 1", "Test 2", "Test 3"],
        ...     },
        ... )
        # A specific element from the dataset is selected
        >>> dataset.isel(student=1, test=0)
        <xarray.Dataset> Size: 68B
        Dimensions:         ()
        Coordinates:
            student         <U7 28B 'Bob'
            test            <U6 24B 'Test 1'
        Data variables:
            math_scores     int64 8B 78
            english_scores  int64 8B 75
        # Indexing with a slice using isel
        >>> slice_of_data = dataset.isel(student=slice(0, 2), test=slice(0, 2))
        >>> slice_of_data
        <xarray.Dataset> Size: 168B
        Dimensions:         (student: 2, test: 2)
        Coordinates:
          * student         (student) <U7 56B 'Alice' 'Bob'
          * test            (test) <U6 48B 'Test 1' 'Test 2'
        Data variables:
            math_scores     (student, test) int64 32B 90 85 78 80
            english_scores  (student, test) int64 32B 88 90 75 82
        >>> index_array = xr.DataArray([0, 2], dims="student")
        >>> indexed_data = dataset.isel(student=index_array)
        >>> indexed_data
        <xarray.Dataset> Size: 224B
        Dimensions:         (student: 2, test: 3)
        Coordinates:
          * student         (student) <U7 56B 'Alice' 'Charlie'
          * test            (test) <U6 72B 'Test 1' 'Test 2' 'Test 3'
        Data variables:
            math_scores     (student, test) int64 48B 90 85 92 95 92 98
            english_scores  (student, test) int64 48B 88 90 92 93 96 91
        See Also
        --------
        Dataset.sel
        DataArray.isel
        :doc:`xarray-tutorial:intermediate/indexing/indexing`
            Tutorial material on indexing with Xarray objects
        :doc:`xarray-tutorial:fundamentals/02.1_indexing_Basic`
            Tutorial material on basics of indexing
        """
        indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel")
        if any(is_fancy_indexer(idx) for idx in indexers.values()):
            return self._isel_fancy(indexers, drop=drop, missing_dims=missing_dims)
        # Much faster algorithm for when all indexers are ints, slices, one-dimensional
        # lists, or zero or one-dimensional np.ndarray's
        indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)
        variables = {}
        dims: dict[Hashable, int] = {}
        coord_names = self._coord_names.copy()
        indexes, index_variables = isel_indexes(self.xindexes, indexers)
        for name, var in self._variables.items():
            # preserve variable order
            if name in index_variables:
                var = index_variables[name]
            else:
                var_indexers = {k: v for k, v in indexers.items() if k in var.dims}
                if var_indexers:
                    var = var.isel(var_indexers)
                    if drop and var.ndim == 0 and name in coord_names:
                        coord_names.remove(name)
                        continue
            variables[name] = var
            dims.update(zip(var.dims, var.shape, strict=True))
        return self._construct_direct(
            variables=variables,
            coord_names=coord_names,
            dims=dims,
            attrs=self._attrs,
            indexes=indexes,
            encoding=self._encoding,
            close=self._close,
        )
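
A brief sketch of the `drop` and `missing_dims` options documented above (the dataset and dimension names are hypothetical):

import xarray as xr

ds = xr.Dataset(
    {"t": (("x", "y"), [[1, 2], [3, 4]])},
    coords={"x": [10, 20], "y": ["a", "b"]},
)

# Integer indexing normally leaves "x" behind as a scalar coordinate;
# drop=True removes it, and missing_dims="ignore" skips the unknown "z".
out = ds.isel(x=0, z=0, drop=True, missing_dims="ignore")
assert "x" not in out.coords
assert dict(out.sizes) == {"y": 2}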
 
 | 
	isel 
 | 
	Self-Contained 
 | 
					
	xarray 
 | 
	52 
 | 
	xarray/plot/utils.py 
 | 
	def legend_elements(
    self, prop="colors", num="auto", fmt=None, func=lambda x: x, **kwargs
):
    """
    Create legend handles and labels for a PathCollection.
    Each legend handle is a `.Line2D` representing the Path that was drawn,
    and each label is a string describing what each Path represents.
    This is useful for obtaining a legend for a `~.Axes.scatter` plot;
    e.g.::
        scatter = plt.scatter([1, 2, 3],  [4, 5, 6],  c=[7, 2, 3])
        plt.legend(*scatter.legend_elements())
    creates three legend elements, one for each color with the numerical
    values passed to *c* as the labels.
    Also see the :ref:`automatedlegendcreation` example.
    Parameters
    ----------
    prop : {"colors", "sizes"}, default: "colors"
        If "colors", the legend handles will show the different colors of
        the collection. If "sizes", the legend will show the different
        sizes. To set both, use *kwargs* to directly edit the `.Line2D`
        properties.
    num : int, None, "auto" (default), array-like, or `~.ticker.Locator`
        Target number of elements to create.
        If None, use all unique elements of the mappable array. If an
        integer, target to use *num* elements in the normed range.
        If *"auto"*, try to determine which option better suits the nature
        of the data.
        The number of created elements may slightly deviate from *num* due
        to a `~.ticker.Locator` being used to find useful locations.
        If a list or array, use exactly those elements for the legend.
        Finally, a `~.ticker.Locator` can be provided.
    fmt : str, `~matplotlib.ticker.Formatter`, or None (default)
        The format or formatter to use for the labels. If a string, it must be
        a valid input for a `~.StrMethodFormatter`. If None (the default),
        use a `~.ScalarFormatter`.
    func : function, default: ``lambda x: x``
        Function to calculate the labels.  Often the size (or color)
        argument to `~.Axes.scatter` will have been pre-processed by the
        user using a function ``s = f(x)`` to make the markers visible;
        e.g. ``size = np.log10(x)``.  Providing the inverse of this
        function here allows that pre-processing to be inverted, so that
        the legend labels have the correct values; e.g. ``func = lambda
        x: 10**x``.
    **kwargs
        Allowed keyword arguments are *color* and *size*. E.g. it may be
        useful to set the color of the markers if *prop="sizes"* is used;
        similarly to set the size of the markers if *prop="colors"* is
        used. Any further parameters are passed onto the `.Line2D`
        instance. This may be useful to e.g. specify a different
        *markeredgecolor* or *alpha* for the legend handles.
    Returns
    -------
    handles : list of `.Line2D`
        Visual representation of each element of the legend.
    labels : list of str
        The string labels for elements of the legend.
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_legend_elements.txt 
 | 
	def legend_elements(
    self, prop="colors", num="auto", fmt=None, func=lambda x: x, **kwargs
):
    """
    Create legend handles and labels for a PathCollection.
    Each legend handle is a `.Line2D` representing the Path that was drawn,
    and each label is a string describing what each Path represents.
    This is useful for obtaining a legend for a `~.Axes.scatter` plot;
    e.g.::
        scatter = plt.scatter([1, 2, 3],  [4, 5, 6],  c=[7, 2, 3])
        plt.legend(*scatter.legend_elements())
    creates three legend elements, one for each color with the numerical
    values passed to *c* as the labels.
    Also see the :ref:`automatedlegendcreation` example.
    Parameters
    ----------
    prop : {"colors", "sizes"}, default: "colors"
        If "colors", the legend handles will show the different colors of
        the collection. If "sizes", the legend will show the different
        sizes. To set both, use *kwargs* to directly edit the `.Line2D`
        properties.
    num : int, None, "auto" (default), array-like, or `~.ticker.Locator`
        Target number of elements to create.
        If None, use all unique elements of the mappable array. If an
        integer, target to use *num* elements in the normed range.
        If *"auto"*, try to determine which option better suits the nature
        of the data.
        The number of created elements may slightly deviate from *num* due
        to a `~.ticker.Locator` being used to find useful locations.
        If a list or array, use exactly those elements for the legend.
        Finally, a `~.ticker.Locator` can be provided.
    fmt : str, `~matplotlib.ticker.Formatter`, or None (default)
        The format or formatter to use for the labels. If a string, it must be
        a valid input for a `~.StrMethodFormatter`. If None (the default),
        use a `~.ScalarFormatter`.
    func : function, default: ``lambda x: x``
        Function to calculate the labels.  Often the size (or color)
        argument to `~.Axes.scatter` will have been pre-processed by the
        user using a function ``s = f(x)`` to make the markers visible;
        e.g. ``size = np.log10(x)``.  Providing the inverse of this
        function here allows that pre-processing to be inverted, so that
        the legend labels have the correct values; e.g. ``func = lambda
        x: 10**x``.
    **kwargs
        Allowed keyword arguments are *color* and *size*. E.g. it may be
        useful to set the color of the markers if *prop="sizes"* is used;
        similarly to set the size of the markers if *prop="colors"* is
        used. Any further parameters are passed onto the `.Line2D`
        instance. This may be useful to e.g. specify a different
        *markeredgecolor* or *alpha* for the legend handles.
    Returns
    -------
    handles : list of `.Line2D`
        Visual representation of each element of the legend.
    labels : list of str
        The string labels for elements of the legend.
    """
    import warnings
    import matplotlib as mpl
    mlines = mpl.lines
    handles = []
    labels = []
    if prop == "colors":
        arr = self.get_array()
        if arr is None:
            warnings.warn(
                "Collection without array used. Make sure to "
                "specify the values to be colormapped via the "
                "`c` argument.",
                stacklevel=2,
            )
            return handles, labels
        _size = kwargs.pop("size", mpl.rcParams["lines.markersize"])
        def _get_color_and_size(value):
            return self.cmap(self.norm(value)), _size
    elif prop == "sizes":
        if isinstance(self, mpl.collections.LineCollection):
            arr = self.get_linewidths()
        else:
            arr = self.get_sizes()
        _color = kwargs.pop("color", "k")
        def _get_color_and_size(value):
            return _color, np.sqrt(value)
    else:
        raise ValueError(
            "Valid values for `prop` are 'colors' or "
            f"'sizes'. You supplied '{prop}' instead."
        )
    # Get the unique values and their labels:
    values = np.unique(arr)
    label_values = np.asarray(func(values))
    label_values_are_numeric = np.issubdtype(label_values.dtype, np.number)
    # Handle the label format:
    if fmt is None and label_values_are_numeric:
        fmt = mpl.ticker.ScalarFormatter(useOffset=False, useMathText=True)
    elif fmt is None and not label_values_are_numeric:
        fmt = mpl.ticker.StrMethodFormatter("{x}")
    elif isinstance(fmt, str):
        fmt = mpl.ticker.StrMethodFormatter(fmt)
    fmt.create_dummy_axis()
    if num == "auto":
        num = 9
        if len(values) <= num:
            num = None
    if label_values_are_numeric:
        label_values_min = label_values.min()
        label_values_max = label_values.max()
        fmt.axis.set_view_interval(label_values_min, label_values_max)
        fmt.axis.set_data_interval(label_values_min, label_values_max)
        if num is not None:
            # Labels are numerical but larger than the target
            # number of elements, reduce to target using matplotlibs
            # ticker classes:
            if isinstance(num, mpl.ticker.Locator):
                loc = num
            elif np.iterable(num):
                loc = mpl.ticker.FixedLocator(num)
            else:
                num = int(num)
                loc = mpl.ticker.MaxNLocator(
                    nbins=num, min_n_ticks=num - 1, steps=[1, 2, 2.5, 3, 5, 6, 8, 10]
                )
            # Get nicely spaced label_values:
            label_values = loc.tick_values(label_values_min, label_values_max)
            # Remove extrapolated label_values:
            cond = (label_values >= label_values_min) & (
                label_values <= label_values_max
            )
            label_values = label_values[cond]
            # Get the corresponding values by creating a linear interpolant
            # with small step size:
            values_interp = np.linspace(values.min(), values.max(), 256)
            label_values_interp = func(values_interp)
            ix = np.argsort(label_values_interp)
            values = np.interp(label_values, label_values_interp[ix], values_interp[ix])
    elif num is not None and not label_values_are_numeric:
        # Labels are not numerical so modifying label_values is not
        # possible, instead filter the array with nicely distributed
        # indexes:
        if type(num) == int:  # noqa: E721
            loc = mpl.ticker.LinearLocator(num)
        else:
            raise ValueError("`num` only supports integers for non-numeric labels.")
        ind = loc.tick_values(0, len(label_values) - 1).astype(int)
        label_values = label_values[ind]
        values = values[ind]
    # Some formatters require set_locs:
    if hasattr(fmt, "set_locs"):
        fmt.set_locs(label_values)
    # Default settings for handles, add or override with kwargs:
    kw = dict(markeredgewidth=self.get_linewidths()[0], alpha=self.get_alpha())
    kw.update(kwargs)
    for val, lab in zip(values, label_values, strict=True):
        color, size = _get_color_and_size(val)
        if isinstance(self, mpl.collections.PathCollection):
            kw.update(linestyle="", marker=self.get_paths()[0], markersize=size)
        elif isinstance(self, mpl.collections.LineCollection):
            kw.update(linestyle=self.get_linestyle()[0], linewidth=size)
        h = mlines.Line2D([0], [0], color=color, **kw)
        handles.append(h)
        labels.append(fmt(lab))
    return handles, labels
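
A minimal usage sketch (not from the row above) showing *prop="sizes"* together with the *func* inverse described in the docstring; the data values and the log10 pre-processing are illustrative assumptions:

import numpy as np
import matplotlib.pyplot as plt

# Hypothetical pre-processing: marker sizes were derived as s = 10 * log10(x).
x = np.array([10, 100, 1000, 10000])
sizes = 10 * np.log10(x)

fig, ax = plt.subplots()
scatter = ax.scatter(np.arange(len(x)), np.arange(len(x)), s=sizes, c="tab:blue")

# Invert the pre-processing so the legend labels show the original x values;
# extra kwargs such as *color* are applied to the Line2D legend handles.
handles, labels = scatter.legend_elements(
    prop="sizes", num=4, func=lambda s: 10 ** (s / 10), color="tab:blue"
)
ax.legend(handles, labels, title="x")
plt.show()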
 
 | 
	legend_elements 
 | 
	File-Level 
 | 
					
	xarray 
 | 
	54 
 | 
	xarray/core/parallel.py 
 | 
	def map_blocks(
    func: Callable[..., T_Xarray],
    obj: DataArray | Dataset,
    args: Sequence[Any] = (),
    kwargs: Mapping[str, Any] | None = None,
    template: DataArray | Dataset | None = None,
) -> T_Xarray:
    """Apply a function to each block of a DataArray or Dataset.
    .. warning::
        This function is experimental and its signature may change.
    Parameters
    ----------
    func : callable
        User-provided function that accepts a DataArray or Dataset as its first
        parameter ``obj``. The function will receive a subset or 'block' of ``obj`` (see below),
        corresponding to one chunk along each chunked dimension. ``func`` will be
        executed as ``func(subset_obj, *subset_args, **kwargs)``.
        This function must return either a single DataArray or a single Dataset.
        This function cannot add a new chunked dimension.
    obj : DataArray, Dataset
        Passed to the function as its first argument, one block at a time.
    args : sequence
        Passed to func after unpacking and subsetting any xarray objects by blocks.
        xarray objects in args must be aligned with obj, otherwise an error is raised.
    kwargs : mapping
        Passed verbatim to func after unpacking. xarray objects, if any, will not be
        subset to blocks. Passing dask collections in kwargs is not allowed.
    template : DataArray or Dataset, optional
        xarray object representing the final result after compute is called. If not provided,
        the function will first be run on mocked-up data that looks like ``obj`` but
        has sizes 0, to determine properties of the returned object such as dtype,
        variable names, attributes, new dimensions and new indexes (if any).
        ``template`` must be provided if the function changes the size of existing dimensions.
        When provided, ``attrs`` on variables in `template` are copied over to the result. Any
        ``attrs`` set by ``func`` will be ignored.
    Returns
    -------
    obj : same as obj
        A single DataArray or Dataset with dask backend, reassembled from the outputs of the
        function.
    Notes
    -----
    This function is designed for when ``func`` needs to manipulate a whole xarray object
    subset to each block. Each block is loaded into memory. In the more common case where
    ``func`` can work on numpy arrays, it is recommended to use ``apply_ufunc``.
    If none of the variables in ``obj`` is backed by dask arrays, calling this function is
    equivalent to calling ``func(obj, *args, **kwargs)``.
    See Also
    --------
    dask.array.map_blocks, xarray.apply_ufunc, xarray.Dataset.map_blocks
    xarray.DataArray.map_blocks
    Examples
    --------
    Calculate an anomaly from climatology using ``.groupby()``. Using
    ``xr.map_blocks()`` allows for parallel operations with knowledge of ``xarray``,
    its indices, and its methods like ``.groupby()``.
    >>> def calculate_anomaly(da, groupby_type="time.month"):
    ...     gb = da.groupby(groupby_type)
    ...     clim = gb.mean(dim="time")
    ...     return gb - clim
    ...
    >>> time = xr.cftime_range("1990-01", "1992-01", freq="ME")
    >>> month = xr.DataArray(time.month, coords={"time": time}, dims=["time"])
    >>> np.random.seed(123)
    >>> array = xr.DataArray(
    ...     np.random.rand(len(time)),
    ...     dims=["time"],
    ...     coords={"time": time, "month": month},
    ... ).chunk()
    >>> array.map_blocks(calculate_anomaly, template=array).compute()
    <xarray.DataArray (time: 24)> Size: 192B
    array([ 0.12894847,  0.11323072, -0.0855964 , -0.09334032,  0.26848862,
            0.12382735,  0.22460641,  0.07650108, -0.07673453, -0.22865714,
           -0.19063865,  0.0590131 , -0.12894847, -0.11323072,  0.0855964 ,
            0.09334032, -0.26848862, -0.12382735, -0.22460641, -0.07650108,
            0.07673453,  0.22865714,  0.19063865, -0.0590131 ])
    Coordinates:
      * time     (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00
        month    (time) int64 192B 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12
    Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments
    to the function being applied in ``xr.map_blocks()``:
    >>> array.map_blocks(
    ...     calculate_anomaly,
    ...     kwargs={"groupby_type": "time.year"},
    ...     template=array,
    ... )  # doctest: +ELLIPSIS
    <xarray.DataArray (time: 24)> Size: 192B
    dask.array<<this-array>-calculate_anomaly, shape=(24,), dtype=float64, chunksize=(24,), chunktype=numpy.ndarray>
    Coordinates:
      * time     (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00
        month    (time) int64 192B dask.array<chunksize=(24,), meta=np.ndarray>
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_map_blocks.txt 
 | 
	def map_blocks(
    func: Callable[..., T_Xarray],
    obj: DataArray | Dataset,
    args: Sequence[Any] = (),
    kwargs: Mapping[str, Any] | None = None,
    template: DataArray | Dataset | None = None,
) -> T_Xarray:
    """Apply a function to each block of a DataArray or Dataset.
    .. warning::
        This function is experimental and its signature may change.
    Parameters
    ----------
    func : callable
        User-provided function that accepts a DataArray or Dataset as its first
        parameter ``obj``. The function will receive a subset or 'block' of ``obj`` (see below),
        corresponding to one chunk along each chunked dimension. ``func`` will be
        executed as ``func(subset_obj, *subset_args, **kwargs)``.
        This function must return either a single DataArray or a single Dataset.
        This function cannot add a new chunked dimension.
    obj : DataArray, Dataset
        Passed to the function as its first argument, one block at a time.
    args : sequence
        Passed to func after unpacking and subsetting any xarray objects by blocks.
        xarray objects in args must be aligned with obj, otherwise an error is raised.
    kwargs : mapping
        Passed verbatim to func after unpacking. xarray objects, if any, will not be
        subset to blocks. Passing dask collections in kwargs is not allowed.
    template : DataArray or Dataset, optional
        xarray object representing the final result after compute is called. If not provided,
        the function will first be run on mocked-up data that looks like ``obj`` but
        has sizes 0, to determine properties of the returned object such as dtype,
        variable names, attributes, new dimensions and new indexes (if any).
        ``template`` must be provided if the function changes the size of existing dimensions.
        When provided, ``attrs`` on variables in `template` are copied over to the result. Any
        ``attrs`` set by ``func`` will be ignored.
    Returns
    -------
    obj : same as obj
        A single DataArray or Dataset with dask backend, reassembled from the outputs of the
        function.
    Notes
    -----
    This function is designed for when ``func`` needs to manipulate a whole xarray object
    subset to each block. Each block is loaded into memory. In the more common case where
    ``func`` can work on numpy arrays, it is recommended to use ``apply_ufunc``.
    If none of the variables in ``obj`` is backed by dask arrays, calling this function is
    equivalent to calling ``func(obj, *args, **kwargs)``.
    See Also
    --------
    dask.array.map_blocks, xarray.apply_ufunc, xarray.Dataset.map_blocks
    xarray.DataArray.map_blocks
    Examples
    --------
    Calculate an anomaly from climatology using ``.groupby()``. Using
    ``xr.map_blocks()`` allows for parallel operations with knowledge of ``xarray``,
    its indices, and its methods like ``.groupby()``.
    >>> def calculate_anomaly(da, groupby_type="time.month"):
    ...     gb = da.groupby(groupby_type)
    ...     clim = gb.mean(dim="time")
    ...     return gb - clim
    ...
    >>> time = xr.cftime_range("1990-01", "1992-01", freq="ME")
    >>> month = xr.DataArray(time.month, coords={"time": time}, dims=["time"])
    >>> np.random.seed(123)
    >>> array = xr.DataArray(
    ...     np.random.rand(len(time)),
    ...     dims=["time"],
    ...     coords={"time": time, "month": month},
    ... ).chunk()
    >>> array.map_blocks(calculate_anomaly, template=array).compute()
    <xarray.DataArray (time: 24)> Size: 192B
    array([ 0.12894847,  0.11323072, -0.0855964 , -0.09334032,  0.26848862,
            0.12382735,  0.22460641,  0.07650108, -0.07673453, -0.22865714,
           -0.19063865,  0.0590131 , -0.12894847, -0.11323072,  0.0855964 ,
            0.09334032, -0.26848862, -0.12382735, -0.22460641, -0.07650108,
            0.07673453,  0.22865714,  0.19063865, -0.0590131 ])
    Coordinates:
      * time     (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00
        month    (time) int64 192B 1 2 3 4 5 6 7 8 9 10 ... 3 4 5 6 7 8 9 10 11 12
    Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments
    to the function being applied in ``xr.map_blocks()``:
    >>> array.map_blocks(
    ...     calculate_anomaly,
    ...     kwargs={"groupby_type": "time.year"},
    ...     template=array,
    ... )  # doctest: +ELLIPSIS
    <xarray.DataArray (time: 24)> Size: 192B
    dask.array<<this-array>-calculate_anomaly, shape=(24,), dtype=float64, chunksize=(24,), chunktype=numpy.ndarray>
    Coordinates:
      * time     (time) object 192B 1990-01-31 00:00:00 ... 1991-12-31 00:00:00
        month    (time) int64 192B dask.array<chunksize=(24,), meta=np.ndarray>
    """
    def _wrapper(
        func: Callable,
        args: list,
        kwargs: dict,
        arg_is_array: Iterable[bool],
        expected: ExpectedDict,
    ):
        """
        Wrapper function that receives datasets in args; converts to dataarrays when necessary;
        passes these to the user function `func` and checks returned objects for expected shapes/sizes/etc.
        """
        converted_args = [
            dataset_to_dataarray(arg) if is_array else arg
            for is_array, arg in zip(arg_is_array, args, strict=True)
        ]
        result = func(*converted_args, **kwargs)
        merged_coordinates = merge(
            [arg.coords for arg in args if isinstance(arg, Dataset | DataArray)]
        ).coords
        # check all dims are present
        missing_dimensions = set(expected["shapes"]) - set(result.sizes)
        if missing_dimensions:
            raise ValueError(
                f"Dimensions {missing_dimensions} missing on returned object."
            )
        # check that index lengths and values are as expected
        for name, index in result._indexes.items():
            if name in expected["shapes"]:
                if result.sizes[name] != expected["shapes"][name]:
                    raise ValueError(
                        f"Received dimension {name!r} of length {result.sizes[name]}. "
                        f"Expected length {expected['shapes'][name]}."
                    )
            # ChainMap wants MutableMapping, but xindexes is Mapping
            merged_indexes = collections.ChainMap(
                expected["indexes"], merged_coordinates.xindexes  # type: ignore[arg-type]
            )
            expected_index = merged_indexes.get(name, None)
            if expected_index is not None and not index.equals(expected_index):
                raise ValueError(
                    f"Expected index {name!r} to be {expected_index!r}. Received {index!r} instead."
                )
        # check that all expected variables were returned
        check_result_variables(result, expected, "coords")
        if isinstance(result, Dataset):
            check_result_variables(result, expected, "data_vars")
        return make_dict(result)
    if template is not None and not isinstance(template, DataArray | Dataset):
        raise TypeError(
            f"template must be a DataArray or Dataset. Received {type(template).__name__} instead."
        )
    if not isinstance(args, Sequence):
        raise TypeError("args must be a sequence (for example, a list or tuple).")
    if kwargs is None:
        kwargs = {}
    elif not isinstance(kwargs, Mapping):
        raise TypeError("kwargs must be a mapping (for example, a dict)")
    for value in kwargs.values():
        if is_dask_collection(value):
            raise TypeError(
                "Cannot pass dask collections in kwargs yet. Please compute or "
                "load values before passing to map_blocks."
            )
    if not is_dask_collection(obj):
        return func(obj, *args, **kwargs)
    try:
        import dask
        import dask.array
        from dask.highlevelgraph import HighLevelGraph
    except ImportError:
        pass
    all_args = [obj] + list(args)
    is_xarray = [isinstance(arg, Dataset | DataArray) for arg in all_args]
    is_array = [isinstance(arg, DataArray) for arg in all_args]
    # there should be a better way to group this. partition?
    xarray_indices, xarray_objs = unzip(
        (index, arg) for index, arg in enumerate(all_args) if is_xarray[index]
    )
    others = [
        (index, arg) for index, arg in enumerate(all_args) if not is_xarray[index]
    ]
    # all xarray objects must be aligned. This is consistent with apply_ufunc.
    aligned = align(*xarray_objs, join="exact")
    xarray_objs = tuple(
        dataarray_to_dataset(arg) if isinstance(arg, DataArray) else arg
        for arg in aligned
    )
    # rechunk any numpy variables appropriately
    xarray_objs = tuple(arg.chunk(arg.chunksizes) for arg in xarray_objs)
    merged_coordinates = merge([arg.coords for arg in aligned]).coords
    _, npargs = unzip(
        sorted(
            list(zip(xarray_indices, xarray_objs, strict=True)) + others,
            key=lambda x: x[0],
        )
    )
    # check that chunk sizes are compatible
    input_chunks = dict(npargs[0].chunks)
    for arg in xarray_objs[1:]:
        assert_chunks_compatible(npargs[0], arg)
        input_chunks.update(arg.chunks)
    coordinates: Coordinates
    if template is None:
        # infer template by providing zero-shaped arrays
        template = infer_template(func, aligned[0], *args, **kwargs)
        template_coords = set(template.coords)
        preserved_coord_vars = template_coords & set(merged_coordinates)
        new_coord_vars = template_coords - set(merged_coordinates)
        preserved_coords = merged_coordinates.to_dataset()[preserved_coord_vars]
        # preserved_coords contains all coordinates variables that share a dimension
        # with any index variable in preserved_indexes
        # Drop any unneeded vars in a second pass, this is required for e.g.
        # if the mapped function were to drop a non-dimension coordinate variable.
        preserved_coords = preserved_coords.drop_vars(
            tuple(k for k in preserved_coords.variables if k not in template_coords)
        )
        coordinates = merge(
            (preserved_coords, template.coords.to_dataset()[new_coord_vars])
        ).coords
        output_chunks: Mapping[Hashable, tuple[int, ...]] = {
            dim: input_chunks[dim] for dim in template.dims if dim in input_chunks
        }
    else:
        # template xarray object has been provided with proper sizes and chunk shapes
        coordinates = template.coords
        output_chunks = template.chunksizes
        if not output_chunks:
            raise ValueError(
                "Provided template has no dask arrays. "
                " Please construct a template with appropriately chunked dask arrays."
            )
    new_indexes = set(template.xindexes) - set(merged_coordinates)
    modified_indexes = set(
        name
        for name, xindex in coordinates.xindexes.items()
        if not xindex.equals(merged_coordinates.xindexes.get(name, None))
    )
    for dim in output_chunks:
        if dim in input_chunks and len(input_chunks[dim]) != len(output_chunks[dim]):
            raise ValueError(
                "map_blocks requires that one block of the input maps to one block of output. "
                f"Expected number of output chunks along dimension {dim!r} to be {len(input_chunks[dim])}. "
                f"Received {len(output_chunks[dim])} instead. Please provide template if not provided, or "
                "fix the provided template."
            )
    if isinstance(template, DataArray):
        result_is_array = True
        template_name = template.name
        template = template._to_temp_dataset()
    elif isinstance(template, Dataset):
        result_is_array = False
    else:
        raise TypeError(
            f"func output must be DataArray or Dataset; got {type(template)}"
        )
    # We're building a new HighLevelGraph hlg. We'll have one new layer
    # for each variable in the dataset, which is the result of the
    # func applied to the values.
    graph: dict[Any, Any] = {}
    new_layers: collections.defaultdict[str, dict[Any, Any]] = collections.defaultdict(
        dict
    )
    gname = f"{dask.utils.funcname(func)}-{dask.base.tokenize(npargs[0], args, kwargs)}"
    # map dims to list of chunk indexes
    ichunk = {dim: range(len(chunks_v)) for dim, chunks_v in input_chunks.items()}
    # mapping from chunk index to slice bounds
    input_chunk_bounds = {
        dim: np.cumsum((0,) + chunks_v) for dim, chunks_v in input_chunks.items()
    }
    output_chunk_bounds = {
        dim: np.cumsum((0,) + chunks_v) for dim, chunks_v in output_chunks.items()
    }
    computed_variables = set(template.variables) - set(coordinates.indexes)
    # iterate over all possible chunk combinations
    for chunk_tuple in itertools.product(*ichunk.values()):
        # mapping from dimension name to chunk index
        chunk_index = dict(zip(ichunk.keys(), chunk_tuple, strict=True))
        blocked_args = [
            (
                subset_dataset_to_block(
                    graph, gname, arg, input_chunk_bounds, chunk_index
                )
                if isxr
                else arg
            )
            for isxr, arg in zip(is_xarray, npargs, strict=True)
        ]
        # raise nice error messages in _wrapper
        expected: ExpectedDict = {
            # input chunk 0 along a dimension maps to output chunk 0 along the same dimension
            # even if length of dimension is changed by the applied function
            "shapes": {
                k: output_chunks[k][v]
                for k, v in chunk_index.items()
                if k in output_chunks
            },
            "data_vars": set(template.data_vars.keys()),
            "coords": set(template.coords.keys()),
            # only include new or modified indexes to minimize duplication of data, and graph size.
            "indexes": {
                dim: coordinates.xindexes[dim][
                    _get_chunk_slicer(dim, chunk_index, output_chunk_bounds)
                ]
                for dim in (new_indexes | modified_indexes)
            },
        }
        from_wrapper = (gname,) + chunk_tuple
        graph[from_wrapper] = (_wrapper, func, blocked_args, kwargs, is_array, expected)
        # mapping from variable name to dask graph key
        var_key_map: dict[Hashable, str] = {}
        for name in computed_variables:
            variable = template.variables[name]
            gname_l = f"{name}-{gname}"
            var_key_map[name] = gname_l
            # unchunked dimensions in the input have one chunk in the result
            # output can have new dimensions with exactly one chunk
            key: tuple[Any, ...] = (gname_l,) + tuple(
                chunk_index[dim] if dim in chunk_index else 0 for dim in variable.dims
            )
            # We're adding multiple new layers to the graph:
            # The first new layer is the result of the computation on
            # the array.
            # Then we add one layer per variable, which extracts the
            # result for that variable, and depends on just the first new
            # layer.
            new_layers[gname_l][key] = (operator.getitem, from_wrapper, name)
    hlg = HighLevelGraph.from_collections(
        gname,
        graph,
        dependencies=[arg for arg in npargs if dask.is_dask_collection(arg)],
    )
    # This adds in the getitems for each variable in the dataset.
    hlg = HighLevelGraph(
        {**hlg.layers, **new_layers},
        dependencies={
            **hlg.dependencies,
            **{name: {gname} for name in new_layers.keys()},
        },
    )
    result = Dataset(coords=coordinates, attrs=template.attrs)
    for index in result._indexes:
        result[index].attrs = template[index].attrs
        result[index].encoding = template[index].encoding
    for name, gname_l in var_key_map.items():
        dims = template[name].dims
        var_chunks = []
        for dim in dims:
            if dim in output_chunks:
                var_chunks.append(output_chunks[dim])
            elif dim in result._indexes:
                var_chunks.append((result.sizes[dim],))
            elif dim in template.dims:
                # new unindexed dimension
                var_chunks.append((template.sizes[dim],))
        data = dask.array.Array(
            hlg, name=gname_l, chunks=var_chunks, dtype=template[name].dtype
        )
        result[name] = (dims, data, template[name].attrs)
        result[name].encoding = template[name].encoding
    result = result.set_coords(template._coord_names)
    if result_is_array:
        da = dataset_to_dataarray(result)
        da.name = template_name
        return da  # type: ignore[return-value]
    return result  # type: ignore[return-value]
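
To illustrate the docstring note that ``template`` must be provided when the function changes the size of an existing dimension, here is a hedged sketch (array contents and chunk sizes are assumptions) where each input block shrinks by half:

import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(8.0), dims="x", name="a").chunk({"x": 4})

def take_every_other(block):
    return block.isel(x=slice(None, None, 2))

# Each input chunk of length 4 yields an output chunk of length 2, so the
# template is the eagerly computed result re-chunked to the expected shape.
template = take_every_other(da.compute()).chunk({"x": 2})
result = xr.map_blocks(take_every_other, da, template=template)
print(result.compute())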
 
 | 
	map_blocks 
 | 
	File-Level 
 | 
					
	xarray 
 | 
	58 
 | 
	xarray/core/merge.py 
 | 
	def merge(
    objects: Iterable[DataArray | CoercibleMapping],
    compat: CompatOptions = "no_conflicts",
    join: JoinOptions = "outer",
    fill_value: object = dtypes.NA,
    combine_attrs: CombineAttrsOptions = "override",
) -> Dataset:
    """Merge any number of xarray objects into a single Dataset as variables.
    Parameters
    ----------
    objects : iterable of Dataset or iterable of DataArray or iterable of dict-like
        Merge together all variables from these objects. If any of them are
        DataArray objects, they must have a name.
    compat : {"identical", "equals", "broadcast_equals", "no_conflicts", \
              "override", "minimal"}, default: "no_conflicts"
        String indicating how to compare variables of the same name for
        potential conflicts:
        - "identical": all values, dimensions and attributes must be the
          same.
        - "equals": all values and dimensions must be the same.
        - "broadcast_equals": all values must be equal when variables are
          broadcast against each other to ensure common dimensions.
        - "no_conflicts": only values which are not null in both datasets
          must be equal. The returned dataset then contains the combination
          of all non-null values.
        - "override": skip comparing and pick variable from first dataset
        - "minimal": drop conflicting coordinates
    join : {"outer", "inner", "left", "right", "exact", "override"}, default: "outer"
        String indicating how to combine differing indexes in objects.
        - "outer": use the union of object indexes
        - "inner": use the intersection of object indexes
        - "left": use indexes from the first object with each dimension
        - "right": use indexes from the last object with each dimension
        - "exact": instead of aligning, raise `ValueError` when indexes to be
          aligned are not equal
        - "override": if indexes are of same size, rewrite indexes to be
          those of the first object with that dimension. Indexes for the same
          dimension must have the same size in all objects.
    fill_value : scalar or dict-like, optional
        Value to use for newly missing values. If a dict-like, maps
        variable names to fill values. Use a data array's name to
        refer to its values.
    combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
                     "override"} or callable, default: "override"
        A callable or a string indicating how to combine attrs of the objects being
        merged:
        - "drop": empty attrs on returned Dataset.
        - "identical": all attrs must be the same on every object.
        - "no_conflicts": attrs from all objects are combined, any that have
          the same name must also have the same value.
        - "drop_conflicts": attrs from all objects are combined, any that have
          the same name but different values are dropped.
        - "override": skip comparing and copy attrs from the first dataset to
          the result.
        If a callable, it must expect a sequence of ``attrs`` dicts and a context object
        as its only parameters.
    Returns
    -------
    Dataset
        Dataset with combined variables from each object.
    Examples
    --------
    >>> x = xr.DataArray(
    ...     [[1.0, 2.0], [3.0, 5.0]],
    ...     dims=("lat", "lon"),
    ...     coords={"lat": [35.0, 40.0], "lon": [100.0, 120.0]},
    ...     name="var1",
    ... )
    >>> y = xr.DataArray(
    ...     [[5.0, 6.0], [7.0, 8.0]],
    ...     dims=("lat", "lon"),
    ...     coords={"lat": [35.0, 42.0], "lon": [100.0, 150.0]},
    ...     name="var2",
    ... )
    >>> z = xr.DataArray(
    ...     [[0.0, 3.0], [4.0, 9.0]],
    ...     dims=("time", "lon"),
    ...     coords={"time": [30.0, 60.0], "lon": [100.0, 150.0]},
    ...     name="var3",
    ... )
    >>> x
    <xarray.DataArray 'var1' (lat: 2, lon: 2)> Size: 32B
    array([[1., 2.],
           [3., 5.]])
    Coordinates:
      * lat      (lat) float64 16B 35.0 40.0
      * lon      (lon) float64 16B 100.0 120.0
    >>> y
    <xarray.DataArray 'var2' (lat: 2, lon: 2)> Size: 32B
    array([[5., 6.],
           [7., 8.]])
    Coordinates:
      * lat      (lat) float64 16B 35.0 42.0
      * lon      (lon) float64 16B 100.0 150.0
    >>> z
    <xarray.DataArray 'var3' (time: 2, lon: 2)> Size: 32B
    array([[0., 3.],
           [4., 9.]])
    Coordinates:
      * time     (time) float64 16B 30.0 60.0
      * lon      (lon) float64 16B 100.0 150.0
    >>> xr.merge([x, y, z])
    <xarray.Dataset> Size: 256B
    Dimensions:  (lat: 3, lon: 3, time: 2)
    Coordinates:
      * lat      (lat) float64 24B 35.0 40.0 42.0
      * lon      (lon) float64 24B 100.0 120.0 150.0
      * time     (time) float64 16B 30.0 60.0
    Data variables:
        var1     (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan
        var2     (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
        var3     (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
    >>> xr.merge([x, y, z], compat="identical")
    <xarray.Dataset> Size: 256B
    Dimensions:  (lat: 3, lon: 3, time: 2)
    Coordinates:
      * lat      (lat) float64 24B 35.0 40.0 42.0
      * lon      (lon) float64 24B 100.0 120.0 150.0
      * time     (time) float64 16B 30.0 60.0
    Data variables:
        var1     (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan
        var2     (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
        var3     (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
    >>> xr.merge([x, y, z], compat="equals")
    <xarray.Dataset> Size: 256B
    Dimensions:  (lat: 3, lon: 3, time: 2)
    Coordinates:
      * lat      (lat) float64 24B 35.0 40.0 42.0
      * lon      (lon) float64 24B 100.0 120.0 150.0
      * time     (time) float64 16B 30.0 60.0
    Data variables:
        var1     (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan
        var2     (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
        var3     (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
    >>> xr.merge([x, y, z], compat="equals", fill_value=-999.0)
    <xarray.Dataset> Size: 256B
    Dimensions:  (lat: 3, lon: 3, time: 2)
    Coordinates:
      * lat      (lat) float64 24B 35.0 40.0 42.0
      * lon      (lon) float64 24B 100.0 120.0 150.0
      * time     (time) float64 16B 30.0 60.0
    Data variables:
        var1     (lat, lon) float64 72B 1.0 2.0 -999.0 3.0 ... -999.0 -999.0 -999.0
        var2     (lat, lon) float64 72B 5.0 -999.0 6.0 -999.0 ... 7.0 -999.0 8.0
        var3     (time, lon) float64 48B 0.0 -999.0 3.0 4.0 -999.0 9.0
    >>> xr.merge([x, y, z], join="override")
    <xarray.Dataset> Size: 144B
    Dimensions:  (lat: 2, lon: 2, time: 2)
    Coordinates:
      * lat      (lat) float64 16B 35.0 40.0
      * lon      (lon) float64 16B 100.0 120.0
      * time     (time) float64 16B 30.0 60.0
    Data variables:
        var1     (lat, lon) float64 32B 1.0 2.0 3.0 5.0
        var2     (lat, lon) float64 32B 5.0 6.0 7.0 8.0
        var3     (time, lon) float64 32B 0.0 3.0 4.0 9.0
    >>> xr.merge([x, y, z], join="inner")
    <xarray.Dataset> Size: 64B
    Dimensions:  (lat: 1, lon: 1, time: 2)
    Coordinates:
      * lat      (lat) float64 8B 35.0
      * lon      (lon) float64 8B 100.0
      * time     (time) float64 16B 30.0 60.0
    Data variables:
        var1     (lat, lon) float64 8B 1.0
        var2     (lat, lon) float64 8B 5.0
        var3     (time, lon) float64 16B 0.0 4.0
    >>> xr.merge([x, y, z], compat="identical", join="inner")
    <xarray.Dataset> Size: 64B
    Dimensions:  (lat: 1, lon: 1, time: 2)
    Coordinates:
      * lat      (lat) float64 8B 35.0
      * lon      (lon) float64 8B 100.0
      * time     (time) float64 16B 30.0 60.0
    Data variables:
        var1     (lat, lon) float64 8B 1.0
        var2     (lat, lon) float64 8B 5.0
        var3     (time, lon) float64 16B 0.0 4.0
    >>> xr.merge([x, y, z], compat="broadcast_equals", join="outer")
    <xarray.Dataset> Size: 256B
    Dimensions:  (lat: 3, lon: 3, time: 2)
    Coordinates:
      * lat      (lat) float64 24B 35.0 40.0 42.0
      * lon      (lon) float64 24B 100.0 120.0 150.0
      * time     (time) float64 16B 30.0 60.0
    Data variables:
        var1     (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan
        var2     (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
        var3     (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
    >>> xr.merge([x, y, z], join="exact")
    Traceback (most recent call last):
    ...
    ValueError: cannot align objects with join='exact' where ...
    Raises
    ------
    xarray.MergeError
        If any variables with the same name have conflicting values.
    See also
    --------
    concat
    combine_nested
    combine_by_coords
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_merge.txt 
 | 
	def merge(
    objects: Iterable[DataArray | CoercibleMapping],
    compat: CompatOptions = "no_conflicts",
    join: JoinOptions = "outer",
    fill_value: object = dtypes.NA,
    combine_attrs: CombineAttrsOptions = "override",
) -> Dataset:
    """Merge any number of xarray objects into a single Dataset as variables.
    Parameters
    ----------
    objects : iterable of Dataset or iterable of DataArray or iterable of dict-like
        Merge together all variables from these objects. If any of them are
        DataArray objects, they must have a name.
    compat : {"identical", "equals", "broadcast_equals", "no_conflicts", \
              "override", "minimal"}, default: "no_conflicts"
        String indicating how to compare variables of the same name for
        potential conflicts:
        - "identical": all values, dimensions and attributes must be the
          same.
        - "equals": all values and dimensions must be the same.
        - "broadcast_equals": all values must be equal when variables are
          broadcast against each other to ensure common dimensions.
        - "no_conflicts": only values which are not null in both datasets
          must be equal. The returned dataset then contains the combination
          of all non-null values.
        - "override": skip comparing and pick variable from first dataset
        - "minimal": drop conflicting coordinates
    join : {"outer", "inner", "left", "right", "exact", "override"}, default: "outer"
        String indicating how to combine differing indexes in objects.
        - "outer": use the union of object indexes
        - "inner": use the intersection of object indexes
        - "left": use indexes from the first object with each dimension
        - "right": use indexes from the last object with each dimension
        - "exact": instead of aligning, raise `ValueError` when indexes to be
          aligned are not equal
        - "override": if indexes are of same size, rewrite indexes to be
          those of the first object with that dimension. Indexes for the same
          dimension must have the same size in all objects.
    fill_value : scalar or dict-like, optional
        Value to use for newly missing values. If a dict-like, maps
        variable names to fill values. Use a data array's name to
        refer to its values.
    combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
                     "override"} or callable, default: "override"
        A callable or a string indicating how to combine attrs of the objects being
        merged:
        - "drop": empty attrs on returned Dataset.
        - "identical": all attrs must be the same on every object.
        - "no_conflicts": attrs from all objects are combined, any that have
          the same name must also have the same value.
        - "drop_conflicts": attrs from all objects are combined, any that have
          the same name but different values are dropped.
        - "override": skip comparing and copy attrs from the first dataset to
          the result.
        If a callable, it must expect a sequence of ``attrs`` dicts and a context object
        as its only parameters.
    Returns
    -------
    Dataset
        Dataset with combined variables from each object.
    Examples
    --------
    >>> x = xr.DataArray(
    ...     [[1.0, 2.0], [3.0, 5.0]],
    ...     dims=("lat", "lon"),
    ...     coords={"lat": [35.0, 40.0], "lon": [100.0, 120.0]},
    ...     name="var1",
    ... )
    >>> y = xr.DataArray(
    ...     [[5.0, 6.0], [7.0, 8.0]],
    ...     dims=("lat", "lon"),
    ...     coords={"lat": [35.0, 42.0], "lon": [100.0, 150.0]},
    ...     name="var2",
    ... )
    >>> z = xr.DataArray(
    ...     [[0.0, 3.0], [4.0, 9.0]],
    ...     dims=("time", "lon"),
    ...     coords={"time": [30.0, 60.0], "lon": [100.0, 150.0]},
    ...     name="var3",
    ... )
    >>> x
    <xarray.DataArray 'var1' (lat: 2, lon: 2)> Size: 32B
    array([[1., 2.],
           [3., 5.]])
    Coordinates:
      * lat      (lat) float64 16B 35.0 40.0
      * lon      (lon) float64 16B 100.0 120.0
    >>> y
    <xarray.DataArray 'var2' (lat: 2, lon: 2)> Size: 32B
    array([[5., 6.],
           [7., 8.]])
    Coordinates:
      * lat      (lat) float64 16B 35.0 42.0
      * lon      (lon) float64 16B 100.0 150.0
    >>> z
    <xarray.DataArray 'var3' (time: 2, lon: 2)> Size: 32B
    array([[0., 3.],
           [4., 9.]])
    Coordinates:
      * time     (time) float64 16B 30.0 60.0
      * lon      (lon) float64 16B 100.0 150.0
    >>> xr.merge([x, y, z])
    <xarray.Dataset> Size: 256B
    Dimensions:  (lat: 3, lon: 3, time: 2)
    Coordinates:
      * lat      (lat) float64 24B 35.0 40.0 42.0
      * lon      (lon) float64 24B 100.0 120.0 150.0
      * time     (time) float64 16B 30.0 60.0
    Data variables:
        var1     (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan
        var2     (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
        var3     (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
    >>> xr.merge([x, y, z], compat="identical")
    <xarray.Dataset> Size: 256B
    Dimensions:  (lat: 3, lon: 3, time: 2)
    Coordinates:
      * lat      (lat) float64 24B 35.0 40.0 42.0
      * lon      (lon) float64 24B 100.0 120.0 150.0
      * time     (time) float64 16B 30.0 60.0
    Data variables:
        var1     (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan
        var2     (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
        var3     (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
    >>> xr.merge([x, y, z], compat="equals")
    <xarray.Dataset> Size: 256B
    Dimensions:  (lat: 3, lon: 3, time: 2)
    Coordinates:
      * lat      (lat) float64 24B 35.0 40.0 42.0
      * lon      (lon) float64 24B 100.0 120.0 150.0
      * time     (time) float64 16B 30.0 60.0
    Data variables:
        var1     (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan
        var2     (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
        var3     (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
    >>> xr.merge([x, y, z], compat="equals", fill_value=-999.0)
    <xarray.Dataset> Size: 256B
    Dimensions:  (lat: 3, lon: 3, time: 2)
    Coordinates:
      * lat      (lat) float64 24B 35.0 40.0 42.0
      * lon      (lon) float64 24B 100.0 120.0 150.0
      * time     (time) float64 16B 30.0 60.0
    Data variables:
        var1     (lat, lon) float64 72B 1.0 2.0 -999.0 3.0 ... -999.0 -999.0 -999.0
        var2     (lat, lon) float64 72B 5.0 -999.0 6.0 -999.0 ... 7.0 -999.0 8.0
        var3     (time, lon) float64 48B 0.0 -999.0 3.0 4.0 -999.0 9.0
    >>> xr.merge([x, y, z], join="override")
    <xarray.Dataset> Size: 144B
    Dimensions:  (lat: 2, lon: 2, time: 2)
    Coordinates:
      * lat      (lat) float64 16B 35.0 40.0
      * lon      (lon) float64 16B 100.0 120.0
      * time     (time) float64 16B 30.0 60.0
    Data variables:
        var1     (lat, lon) float64 32B 1.0 2.0 3.0 5.0
        var2     (lat, lon) float64 32B 5.0 6.0 7.0 8.0
        var3     (time, lon) float64 32B 0.0 3.0 4.0 9.0
    >>> xr.merge([x, y, z], join="inner")
    <xarray.Dataset> Size: 64B
    Dimensions:  (lat: 1, lon: 1, time: 2)
    Coordinates:
      * lat      (lat) float64 8B 35.0
      * lon      (lon) float64 8B 100.0
      * time     (time) float64 16B 30.0 60.0
    Data variables:
        var1     (lat, lon) float64 8B 1.0
        var2     (lat, lon) float64 8B 5.0
        var3     (time, lon) float64 16B 0.0 4.0
    >>> xr.merge([x, y, z], compat="identical", join="inner")
    <xarray.Dataset> Size: 64B
    Dimensions:  (lat: 1, lon: 1, time: 2)
    Coordinates:
      * lat      (lat) float64 8B 35.0
      * lon      (lon) float64 8B 100.0
      * time     (time) float64 16B 30.0 60.0
    Data variables:
        var1     (lat, lon) float64 8B 1.0
        var2     (lat, lon) float64 8B 5.0
        var3     (time, lon) float64 16B 0.0 4.0
    >>> xr.merge([x, y, z], compat="broadcast_equals", join="outer")
    <xarray.Dataset> Size: 256B
    Dimensions:  (lat: 3, lon: 3, time: 2)
    Coordinates:
      * lat      (lat) float64 24B 35.0 40.0 42.0
      * lon      (lon) float64 24B 100.0 120.0 150.0
      * time     (time) float64 16B 30.0 60.0
    Data variables:
        var1     (lat, lon) float64 72B 1.0 2.0 nan 3.0 5.0 nan nan nan nan
        var2     (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
        var3     (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
    >>> xr.merge([x, y, z], join="exact")
    Traceback (most recent call last):
    ...
    ValueError: cannot align objects with join='exact' where ...
    Raises
    ------
    xarray.MergeError
        If any variables with the same name have conflicting values.
    See also
    --------
    concat
    combine_nested
    combine_by_coords
    """
    from xarray.core.coordinates import Coordinates
    from xarray.core.dataarray import DataArray
    from xarray.core.dataset import Dataset
    dict_like_objects = []
    for obj in objects:
        if not isinstance(obj, DataArray | Dataset | Coordinates | dict):
            raise TypeError(
                "objects must be an iterable containing only "
                "Dataset(s), DataArray(s), and dictionaries."
            )
        if isinstance(obj, DataArray):
            obj = obj.to_dataset(promote_attrs=True)
        elif isinstance(obj, Coordinates):
            obj = obj.to_dataset()
        dict_like_objects.append(obj)
    merge_result = merge_core(
        dict_like_objects,
        compat,
        join,
        combine_attrs=combine_attrs,
        fill_value=fill_value,
    )
    return Dataset._construct_direct(**merge_result._asdict())
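
A small follow-up sketch (variable names and values are illustrative) showing that ``merge`` also accepts dict-like objects mapping variable names to ``(dims, data)`` tuples alongside named DataArrays, as the type check above allows:

import xarray as xr

temperature = xr.DataArray(
    [280.0, 282.5], dims="x", coords={"x": [0, 1]}, name="temperature"
)
extra = {"pressure": ("x", [1000.0, 990.0])}

ds = xr.merge([temperature, extra])
print(ds)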
 
 | 
	merge 
 | 
	File-Level 
 | 
					
	xarray 
 | 
	60 
 | 
	xarray/backends/api.py 
 | 
	def open_dataarray(
    filename_or_obj: str | os.PathLike[Any] | BufferedIOBase | AbstractDataStore,
    *,
    engine: T_Engine | None = None,
    chunks: T_Chunks | None = None,
    cache: bool | None = None,
    decode_cf: bool | None = None,
    mask_and_scale: bool | None = None,
    decode_times: bool | None = None,
    decode_timedelta: bool | None = None,
    use_cftime: bool | None = None,
    concat_characters: bool | None = None,
    decode_coords: Literal["coordinates", "all"] | bool | None = None,
    drop_variables: str | Iterable[str] | None = None,
    inline_array: bool = False,
    chunked_array_type: str | None = None,
    from_array_kwargs: dict[str, Any] | None = None,
    backend_kwargs: dict[str, Any] | None = None,
    **kwargs,
) -> DataArray:
    """Open an DataArray from a file or file-like object containing a single
    data variable.
    This is designed to read netCDF files with only one data variable. If
    multiple variables are present then a ValueError is raised.
    Parameters
    ----------
    filename_or_obj : str, Path, file-like or DataStore
        Strings and Path objects are interpreted as a path to a netCDF file
        or an OpenDAP URL and opened with python-netCDF4, unless the filename
        ends with .gz, in which case the file is gunzipped and opened with
        scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
        objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
    engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\
        , installed backend \
        or subclass of xarray.backends.BackendEntrypoint, optional
        Engine to use when reading files. If not provided, the default engine
        is chosen based on available dependencies, with a preference for
        "netcdf4".
    chunks : int, dict, 'auto' or None, default: None
        If provided, used to load the data into dask arrays.
        - ``chunks='auto'`` will use dask ``auto`` chunking taking into account the
          engine preferred chunks.
        - ``chunks=None`` skips using dask, which is generally faster for
          small arrays.
        - ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
        - ``chunks={}`` loads the data with dask using engine preferred chunks if
          exposed by the backend, otherwise with a single chunk for all arrays.
        See dask chunking for more details.
    cache : bool, optional
        If True, cache data loaded from the underlying datastore in memory as
        NumPy arrays when accessed to avoid reading from the underlying data-
        store multiple times. Defaults to True unless you specify the `chunks`
        argument to use dask, in which case it defaults to False. Does not
        change the behavior of coordinates corresponding to dimensions, which
        always load their data from disk into a ``pandas.Index``.
    decode_cf : bool, optional
        Whether to decode these variables, assuming they were saved according
        to CF conventions.
    mask_and_scale : bool, optional
        If True, replace array values equal to `_FillValue` with NA and scale
        values according to the formula `original_values * scale_factor +
        add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
        taken from variable attributes (if they exist).  If the `_FillValue` or
        `missing_value` attribute contains multiple values a warning will be
        issued and all array values matching one of the multiple values will
        be replaced by NA. This keyword may not be supported by all the backends.
    decode_times : bool, optional
        If True, decode times encoded in the standard NetCDF datetime format
        into datetime objects. Otherwise, leave them encoded as numbers.
        This keyword may not be supported by all the backends.
    decode_timedelta : bool, optional
        If True, decode variables and coordinates with time units in
        {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
        into timedelta objects. If False, leave them encoded as numbers.
        If None (default), assume the same value as decode_times.
        This keyword may not be supported by all the backends.
    use_cftime: bool, optional
        Only relevant if encoded dates come from a standard calendar
        (e.g. "gregorian", "proleptic_gregorian", "standard", or not
        specified).  If None (default), attempt to decode times to
        ``np.datetime64[ns]`` objects; if this is not possible, decode times to
        ``cftime.datetime`` objects. If True, always decode times to
        ``cftime.datetime`` objects, regardless of whether or not they can be
        represented using ``np.datetime64[ns]`` objects.  If False, always
        decode times to ``np.datetime64[ns]`` objects; if this is not possible
        raise an error. This keyword may not be supported by all the backends.
    concat_characters : bool, optional
        If True, concatenate along the last dimension of character arrays to
        form string arrays. Dimensions will only be concatenated over (and
        removed) if they have no corresponding variable and if they are only
        used as the last dimension of character arrays.
        This keyword may not be supported by all the backends.
    decode_coords : bool or {"coordinates", "all"}, optional
        Controls which variables are set as coordinate variables:
        - "coordinates" or True: Set variables referred to in the
          ``'coordinates'`` attribute of the datasets or individual variables
          as coordinate variables.
        - "all": Set variables referred to in  ``'grid_mapping'``, ``'bounds'`` and
          other attributes as coordinate variables.
        Only existing variables can be set as coordinates. Missing variables
        will be silently ignored.
    drop_variables: str or iterable of str, optional
        A variable or list of variables to exclude from being parsed from the
        dataset. This may be useful to drop variables with problems or
        inconsistent values.
    inline_array: bool, default: False
        How to include the array in the dask task graph.
        By default (``inline_array=False``) the array is included in a task by
        itself, and each chunk refers to that task by its key. With
        ``inline_array=True``, Dask will instead inline the array directly
        in the values of the task graph. See :py:func:`dask.array.from_array`.
    chunked_array_type: str, optional
        Which chunked array type to coerce the underlying data array to.
        Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntrypoint` system.
        Experimental API that should not be relied upon.
    from_array_kwargs: dict
        Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create
        chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg.
        For example if :py:func:`dask.array.Array` objects are used for chunking, additional kwargs will be passed
        to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon.
    backend_kwargs: dict
        Additional keyword arguments passed on to the engine open function,
        equivalent to `**kwargs`.
    **kwargs: dict
        Additional keyword arguments passed on to the engine open function.
        For example:
        - 'group': path to the netCDF4 group in the given file to open given as
          a str, supported by "netcdf4", "h5netcdf", "zarr".
        - 'lock': resource lock to use when reading data from disk. Only
          relevant when using dask or another form of parallelism. By default,
          appropriate locks are chosen to safely read and write files with the
          currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
          "scipy".
        See engine open function for kwargs accepted by each specific engine.
    Notes
    -----
    This is designed to be fully compatible with `DataArray.to_netcdf`. Saving
    using `DataArray.to_netcdf` and then loading with this function will
    produce an identical result.
    All parameters are passed directly to `xarray.open_dataset`. See that
    documentation for further details.
    See also
    --------
    open_dataset
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_open_dataarray.txt 
 | 
	def open_dataarray(
    filename_or_obj: str | os.PathLike[Any] | BufferedIOBase | AbstractDataStore,
    *,
    engine: T_Engine | None = None,
    chunks: T_Chunks | None = None,
    cache: bool | None = None,
    decode_cf: bool | None = None,
    mask_and_scale: bool | None = None,
    decode_times: bool | None = None,
    decode_timedelta: bool | None = None,
    use_cftime: bool | None = None,
    concat_characters: bool | None = None,
    decode_coords: Literal["coordinates", "all"] | bool | None = None,
    drop_variables: str | Iterable[str] | None = None,
    inline_array: bool = False,
    chunked_array_type: str | None = None,
    from_array_kwargs: dict[str, Any] | None = None,
    backend_kwargs: dict[str, Any] | None = None,
    **kwargs,
) -> DataArray:
    """Open an DataArray from a file or file-like object containing a single
    data variable.
    This is designed to read netCDF files with only one data variable. If
    multiple variables are present then a ValueError is raised.
    Parameters
    ----------
    filename_or_obj : str, Path, file-like or DataStore
        Strings and Path objects are interpreted as a path to a netCDF file
        or an OpenDAP URL and opened with python-netCDF4, unless the filename
        ends with .gz, in which case the file is gunzipped and opened with
        scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
        objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
    engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\
        , installed backend \
        or subclass of xarray.backends.BackendEntrypoint, optional
        Engine to use when reading files. If not provided, the default engine
        is chosen based on available dependencies, with a preference for
        "netcdf4".
    chunks : int, dict, 'auto' or None, default: None
        If provided, used to load the data into dask arrays.
        - ``chunks='auto'`` will use dask ``auto`` chunking taking into account the
          engine preferred chunks.
        - ``chunks=None`` skips using dask, which is generally faster for
          small arrays.
        - ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
        - ``chunks={}`` loads the data with dask using engine preferred chunks if
          exposed by the backend, otherwise with a single chunk for all arrays.
        See dask chunking for more details.
    cache : bool, optional
        If True, cache data loaded from the underlying datastore in memory as
        NumPy arrays when accessed to avoid reading from the underlying data-
        store multiple times. Defaults to True unless you specify the `chunks`
        argument to use dask, in which case it defaults to False. Does not
        change the behavior of coordinates corresponding to dimensions, which
        always load their data from disk into a ``pandas.Index``.
    decode_cf : bool, optional
        Whether to decode these variables, assuming they were saved according
        to CF conventions.
    mask_and_scale : bool, optional
        If True, replace array values equal to `_FillValue` with NA and scale
        values according to the formula `original_values * scale_factor +
        add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
        taken from variable attributes (if they exist).  If the `_FillValue` or
        `missing_value` attribute contains multiple values a warning will be
        issued and all array values matching one of the multiple values will
        be replaced by NA. This keyword may not be supported by all the backends.
    decode_times : bool, optional
        If True, decode times encoded in the standard NetCDF datetime format
        into datetime objects. Otherwise, leave them encoded as numbers.
        This keyword may not be supported by all the backends.
    decode_timedelta : bool, optional
        If True, decode variables and coordinates with time units in
        {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
        into timedelta objects. If False, leave them encoded as numbers.
        If None (default), assume the same value of decode_times.
        This keyword may not be supported by all the backends.
    use_cftime: bool, optional
        Only relevant if encoded dates come from a standard calendar
        (e.g. "gregorian", "proleptic_gregorian", "standard", or not
        specified).  If None (default), attempt to decode times to
        ``np.datetime64[ns]`` objects; if this is not possible, decode times to
        ``cftime.datetime`` objects. If True, always decode times to
        ``cftime.datetime`` objects, regardless of whether or not they can be
        represented using ``np.datetime64[ns]`` objects.  If False, always
        decode times to ``np.datetime64[ns]`` objects; if this is not possible
        raise an error. This keyword may not be supported by all the backends.
    concat_characters : bool, optional
        If True, concatenate along the last dimension of character arrays to
        form string arrays. Dimensions will only be concatenated over (and
        removed) if they have no corresponding variable and if they are only
        used as the last dimension of character arrays.
        This keyword may not be supported by all the backends.
    decode_coords : bool or {"coordinates", "all"}, optional
        Controls which variables are set as coordinate variables:
        - "coordinates" or True: Set variables referred to in the
          ``'coordinates'`` attribute of the datasets or individual variables
          as coordinate variables.
        - "all": Set variables referred to in  ``'grid_mapping'``, ``'bounds'`` and
          other attributes as coordinate variables.
        Only existing variables can be set as coordinates. Missing variables
        will be silently ignored.
    drop_variables: str or iterable of str, optional
        A variable or list of variables to exclude from being parsed from the
        dataset. This may be useful to drop variables with problems or
        inconsistent values.
    inline_array: bool, default: False
        How to include the array in the dask task graph.
        By default (``inline_array=False``) the array is included in a task by
        itself, and each chunk refers to that task by its key. With
        ``inline_array=True``, Dask will instead inline the array directly
        in the values of the task graph. See :py:func:`dask.array.from_array`.
    chunked_array_type: str, optional
        Which chunked array type to coerce the underlying data array to.
        Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntrypoint` system.
        Experimental API that should not be relied upon.
    from_array_kwargs: dict
        Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create
        chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg.
        For example if :py:func:`dask.array.Array` objects are used for chunking, additional kwargs will be passed
        to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon.
    backend_kwargs: dict
        Additional keyword arguments passed on to the engine open function,
        equivalent to `**kwargs`.
    **kwargs: dict
        Additional keyword arguments passed on to the engine open function.
        For example:
        - 'group': path to the netCDF4 group in the given file to open given as
          a str, supported by "netcdf4", "h5netcdf", "zarr".
        - 'lock': resource lock to use when reading data from disk. Only
          relevant when using dask or another form of parallelism. By default,
          appropriate locks are chosen to safely read and write files with the
          currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
          "scipy".
        See engine open function for kwargs accepted by each specific engine.
    Notes
    -----
    This is designed to be fully compatible with `DataArray.to_netcdf`. Saving
    using `DataArray.to_netcdf` and then loading with this function will
    produce an identical result.
    All parameters are passed directly to `xarray.open_dataset`. See that
    documentation for further details.
    See also
    --------
    open_dataset
    """
    dataset = open_dataset(
        filename_or_obj,
        decode_cf=decode_cf,
        mask_and_scale=mask_and_scale,
        decode_times=decode_times,
        concat_characters=concat_characters,
        decode_coords=decode_coords,
        engine=engine,
        chunks=chunks,
        cache=cache,
        drop_variables=drop_variables,
        inline_array=inline_array,
        chunked_array_type=chunked_array_type,
        from_array_kwargs=from_array_kwargs,
        backend_kwargs=backend_kwargs,
        use_cftime=use_cftime,
        decode_timedelta=decode_timedelta,
        **kwargs,
    )
    if len(dataset.data_vars) != 1:
        raise ValueError(
            "Given file dataset contains more than one data "
            "variable. Please read with xarray.open_dataset and "
            "then select the variable you want."
        )
    else:
        (data_array,) = dataset.data_vars.values()
    data_array.set_close(dataset._close)
    # Reset names if they were changed during saving
    # to ensure that we can 'roundtrip' perfectly
    if DATAARRAY_NAME in dataset.attrs:
        data_array.name = dataset.attrs[DATAARRAY_NAME]
        del dataset.attrs[DATAARRAY_NAME]
    if data_array.name == DATAARRAY_VARIABLE:
        data_array.name = None
    return data_array
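
# A minimal usage sketch of the roundtrip behaviour described in the Notes above.
# The file and variable names are illustrative assumptions, not taken from the source.
import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(6).reshape(2, 3), dims=("y", "x"), name="temperature")
da.to_netcdf("single_var.nc")  # hypothetical file name

# Reopening yields the single data variable as a DataArray with its name restored;
# a file holding more than one data variable would raise ValueError instead.
reopened = xr.open_dataarray("single_var.nc")
assert reopened.name == "temperature"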
 
 | 
	open_dataarray 
 | 
	File-Level 
 | 
					
	xarray 
 | 
	61 
 | 
	xarray/backends/api.py 
 | 
	def open_dataset(
    filename_or_obj: str | os.PathLike[Any] | BufferedIOBase | AbstractDataStore,
    *,
    engine: T_Engine = None,
    chunks: T_Chunks = None,
    cache: bool | None = None,
    decode_cf: bool | None = None,
    mask_and_scale: bool | Mapping[str, bool] | None = None,
    decode_times: bool | Mapping[str, bool] | None = None,
    decode_timedelta: bool | Mapping[str, bool] | None = None,
    use_cftime: bool | Mapping[str, bool] | None = None,
    concat_characters: bool | Mapping[str, bool] | None = None,
    decode_coords: Literal["coordinates", "all"] | bool | None = None,
    drop_variables: str | Iterable[str] | None = None,
    inline_array: bool = False,
    chunked_array_type: str | None = None,
    from_array_kwargs: dict[str, Any] | None = None,
    backend_kwargs: dict[str, Any] | None = None,
    **kwargs,
) -> Dataset:
    """Open and decode a dataset from a file or file-like object.
    Parameters
    ----------
    filename_or_obj : str, Path, file-like or DataStore
        Strings and Path objects are interpreted as a path to a netCDF file
        or an OpenDAP URL and opened with python-netCDF4, unless the filename
        ends with .gz, in which case the file is gunzipped and opened with
        scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
        objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
    engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\
        , installed backend \
        or subclass of xarray.backends.BackendEntrypoint, optional
        Engine to use when reading files. If not provided, the default engine
        is chosen based on available dependencies, with a preference for
        "netcdf4". A custom backend class (a subclass of ``BackendEntrypoint``)
        can also be used.
    chunks : int, dict, 'auto' or None, default: None
        If provided, used to load the data into dask arrays.
        - ``chunks="auto"`` will use dask ``auto`` chunking taking into account the
          engine preferred chunks.
        - ``chunks=None`` skips using dask, which is generally faster for
          small arrays.
        - ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
        - ``chunks={}`` loads the data with dask using the engine's preferred chunk
          size, generally identical to the format's chunk size. If not available, a
          single chunk for all arrays.
        See dask chunking for more details.
    cache : bool, optional
        If True, cache data loaded from the underlying datastore in memory as
        NumPy arrays when accessed to avoid reading from the underlying data-
        store multiple times. Defaults to True unless you specify the `chunks`
        argument to use dask, in which case it defaults to False. Does not
        change the behavior of coordinates corresponding to dimensions, which
        always load their data from disk into a ``pandas.Index``.
    decode_cf : bool, optional
        Whether to decode these variables, assuming they were saved according
        to CF conventions.
    mask_and_scale : bool or dict-like, optional
        If True, replace array values equal to `_FillValue` with NA and scale
        values according to the formula `original_values * scale_factor +
        add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
        taken from variable attributes (if they exist).  If the `_FillValue` or
        `missing_value` attribute contains multiple values a warning will be
        issued and all array values matching one of the multiple values will
        be replaced by NA. Pass a mapping, e.g. ``{"my_variable": False}``,
        to toggle this feature per-variable individually.
        This keyword may not be supported by all the backends.
    decode_times : bool or dict-like, optional
        If True, decode times encoded in the standard NetCDF datetime format
        into datetime objects. Otherwise, leave them encoded as numbers.
        Pass a mapping, e.g. ``{"my_variable": False}``,
        to toggle this feature per-variable individually.
        This keyword may not be supported by all the backends.
    decode_timedelta : bool or dict-like, optional
        If True, decode variables and coordinates with time units in
        {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
        into timedelta objects. If False, leave them encoded as numbers.
        If None (default), assume the same value of decode_times.
        Pass a mapping, e.g. ``{"my_variable": False}``,
        to toggle this feature per-variable individually.
        This keyword may not be supported by all the backends.
    use_cftime: bool or dict-like, optional
        Only relevant if encoded dates come from a standard calendar
        (e.g. "gregorian", "proleptic_gregorian", "standard", or not
        specified).  If None (default), attempt to decode times to
        ``np.datetime64[ns]`` objects; if this is not possible, decode times to
        ``cftime.datetime`` objects. If True, always decode times to
        ``cftime.datetime`` objects, regardless of whether or not they can be
        represented using ``np.datetime64[ns]`` objects.  If False, always
        decode times to ``np.datetime64[ns]`` objects; if this is not possible
        raise an error. Pass a mapping, e.g. ``{"my_variable": False}``,
        to toggle this feature per-variable individually.
        This keyword may not be supported by all the backends.
    concat_characters : bool or dict-like, optional
        If True, concatenate along the last dimension of character arrays to
        form string arrays. Dimensions will only be concatenated over (and
        removed) if they have no corresponding variable and if they are only
        used as the last dimension of character arrays.
        Pass a mapping, e.g. ``{"my_variable": False}``,
        to toggle this feature per-variable individually.
        This keyword may not be supported by all the backends.
    decode_coords : bool or {"coordinates", "all"}, optional
        Controls which variables are set as coordinate variables:
        - "coordinates" or True: Set variables referred to in the
          ``'coordinates'`` attribute of the datasets or individual variables
          as coordinate variables.
        - "all": Set variables referred to in  ``'grid_mapping'``, ``'bounds'`` and
          other attributes as coordinate variables.
        Only existing variables can be set as coordinates. Missing variables
        will be silently ignored.
    drop_variables: str or iterable of str, optional
        A variable or list of variables to exclude from being parsed from the
        dataset. This may be useful to drop variables with problems or
        inconsistent values.
    inline_array: bool, default: False
        How to include the array in the dask task graph.
        By default (``inline_array=False``) the array is included in a task by
        itself, and each chunk refers to that task by its key. With
        ``inline_array=True``, Dask will instead inline the array directly
        in the values of the task graph. See :py:func:`dask.array.from_array`.
    chunked_array_type: str, optional
        Which chunked array type to coerce this dataset's arrays to.
        Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntrypoint` system.
        Experimental API that should not be relied upon.
    from_array_kwargs: dict
        Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create
        chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg.
        For example if :py:func:`dask.array.Array` objects are used for chunking, additional kwargs will be passed
        to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon.
    backend_kwargs: dict
        Additional keyword arguments passed on to the engine open function,
        equivalent to `**kwargs`.
    **kwargs: dict
        Additional keyword arguments passed on to the engine open function.
        For example:
        - 'group': path to the netCDF4 group in the given file to open given as
          a str, supported by "netcdf4", "h5netcdf", "zarr".
        - 'lock': resource lock to use when reading data from disk. Only
          relevant when using dask or another form of parallelism. By default,
          appropriate locks are chosen to safely read and write files with the
          currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
          "scipy".
        See engine open function for kwargs accepted by each specific engine.
    Returns
    -------
    dataset : Dataset
        The newly created dataset.
    Notes
    -----
    ``open_dataset`` opens the file with read-only access. When you modify
    values of a Dataset, even one linked to files on disk, only the in-memory
    copy you are manipulating in xarray is modified: the original file on disk
    is never touched.
    See Also
    --------
    open_mfdataset
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_open_dataset.txt 
 | 
	def open_dataset(
    filename_or_obj: str | os.PathLike[Any] | BufferedIOBase | AbstractDataStore,
    *,
    engine: T_Engine = None,
    chunks: T_Chunks = None,
    cache: bool | None = None,
    decode_cf: bool | None = None,
    mask_and_scale: bool | Mapping[str, bool] | None = None,
    decode_times: bool | Mapping[str, bool] | None = None,
    decode_timedelta: bool | Mapping[str, bool] | None = None,
    use_cftime: bool | Mapping[str, bool] | None = None,
    concat_characters: bool | Mapping[str, bool] | None = None,
    decode_coords: Literal["coordinates", "all"] | bool | None = None,
    drop_variables: str | Iterable[str] | None = None,
    inline_array: bool = False,
    chunked_array_type: str | None = None,
    from_array_kwargs: dict[str, Any] | None = None,
    backend_kwargs: dict[str, Any] | None = None,
    **kwargs,
) -> Dataset:
    """Open and decode a dataset from a file or file-like object.
    Parameters
    ----------
    filename_or_obj : str, Path, file-like or DataStore
        Strings and Path objects are interpreted as a path to a netCDF file
        or an OpenDAP URL and opened with python-netCDF4, unless the filename
        ends with .gz, in which case the file is gunzipped and opened with
        scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
        objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
    engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\
        , installed backend \
        or subclass of xarray.backends.BackendEntrypoint, optional
        Engine to use when reading files. If not provided, the default engine
        is chosen based on available dependencies, with a preference for
        "netcdf4". A custom backend class (a subclass of ``BackendEntrypoint``)
        can also be used.
    chunks : int, dict, 'auto' or None, default: None
        If provided, used to load the data into dask arrays.
        - ``chunks="auto"`` will use dask ``auto`` chunking taking into account the
          engine preferred chunks.
        - ``chunks=None`` skips using dask, which is generally faster for
          small arrays.
        - ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
        - ``chunks={}`` loads the data with dask using the engine's preferred chunk
          size, generally identical to the format's chunk size. If not available, a
          single chunk for all arrays.
        See dask chunking for more details.
    cache : bool, optional
        If True, cache data loaded from the underlying datastore in memory as
        NumPy arrays when accessed to avoid reading from the underlying data-
        store multiple times. Defaults to True unless you specify the `chunks`
        argument to use dask, in which case it defaults to False. Does not
        change the behavior of coordinates corresponding to dimensions, which
        always load their data from disk into a ``pandas.Index``.
    decode_cf : bool, optional
        Whether to decode these variables, assuming they were saved according
        to CF conventions.
    mask_and_scale : bool or dict-like, optional
        If True, replace array values equal to `_FillValue` with NA and scale
        values according to the formula `original_values * scale_factor +
        add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
        taken from variable attributes (if they exist).  If the `_FillValue` or
        `missing_value` attribute contains multiple values a warning will be
        issued and all array values matching one of the multiple values will
        be replaced by NA. Pass a mapping, e.g. ``{"my_variable": False}``,
        to toggle this feature per-variable individually.
        This keyword may not be supported by all the backends.
    decode_times : bool or dict-like, optional
        If True, decode times encoded in the standard NetCDF datetime format
        into datetime objects. Otherwise, leave them encoded as numbers.
        Pass a mapping, e.g. ``{"my_variable": False}``,
        to toggle this feature per-variable individually.
        This keyword may not be supported by all the backends.
    decode_timedelta : bool or dict-like, optional
        If True, decode variables and coordinates with time units in
        {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
        into timedelta objects. If False, leave them encoded as numbers.
        If None (default), assume the same value of decode_times.
        Pass a mapping, e.g. ``{"my_variable": False}``,
        to toggle this feature per-variable individually.
        This keyword may not be supported by all the backends.
    use_cftime: bool or dict-like, optional
        Only relevant if encoded dates come from a standard calendar
        (e.g. "gregorian", "proleptic_gregorian", "standard", or not
        specified).  If None (default), attempt to decode times to
        ``np.datetime64[ns]`` objects; if this is not possible, decode times to
        ``cftime.datetime`` objects. If True, always decode times to
        ``cftime.datetime`` objects, regardless of whether or not they can be
        represented using ``np.datetime64[ns]`` objects.  If False, always
        decode times to ``np.datetime64[ns]`` objects; if this is not possible
        raise an error. Pass a mapping, e.g. ``{"my_variable": False}``,
        to toggle this feature per-variable individually.
        This keyword may not be supported by all the backends.
    concat_characters : bool or dict-like, optional
        If True, concatenate along the last dimension of character arrays to
        form string arrays. Dimensions will only be concatenated over (and
        removed) if they have no corresponding variable and if they are only
        used as the last dimension of character arrays.
        Pass a mapping, e.g. ``{"my_variable": False}``,
        to toggle this feature per-variable individually.
        This keyword may not be supported by all the backends.
    decode_coords : bool or {"coordinates", "all"}, optional
        Controls which variables are set as coordinate variables:
        - "coordinates" or True: Set variables referred to in the
          ``'coordinates'`` attribute of the datasets or individual variables
          as coordinate variables.
        - "all": Set variables referred to in  ``'grid_mapping'``, ``'bounds'`` and
          other attributes as coordinate variables.
        Only existing variables can be set as coordinates. Missing variables
        will be silently ignored.
    drop_variables: str or iterable of str, optional
        A variable or list of variables to exclude from being parsed from the
        dataset. This may be useful to drop variables with problems or
        inconsistent values.
    inline_array: bool, default: False
        How to include the array in the dask task graph.
        By default (``inline_array=False``) the array is included in a task by
        itself, and each chunk refers to that task by its key. With
        ``inline_array=True``, Dask will instead inline the array directly
        in the values of the task graph. See :py:func:`dask.array.from_array`.
    chunked_array_type: str, optional
        Which chunked array type to coerce this dataset's arrays to.
        Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntrypoint` system.
        Experimental API that should not be relied upon.
    from_array_kwargs: dict
        Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create
        chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg.
        For example if :py:func:`dask.array.Array` objects are used for chunking, additional kwargs will be passed
        to :py:func:`dask.array.from_array`. Experimental API that should not be relied upon.
    backend_kwargs: dict
        Additional keyword arguments passed on to the engine open function,
        equivalent to `**kwargs`.
    **kwargs: dict
        Additional keyword arguments passed on to the engine open function.
        For example:
        - 'group': path to the netCDF4 group in the given file to open given as
          a str, supported by "netcdf4", "h5netcdf", "zarr".
        - 'lock': resource lock to use when reading data from disk. Only
          relevant when using dask or another form of parallelism. By default,
          appropriate locks are chosen to safely read and write files with the
          currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
          "scipy".
        See engine open function for kwargs accepted by each specific engine.
    Returns
    -------
    dataset : Dataset
        The newly created dataset.
    Notes
    -----
    ``open_dataset`` opens the file with read-only access. When you modify
    values of a Dataset, even one linked to files on disk, only the in-memory
    copy you are manipulating in xarray is modified: the original file on disk
    is never touched.
    See Also
    --------
    open_mfdataset
    """
    if cache is None:
        cache = chunks is None
    if backend_kwargs is not None:
        kwargs.update(backend_kwargs)
    if engine is None:
        engine = plugins.guess_engine(filename_or_obj)
    if from_array_kwargs is None:
        from_array_kwargs = {}
    backend = plugins.get_backend(engine)
    decoders = _resolve_decoders_kwargs(
        decode_cf,
        open_backend_dataset_parameters=backend.open_dataset_parameters,
        mask_and_scale=mask_and_scale,
        decode_times=decode_times,
        decode_timedelta=decode_timedelta,
        concat_characters=concat_characters,
        use_cftime=use_cftime,
        decode_coords=decode_coords,
    )
    overwrite_encoded_chunks = kwargs.pop("overwrite_encoded_chunks", None)
    backend_ds = backend.open_dataset(
        filename_or_obj,
        drop_variables=drop_variables,
        **decoders,
        **kwargs,
    )
    ds = _dataset_from_backend_dataset(
        backend_ds,
        filename_or_obj,
        engine,
        chunks,
        cache,
        overwrite_encoded_chunks,
        inline_array,
        chunked_array_type,
        from_array_kwargs,
        drop_variables=drop_variables,
        **decoders,
        **kwargs,
    )
    return ds
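
# A brief sketch of the ``chunks`` behaviour documented above; the file name is a
# placeholder, not part of the source.
import xarray as xr

# chunks={} requests the engine's preferred chunks (falling back to one chunk per
# array), while chunks="auto" lets dask choose sizes that also respect those
# preferred chunks. Omitting chunks skips dask entirely.
preferred = xr.open_dataset("large_output.nc", chunks={})
auto = xr.open_dataset("large_output.nc", chunks="auto")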
 
 | 
	open_dataset 
 | 
	File-Level 
 | 
					
	xarray 
 | 
	63 
 | 
	xarray/backends/api.py 
 | 
	def open_mfdataset(
    paths: str | NestedSequence[str | os.PathLike],
    chunks: T_Chunks | None = None,
    concat_dim: (
        str
        | DataArray
        | Index
        | Sequence[str]
        | Sequence[DataArray]
        | Sequence[Index]
        | None
    ) = None,
    compat: CompatOptions = "no_conflicts",
    preprocess: Callable[[Dataset], Dataset] | None = None,
    engine: T_Engine | None = None,
    data_vars: Literal["all", "minimal", "different"] | list[str] = "all",
    coords="different",
    combine: Literal["by_coords", "nested"] = "by_coords",
    parallel: bool = False,
    join: JoinOptions = "outer",
    attrs_file: str | os.PathLike | None = None,
    combine_attrs: CombineAttrsOptions = "override",
    **kwargs,
) -> Dataset:
    """Open multiple files as a single dataset.
    If combine='by_coords' then the function ``combine_by_coords`` is used to combine
    the datasets into one before returning the result, and if combine='nested' then
    ``combine_nested`` is used. The filepaths must be structured according to which
    combining function is used, the details of which are given in the documentation for
    ``combine_by_coords`` and ``combine_nested``. By default ``combine='by_coords'``
    will be used. Requires dask to be installed. See documentation for
    details on dask [1]_. Global attributes from the ``attrs_file`` are used
    for the combined dataset.
    Parameters
    ----------
    paths : str or nested sequence of paths
        Either a string glob in the form ``"path/to/my/files/*.nc"`` or an explicit list of
        files to open. Paths can be given as strings or as pathlib Paths. If
        concatenation along more than one dimension is desired, then ``paths`` must be a
        nested list-of-lists (see ``combine_nested`` for details). (A string glob will
        be expanded to a 1-dimensional list.)
    chunks : int, dict, 'auto' or None, optional
        Dictionary with keys given by dimension names and values given by chunk sizes.
        In general, these should divide the dimensions of each dataset. If int, chunk
        each dimension by ``chunks``. By default, chunks will be chosen to load entire
        input files into memory at once. This has a major impact on performance: please
        see the full documentation for more details [2]_. This argument is evaluated
        on a per-file basis, so chunk sizes that span multiple files will be ignored.
    concat_dim : str, DataArray, Index or a Sequence of these or None, optional
        Dimensions to concatenate files along.  You only need to provide this argument
        if ``combine='nested'``, and if any of the dimensions along which you want to
        concatenate is not a dimension in the original datasets, e.g., if you want to
        stack a collection of 2D arrays along a third dimension. Set
        ``concat_dim=[..., None, ...]`` explicitly to disable concatenation along a
        particular dimension. Default is None, which for a 1D list of filepaths is
        equivalent to opening the files separately and then merging them with
        ``xarray.merge``.
    combine : {"by_coords", "nested"}, optional
        Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to
        combine all the data. Default is to use ``xarray.combine_by_coords``.
    compat : {"identical", "equals", "broadcast_equals", \
              "no_conflicts", "override"}, default: "no_conflicts"
        String indicating how to compare variables of the same name for
        potential conflicts when merging:
         * "broadcast_equals": all values must be equal when variables are
           broadcast against each other to ensure common dimensions.
         * "equals": all values and dimensions must be the same.
         * "identical": all values, dimensions and attributes must be the
           same.
         * "no_conflicts": only values which are not null in both datasets
           must be equal. The returned dataset then contains the combination
           of all non-null values.
         * "override": skip comparing and pick variable from first dataset
    preprocess : callable, optional
        If provided, call this function on each dataset prior to concatenation.
        You can find the file-name from which each dataset was loaded in
        ``ds.encoding["source"]``.
    engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\
        , installed backend \
        or subclass of xarray.backends.BackendEntrypoint, optional
        Engine to use when reading files. If not provided, the default engine
        is chosen based on available dependencies, with a preference for
        "netcdf4".
    data_vars : {"minimal", "different", "all"} or list of str, default: "all"
        These data variables will be concatenated together:
          * "minimal": Only data variables in which the dimension already
            appears are included.
          * "different": Data variables which are not equal (ignoring
            attributes) across all datasets are also concatenated (as well as
            all for which dimension already appears). Beware: this option may
            load the data payload of data variables into memory if they are not
            already loaded.
          * "all": All data variables will be concatenated.
          * list of str: The listed data variables will be concatenated, in
            addition to the "minimal" data variables.
    coords : {"minimal", "different", "all"} or list of str, optional
        These coordinate variables will be concatenated together:
         * "minimal": Only coordinates in which the dimension already appears
           are included.
         * "different": Coordinates which are not equal (ignoring attributes)
           across all datasets are also concatenated (as well as all for which
           dimension already appears). Beware: this option may load the data
           payload of coordinate variables into memory if they are not already
           loaded.
         * "all": All coordinate variables will be concatenated, except
           those corresponding to other dimensions.
         * list of str: The listed coordinate variables will be concatenated,
           in addition the "minimal" coordinates.
    parallel : bool, default: False
        If True, the open and preprocess steps of this function will be
        performed in parallel using ``dask.delayed``. Default is False.
    join : {"outer", "inner", "left", "right", "exact", "override"}, default: "outer"
        String indicating how to combine differing indexes
        (excluding concat_dim) in objects
        - "outer": use the union of object indexes
        - "inner": use the intersection of object indexes
        - "left": use indexes from the first object with each dimension
        - "right": use indexes from the last object with each dimension
        - "exact": instead of aligning, raise `ValueError` when indexes to be
          aligned are not equal
        - "override": if indexes are of same size, rewrite indexes to be
          those of the first object with that dimension. Indexes for the same
          dimension must have the same size in all objects.
    attrs_file : str or path-like, optional
        Path of the file used to read global attributes from.
        By default global attributes are read from the first file provided,
        with wildcard matches sorted by filename.
    combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
                     "override"} or callable, default: "override"
        A callable or a string indicating how to combine attrs of the objects being
        merged:
        - "drop": empty attrs on returned Dataset.
        - "identical": all attrs must be the same on every object.
        - "no_conflicts": attrs from all objects are combined, any that have
          the same name must also have the same value.
        - "drop_conflicts": attrs from all objects are combined, any that have
          the same name but different values are dropped.
        - "override": skip comparing and copy attrs from the first dataset to
          the result.
        If a callable, it must expect a sequence of ``attrs`` dicts and a context object
        as its only parameters.
    **kwargs : optional
        Additional arguments passed on to :py:func:`xarray.open_dataset`. For an
        overview of some of the possible options, see the documentation of
        :py:func:`xarray.open_dataset`
    Returns
    -------
    xarray.Dataset
    Notes
    -----
    ``open_mfdataset`` opens files with read-only access. When you modify values
    of a Dataset, even one linked to files on disk, only the in-memory copy you
    are manipulating in xarray is modified: the original file on disk is never
    touched.
    See Also
    --------
    combine_by_coords
    combine_nested
    open_dataset
    Examples
    --------
    A user might want to pass additional arguments into ``preprocess`` when
    applying some operation to many individual files that are being opened. One route
    to do this is through the use of ``functools.partial``.
    >>> from functools import partial
    >>> def _preprocess(x, lon_bnds, lat_bnds):
    ...     return x.sel(lon=slice(*lon_bnds), lat=slice(*lat_bnds))
    ...
    >>> lon_bnds, lat_bnds = (-110, -105), (40, 45)
    >>> partial_func = partial(_preprocess, lon_bnds=lon_bnds, lat_bnds=lat_bnds)
    >>> ds = xr.open_mfdataset(
    ...     "file_*.nc", concat_dim="time", preprocess=partial_func
    ... )  # doctest: +SKIP
    It is also possible to use any argument to ``open_dataset`` together
    with ``open_mfdataset``, such as for example ``drop_variables``:
    >>> ds = xr.open_mfdataset(
    ...     "file.nc", drop_variables=["varname_1", "varname_2"]  # any list of vars
    ... )  # doctest: +SKIP
    References
    ----------
    .. [1] https://docs.xarray.dev/en/stable/dask.html
    .. [2] https://docs.xarray.dev/en/stable/dask.html#chunking-and-performance
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_open_mfdataset.txt 
 | 
	def open_mfdataset(
    paths: str | NestedSequence[str | os.PathLike],
    chunks: T_Chunks | None = None,
    concat_dim: (
        str
        | DataArray
        | Index
        | Sequence[str]
        | Sequence[DataArray]
        | Sequence[Index]
        | None
    ) = None,
    compat: CompatOptions = "no_conflicts",
    preprocess: Callable[[Dataset], Dataset] | None = None,
    engine: T_Engine | None = None,
    data_vars: Literal["all", "minimal", "different"] | list[str] = "all",
    coords="different",
    combine: Literal["by_coords", "nested"] = "by_coords",
    parallel: bool = False,
    join: JoinOptions = "outer",
    attrs_file: str | os.PathLike | None = None,
    combine_attrs: CombineAttrsOptions = "override",
    **kwargs,
) -> Dataset:
    """Open multiple files as a single dataset.
    If combine='by_coords' then the function ``combine_by_coords`` is used to combine
    the datasets into one before returning the result, and if combine='nested' then
    ``combine_nested`` is used. The filepaths must be structured according to which
    combining function is used, the details of which are given in the documentation for
    ``combine_by_coords`` and ``combine_nested``. By default ``combine='by_coords'``
    will be used. Requires dask to be installed. See documentation for
    details on dask [1]_. Global attributes from the ``attrs_file`` are used
    for the combined dataset.
    Parameters
    ----------
    paths : str or nested sequence of paths
        Either a string glob in the form ``"path/to/my/files/*.nc"`` or an explicit list of
        files to open. Paths can be given as strings or as pathlib Paths. If
        concatenation along more than one dimension is desired, then ``paths`` must be a
        nested list-of-lists (see ``combine_nested`` for details). (A string glob will
        be expanded to a 1-dimensional list.)
    chunks : int, dict, 'auto' or None, optional
        Dictionary with keys given by dimension names and values given by chunk sizes.
        In general, these should divide the dimensions of each dataset. If int, chunk
        each dimension by ``chunks``. By default, chunks will be chosen to load entire
        input files into memory at once. This has a major impact on performance: please
        see the full documentation for more details [2]_. This argument is evaluated
        on a per-file basis, so chunk sizes that span multiple files will be ignored.
    concat_dim : str, DataArray, Index or a Sequence of these or None, optional
        Dimensions to concatenate files along.  You only need to provide this argument
        if ``combine='nested'``, and if any of the dimensions along which you want to
        concatenate is not a dimension in the original datasets, e.g., if you want to
        stack a collection of 2D arrays along a third dimension. Set
        ``concat_dim=[..., None, ...]`` explicitly to disable concatenation along a
        particular dimension. Default is None, which for a 1D list of filepaths is
        equivalent to opening the files separately and then merging them with
        ``xarray.merge``.
    combine : {"by_coords", "nested"}, optional
        Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to
        combine all the data. Default is to use ``xarray.combine_by_coords``.
    compat : {"identical", "equals", "broadcast_equals", \
              "no_conflicts", "override"}, default: "no_conflicts"
        String indicating how to compare variables of the same name for
        potential conflicts when merging:
         * "broadcast_equals": all values must be equal when variables are
           broadcast against each other to ensure common dimensions.
         * "equals": all values and dimensions must be the same.
         * "identical": all values, dimensions and attributes must be the
           same.
         * "no_conflicts": only values which are not null in both datasets
           must be equal. The returned dataset then contains the combination
           of all non-null values.
         * "override": skip comparing and pick variable from first dataset
    preprocess : callable, optional
        If provided, call this function on each dataset prior to concatenation.
        You can find the file-name from which each dataset was loaded in
        ``ds.encoding["source"]``.
    engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\
        , installed backend \
        or subclass of xarray.backends.BackendEntrypoint, optional
        Engine to use when reading files. If not provided, the default engine
        is chosen based on available dependencies, with a preference for
        "netcdf4".
    data_vars : {"minimal", "different", "all"} or list of str, default: "all"
        These data variables will be concatenated together:
          * "minimal": Only data variables in which the dimension already
            appears are included.
          * "different": Data variables which are not equal (ignoring
            attributes) across all datasets are also concatenated (as well as
            all for which dimension already appears). Beware: this option may
            load the data payload of data variables into memory if they are not
            already loaded.
          * "all": All data variables will be concatenated.
          * list of str: The listed data variables will be concatenated, in
            addition to the "minimal" data variables.
    coords : {"minimal", "different", "all"} or list of str, optional
        These coordinate variables will be concatenated together:
         * "minimal": Only coordinates in which the dimension already appears
           are included.
         * "different": Coordinates which are not equal (ignoring attributes)
           across all datasets are also concatenated (as well as all for which
           dimension already appears). Beware: this option may load the data
           payload of coordinate variables into memory if they are not already
           loaded.
         * "all": All coordinate variables will be concatenated, except
           those corresponding to other dimensions.
         * list of str: The listed coordinate variables will be concatenated,
           in addition the "minimal" coordinates.
    parallel : bool, default: False
        If True, the open and preprocess steps of this function will be
        performed in parallel using ``dask.delayed``. Default is False.
    join : {"outer", "inner", "left", "right", "exact", "override"}, default: "outer"
        String indicating how to combine differing indexes
        (excluding concat_dim) in objects
        - "outer": use the union of object indexes
        - "inner": use the intersection of object indexes
        - "left": use indexes from the first object with each dimension
        - "right": use indexes from the last object with each dimension
        - "exact": instead of aligning, raise `ValueError` when indexes to be
          aligned are not equal
        - "override": if indexes are of same size, rewrite indexes to be
          those of the first object with that dimension. Indexes for the same
          dimension must have the same size in all objects.
    attrs_file : str or path-like, optional
        Path of the file used to read global attributes from.
        By default global attributes are read from the first file provided,
        with wildcard matches sorted by filename.
    combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
                     "override"} or callable, default: "override"
        A callable or a string indicating how to combine attrs of the objects being
        merged:
        - "drop": empty attrs on returned Dataset.
        - "identical": all attrs must be the same on every object.
        - "no_conflicts": attrs from all objects are combined, any that have
          the same name must also have the same value.
        - "drop_conflicts": attrs from all objects are combined, any that have
          the same name but different values are dropped.
        - "override": skip comparing and copy attrs from the first dataset to
          the result.
        If a callable, it must expect a sequence of ``attrs`` dicts and a context object
        as its only parameters.
    **kwargs : optional
        Additional arguments passed on to :py:func:`xarray.open_dataset`. For an
        overview of some of the possible options, see the documentation of
        :py:func:`xarray.open_dataset`
    Returns
    -------
    xarray.Dataset
    Notes
    -----
    ``open_mfdataset`` opens files with read-only access. When you modify values
    of a Dataset, even one linked to files on disk, only the in-memory copy you
    are manipulating in xarray is modified: the original file on disk is never
    touched.
    See Also
    --------
    combine_by_coords
    combine_nested
    open_dataset
    Examples
    --------
    A user might want to pass additional arguments into ``preprocess`` when
    applying some operation to many individual files that are being opened. One route
    to do this is through the use of ``functools.partial``.
    >>> from functools import partial
    >>> def _preprocess(x, lon_bnds, lat_bnds):
    ...     return x.sel(lon=slice(*lon_bnds), lat=slice(*lat_bnds))
    ...
    >>> lon_bnds, lat_bnds = (-110, -105), (40, 45)
    >>> partial_func = partial(_preprocess, lon_bnds=lon_bnds, lat_bnds=lat_bnds)
    >>> ds = xr.open_mfdataset(
    ...     "file_*.nc", concat_dim="time", preprocess=partial_func
    ... )  # doctest: +SKIP
    It is also possible to use any argument to ``open_dataset`` together
    with ``open_mfdataset``, such as for example ``drop_variables``:
    >>> ds = xr.open_mfdataset(
    ...     "file.nc", drop_variables=["varname_1", "varname_2"]  # any list of vars
    ... )  # doctest: +SKIP
    References
    ----------
    .. [1] https://docs.xarray.dev/en/stable/dask.html
    .. [2] https://docs.xarray.dev/en/stable/dask.html#chunking-and-performance
    """
    paths = _find_absolute_paths(paths, engine=engine, **kwargs)
    if not paths:
        raise OSError("no files to open")
    if combine == "nested":
        if isinstance(concat_dim, str | DataArray) or concat_dim is None:
            concat_dim = [concat_dim]  # type: ignore[assignment]
        # This creates a flat list which is easier to iterate over, whilst
        # encoding the originally-supplied structure as "ids".
        # The "ids" are not used at all if combine='by_coords`.
        combined_ids_paths = _infer_concat_order_from_positions(paths)
        ids, paths = (
            list(combined_ids_paths.keys()),
            list(combined_ids_paths.values()),
        )
    elif concat_dim is not None:
        raise ValueError(
            "When combine='by_coords', passing a value for `concat_dim` has no "
            "effect. To manually combine along a specific dimension you should "
            "instead specify combine='nested' along with a value for `concat_dim`.",
        )
    open_kwargs = dict(engine=engine, chunks=chunks or {}, **kwargs)
    if parallel:
        import dask
        # wrap the open_dataset, getattr, and preprocess with delayed
        open_ = dask.delayed(open_dataset)
        getattr_ = dask.delayed(getattr)
        if preprocess is not None:
            preprocess = dask.delayed(preprocess)
    else:
        open_ = open_dataset
        getattr_ = getattr
    datasets = [open_(p, **open_kwargs) for p in paths]
    closers = [getattr_(ds, "_close") for ds in datasets]
    if preprocess is not None:
        datasets = [preprocess(ds) for ds in datasets]
    if parallel:
        # calling compute here will return the datasets/file_objs lists,
        # the underlying datasets will still be stored as dask arrays
        datasets, closers = dask.compute(datasets, closers)
    # Combine all datasets, closing them in case of a ValueError
    try:
        if combine == "nested":
            # Combined nested list by successive concat and merge operations
            # along each dimension, using structure given by "ids"
            combined = _nested_combine(
                datasets,
                concat_dims=concat_dim,
                compat=compat,
                data_vars=data_vars,
                coords=coords,
                ids=ids,
                join=join,
                combine_attrs=combine_attrs,
            )
        elif combine == "by_coords":
            # Redo ordering from coordinates, ignoring how they were ordered
            # previously
            combined = combine_by_coords(
                datasets,
                compat=compat,
                data_vars=data_vars,
                coords=coords,
                join=join,
                combine_attrs=combine_attrs,
            )
        else:
            raise ValueError(
                f"{combine} is an invalid option for the keyword argument"
                " ``combine``"
            )
    except ValueError:
        for ds in datasets:
            ds.close()
        raise
    combined.set_close(partial(_multi_file_closer, closers))
    # read global attributes from the attrs_file or from the first dataset
    if attrs_file is not None:
        if isinstance(attrs_file, os.PathLike):
            attrs_file = cast(str, os.fspath(attrs_file))
        combined.attrs = datasets[paths.index(attrs_file)].attrs
    return combined
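
# A short, hedged sketch of the nested-combine path exercised in the body above.
# The file names and dimension name are illustrative assumptions.
import xarray as xr

# With combine="nested" the files are concatenated in the order given along
# concat_dim; parallel=True wraps the per-file open and preprocess steps in
# dask.delayed, as in the implementation above.
ds = xr.open_mfdataset(
    ["run_000.nc", "run_001.nc"],
    combine="nested",
    concat_dim="time",
    parallel=True,
)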
 
 | 
	open_mfdataset 
 | 
	Self-Contained 
 | 
					
	xarray 
 | 
	64 
 | 
	xarray/backends/zarr.py 
 | 
	def open_zarr(
    store,
    group=None,
    synchronizer=None,
    chunks="auto",
    decode_cf=True,
    mask_and_scale=True,
    decode_times=True,
    concat_characters=True,
    decode_coords=True,
    drop_variables=None,
    consolidated=None,
    overwrite_encoded_chunks=False,
    chunk_store=None,
    storage_options=None,
    decode_timedelta=None,
    use_cftime=None,
    zarr_version=None,
    chunked_array_type: str | None = None,
    from_array_kwargs: dict[str, Any] | None = None,
    **kwargs,
):
    """Load and decode a dataset from a Zarr store.
    The `store` object should be a valid store for a Zarr group. `store`
    variables must contain dimension metadata encoded in the
    `_ARRAY_DIMENSIONS` attribute or must have NCZarr format.
    Parameters
    ----------
    store : MutableMapping or str
        A MutableMapping where a Zarr Group has been stored or a path to a
        directory in file system where a Zarr DirectoryStore has been stored.
    synchronizer : object, optional
        Array synchronizer provided to zarr
    group : str, optional
        Group path. (a.k.a. `path` in zarr terminology.)
    chunks : int, dict, 'auto' or None, default: 'auto'
        If provided, used to load the data into dask arrays.
        - ``chunks='auto'`` will use dask ``auto`` chunking taking into account the
          engine preferred chunks.
        - ``chunks=None`` skips using dask, which is generally faster for
          small arrays.
        - ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
        - ``chunks={}`` loads the data with dask using engine preferred chunks if
          exposed by the backend, otherwise with a single chunk for all arrays.
        See dask chunking for more details.
    overwrite_encoded_chunks : bool, optional
        Whether to drop the zarr chunks encoded for each variable when a
        dataset is loaded with specified chunk sizes (default: False)
    decode_cf : bool, optional
        Whether to decode these variables, assuming they were saved according
        to CF conventions.
    mask_and_scale : bool, optional
        If True, replace array values equal to `_FillValue` with NA and scale
        values according to the formula `original_values * scale_factor +
        add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
        taken from variable attributes (if they exist).  If the `_FillValue` or
        `missing_value` attribute contains multiple values a warning will be
        issued and all array values matching one of the multiple values will
        be replaced by NA.
    decode_times : bool, optional
        If True, decode times encoded in the standard NetCDF datetime format
        into datetime objects. Otherwise, leave them encoded as numbers.
    concat_characters : bool, optional
        If True, concatenate along the last dimension of character arrays to
        form string arrays. Dimensions will only be concatenated over (and
        removed) if they have no corresponding variable and if they are only
        used as the last dimension of character arrays.
    decode_coords : bool, optional
        If True, decode the 'coordinates' attribute to identify coordinates in
        the resulting dataset.
    drop_variables : str or iterable, optional
        A variable or list of variables to exclude from being parsed from the
        dataset. This may be useful to drop variables with problems or
        inconsistent values.
    consolidated : bool, optional
        Whether to open the store using zarr's consolidated metadata
        capability. Only works for stores that have already been consolidated.
        By default (`consolidated=None`), attempts to read consolidated metadata,
        falling back to read non-consolidated metadata if that fails.
        When the experimental ``zarr_version=3``, ``consolidated`` must
        either be ``None`` or ``False``.
    chunk_store : MutableMapping, optional
        A separate Zarr store only for chunk data.
    storage_options : dict, optional
        Any additional parameters for the storage backend (ignored for local
        paths).
    decode_timedelta : bool, optional
        If True, decode variables and coordinates with time units in
        {'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'}
        into timedelta objects. If False, leave them encoded as numbers.
        If None (default), assume the same value of decode_times.
    use_cftime : bool, optional
        Only relevant if encoded dates come from a standard calendar
        (e.g. "gregorian", "proleptic_gregorian", "standard", or not
        specified).  If None (default), attempt to decode times to
        ``np.datetime64[ns]`` objects; if this is not possible, decode times to
        ``cftime.datetime`` objects. If True, always decode times to
        ``cftime.datetime`` objects, regardless of whether or not they can be
        represented using ``np.datetime64[ns]`` objects.  If False, always
        decode times to ``np.datetime64[ns]`` objects; if this is not possible
        raise an error.
    zarr_version : int or None, optional
        The desired zarr spec version to target (currently 2 or 3). The default
        of None will attempt to determine the zarr version from ``store`` when
        possible, otherwise defaulting to 2.
    chunked_array_type: str, optional
        Which chunked array type to coerce this dataset's arrays to.
        Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntrypoint` system.
        Experimental API that should not be relied upon.
    from_array_kwargs: dict, optional
        Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create
        chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg.
        Defaults to {'manager': 'dask'}, meaning additional kwargs will be passed eventually to
        :py:func:`dask.array.from_array`. Experimental API that should not be relied upon.
    Returns
    -------
    dataset : Dataset
        The newly created dataset.
    See Also
    --------
    open_dataset
    open_mfdataset
    References
    ----------
    http://zarr.readthedocs.io/
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_open_zarr.txt 
 | 
	def open_zarr(
    store,
    group=None,
    synchronizer=None,
    chunks="auto",
    decode_cf=True,
    mask_and_scale=True,
    decode_times=True,
    concat_characters=True,
    decode_coords=True,
    drop_variables=None,
    consolidated=None,
    overwrite_encoded_chunks=False,
    chunk_store=None,
    storage_options=None,
    decode_timedelta=None,
    use_cftime=None,
    zarr_version=None,
    chunked_array_type: str | None = None,
    from_array_kwargs: dict[str, Any] | None = None,
    **kwargs,
):
    """Load and decode a dataset from a Zarr store.
    The `store` object should be a valid store for a Zarr group. `store`
    variables must contain dimension metadata encoded in the
    `_ARRAY_DIMENSIONS` attribute or must have NCZarr format.
    Parameters
    ----------
    store : MutableMapping or str
        A MutableMapping where a Zarr Group has been stored or a path to a
        directory in file system where a Zarr DirectoryStore has been stored.
    synchronizer : object, optional
        Array synchronizer provided to zarr
    group : str, optional
        Group path. (a.k.a. `path` in zarr terminology.)
    chunks : int, dict, 'auto' or None, default: 'auto'
        If provided, used to load the data into dask arrays.
        - ``chunks='auto'`` will use dask ``auto`` chunking taking into account the
          engine preferred chunks.
        - ``chunks=None`` skips using dask, which is generally faster for
          small arrays.
        - ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
        - ``chunks={}`` loads the data with dask using engine preferred chunks if
          exposed by the backend, otherwise with a single chunk for all arrays.
        See dask chunking for more details.
    overwrite_encoded_chunks : bool, optional
        Whether to drop the zarr chunks encoded for each variable when a
        dataset is loaded with specified chunk sizes (default: False)
    decode_cf : bool, optional
        Whether to decode these variables, assuming they were saved according
        to CF conventions.
    mask_and_scale : bool, optional
        If True, replace array values equal to `_FillValue` with NA and scale
        values according to the formula `original_values * scale_factor +
        add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
        taken from variable attributes (if they exist).  If the `_FillValue` or
        `missing_value` attribute contains multiple values a warning will be
        issued and all array values matching one of the multiple values will
        be replaced by NA.
    decode_times : bool, optional
        If True, decode times encoded in the standard NetCDF datetime format
        into datetime objects. Otherwise, leave them encoded as numbers.
    concat_characters : bool, optional
        If True, concatenate along the last dimension of character arrays to
        form string arrays. Dimensions will only be concatenated over (and
        removed) if they have no corresponding variable and if they are only
        used as the last dimension of character arrays.
    decode_coords : bool, optional
        If True, decode the 'coordinates' attribute to identify coordinates in
        the resulting dataset.
    drop_variables : str or iterable, optional
        A variable or list of variables to exclude from being parsed from the
        dataset. This may be useful to drop variables with problems or
        inconsistent values.
    consolidated : bool, optional
        Whether to open the store using zarr's consolidated metadata
        capability. Only works for stores that have already been consolidated.
        By default (`consolidated=None`), attempts to read consolidated metadata,
        falling back to read non-consolidated metadata if that fails.
        When the experimental ``zarr_version=3``, ``consolidated`` must be
        either ``None`` or ``False``.
    chunk_store : MutableMapping, optional
        A separate Zarr store only for chunk data.
    storage_options : dict, optional
        Any additional parameters for the storage backend (ignored for local
        paths).
    decode_timedelta : bool, optional
        If True, decode variables and coordinates with time units in
        {'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'}
        into timedelta objects. If False, leave them encoded as numbers.
        If None (default), assume the same value as ``decode_times``.
    use_cftime : bool, optional
        Only relevant if encoded dates come from a standard calendar
        (e.g. "gregorian", "proleptic_gregorian", "standard", or not
        specified).  If None (default), attempt to decode times to
        ``np.datetime64[ns]`` objects; if this is not possible, decode times to
        ``cftime.datetime`` objects. If True, always decode times to
        ``cftime.datetime`` objects, regardless of whether or not they can be
        represented using ``np.datetime64[ns]`` objects.  If False, always
        decode times to ``np.datetime64[ns]`` objects; if this is not possible
        raise an error.
    zarr_version : int or None, optional
        The desired zarr spec version to target (currently 2 or 3). The default
        of None will attempt to determine the zarr version from ``store`` when
        possible, otherwise defaulting to 2.
    chunked_array_type: str, optional
        Which chunked array type to coerce this dataset's arrays to.
        Defaults to 'dask' if installed, else whatever is registered via the `ChunkManagerEntryPoint` system.
        Experimental API that should not be relied upon.
    from_array_kwargs: dict, optional
        Additional keyword arguments passed on to the `ChunkManagerEntrypoint.from_array` method used to create
        chunked arrays, via whichever chunk manager is specified through the `chunked_array_type` kwarg.
        Defaults to {'manager': 'dask'}, meaning additional kwargs will be passed eventually to
        :py:func:`dask.array.from_array`. Experimental API that should not be relied upon.
    Returns
    -------
    dataset : Dataset
        The newly created dataset.
    See Also
    --------
    open_dataset
    open_mfdataset
    References
    ----------
    http://zarr.readthedocs.io/
    """
    from xarray.backends.api import open_dataset
    if from_array_kwargs is None:
        from_array_kwargs = {}
    if chunks == "auto":
        try:
            guess_chunkmanager(
                chunked_array_type
            )  # attempt to import that parallel backend
            chunks = {}
        except ValueError:
            chunks = None
    if kwargs:
        raise TypeError(
            "open_zarr() got unexpected keyword arguments " + ",".join(kwargs.keys())
        )
    backend_kwargs = {
        "synchronizer": synchronizer,
        "consolidated": consolidated,
        "overwrite_encoded_chunks": overwrite_encoded_chunks,
        "chunk_store": chunk_store,
        "storage_options": storage_options,
        "stacklevel": 4,
        "zarr_version": zarr_version,
    }
    ds = open_dataset(
        filename_or_obj=store,
        group=group,
        decode_cf=decode_cf,
        mask_and_scale=mask_and_scale,
        decode_times=decode_times,
        concat_characters=concat_characters,
        decode_coords=decode_coords,
        engine="zarr",
        chunks=chunks,
        drop_variables=drop_variables,
        chunked_array_type=chunked_array_type,
        from_array_kwargs=from_array_kwargs,
        backend_kwargs=backend_kwargs,
        decode_timedelta=decode_timedelta,
        use_cftime=use_cftime,
        zarr_version=zarr_version,
    )
    return ds
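
A minimal usage sketch for the function above (assumptions: the store path "example.zarr" and the variable/dimension names "temperature"/"time" are hypothetical, and dask is installed so ``chunks="auto"`` yields lazy arrays):

```python
import xarray as xr

# Hypothetical local store; any Zarr group written with Dataset.to_zarr() would do.
ds = xr.open_zarr("example.zarr", consolidated=True, chunks="auto")

# Variables come back as lazy (dask-backed) arrays; .load() pulls a selection
# into memory. "temperature" and "time" are assumed names for illustration only.
subset = ds["temperature"].isel(time=0).load()
```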
 
 | 
	open_zarr 
 | 
	File-Level 
 | 
					
	xarray 
 | 
	65 
 | 
	xarray/plot/dataarray_plot.py 
 | 
	def plot(
    darray: DataArray,
    *,
    row: Hashable | None = None,
    col: Hashable | None = None,
    col_wrap: int | None = None,
    ax: Axes | None = None,
    hue: Hashable | None = None,
    subplot_kws: dict[str, Any] | None = None,
    **kwargs: Any,
) -> Any:
    """
    Default plot of DataArray using :py:mod:`matplotlib:matplotlib.pyplot`.
    Calls xarray plotting function based on the dimensions of
    the squeezed DataArray.
    =============== ===========================
    Dimensions      Plotting function
    =============== ===========================
    1               :py:func:`xarray.plot.line`
    2               :py:func:`xarray.plot.pcolormesh`
    Anything else   :py:func:`xarray.plot.hist`
    =============== ===========================
    Parameters
    ----------
    darray : DataArray
    row : Hashable or None, optional
        If passed, make row faceted plots on this dimension name.
    col : Hashable or None, optional
        If passed, make column faceted plots on this dimension name.
    col_wrap : int or None, optional
        Use together with ``col`` to wrap faceted plots.
    ax : matplotlib axes object, optional
        Axes on which to plot. By default, use the current axes.
        Mutually exclusive with ``size``, ``figsize`` and facets.
    hue : Hashable or None, optional
        If passed, make faceted line plots with hue on this dimension name.
    subplot_kws : dict, optional
        Dictionary of keyword arguments for Matplotlib subplots
        (see :py:meth:`matplotlib:matplotlib.figure.Figure.add_subplot`).
    **kwargs : optional
        Additional keyword arguments for Matplotlib.
    See Also
    --------
    xarray.DataArray.squeeze
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_plot.txt 
 | 
	def plot(
    darray: DataArray,
    *,
    row: Hashable | None = None,
    col: Hashable | None = None,
    col_wrap: int | None = None,
    ax: Axes | None = None,
    hue: Hashable | None = None,
    subplot_kws: dict[str, Any] | None = None,
    **kwargs: Any,
) -> Any:
    """
    Default plot of DataArray using :py:mod:`matplotlib:matplotlib.pyplot`.
    Calls xarray plotting function based on the dimensions of
    the squeezed DataArray.
    =============== ===========================
    Dimensions      Plotting function
    =============== ===========================
    1               :py:func:`xarray.plot.line`
    2               :py:func:`xarray.plot.pcolormesh`
    Anything else   :py:func:`xarray.plot.hist`
    =============== ===========================
    Parameters
    ----------
    darray : DataArray
    row : Hashable or None, optional
        If passed, make row faceted plots on this dimension name.
    col : Hashable or None, optional
        If passed, make column faceted plots on this dimension name.
    col_wrap : int or None, optional
        Use together with ``col`` to wrap faceted plots.
    ax : matplotlib axes object, optional
        Axes on which to plot. By default, use the current axes.
        Mutually exclusive with ``size``, ``figsize`` and facets.
    hue : Hashable or None, optional
        If passed, make faceted line plots with hue on this dimension name.
    subplot_kws : dict, optional
        Dictionary of keyword arguments for Matplotlib subplots
        (see :py:meth:`matplotlib:matplotlib.figure.Figure.add_subplot`).
    **kwargs : optional
        Additional keyword arguments for Matplotlib.
    See Also
    --------
    xarray.DataArray.squeeze
    """
    darray = darray.squeeze(
        d for d, s in darray.sizes.items() if s == 1 and d not in (row, col, hue)
    ).compute()
    plot_dims = set(darray.dims)
    plot_dims.discard(row)
    plot_dims.discard(col)
    plot_dims.discard(hue)
    ndims = len(plot_dims)
    plotfunc: Callable
    if ndims == 0 or darray.size == 0:
        raise TypeError("No numeric data to plot.")
    if ndims in (1, 2):
        if row or col:
            kwargs["subplot_kws"] = subplot_kws
            kwargs["row"] = row
            kwargs["col"] = col
            kwargs["col_wrap"] = col_wrap
        if ndims == 1:
            plotfunc = line
            kwargs["hue"] = hue
        elif ndims == 2:
            if hue:
                plotfunc = line
                kwargs["hue"] = hue
            else:
                plotfunc = pcolormesh
                kwargs["subplot_kws"] = subplot_kws
    else:
        if row or col or hue:
            raise ValueError(
                "Only 1d and 2d plots are supported for facets in xarray. "
                "See the package `Seaborn` for more options."
            )
        plotfunc = hist
    kwargs["ax"] = ax
    return plotfunc(darray, **kwargs)
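
A short sketch of the dispatch behaviour described in the docstring, assuming matplotlib is available; the array contents are arbitrary:

```python
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr

da = xr.DataArray(np.random.rand(4, 5), dims=("x", "y"))

da.plot()            # 2D without hue -> xarray.plot.pcolormesh
da.isel(x=0).plot()  # squeezed to 1D -> xarray.plot.line
plt.show()
```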
 
 | 
	plot 
 | 
	Self-Contained 
 | 
					
	xarray 
 | 
	67 
 | 
	xarray/core/groupby.py 
 | 
	    def quantile(
        self,
        q: ArrayLike,
        dim: Dims = None,
        *,
        method: QuantileMethods = "linear",
        keep_attrs: bool | None = None,
        skipna: bool | None = None,
        interpolation: QuantileMethods | None = None,
    ) -> T_Xarray:
        """Compute the qth quantile over each array in the groups and
        concatenate them together into a new array.
        Parameters
        ----------
        q : float or sequence of float
            Quantile to compute, which must be between 0 and 1
            inclusive.
        dim : str or Iterable of Hashable, optional
            Dimension(s) over which to apply quantile.
            Defaults to the grouped dimension.
        method : str, default: "linear"
            This optional parameter specifies the interpolation method to use when the
            desired quantile lies between two data points. The options sorted by their R
            type as summarized in the H&F paper [1]_ are:
                1. "inverted_cdf"
                2. "averaged_inverted_cdf"
                3. "closest_observation"
                4. "interpolated_inverted_cdf"
                5. "hazen"
                6. "weibull"
                7. "linear"  (default)
                8. "median_unbiased"
                9. "normal_unbiased"
            The first three methods are discontinuous.  The following discontinuous
            variations of the default "linear" (7.) option are also available:
                * "lower"
                * "higher"
                * "midpoint"
                * "nearest"
            See :py:func:`numpy.quantile` or [1]_ for details. The "method" argument
            was previously called "interpolation", renamed in accordance with numpy
            version 1.22.0.
        keep_attrs : bool or None, default: None
            If True, the dataarray's attributes (`attrs`) will be copied from
            the original object to the new one.  If False, the new
            object will be returned without attributes.
        skipna : bool or None, default: None
            If True, skip missing values (as marked by NaN). By default, only
            skips missing values for float dtypes; other dtypes either do not
            have a sentinel missing value (int) or skipna=True has not been
            implemented (object, datetime64 or timedelta64).
        Returns
        -------
        quantiles : Variable
            If `q` is a single quantile, then the result is a
            scalar. If multiple percentiles are given, first axis of
            the result corresponds to the quantile. In either case a
            quantile dimension is added to the return array. The other
            dimensions are the dimensions that remain after the
            reduction of the array.
        See Also
        --------
        numpy.nanquantile, numpy.quantile, pandas.Series.quantile, Dataset.quantile
        DataArray.quantile
        Examples
        --------
        >>> da = xr.DataArray(
        ...     [[1.3, 8.4, 0.7, 6.9], [0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]],
        ...     coords={"x": [0, 0, 1], "y": [1, 1, 2, 2]},
        ...     dims=("x", "y"),
        ... )
        >>> ds = xr.Dataset({"a": da})
        >>> da.groupby("x").quantile(0)
        <xarray.DataArray (x: 2, y: 4)> Size: 64B
        array([[0.7, 4.2, 0.7, 1.5],
               [6.5, 7.3, 2.6, 1.9]])
        Coordinates:
          * y         (y) int64 32B 1 1 2 2
            quantile  float64 8B 0.0
          * x         (x) int64 16B 0 1
        >>> ds.groupby("y").quantile(0, dim=...)
        <xarray.Dataset> Size: 40B
        Dimensions:   (y: 2)
        Coordinates:
            quantile  float64 8B 0.0
          * y         (y) int64 16B 1 2
        Data variables:
            a         (y) float64 16B 0.7 0.7
        >>> da.groupby("x").quantile([0, 0.5, 1])
        <xarray.DataArray (x: 2, y: 4, quantile: 3)> Size: 192B
        array([[[0.7 , 1.  , 1.3 ],
                [4.2 , 6.3 , 8.4 ],
                [0.7 , 5.05, 9.4 ],
                [1.5 , 4.2 , 6.9 ]],
        <BLANKLINE>
               [[6.5 , 6.5 , 6.5 ],
                [7.3 , 7.3 , 7.3 ],
                [2.6 , 2.6 , 2.6 ],
                [1.9 , 1.9 , 1.9 ]]])
        Coordinates:
          * y         (y) int64 32B 1 1 2 2
          * quantile  (quantile) float64 24B 0.0 0.5 1.0
          * x         (x) int64 16B 0 1
        >>> ds.groupby("y").quantile([0, 0.5, 1], dim=...)
        <xarray.Dataset> Size: 88B
        Dimensions:   (y: 2, quantile: 3)
        Coordinates:
          * quantile  (quantile) float64 24B 0.0 0.5 1.0
          * y         (y) int64 16B 1 2
        Data variables:
            a         (y, quantile) float64 48B 0.7 5.35 8.4 0.7 2.25 9.4
        References
        ----------
        .. [1] R. J. Hyndman and Y. Fan,
           "Sample quantiles in statistical packages,"
           The American Statistician, 50(4), pp. 361-365, 1996
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_quantile.txt 
 | 
	    def quantile(
        self,
        q: ArrayLike,
        dim: Dims = None,
        *,
        method: QuantileMethods = "linear",
        keep_attrs: bool | None = None,
        skipna: bool | None = None,
        interpolation: QuantileMethods | None = None,
    ) -> T_Xarray:
        """Compute the qth quantile over each array in the groups and
        concatenate them together into a new array.
        Parameters
        ----------
        q : float or sequence of float
            Quantile to compute, which must be between 0 and 1
            inclusive.
        dim : str or Iterable of Hashable, optional
            Dimension(s) over which to apply quantile.
            Defaults to the grouped dimension.
        method : str, default: "linear"
            This optional parameter specifies the interpolation method to use when the
            desired quantile lies between two data points. The options sorted by their R
            type as summarized in the H&F paper [1]_ are:
                1. "inverted_cdf"
                2. "averaged_inverted_cdf"
                3. "closest_observation"
                4. "interpolated_inverted_cdf"
                5. "hazen"
                6. "weibull"
                7. "linear"  (default)
                8. "median_unbiased"
                9. "normal_unbiased"
            The first three methods are discontinuous.  The following discontinuous
            variations of the default "linear" (7.) option are also available:
                * "lower"
                * "higher"
                * "midpoint"
                * "nearest"
            See :py:func:`numpy.quantile` or [1]_ for details. The "method" argument
            was previously called "interpolation", renamed in accordance with numpy
            version 1.22.0.
        keep_attrs : bool or None, default: None
            If True, the dataarray's attributes (`attrs`) will be copied from
            the original object to the new one.  If False, the new
            object will be returned without attributes.
        skipna : bool or None, default: None
            If True, skip missing values (as marked by NaN). By default, only
            skips missing values for float dtypes; other dtypes either do not
            have a sentinel missing value (int) or skipna=True has not been
            implemented (object, datetime64 or timedelta64).
        Returns
        -------
        quantiles : Variable
            If `q` is a single quantile, then the result is a
            scalar. If multiple percentiles are given, first axis of
            the result corresponds to the quantile. In either case a
            quantile dimension is added to the return array. The other
            dimensions are the dimensions that remain after the
            reduction of the array.
        See Also
        --------
        numpy.nanquantile, numpy.quantile, pandas.Series.quantile, Dataset.quantile
        DataArray.quantile
        Examples
        --------
        >>> da = xr.DataArray(
        ...     [[1.3, 8.4, 0.7, 6.9], [0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]],
        ...     coords={"x": [0, 0, 1], "y": [1, 1, 2, 2]},
        ...     dims=("x", "y"),
        ... )
        >>> ds = xr.Dataset({"a": da})
        >>> da.groupby("x").quantile(0)
        <xarray.DataArray (x: 2, y: 4)> Size: 64B
        array([[0.7, 4.2, 0.7, 1.5],
               [6.5, 7.3, 2.6, 1.9]])
        Coordinates:
          * y         (y) int64 32B 1 1 2 2
            quantile  float64 8B 0.0
          * x         (x) int64 16B 0 1
        >>> ds.groupby("y").quantile(0, dim=...)
        <xarray.Dataset> Size: 40B
        Dimensions:   (y: 2)
        Coordinates:
            quantile  float64 8B 0.0
          * y         (y) int64 16B 1 2
        Data variables:
            a         (y) float64 16B 0.7 0.7
        >>> da.groupby("x").quantile([0, 0.5, 1])
        <xarray.DataArray (x: 2, y: 4, quantile: 3)> Size: 192B
        array([[[0.7 , 1.  , 1.3 ],
                [4.2 , 6.3 , 8.4 ],
                [0.7 , 5.05, 9.4 ],
                [1.5 , 4.2 , 6.9 ]],
        <BLANKLINE>
               [[6.5 , 6.5 , 6.5 ],
                [7.3 , 7.3 , 7.3 ],
                [2.6 , 2.6 , 2.6 ],
                [1.9 , 1.9 , 1.9 ]]])
        Coordinates:
          * y         (y) int64 32B 1 1 2 2
          * quantile  (quantile) float64 24B 0.0 0.5 1.0
          * x         (x) int64 16B 0 1
        >>> ds.groupby("y").quantile([0, 0.5, 1], dim=...)
        <xarray.Dataset> Size: 88B
        Dimensions:   (y: 2, quantile: 3)
        Coordinates:
          * quantile  (quantile) float64 24B 0.0 0.5 1.0
          * y         (y) int64 16B 1 2
        Data variables:
            a         (y, quantile) float64 48B 0.7 5.35 8.4 0.7 2.25 9.4
        References
        ----------
        .. [1] R. J. Hyndman and Y. Fan,
           "Sample quantiles in statistical packages,"
           The American Statistician, 50(4), pp. 361-365, 1996
        """
        if dim is None:
            dim = (self._group_dim,)
        # Dataset.quantile does this, do it for flox to ensure same output.
        q = np.asarray(q, dtype=np.float64)
        if (
            method == "linear"
            and OPTIONS["use_flox"]
            and contains_only_chunked_or_numpy(self._obj)
            and module_available("flox", minversion="0.9.4")
        ):
            result = self._flox_reduce(
                func="quantile", q=q, dim=dim, keep_attrs=keep_attrs, skipna=skipna
            )
            return result
        else:
            return self.map(
                self._obj.__class__.quantile,
                shortcut=False,
                q=q,
                dim=dim,
                method=method,
                keep_attrs=keep_attrs,
                skipna=skipna,
                interpolation=interpolation,
            )
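
A small sketch reusing the array from the docstring examples; with the default ``method="linear"`` the flox fast path is taken when available, while other methods fall back to mapping ``quantile`` over each group:

```python
import xarray as xr

da = xr.DataArray(
    [[1.3, 8.4, 0.7, 6.9], [0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]],
    coords={"x": [0, 0, 1], "y": [1, 1, 2, 2]},
    dims=("x", "y"),
)

# Median within each "x" group; reduces over the grouped dimension by default.
medians = da.groupby("x").quantile(0.5)

# A non-"linear" method skips the flox fast path and maps DataArray.quantile per group.
nearest = da.groupby("x").quantile(0.5, method="nearest")
```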
 
 | 
	quantile 
 | 
	Self-Contained 
 | 
					
	xarray 
 | 
	71 
 | 
	xarray/backends/api.py 
 | 
	def save_mfdataset(
    datasets,
    paths,
    mode="w",
    format=None,
    groups=None,
    engine=None,
    compute=True,
    **kwargs,
):
    """Write multiple datasets to disk as netCDF files simultaneously.
    This function is intended for use with datasets consisting of dask.array
    objects, in which case it can write the multiple datasets to disk
    simultaneously using a shared thread pool.
    When not using dask, it is no different than calling ``to_netcdf``
    repeatedly.
    Parameters
    ----------
    datasets : list of Dataset
        List of datasets to save.
    paths : list of str or list of path-like objects
        List of paths to which to save each corresponding dataset.
    mode : {"w", "a"}, optional
        Write ("w") or append ("a") mode. If mode="w", any existing file at
        these locations will be overwritten.
    format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \
              "NETCDF3_CLASSIC"}, optional
        File format for the resulting netCDF file:
        * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
          features.
        * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
          netCDF 3 compatible API features.
        * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
          which fully supports 2+ GB files, but is only compatible with
          clients linked against netCDF version 3.6.0 or later.
        * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
          handle 2+ GB files very well.
        All formats are supported by the netCDF4-python library.
        scipy.io.netcdf only supports the last two formats.
        The default format is NETCDF4 if you are saving a file to disk and
        have the netCDF4-python library available. Otherwise, xarray falls
        back to using scipy to write netCDF files and defaults to the
        NETCDF3_64BIT format (scipy does not support netCDF4).
    groups : list of str, optional
        Paths to the netCDF4 group in each corresponding file to which to save
        datasets (only works for format="NETCDF4"). The groups will be created
        if necessary.
    engine : {"netcdf4", "scipy", "h5netcdf"}, optional
        Engine to use when writing netCDF files. If not provided, the
        default engine is chosen based on available dependencies, with a
        preference for "netcdf4" if writing to a file on disk.
        See `Dataset.to_netcdf` for additional information.
    compute : bool
        If true compute immediately, otherwise return a
        ``dask.delayed.Delayed`` object that can be computed later.
    **kwargs : dict, optional
        Additional arguments are passed along to ``to_netcdf``.
    Examples
    --------
    Save a dataset into one netCDF per year of data:
    >>> ds = xr.Dataset(
    ...     {"a": ("time", np.linspace(0, 1, 48))},
    ...     coords={"time": pd.date_range("2010-01-01", freq="ME", periods=48)},
    ... )
    >>> ds
    <xarray.Dataset> Size: 768B
    Dimensions:  (time: 48)
    Coordinates:
      * time     (time) datetime64[ns] 384B 2010-01-31 2010-02-28 ... 2013-12-31
    Data variables:
        a        (time) float64 384B 0.0 0.02128 0.04255 ... 0.9574 0.9787 1.0
    >>> years, datasets = zip(*ds.groupby("time.year"))
    >>> paths = [f"{y}.nc" for y in years]
    >>> xr.save_mfdataset(datasets, paths)
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_save_mfdataset.txt 
 | 
	def save_mfdataset(
    datasets,
    paths,
    mode="w",
    format=None,
    groups=None,
    engine=None,
    compute=True,
    **kwargs,
):
    """Write multiple datasets to disk as netCDF files simultaneously.
    This function is intended for use with datasets consisting of dask.array
    objects, in which case it can write the multiple datasets to disk
    simultaneously using a shared thread pool.
    When not using dask, it is no different than calling ``to_netcdf``
    repeatedly.
    Parameters
    ----------
    datasets : list of Dataset
        List of datasets to save.
    paths : list of str or list of path-like objects
        List of paths to which to save each corresponding dataset.
    mode : {"w", "a"}, optional
        Write ("w") or append ("a") mode. If mode="w", any existing file at
        these locations will be overwritten.
    format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \
              "NETCDF3_CLASSIC"}, optional
        File format for the resulting netCDF file:
        * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
          features.
        * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
          netCDF 3 compatible API features.
        * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
          which fully supports 2+ GB files, but is only compatible with
          clients linked against netCDF version 3.6.0 or later.
        * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
          handle 2+ GB files very well.
        All formats are supported by the netCDF4-python library.
        scipy.io.netcdf only supports the last two formats.
        The default format is NETCDF4 if you are saving a file to disk and
        have the netCDF4-python library available. Otherwise, xarray falls
        back to using scipy to write netCDF files and defaults to the
        NETCDF3_64BIT format (scipy does not support netCDF4).
    groups : list of str, optional
        Paths to the netCDF4 group in each corresponding file to which to save
        datasets (only works for format="NETCDF4"). The groups will be created
        if necessary.
    engine : {"netcdf4", "scipy", "h5netcdf"}, optional
        Engine to use when writing netCDF files. If not provided, the
        default engine is chosen based on available dependencies, with a
        preference for "netcdf4" if writing to a file on disk.
        See `Dataset.to_netcdf` for additional information.
    compute : bool
        If true compute immediately, otherwise return a
        ``dask.delayed.Delayed`` object that can be computed later.
    **kwargs : dict, optional
        Additional arguments are passed along to ``to_netcdf``.
    Examples
    --------
    Save a dataset into one netCDF per year of data:
    >>> ds = xr.Dataset(
    ...     {"a": ("time", np.linspace(0, 1, 48))},
    ...     coords={"time": pd.date_range("2010-01-01", freq="ME", periods=48)},
    ... )
    >>> ds
    <xarray.Dataset> Size: 768B
    Dimensions:  (time: 48)
    Coordinates:
      * time     (time) datetime64[ns] 384B 2010-01-31 2010-02-28 ... 2013-12-31
    Data variables:
        a        (time) float64 384B 0.0 0.02128 0.04255 ... 0.9574 0.9787 1.0
    >>> years, datasets = zip(*ds.groupby("time.year"))
    >>> paths = [f"{y}.nc" for y in years]
    >>> xr.save_mfdataset(datasets, paths)
    """
    if mode == "w" and len(set(paths)) < len(paths):
        raise ValueError(
            "cannot use mode='w' when writing multiple datasets to the same path"
        )
    for obj in datasets:
        if not isinstance(obj, Dataset):
            raise TypeError(
                "save_mfdataset only supports writing Dataset "
                f"objects, received type {type(obj)}"
            )
    if groups is None:
        groups = [None] * len(datasets)
    if len({len(datasets), len(paths), len(groups)}) > 1:
        raise ValueError(
            "must supply lists of the same length for the "
            "datasets, paths and groups arguments to "
            "save_mfdataset"
        )
    writers, stores = zip(
        *[
            to_netcdf(
                ds,
                path,
                mode,
                format,
                group,
                engine,
                compute=compute,
                multifile=True,
                **kwargs,
            )
            for ds, path, group in zip(datasets, paths, groups, strict=True)
        ],
        strict=True,
    )
    try:
        writes = [w.sync(compute=compute) for w in writers]
    finally:
        if compute:
            for store in stores:
                store.close()
    if not compute:
        import dask
        return dask.delayed(
            [
                dask.delayed(_finalize_store)(w, s)
                for w, s in zip(writes, stores, strict=True)
            ]
        )
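
A sketch of the deferred-write path, assuming dask is installed: it mirrors the docstring example but passes ``compute=False`` so the writes come back as a single ``dask.delayed.Delayed`` object to be executed later:

```python
import numpy as np
import pandas as pd
import xarray as xr

ds = xr.Dataset(
    {"a": ("time", np.linspace(0, 1, 48))},
    coords={"time": pd.date_range("2010-01-01", freq="ME", periods=48)},
)

# One netCDF file per year; compute=False defers the writes.
years, datasets = zip(*ds.groupby("time.year"))
paths = [f"{y}.nc" for y in years]
delayed_writes = xr.save_mfdataset(datasets, paths, compute=False)
delayed_writes.compute()  # execute all writes, e.g. on a dask scheduler
```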
 
 | 
	save_mfdataset 
 | 
	File-Level 
 | 
					
	xarray 
 | 
	81 
 | 
	xarray/testing/strategies.py 
 | 
	def variables(
    draw: st.DrawFn,
    *,
    array_strategy_fn: ArrayStrategyFn | None = None,
    dims: st.SearchStrategy[Sequence[Hashable] | Mapping[Hashable, int]] | None = None,
    dtype: st.SearchStrategy[np.dtype] | None = None,
    attrs: st.SearchStrategy[Mapping] = ATTRS,
) -> xr.Variable:
    """
    Generates arbitrary xarray.Variable objects.
    Follows the basic signature of the xarray.Variable constructor, but allows passing alternative strategies to
    generate either numpy-like array data or dimensions. Also allows specifying the shape or dtype of the wrapped array
    up front.
    Passing nothing will generate a completely arbitrary Variable (containing a numpy array).
    Requires the hypothesis package to be installed.
    Parameters
    ----------
    array_strategy_fn: Callable which returns a strategy generating array-likes, optional
        Callable must only accept shape and dtype kwargs, and must generate results consistent with its input.
        If not passed the default is to generate a small numpy array with one of the supported_dtypes.
    dims: Strategy for generating the dimensions, optional
        Can either be a strategy for generating a sequence of string dimension names,
        or a strategy for generating a mapping of string dimension names to integer lengths along each dimension.
        If provided as a mapping the array shape will be passed to array_strategy_fn.
        Default is to generate arbitrary dimension names for each axis in data.
    dtype: Strategy which generates np.dtype objects, optional
        Will be passed in to array_strategy_fn.
        Default is to generate any scalar dtype using supported_dtypes.
        Be aware that this default set of dtypes includes some not strictly allowed by the array API standard.
    attrs: Strategy which generates dicts, optional
        Default is to generate a nested attributes dictionary containing arbitrary strings, booleans, integers, Nones,
        and numpy arrays.
    Returns
    -------
    variable_strategy
        Strategy for generating xarray.Variable objects.
    Raises
    ------
    ValueError
        If a custom array_strategy_fn returns a strategy which generates an example array inconsistent with the shape
        & dtype input passed to it.
    Examples
    --------
    Generate completely arbitrary Variable objects backed by a numpy array:
    >>> variables().example()  # doctest: +SKIP
    <xarray.Variable (żō: 3)>
    array([43506,   -16,  -151], dtype=int32)
    >>> variables().example()  # doctest: +SKIP
    <xarray.Variable (eD: 4, ğŻżÂĕ: 2, T: 2)>
    array([[[-10000000., -10000000.],
            [-10000000., -10000000.]],
           [[-10000000., -10000000.],
            [        0., -10000000.]],
           [[        0., -10000000.],
            [-10000000.,        inf]],
           [[       -0., -10000000.],
            [-10000000.,        -0.]]], dtype=float32)
    Attributes:
        śřĴ:      {'ĉ': {'iĥf': array([-30117,  -1740], dtype=int16)}}
    Generate only Variable objects with certain dimension names:
    >>> variables(dims=st.just(["a", "b"])).example()  # doctest: +SKIP
    <xarray.Variable (a: 5, b: 3)>
    array([[       248, 4294967295, 4294967295],
           [2412855555, 3514117556, 4294967295],
           [       111, 4294967295, 4294967295],
           [4294967295, 1084434988,      51688],
           [     47714,        252,      11207]], dtype=uint32)
    Generate only Variable objects with certain dimension names and lengths:
    >>> variables(dims=st.just({"a": 2, "b": 1})).example()  # doctest: +SKIP
    <xarray.Variable (a: 2, b: 1)>
    array([[-1.00000000e+007+3.40282347e+038j],
           [-2.75034266e-225+2.22507386e-311j]])
    See Also
    --------
    :ref:`testing.hypothesis`_
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_variables.txt 
 | 
	def variables(
    draw: st.DrawFn,
    *,
    array_strategy_fn: ArrayStrategyFn | None = None,
    dims: st.SearchStrategy[Sequence[Hashable] | Mapping[Hashable, int]] | None = None,
    dtype: st.SearchStrategy[np.dtype] | None = None,
    attrs: st.SearchStrategy[Mapping] = ATTRS,
) -> xr.Variable:
    """
    Generates arbitrary xarray.Variable objects.
    Follows the basic signature of the xarray.Variable constructor, but allows passing alternative strategies to
    generate either numpy-like array data or dimensions. Also allows specifying the shape or dtype of the wrapped array
    up front.
    Passing nothing will generate a completely arbitrary Variable (containing a numpy array).
    Requires the hypothesis package to be installed.
    Parameters
    ----------
    array_strategy_fn: Callable which returns a strategy generating array-likes, optional
        Callable must only accept shape and dtype kwargs, and must generate results consistent with its input.
        If not passed the default is to generate a small numpy array with one of the supported_dtypes.
    dims: Strategy for generating the dimensions, optional
        Can either be a strategy for generating a sequence of string dimension names,
        or a strategy for generating a mapping of string dimension names to integer lengths along each dimension.
        If provided as a mapping the array shape will be passed to array_strategy_fn.
        Default is to generate arbitrary dimension names for each axis in data.
    dtype: Strategy which generates np.dtype objects, optional
        Will be passed in to array_strategy_fn.
        Default is to generate any scalar dtype using supported_dtypes.
        Be aware that this default set of dtypes includes some not strictly allowed by the array API standard.
    attrs: Strategy which generates dicts, optional
        Default is to generate a nested attributes dictionary containing arbitrary strings, booleans, integers, Nones,
        and numpy arrays.
    Returns
    -------
    variable_strategy
        Strategy for generating xarray.Variable objects.
    Raises
    ------
    ValueError
        If a custom array_strategy_fn returns a strategy which generates an example array inconsistent with the shape
        & dtype input passed to it.
    Examples
    --------
    Generate completely arbitrary Variable objects backed by a numpy array:
    >>> variables().example()  # doctest: +SKIP
    <xarray.Variable (żō: 3)>
    array([43506,   -16,  -151], dtype=int32)
    >>> variables().example()  # doctest: +SKIP
    <xarray.Variable (eD: 4, ğŻżÂĕ: 2, T: 2)>
    array([[[-10000000., -10000000.],
            [-10000000., -10000000.]],
           [[-10000000., -10000000.],
            [        0., -10000000.]],
           [[        0., -10000000.],
            [-10000000.,        inf]],
           [[       -0., -10000000.],
            [-10000000.,        -0.]]], dtype=float32)
    Attributes:
        śřĴ:      {'ĉ': {'iĥf': array([-30117,  -1740], dtype=int16)}}
    Generate only Variable objects with certain dimension names:
    >>> variables(dims=st.just(["a", "b"])).example()  # doctest: +SKIP
    <xarray.Variable (a: 5, b: 3)>
    array([[       248, 4294967295, 4294967295],
           [2412855555, 3514117556, 4294967295],
           [       111, 4294967295, 4294967295],
           [4294967295, 1084434988,      51688],
           [     47714,        252,      11207]], dtype=uint32)
    Generate only Variable objects with certain dimension names and lengths:
    >>> variables(dims=st.just({"a": 2, "b": 1})).example()  # doctest: +SKIP
    <xarray.Variable (a: 2, b: 1)>
    array([[-1.00000000e+007+3.40282347e+038j],
           [-2.75034266e-225+2.22507386e-311j]])
    See Also
    --------
    :ref:`testing.hypothesis`_
    """
    if dtype is None:
        dtype = supported_dtypes()
    if not isinstance(dims, st.SearchStrategy) and dims is not None:
        raise InvalidArgument(
            f"dims must be provided as a hypothesis.strategies.SearchStrategy object (or None), but got type {type(dims)}. "
            "To specify fixed contents, use hypothesis.strategies.just()."
        )
    if not isinstance(dtype, st.SearchStrategy) and dtype is not None:
        raise InvalidArgument(
            f"dtype must be provided as a hypothesis.strategies.SearchStrategy object (or None), but got type {type(dtype)}. "
            "To specify fixed contents, use hypothesis.strategies.just()."
        )
    if not isinstance(attrs, st.SearchStrategy) and attrs is not None:
        raise InvalidArgument(
            f"attrs must be provided as a hypothesis.strategies.SearchStrategy object (or None), but got type {type(attrs)}. "
            "To specify fixed contents, use hypothesis.strategies.just()."
        )
    _array_strategy_fn: ArrayStrategyFn
    if array_strategy_fn is None:
        # For some reason if I move the default value to the function signature definition mypy incorrectly says the ignore is no longer necessary, making it impossible to satisfy mypy
        _array_strategy_fn = npst.arrays  # type: ignore[assignment]  # npst.arrays has extra kwargs that we aren't using later
    elif not callable(array_strategy_fn):
        raise InvalidArgument(
            "array_strategy_fn must be a Callable that accepts the kwargs dtype and shape and returns a hypothesis "
            "strategy which generates corresponding array-like objects."
        )
    else:
        _array_strategy_fn = (
            array_strategy_fn  # satisfy mypy that this new variable cannot be None
        )
    _dtype = draw(dtype)
    if dims is not None:
        # generate dims first then draw data to match
        _dims = draw(dims)
        if isinstance(_dims, Sequence):
            dim_names = list(_dims)
            valid_shapes = npst.array_shapes(min_dims=len(_dims), max_dims=len(_dims))
            _shape = draw(valid_shapes)
            array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype)
        elif isinstance(_dims, Mapping | dict):
            # should be a mapping of form {dim_names: lengths}
            dim_names, _shape = list(_dims.keys()), tuple(_dims.values())
            array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype)
        else:
            raise InvalidArgument(
                f"Invalid type returned by dims strategy - drew an object of type {type(dims)}"
            )
    else:
        # nothing provided, so generate everything consistently
        # We still generate the shape first here just so that we always pass shape to array_strategy_fn
        _shape = draw(npst.array_shapes())
        array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype)
        dim_names = draw(dimension_names(min_dims=len(_shape), max_dims=len(_shape)))
    _data = draw(array_strategy)
    if _data.shape != _shape:
        raise ValueError(
            "array_strategy_fn returned an array object with a different shape than it was passed."
            f"Passed {_shape}, but returned {_data.shape}."
            "Please either specify a consistent shape via the dims kwarg or ensure the array_strategy_fn callable "
            "obeys the shape argument passed to it."
        )
    if _data.dtype != _dtype:
        raise ValueError(
            "array_strategy_fn returned an array object with a different dtype than it was passed."
            f"Passed {_dtype}, but returned {_data.dtype}"
            "Please either specify a consistent dtype via the dtype kwarg or ensure the array_strategy_fn callable "
            "obeys the dtype argument passed to it."
        )
    return xr.Variable(dims=dim_names, data=_data, attrs=draw(attrs))
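
A hedged property-based test sketch built on the strategy above; it assumes the hypothesis package is installed and that the strategy is importable as `xarray.testing.strategies.variables` (the test name and dimension sizes are illustrative):

```python
import hypothesis.strategies as st
from hypothesis import given

import xarray.testing.strategies as xrst

# Every generated Variable should survive a transpose round trip unchanged.
@given(xrst.variables(dims=st.just({"a": 2, "b": 3})))
def test_transpose_roundtrip(var):
    assert var.transpose(*reversed(var.dims)).transpose(*var.dims).equals(var)
```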
 
 | 
	variables 
 | 
	File-Level 
 | 
					
	datasets 
 | 
	4 
 | 
	src/datasets/dataset_dict.py 
 | 
	    def push_to_hub(
        self,
        repo_id,
        config_name: str = "default",
        set_default: Optional[bool] = None,
        data_dir: Optional[str] = None,
        commit_message: Optional[str] = None,
        commit_description: Optional[str] = None,
        private: Optional[bool] = False,
        token: Optional[str] = None,
        revision: Optional[str] = None,
        create_pr: Optional[bool] = False,
        max_shard_size: Optional[Union[int, str]] = None,
        num_shards: Optional[Dict[str, int]] = None,
        embed_external_files: bool = True,
    ) -> CommitInfo:
        """Pushes the [`DatasetDict`] to the hub as a Parquet dataset.
        The [`DatasetDict`] is pushed using HTTP requests and does not require git or git-lfs to be installed.
        Each dataset split will be pushed independently. The pushed dataset will keep the original split names.
        The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`]
        data, the Parquet files will store the bytes of your images or audio files.
        You can disable this by setting `embed_external_files` to False.
        Args:
            repo_id (`str`):
                The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
                `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
                of the logged-in user.
            config_name (`str`):
                Configuration name of a dataset. Defaults to "default".
            set_default (`bool`, *optional*):
                Whether to set this configuration as the default one. Otherwise, the default configuration is the one
                named "default".
            data_dir (`str`, *optional*):
                Directory name that will contain the uploaded data files. Defaults to the `config_name` if different
                from "default", else "data".
                <Added version="2.17.0"/>
            commit_message (`str`, *optional*):
                Message to commit while pushing. Will default to `"Upload dataset"`.
            commit_description (`str`, *optional*):
                Description of the commit that will be created.
                Additionally, description of the PR if a PR is created (`create_pr` is True).
                <Added version="2.16.0"/>
            private (`bool`, *optional*):
                Whether the dataset repository should be set to private or not. Only affects repository creation:
                a repository that already exists will not be affected by that parameter.
            token (`str`, *optional*):
                An optional authentication token for the Hugging Face Hub. If no token is passed, will default
                to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
                if no token is passed and the user is not logged-in.
            revision (`str`, *optional*):
                Branch to push the uploaded files to. Defaults to the `"main"` branch.
                <Added version="2.15.0"/>
            create_pr (`bool`, *optional*, defaults to `False`):
                Whether to create a PR with the uploaded files or directly commit.
                <Added version="2.15.0"/>
            max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
                The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit
                (like `"500MB"` or `"1GB"`).
            num_shards (`Dict[str, int]`, *optional*):
                Number of shards to write. By default, the number of shards depends on `max_shard_size`.
                Use a dictionary to define a different num_shards for each split.
                <Added version="2.8.0"/>
            embed_external_files (`bool`, defaults to `True`):
                Whether to embed file bytes in the shards.
                In particular, this will do the following before the push for the fields of type:
                - [`Audio`] and [`Image`]: removes local path information and embeds file content in the Parquet files.
        Return:
            huggingface_hub.CommitInfo
        Example:
        ```python
        >>> dataset_dict.push_to_hub("<organization>/<dataset_id>")
        >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True)
        >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB")
        >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", num_shards={"train": 1024, "test": 8})
        ```
        If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
        ```python
        >>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
        >>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
        >>> # later
        >>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
        >>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
        ```
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_DatasetDict.push_to_hub.txt 
 | 
	    def push_to_hub(
        self,
        repo_id,
        config_name: str = "default",
        set_default: Optional[bool] = None,
        data_dir: Optional[str] = None,
        commit_message: Optional[str] = None,
        commit_description: Optional[str] = None,
        private: Optional[bool] = False,
        token: Optional[str] = None,
        revision: Optional[str] = None,
        create_pr: Optional[bool] = False,
        max_shard_size: Optional[Union[int, str]] = None,
        num_shards: Optional[Dict[str, int]] = None,
        embed_external_files: bool = True,
    ) -> CommitInfo:
        """Pushes the [`DatasetDict`] to the hub as a Parquet dataset.
        The [`DatasetDict`] is pushed using HTTP requests and does not require git or git-lfs to be installed.
        Each dataset split will be pushed independently. The pushed dataset will keep the original split names.
        The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`]
        data, the Parquet files will store the bytes of your images or audio files.
        You can disable this by setting `embed_external_files` to False.
        Args:
            repo_id (`str`):
                The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
                `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
                of the logged-in user.
            config_name (`str`):
                Configuration name of a dataset. Defaults to "default".
            set_default (`bool`, *optional*):
                Whether to set this configuration as the default one. Otherwise, the default configuration is the one
                named "default".
            data_dir (`str`, *optional*):
                Directory name that will contain the uploaded data files. Defaults to the `config_name` if different
                from "default", else "data".
                <Added version="2.17.0"/>
            commit_message (`str`, *optional*):
                Message to commit while pushing. Will default to `"Upload dataset"`.
            commit_description (`str`, *optional*):
                Description of the commit that will be created.
                Additionally, description of the PR if a PR is created (`create_pr` is True).
                <Added version="2.16.0"/>
            private (`bool`, *optional*):
                Whether the dataset repository should be set to private or not. Only affects repository creation:
                a repository that already exists will not be affected by that parameter.
            token (`str`, *optional*):
                An optional authentication token for the Hugging Face Hub. If no token is passed, will default
                to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
                if no token is passed and the user is not logged-in.
            revision (`str`, *optional*):
                Branch to push the uploaded files to. Defaults to the `"main"` branch.
                <Added version="2.15.0"/>
            create_pr (`bool`, *optional*, defaults to `False`):
                Whether to create a PR with the uploaded files or directly commit.
                <Added version="2.15.0"/>
            max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
                The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit
                (like `"500MB"` or `"1GB"`).
            num_shards (`Dict[str, int]`, *optional*):
                Number of shards to write. By default, the number of shards depends on `max_shard_size`.
                Use a dictionary to define a different num_shards for each split.
                <Added version="2.8.0"/>
            embed_external_files (`bool`, defaults to `True`):
                Whether to embed file bytes in the shards.
                In particular, this will do the following before the push for the fields of type:
                - [`Audio`] and [`Image`]: removes local path information and embeds file content in the Parquet files.
        Return:
            huggingface_hub.CommitInfo
        Example:
        ```python
        >>> dataset_dict.push_to_hub("<organization>/<dataset_id>")
        >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True)
        >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB")
        >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", num_shards={"train": 1024, "test": 8})
        ```
        If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
        ```python
        >>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
        >>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
        >>> # later
        >>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
        >>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
        ```
        """
        if num_shards is None:
            num_shards = {k: None for k in self}
        elif not isinstance(num_shards, dict):
            raise ValueError(
                "Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}"
            )
        self._check_values_type()
        self._check_values_features()
        total_uploaded_size = 0
        total_dataset_nbytes = 0
        info_to_dump: DatasetInfo = next(iter(self.values())).info.copy()
        info_to_dump.config_name = config_name
        info_to_dump.splits = SplitDict()
        for split in self.keys():
            if not re.match(_split_re, split):
                raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
        api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
        repo_url = api.create_repo(
            repo_id,
            token=token,
            repo_type="dataset",
            private=private,
            exist_ok=True,
        )
        repo_id = repo_url.repo_id
        if revision is not None and not revision.startswith("refs/pr/"):
            # We do not call create_branch for a PR reference: 400 Bad Request
            api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
        if not data_dir:
            data_dir = config_name if config_name != "default" else "data"  # for backward compatibility
        additions = []
        for split in self.keys():
            logger.info(f"Pushing split {split} to the Hub.")
            # The split=key needs to be removed before merging
            split_additions, uploaded_size, dataset_nbytes = self[split]._push_parquet_shards_to_hub(
                repo_id,
                data_dir=data_dir,
                split=split,
                token=token,
                revision=revision,
                create_pr=create_pr,
                max_shard_size=max_shard_size,
                num_shards=num_shards.get(split),
                embed_external_files=embed_external_files,
            )
            additions += split_additions
            total_uploaded_size += uploaded_size
            total_dataset_nbytes += dataset_nbytes
            info_to_dump.splits[split] = SplitInfo(str(split), num_bytes=dataset_nbytes, num_examples=len(self[split]))
        info_to_dump.download_checksums = None
        info_to_dump.download_size = total_uploaded_size
        info_to_dump.dataset_size = total_dataset_nbytes
        info_to_dump.size_in_bytes = total_uploaded_size + total_dataset_nbytes
        # Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
        # and delete old split shards (if they exist)
        repo_with_dataset_card, repo_with_dataset_infos = False, False
        repo_splits = []  # use a list to keep the order of the splits
        deletions = []
        repo_files_to_add = [addition.path_in_repo for addition in additions]
        for repo_file in api.list_repo_tree(
            repo_id=repo_id, revision=revision, repo_type="dataset", token=token, recursive=True
        ):
            if not isinstance(repo_file, RepoFile):
                continue
            if repo_file.rfilename == config.REPOCARD_FILENAME:
                repo_with_dataset_card = True
            elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
                repo_with_dataset_infos = True
            elif (
                repo_file.rfilename.startswith(tuple(f"{data_dir}/{split}-" for split in self.keys()))
                and repo_file.rfilename not in repo_files_to_add
            ):
                deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
            elif fnmatch.fnmatch(
                repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*")
            ):
                repo_split = string_to_dict(
                    repo_file.rfilename,
                    glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED),
                )["split"]
                if repo_split not in repo_splits:
                    repo_splits.append(repo_split)
        # get the info from the README to update them
        if repo_with_dataset_card:
            dataset_card_path = api.hf_hub_download(
                repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision
            )
            dataset_card = DatasetCard.load(Path(dataset_card_path))
            dataset_card_data = dataset_card.data
            metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
        # get the deprecated dataset_infos.json to update them
        elif repo_with_dataset_infos:
            dataset_card = None
            dataset_card_data = DatasetCardData()
            metadata_configs = MetadataConfigs()
        else:
            dataset_card = None
            dataset_card_data = DatasetCardData()
            metadata_configs = MetadataConfigs()
        # create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
        if not metadata_configs and repo_splits:
            default_metadata_configs_to_dump = {
                "data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
            }
            MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
        metadata_config_to_dump = {
            "data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in self.keys()],
        }
        if set_default and config_name != "default":
            if metadata_configs:
                default_config_name = metadata_configs.get_default_config_name()
                if default_config_name == "default":
                    raise ValueError(
                        "There exists a configuration named 'default'. To set a different configuration as default, "
                        "rename the 'default' one first."
                    )
                else:
                    _ = metadata_configs[default_config_name].pop("default")
            metadata_config_to_dump["default"] = True
        # push to the deprecated dataset_infos.json
        if repo_with_dataset_infos:
            dataset_infos_path = api.hf_hub_download(
                repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
            )
            with open(dataset_infos_path, encoding="utf-8") as f:
                dataset_infos: dict = json.load(f)
            dataset_infos[config_name] = asdict(info_to_dump)
            buffer = BytesIO()
            buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8"))
            additions.append(
                CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer)
            )
        # push to README
        DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
        MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data)
        dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card
        additions.append(
            CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())
        )
        commit_message = commit_message if commit_message is not None else "Upload dataset"
        if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT:
            commit_info = api.create_commit(
                repo_id,
                operations=additions + deletions,
                commit_message=commit_message,
                commit_description=commit_description,
                token=token,
                repo_type="dataset",
                revision=revision,
                create_pr=create_pr,
            )
        else:
            logger.info(
                f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits."
            )
            num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT)
            for i in range(0, num_commits):
                operations = additions[
                    i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT
                ] + (deletions if i == 0 else [])
                commit_info = api.create_commit(
                    repo_id,
                    operations=operations,
                    commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})",
                    commit_description=commit_description,
                    token=token,
                    repo_type="dataset",
                    revision=revision,
                    create_pr=create_pr,
                )
                logger.info(
                    f"Commit #{i+1} completed"
                    + (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "")
                    + "."
                )
        return commit_info
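
For reference, a minimal usage sketch of the method above follows. The repository name `username/my_dataset` is a placeholder and the call assumes a logged-in Hub token; `num_shards` must map every split to a shard count, matching the validation at the top of the method.

```python
# Minimal sketch (hypothetical repo name, requires a valid Hugging Face token).
from datasets import Dataset, DatasetDict

ds = DatasetDict(
    {
        "train": Dataset.from_dict({"text": ["a", "b", "c", "d"]}),
        "test": Dataset.from_dict({"text": ["e", "f"]}),
    }
)

# `num_shards` needs one entry per split; `config_name` selects the data_dir
# used for the parquet files on the Hub ("data" for the default config).
ds.push_to_hub(
    "username/my_dataset",
    config_name="default",
    num_shards={"train": 2, "test": 1},
    commit_message="Upload dataset",
)
```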
 
 | 
	DatasetDict.push_to_hub 
 | 
	Repo-Level 
 | 
					
	datasets 
 | 
	20 
 | 
	src/datasets/iterable_dataset.py 
 | 
	    def map(
        self,
        function: Optional[Callable] = None,
        with_indices: bool = False,
        input_columns: Optional[Union[str, List[str]]] = None,
        batched: bool = False,
        batch_size: Optional[int] = 1000,
        drop_last_batch: bool = False,
        remove_columns: Optional[Union[str, List[str]]] = None,
        features: Optional[Features] = None,
        fn_kwargs: Optional[dict] = None,
    ) -> "IterableDataset":
        """
        Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
        If your function returns a column that already exists, then it overwrites it.
        The function is applied on-the-fly on the examples when iterating over the dataset.
        You can specify whether the function should be batched or not with the `batched` parameter:
        - If batched is `False`, then the function takes 1 example in and should return 1 example.
          An example is a dictionary, e.g. `{"text": "Hello there !"}`.
        - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
          A batch is a dictionary, e.g. a batch of 1 example is {"text": ["Hello there !"]}.
        - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
          Note that the last batch may have fewer than `n` examples.
          A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
        Args:
            function (`Callable`, *optional*, defaults to `None`):
                Function applied on-the-fly on the examples when you iterate on the dataset.
                It must have one of the following signatures:
                - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
                - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
                - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
                - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
                For advanced usage, the function can also return a `pyarrow.Table`.
                Moreover, if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
                If no function is provided, it defaults to the identity function: `lambda x: x`.
            with_indices (`bool`, defaults to `False`):
                Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
            input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
                The columns to be passed into `function`
                as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
            batched (`bool`, defaults to `False`):
                Provide batch of examples to `function`.
            batch_size (`int`, *optional*, defaults to `1000`):
                Number of examples per batch provided to `function` if `batched=True`.
                If `batch_size <= 0` or `batch_size == None`, the full dataset is provided as a single batch to `function`.
            drop_last_batch (`bool`, defaults to `False`):
                Whether a last batch smaller than the batch_size should be
                dropped instead of being processed by the function.
            remove_columns (`[List[str]]`, *optional*, defaults to `None`):
                Remove a selection of columns while doing the mapping.
                Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
                columns with names in `remove_columns`, these columns will be kept.
            features (`[Features]`, *optional*, defaults to `None`):
                Feature types of the resulting dataset.
            fn_kwargs (`Dict`, *optional*, default `None`):
                Keyword arguments to be passed to `function`.
        Example:
        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
        >>> def add_prefix(example):
        ...     example["text"] = "Review: " + example["text"]
        ...     return example
        >>> ds = ds.map(add_prefix)
        >>> list(ds.take(3))
        [{'label': 1,
         'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
         {'label': 1,
         'text': 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
         {'label': 1, 'text': 'Review: effective but too-tepid biopic'}]
        ```
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_IterableDataset.map.txt 
 | 
	    def map(
        self,
        function: Optional[Callable] = None,
        with_indices: bool = False,
        input_columns: Optional[Union[str, List[str]]] = None,
        batched: bool = False,
        batch_size: Optional[int] = 1000,
        drop_last_batch: bool = False,
        remove_columns: Optional[Union[str, List[str]]] = None,
        features: Optional[Features] = None,
        fn_kwargs: Optional[dict] = None,
    ) -> "IterableDataset":
        """
        Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
        If your function returns a column that already exists, then it overwrites it.
        The function is applied on-the-fly on the examples when iterating over the dataset.
        You can specify whether the function should be batched or not with the `batched` parameter:
        - If batched is `False`, then the function takes 1 example in and should return 1 example.
          An example is a dictionary, e.g. `{"text": "Hello there !"}`.
        - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
          A batch is a dictionary, e.g. a batch of 1 example is {"text": ["Hello there !"]}.
        - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
          Note that the last batch may have fewer than `n` examples.
          A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
        Args:
            function (`Callable`, *optional*, defaults to `None`):
                Function applied on-the-fly on the examples when you iterate on the dataset.
                It must have one of the following signatures:
                - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
                - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
                - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
                - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
                For advanced usage, the function can also return a `pyarrow.Table`.
                Moreover, if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
                If no function is provided, it defaults to the identity function: `lambda x: x`.
            with_indices (`bool`, defaults to `False`):
                Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
            input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
                The columns to be passed into `function`
                as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
            batched (`bool`, defaults to `False`):
                Provide batch of examples to `function`.
            batch_size (`int`, *optional*, defaults to `1000`):
                Number of examples per batch provided to `function` if `batched=True`.
                If `batch_size <= 0` or `batch_size == None`, the full dataset is provided as a single batch to `function`.
            drop_last_batch (`bool`, defaults to `False`):
                Whether a last batch smaller than the batch_size should be
                dropped instead of being processed by the function.
            remove_columns (`[List[str]]`, *optional*, defaults to `None`):
                Remove a selection of columns while doing the mapping.
                Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
                columns with names in `remove_columns`, these columns will be kept.
            features (`[Features]`, *optional*, defaults to `None`):
                Feature types of the resulting dataset.
            fn_kwargs (`Dict`, *optional*, default `None`):
                Keyword arguments to be passed to `function`.
        Example:
        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
        >>> def add_prefix(example):
        ...     example["text"] = "Review: " + example["text"]
        ...     return example
        >>> ds = ds.map(add_prefix)
        >>> list(ds.take(3))
        [{'label': 1,
         'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
         {'label': 1,
         'text': 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
         {'label': 1, 'text': 'Review: effective but too-tepid biopic'}]
        ```
        """
        if isinstance(input_columns, str):
            input_columns = [input_columns]
        if isinstance(remove_columns, str):
            remove_columns = [remove_columns]
        if function is None:
            function = identity_func
        if fn_kwargs is None:
            fn_kwargs = {}
        ex_iterable = (
            TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id)
            if self._info.features is not None
            else self._ex_iterable
        )
        ex_iterable = (
            RebatchedArrowExamplesIterable(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch)
            if self._formatting and self._formatting.format_type == "arrow"
            else ex_iterable
        )
        ex_iterable = MappedExamplesIterable(
            ex_iterable,
            function=function,
            with_indices=with_indices,
            input_columns=input_columns,
            batched=batched,
            batch_size=batch_size,
            drop_last_batch=drop_last_batch,
            remove_columns=remove_columns,
            fn_kwargs=fn_kwargs,
            formatting=self._formatting,
        )
        info = self.info.copy()
        info.features = features
        return IterableDataset(
            ex_iterable=ex_iterable,
            info=info,
            split=self._split,
            formatting=self._formatting,
            shuffling=copy.deepcopy(self._shuffling),
            distributed=copy.deepcopy(self._distributed),
            token_per_repo_id=self._token_per_repo_id,
        )
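
A short sketch of the batched path follows (an in-memory generator stands in for a Hub dataset; the names are illustrative). With `batched=True`, the function receives a dict of lists and may return a different number of rows than it was given.

```python
# Minimal sketch of batched mapping on an IterableDataset.
from datasets import IterableDataset

def gen():
    for i in range(10):
        yield {"text": f"example {i}"}

ds = IterableDataset.from_generator(gen)

def upper_case(batch):
    # batch is a dict of lists, e.g. {"text": ["example 0", "example 1", ...]}
    return {"text": [t.upper() for t in batch["text"]]}

ds = ds.map(upper_case, batched=True, batch_size=4)
print(list(ds.take(2)))  # expected: [{'text': 'EXAMPLE 0'}, {'text': 'EXAMPLE 1'}]
```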
 
 | 
	IterableDataset.map 
 | 
	Self-Contained 
 | 
					
	datasets 
 | 
	49 
 | 
	src/datasets/combine.py 
 | 
	def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """
    Interleave several datasets (sources) into a single dataset.
    The new dataset is constructed by alternating between the sources to get the examples.
    You can use this function on a list of [`Dataset`] objects, or on a list of [`IterableDataset`] objects.
        - If `probabilities` is `None` (default) the new dataset is constructed by cycling between each source to get the examples.
        - If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.
    The resulting dataset ends when one of the source datasets runs out of examples, except when `stopping_strategy` is `"all_exhausted"`,
    in which case the resulting dataset ends when all datasets have run out of examples at least once.
    Note for iterable datasets:
    In a distributed setup or in PyTorch DataLoader workers, the stopping strategy is applied per process.
    Therefore the "first_exhausted" strategy on a sharded iterable dataset can generate fewer samples in total (up to 1 missing sample per subdataset per worker).
    Args:
        datasets (`List[Dataset]` or `List[IterableDataset]`):
            List of datasets to interleave.
        probabilities (`List[float]`, *optional*, defaults to `None`):
            If specified, the new dataset is constructed by sampling
            examples from one source at a time according to these probabilities.
        seed (`int`, *optional*, defaults to `None`):
            The random seed used to choose a source for each example.
        info ([`DatasetInfo`], *optional*):
            Dataset information, like description, citation, etc.
            <Added version="2.4.0"/>
        split ([`NamedSplit`], *optional*):
            Name of the dataset split.
            <Added version="2.4.0"/>
        stopping_strategy (`str`, defaults to `first_exhausted`):
            Two strategies are proposed right now, `first_exhausted` and `all_exhausted`.
            By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
            If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
            Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
            - with no probabilities, the resulting dataset will have `max_length_datasets*nb_dataset` samples.
            - with given probabilities, the resulting dataset will have more samples if some datasets have a very low probability of being visited.
    Returns:
        [`Dataset`] or [`IterableDataset`]: Return type depends on the input `datasets`
        parameter. `Dataset` if the input is a list of `Dataset`, `IterableDataset` if the input is a list of
        `IterableDataset`.
    Example:
        For regular datasets (map-style):
        ```python
        >>> from datasets import Dataset, interleave_datasets
        >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
        >>> d2 = Dataset.from_dict({"a": [10, 11, 12]})
        >>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
        >>> dataset["a"]
        [10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22]
        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
        >>> dataset["a"]
        [10, 0, 11, 1, 2]
        >>> dataset = interleave_datasets([d1, d2, d3])
        >>> dataset["a"]
        [0, 10, 20, 1, 11, 21, 2, 12, 22]
        >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
        >>> dataset["a"]
        [0, 10, 20, 1, 11, 21, 2, 12, 22]
        >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
        >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
        >>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]})
        >>> dataset = interleave_datasets([d1, d2, d3])
        >>> dataset["a"]
        [0, 10, 20, 1, 11, 21, 2, 12, 22]
        >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
        >>> dataset["a"]
        [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24]
        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
        >>> dataset["a"]
        [10, 0, 11, 1, 2]
        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
        >>> dataset["a"]
        [10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24]
        For datasets in streaming mode (iterable):
        >>> from datasets import load_dataset, interleave_datasets
        >>> d1 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True)
        >>> d2 = load_dataset("oscar", "unshuffled_deduplicated_fr", split="train", streaming=True)
        >>> dataset = interleave_datasets([d1, d2])
        >>> iterator = iter(dataset)
        >>> next(iterator)
        {'text': 'Mtendere Village was inspired by the vision...}
        >>> next(iterator)
        {'text': "Média de débat d'idées, de culture...}
        ```
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_interleave_datasets.txt 
 | 
	def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """
    Interleave several datasets (sources) into a single dataset.
    The new dataset is constructed by alternating between the sources to get the examples.
    You can use this function on a list of [`Dataset`] objects, or on a list of [`IterableDataset`] objects.
        - If `probabilities` is `None` (default) the new dataset is constructed by cycling between each source to get the examples.
        - If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.
    The resulting dataset ends when one of the source datasets runs out of examples, except when `stopping_strategy` is `"all_exhausted"`,
    in which case the resulting dataset ends when all datasets have run out of examples at least once.
    Note for iterable datasets:
    In a distributed setup or in PyTorch DataLoader workers, the stopping strategy is applied per process.
    Therefore the "first_exhausted" strategy on a sharded iterable dataset can generate fewer samples in total (up to 1 missing sample per subdataset per worker).
    Args:
        datasets (`List[Dataset]` or `List[IterableDataset]`):
            List of datasets to interleave.
        probabilities (`List[float]`, *optional*, defaults to `None`):
            If specified, the new dataset is constructed by sampling
            examples from one source at a time according to these probabilities.
        seed (`int`, *optional*, defaults to `None`):
            The random seed used to choose a source for each example.
        info ([`DatasetInfo`], *optional*):
            Dataset information, like description, citation, etc.
            <Added version="2.4.0"/>
        split ([`NamedSplit`], *optional*):
            Name of the dataset split.
            <Added version="2.4.0"/>
        stopping_strategy (`str`, defaults to `first_exhausted`):
            Two strategies are proposed right now, `first_exhausted` and `all_exhausted`.
            By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
            If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
            Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
            - with no probabilities, the resulting dataset will have `max_length_datasets*nb_dataset` samples.
            - with given probabilities, the resulting dataset will have more samples if some datasets have a very low probability of being visited.
    Returns:
        [`Dataset`] or [`IterableDataset`]: Return type depends on the input `datasets`
        parameter. `Dataset` if the input is a list of `Dataset`, `IterableDataset` if the input is a list of
        `IterableDataset`.
    Example:
        For regular datasets (map-style):
        ```python
        >>> from datasets import Dataset, interleave_datasets
        >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
        >>> d2 = Dataset.from_dict({"a": [10, 11, 12]})
        >>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
        >>> dataset["a"]
        [10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22]
        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
        >>> dataset["a"]
        [10, 0, 11, 1, 2]
        >>> dataset = interleave_datasets([d1, d2, d3])
        >>> dataset["a"]
        [0, 10, 20, 1, 11, 21, 2, 12, 22]
        >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
        >>> dataset["a"]
        [0, 10, 20, 1, 11, 21, 2, 12, 22]
        >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
        >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
        >>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]})
        >>> dataset = interleave_datasets([d1, d2, d3])
        >>> dataset["a"]
        [0, 10, 20, 1, 11, 21, 2, 12, 22]
        >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
        >>> dataset["a"]
        [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24]
        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
        >>> dataset["a"]
        [10, 0, 11, 1, 2]
        >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
        >>> dataset["a"]
        [10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24]
        For datasets in streaming mode (iterable):
        >>> from datasets import load_dataset, interleave_datasets
        >>> d1 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True)
        >>> d2 = load_dataset("oscar", "unshuffled_deduplicated_fr", split="train", streaming=True)
        >>> dataset = interleave_datasets([d1, d2])
        >>> iterator = iter(dataset)
        >>> next(iterator)
        {'text': 'Mtendere Village was inspired by the vision...}
        >>> next(iterator)
        {'text': "Média de débat d'idées, de culture...}
        ```
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
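
To illustrate the two stopping strategies on iterable datasets, here is a minimal sketch using in-memory generators in place of real streaming datasets; the expected outputs mirror the map-style examples in the docstring.

```python
# Minimal sketch: stopping strategies on interleaved iterable datasets.
from datasets import IterableDataset, interleave_datasets

def gen_small():
    for i in range(3):
        yield {"a": i}

def gen_large():
    for i in range(10, 15):
        yield {"a": i}

d1 = IterableDataset.from_generator(gen_small)
d2 = IterableDataset.from_generator(gen_large)

# "first_exhausted" (default): stops once the smaller dataset runs out.
print([ex["a"] for ex in interleave_datasets([d1, d2])])
# expected: [0, 10, 1, 11, 2, 12]

# "all_exhausted": the smaller dataset is cycled until every sample of every
# dataset has been seen at least once.
print([ex["a"] for ex in interleave_datasets([d1, d2], stopping_strategy="all_exhausted")])
# expected: [0, 10, 1, 11, 2, 12, 0, 13, 1, 14]
```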
 
 | 
	interleave_datasets 
 | 
	Repo-Level 
 | 
					
	datasets 
 | 
	50 
 | 
	src/datasets/load.py 
 | 
	def load_dataset(
    path: str,
    name: Optional[str] = None,
    data_dir: Optional[str] = None,
    data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
    split: Optional[Union[str, Split]] = None,
    cache_dir: Optional[str] = None,
    features: Optional[Features] = None,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[Union[DownloadMode, str]] = None,
    verification_mode: Optional[Union[VerificationMode, str]] = None,
    keep_in_memory: Optional[bool] = None,
    save_infos: bool = False,
    revision: Optional[Union[str, Version]] = None,
    token: Optional[Union[bool, str]] = None,
    streaming: bool = False,
    num_proc: Optional[int] = None,
    storage_options: Optional[Dict] = None,
    trust_remote_code: Optional[bool] = None,
    **config_kwargs,
) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]:
    """Load a dataset from the Hugging Face Hub, or a local dataset.
    You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`].
    A dataset is a directory that contains:
    - some data files in generic formats (JSON, CSV, Parquet, text, etc.).
    - and optionally a dataset script, if it requires some code to read the data files. This is used to load any kind of formats or structures.
    Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online.
    This function does the following under the hood:
        1. Download and import in the library the dataset script from `path` if it's not already cached inside the library.
            If the dataset has no dataset script, then a generic dataset script is imported instead (JSON, CSV, Parquet, text, etc.)
            Dataset scripts are small python scripts that define dataset builders. They define the citation, info and format of the dataset,
            contain the path or URL to the original data files and the code to load examples from the original data files.
            You can find the complete list of datasets in the Datasets [Hub](https://huggingface.co/datasets).
        2. Run the dataset script which will:
            * Download the dataset file from the original URL (see the script) if it's not already available locally or cached.
            * Process and cache the dataset in typed Arrow tables.
                Arrow tables are arbitrarily long, typed tables which can store nested objects and be mapped to numpy/pandas/python generic types.
                They can be directly accessed from disk, loaded in RAM or even streamed over the web.
        3. Return a dataset built from the requested splits in `split` (default: all).
    It also allows loading a dataset from a local directory or a dataset repository on the Hugging Face Hub without a dataset script.
    In this case, it automatically loads all the data files from the directory or the dataset repository.
    Args:
        path (`str`):
            Path or name of the dataset.
            Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
            For local datasets:
            - if `path` is a local directory (containing data files only)
              -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
              e.g. `'./path/to/directory/with/my/csv/data'`.
            - if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory)
              -> load the dataset builder from the dataset script
              e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
            For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`])
            - if `path` is a dataset repository on the HF hub (containing data files only)
              -> load a generic dataset builder (csv, text etc.) based on the content of the repository
              e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files.
            - if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
              -> load the dataset builder from the dataset script in the dataset repository
              e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
        name (`str`, *optional*):
            Defining the name of the dataset configuration.
        data_dir (`str`, *optional*):
            Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`,
            the behavior is equal to passing `os.path.join(data_dir, **)` as `data_files` to reference all the files in a directory.
        data_files (`str` or `Sequence` or `Mapping`, *optional*):
            Path(s) to source data file(s).
        split (`Split` or `str`):
            Which split of the data to load.
            If `None`, will return a `dict` with all splits (typically `datasets.Split.TRAIN` and `datasets.Split.TEST`).
            If given, will return a single Dataset.
            Splits can be combined and specified like in tensorflow-datasets.
        cache_dir (`str`, *optional*):
            Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
        features (`Features`, *optional*):
            Set the features type to use for this dataset.
        download_config ([`DownloadConfig`], *optional*):
            Specific download configuration parameters.
        download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
            Download/generate mode.
        verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
            Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
            <Added version="2.9.1"/>
        keep_in_memory (`bool`, defaults to `None`):
            Whether to copy the dataset in-memory. If `None`, the dataset
            will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
            nonzero. See more details in the [improve performance](../cache#improve-performance) section.
        save_infos (`bool`, defaults to `False`):
            Save the dataset information (checksums/size/splits/...).
        revision ([`Version`] or `str`, *optional*):
            Version of the dataset script to load.
            As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
            You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
        token (`str` or `bool`, *optional*):
            Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
            If `True`, or not specified, will get token from `"~/.huggingface"`.
        streaming (`bool`, defaults to `False`):
            If set to `True`, don't download the data files. Instead, it streams the data progressively while
            iterating on the dataset. An [`IterableDataset`] or [`IterableDatasetDict`] is returned instead in this case.
            Note that streaming works for datasets that use data formats that support being iterated over, such as txt, csv, and jsonl.
            JSON files may be downloaded completely. Streaming from remote zip or gzip files is supported, but other compressed formats
            like rar and xz are not yet supported. The tgz format doesn't allow streaming.
        num_proc (`int`, *optional*, defaults to `None`):
            Number of processes when downloading and generating the dataset locally.
            Multiprocessing is disabled by default.
            <Added version="2.7.0"/>
        storage_options (`dict`, *optional*, defaults to `None`):
            **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any.
            <Added version="2.11.0"/>
        trust_remote_code (`bool`, defaults to `False`):
            Whether or not to allow for datasets defined on the Hub using a dataset script. This option
            should only be set to `True` for repositories you trust and in which you have read the code, as it will
            execute code present on the Hub on your local machine.
            <Added version="2.16.0"/>
            <Changed version="2.20.0">
            `trust_remote_code` defaults to `False` if not specified.
            </Changed>
        **config_kwargs (additional keyword arguments):
            Keyword arguments to be passed to the `BuilderConfig`
            and used in the [`DatasetBuilder`].
    Returns:
        [`Dataset`] or [`DatasetDict`]:
        - if `split` is not `None`: the dataset requested,
        - if `split` is `None`, a [`~datasets.DatasetDict`] with each split.
        or [`IterableDataset`] or [`IterableDatasetDict`]: if `streaming=True`
        - if `split` is not `None`: the dataset requested,
        - if `split` is `None`, a [`~datasets.streaming.IterableDatasetDict`] with each split.
    Example:
    Load a dataset from the Hugging Face Hub:
    ```py
    >>> from datasets import load_dataset
    >>> ds = load_dataset('rotten_tomatoes', split='train')
    # Map data files to splits
    >>> data_files = {'train': 'train.csv', 'test': 'test.csv'}
    >>> ds = load_dataset('namespace/your_dataset_name', data_files=data_files)
    ```
    Load a local dataset:
    ```py
    # Load a CSV file
    >>> from datasets import load_dataset
    >>> ds = load_dataset('csv', data_files='path/to/local/my_dataset.csv')
    # Load a JSON file
    >>> from datasets import load_dataset
    >>> ds = load_dataset('json', data_files='path/to/local/my_dataset.json')
    # Load from a local loading script
    >>> from datasets import load_dataset
    >>> ds = load_dataset('path/to/local/loading_script/loading_script.py', split='train')
    ```
    Load an [`~datasets.IterableDataset`]:
    ```py
    >>> from datasets import load_dataset
    >>> ds = load_dataset('rotten_tomatoes', split='train', streaming=True)
    ```
    Load an image dataset with the `ImageFolder` dataset builder:
    ```py
    >>> from datasets import load_dataset
    >>> ds = load_dataset('imagefolder', data_dir='/path/to/images', split='train')
    ```
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_load_dataset.txt 
 | 
	def load_dataset(
    path: str,
    name: Optional[str] = None,
    data_dir: Optional[str] = None,
    data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
    split: Optional[Union[str, Split]] = None,
    cache_dir: Optional[str] = None,
    features: Optional[Features] = None,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[Union[DownloadMode, str]] = None,
    verification_mode: Optional[Union[VerificationMode, str]] = None,
    keep_in_memory: Optional[bool] = None,
    save_infos: bool = False,
    revision: Optional[Union[str, Version]] = None,
    token: Optional[Union[bool, str]] = None,
    streaming: bool = False,
    num_proc: Optional[int] = None,
    storage_options: Optional[Dict] = None,
    trust_remote_code: Optional[bool] = None,
    **config_kwargs,
) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]:
    """Load a dataset from the Hugging Face Hub, or a local dataset.
    You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`].
    A dataset is a directory that contains:
    - some data files in generic formats (JSON, CSV, Parquet, text, etc.).
    - and optionally a dataset script, if it requires some code to read the data files. This is used to load any kind of formats or structures.
    Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online.
    This function does the following under the hood:
        1. Download and import in the library the dataset script from `path` if it's not already cached inside the library.
            If the dataset has no dataset script, then a generic dataset script is imported instead (JSON, CSV, Parquet, text, etc.)
            Dataset scripts are small python scripts that define dataset builders. They define the citation, info and format of the dataset,
            contain the path or URL to the original data files and the code to load examples from the original data files.
            You can find the complete list of datasets in the Datasets [Hub](https://huggingface.co/datasets).
        2. Run the dataset script which will:
            * Download the dataset file from the original URL (see the script) if it's not already available locally or cached.
            * Process and cache the dataset in typed Arrow tables.
                Arrow tables are arbitrarily long, typed tables which can store nested objects and be mapped to numpy/pandas/python generic types.
                They can be directly accessed from disk, loaded in RAM or even streamed over the web.
        3. Return a dataset built from the requested splits in `split` (default: all).
    It also allows loading a dataset from a local directory or a dataset repository on the Hugging Face Hub without a dataset script.
    In this case, it automatically loads all the data files from the directory or the dataset repository.
    Args:
        path (`str`):
            Path or name of the dataset.
            Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
            For local datasets:
            - if `path` is a local directory (containing data files only)
              -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
              e.g. `'./path/to/directory/with/my/csv/data'`.
            - if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory)
              -> load the dataset builder from the dataset script
              e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
            For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`])
            - if `path` is a dataset repository on the HF hub (containing data files only)
              -> load a generic dataset builder (csv, text etc.) based on the content of the repository
              e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files.
            - if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
              -> load the dataset builder from the dataset script in the dataset repository
              e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
        name (`str`, *optional*):
            Defining the name of the dataset configuration.
        data_dir (`str`, *optional*):
            Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`,
            the behavior is equal to passing `os.path.join(data_dir, **)` as `data_files` to reference all the files in a directory.
        data_files (`str` or `Sequence` or `Mapping`, *optional*):
            Path(s) to source data file(s).
        split (`Split` or `str`):
            Which split of the data to load.
            If `None`, will return a `dict` with all splits (typically `datasets.Split.TRAIN` and `datasets.Split.TEST`).
            If given, will return a single Dataset.
            Splits can be combined and specified like in tensorflow-datasets.
        cache_dir (`str`, *optional*):
            Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
        features (`Features`, *optional*):
            Set the features type to use for this dataset.
        download_config ([`DownloadConfig`], *optional*):
            Specific download configuration parameters.
        download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
            Download/generate mode.
        verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
            Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
            <Added version="2.9.1"/>
        keep_in_memory (`bool`, defaults to `None`):
            Whether to copy the dataset in-memory. If `None`, the dataset
            will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
            nonzero. See more details in the [improve performance](../cache#improve-performance) section.
        save_infos (`bool`, defaults to `False`):
            Save the dataset information (checksums/size/splits/...).
        revision ([`Version`] or `str`, *optional*):
            Version of the dataset script to load.
            As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
            You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
        token (`str` or `bool`, *optional*):
            Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
            If `True`, or not specified, will get token from `"~/.huggingface"`.
        streaming (`bool`, defaults to `False`):
            If set to `True`, don't download the data files. Instead, it streams the data progressively while
            iterating on the dataset. An [`IterableDataset`] or [`IterableDatasetDict`] is returned instead in this case.
            Note that streaming works for datasets that use data formats that support being iterated over, such as txt, csv, and jsonl.
            JSON files may be downloaded completely. Streaming from remote zip or gzip files is supported, but other compressed formats
            like rar and xz are not yet supported. The tgz format doesn't allow streaming.
        num_proc (`int`, *optional*, defaults to `None`):
            Number of processes when downloading and generating the dataset locally.
            Multiprocessing is disabled by default.
            <Added version="2.7.0"/>
        storage_options (`dict`, *optional*, defaults to `None`):
            **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any.
            <Added version="2.11.0"/>
        trust_remote_code (`bool`, defaults to `False`):
            Whether or not to allow for datasets defined on the Hub using a dataset script. This option
            should only be set to `True` for repositories you trust and in which you have read the code, as it will
            execute code present on the Hub on your local machine.
            <Added version="2.16.0"/>
            <Changed version="2.20.0">
            `trust_remote_code` defaults to `False` if not specified.
            </Changed>
        **config_kwargs (additional keyword arguments):
            Keyword arguments to be passed to the `BuilderConfig`
            and used in the [`DatasetBuilder`].
    Returns:
        [`Dataset`] or [`DatasetDict`]:
        - if `split` is not `None`: the dataset requested,
        - if `split` is `None`, a [`~datasets.DatasetDict`] with each split.
        or [`IterableDataset`] or [`IterableDatasetDict`]: if `streaming=True`
        - if `split` is not `None`: the dataset requested,
        - if `split` is `None`, a [`~datasets.streaming.IterableDatasetDict`] with each split.
    Example:
    Load a dataset from the Hugging Face Hub:
    ```py
    >>> from datasets import load_dataset
    >>> ds = load_dataset('rotten_tomatoes', split='train')
    # Map data files to splits
    >>> data_files = {'train': 'train.csv', 'test': 'test.csv'}
    >>> ds = load_dataset('namespace/your_dataset_name', data_files=data_files)
    ```
    Load a local dataset:
    ```py
    # Load a CSV file
    >>> from datasets import load_dataset
    >>> ds = load_dataset('csv', data_files='path/to/local/my_dataset.csv')
    # Load a JSON file
    >>> from datasets import load_dataset
    >>> ds = load_dataset('json', data_files='path/to/local/my_dataset.json')
    # Load from a local loading script
    >>> from datasets import load_dataset
    >>> ds = load_dataset('path/to/local/loading_script/loading_script.py', split='train')
    ```
    Load an [`~datasets.IterableDataset`]:
    ```py
    >>> from datasets import load_dataset
    >>> ds = load_dataset('rotten_tomatoes', split='train', streaming=True)
    ```
    Load an image dataset with the `ImageFolder` dataset builder:
    ```py
    >>> from datasets import load_dataset
    >>> ds = load_dataset('imagefolder', data_dir='/path/to/images', split='train')
    ```
    """
    if data_files is not None and not data_files:
        raise ValueError(f"Empty 'data_files': '{data_files}'. It should be either non-empty or None (default).")
    if Path(path, config.DATASET_STATE_JSON_FILENAME).exists():
        raise ValueError(
            "You are trying to load a dataset that was saved using `save_to_disk`. "
            "Please use `load_from_disk` instead."
        )
    if streaming and num_proc is not None:
        raise NotImplementedError(
            "Loading a streaming dataset in parallel with `num_proc` is not implemented. "
            "To parallelize streaming, you can wrap the dataset with a PyTorch DataLoader using `num_workers` > 1 instead."
        )
    download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
    verification_mode = VerificationMode(
        (verification_mode or VerificationMode.BASIC_CHECKS) if not save_infos else VerificationMode.ALL_CHECKS
    )
    # Create a dataset builder
    builder_instance = load_dataset_builder(
        path=path,
        name=name,
        data_dir=data_dir,
        data_files=data_files,
        cache_dir=cache_dir,
        features=features,
        download_config=download_config,
        download_mode=download_mode,
        revision=revision,
        token=token,
        storage_options=storage_options,
        trust_remote_code=trust_remote_code,
        _require_default_config_name=name is None,
        **config_kwargs,
    )
    # Return iterable dataset in case of streaming
    if streaming:
        return builder_instance.as_streaming_dataset(split=split)
    # Download and prepare data
    builder_instance.download_and_prepare(
        download_config=download_config,
        download_mode=download_mode,
        verification_mode=verification_mode,
        num_proc=num_proc,
        storage_options=storage_options,
    )
    # Build dataset for splits
    keep_in_memory = (
        keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
    )
    ds = builder_instance.as_dataset(split=split, verification_mode=verification_mode, in_memory=keep_in_memory)
    if save_infos:
        builder_instance._save_infos()
    return ds
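
Building on the docstring examples, a small sketch of split selection and streaming follows; it assumes network access to the Hub and uses the public `rotten_tomatoes` dataset.

```python
# Minimal sketch: split slicing/combination and streaming mode.
from datasets import load_dataset

# Splits can be sliced and combined with the tensorflow-datasets syntax.
small_train = load_dataset("rotten_tomatoes", split="train[:100]")
train_plus_test = load_dataset("rotten_tomatoes", split="train+test")

# streaming=True skips download_and_prepare and returns an IterableDataset;
# examples are fetched lazily while iterating.
streamed = load_dataset("rotten_tomatoes", split="train", streaming=True)
first_example = next(iter(streamed))
```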
 
 | 
	load_dataset 
 | 
	Repo-Level 
 | 
					
	datasets 
 | 
	51 
 | 
	src/datasets/load.py 
 | 
	def load_dataset_builder(
    path: str,
    name: Optional[str] = None,
    data_dir: Optional[str] = None,
    data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
    cache_dir: Optional[str] = None,
    features: Optional[Features] = None,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[Union[DownloadMode, str]] = None,
    revision: Optional[Union[str, Version]] = None,
    token: Optional[Union[bool, str]] = None,
    storage_options: Optional[Dict] = None,
    trust_remote_code: Optional[bool] = None,
    _require_default_config_name=True,
    **config_kwargs,
) -> DatasetBuilder:
    """Load a dataset builder from the Hugging Face Hub, or a local dataset. A dataset builder can be used to inspect general information that is required to build a dataset (cache directory, config, dataset info, etc.)
    without downloading the dataset itself.
    You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`].
    A dataset is a directory that contains:
    - some data files in generic formats (JSON, CSV, Parquet, text, etc.)
    - and optionally a dataset script, if it requires some code to read the data files. This is used to load any kind of format or structure.
    Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online.
    Args:
        path (`str`):
            Path or name of the dataset.
            Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
            For local datasets:
            - if `path` is a local directory (containing data files only)
              -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
              e.g. `'./path/to/directory/with/my/csv/data'`.
            - if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory)
              -> load the dataset builder from the dataset script
              e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
            For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`])
            - if `path` is a dataset repository on the HF hub (containing data files only)
              -> load a generic dataset builder (csv, text etc.) based on the content of the repository
              e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files.
            - if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
              -> load the dataset builder from the dataset script in the dataset repository
              e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
        name (`str`, *optional*):
            Defining the name of the dataset configuration.
        data_dir (`str`, *optional*):
            Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`,
            the behavior is equal to passing `os.path.join(data_dir, **)` as `data_files` to reference all the files in a directory.
        data_files (`str` or `Sequence` or `Mapping`, *optional*):
            Path(s) to source data file(s).
        cache_dir (`str`, *optional*):
            Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
        features ([`Features`], *optional*):
            Set the features type to use for this dataset.
        download_config ([`DownloadConfig`], *optional*):
            Specific download configuration parameters.
        download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
            Download/generate mode.
        revision ([`Version`] or `str`, *optional*):
            Version of the dataset script to load.
            As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
            You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
        token (`str` or `bool`, *optional*):
            Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
            If `True`, or not specified, will get token from `"~/.huggingface"`.
        storage_options (`dict`, *optional*, defaults to `None`):
            **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any.
            <Added version="2.11.0"/>
        trust_remote_code (`bool`, defaults to `False`):
            Whether or not to allow for datasets defined on the Hub using a dataset script. This option
            should only be set to `True` for repositories you trust and in which you have read the code, as it will
            execute code present on the Hub on your local machine.
            <Added version="2.16.0"/>
            <Changed version="2.20.0">
            `trust_remote_code` defaults to `False` if not specified.
            </Changed>
        **config_kwargs (additional keyword arguments):
            Keyword arguments to be passed to the [`BuilderConfig`]
            and used in the [`DatasetBuilder`].
    Returns:
        [`DatasetBuilder`]
    Example:
    ```py
    >>> from datasets import load_dataset_builder
    >>> ds_builder = load_dataset_builder('rotten_tomatoes')
    >>> ds_builder.info.features
    {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
     'text': Value(dtype='string', id=None)}
    ```
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_load_dataset_builder.txt 
 | 
	def load_dataset_builder(
    path: str,
    name: Optional[str] = None,
    data_dir: Optional[str] = None,
    data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
    cache_dir: Optional[str] = None,
    features: Optional[Features] = None,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[Union[DownloadMode, str]] = None,
    revision: Optional[Union[str, Version]] = None,
    token: Optional[Union[bool, str]] = None,
    storage_options: Optional[Dict] = None,
    trust_remote_code: Optional[bool] = None,
    _require_default_config_name=True,
    **config_kwargs,
) -> DatasetBuilder:
    """Load a dataset builder from the Hugging Face Hub, or a local dataset. A dataset builder can be used to inspect general information that is required to build a dataset (cache directory, config, dataset info, etc.)
    without downloading the dataset itself.
    You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`].
    A dataset is a directory that contains:
    - some data files in generic formats (JSON, CSV, Parquet, text, etc.)
    - and optionally a dataset script, if it requires some code to read the data files. This is used to load any kind of format or structure.
    Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online.
    Args:
        path (`str`):
            Path or name of the dataset.
            Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
            For local datasets:
            - if `path` is a local directory (containing data files only)
              -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
              e.g. `'./path/to/directory/with/my/csv/data'`.
            - if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory)
              -> load the dataset builder from the dataset script
              e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
            For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`])
            - if `path` is a dataset repository on the HF hub (containing data files only)
              -> load a generic dataset builder (csv, text etc.) based on the content of the repository
              e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files.
            - if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
              -> load the dataset builder from the dataset script in the dataset repository
              e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
        name (`str`, *optional*):
            Defining the name of the dataset configuration.
        data_dir (`str`, *optional*):
            Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`,
            the behavior is equal to passing `os.path.join(data_dir, **)` as `data_files` to reference all the files in a directory.
        data_files (`str` or `Sequence` or `Mapping`, *optional*):
            Path(s) to source data file(s).
        cache_dir (`str`, *optional*):
            Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
        features ([`Features`], *optional*):
            Set the features type to use for this dataset.
        download_config ([`DownloadConfig`], *optional*):
            Specific download configuration parameters.
        download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
            Download/generate mode.
        revision ([`Version`] or `str`, *optional*):
            Version of the dataset script to load.
            As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
            You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
        token (`str` or `bool`, *optional*):
            Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
            If `True`, or not specified, will get token from `"~/.huggingface"`.
        storage_options (`dict`, *optional*, defaults to `None`):
            **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any.
            <Added version="2.11.0"/>
        trust_remote_code (`bool`, defaults to `False`):
            Whether or not to allow for datasets defined on the Hub using a dataset script. This option
            should only be set to `True` for repositories you trust and in which you have read the code, as it will
            execute code present on the Hub on your local machine.
            <Added version="2.16.0"/>
            <Changed version="2.20.0">
            `trust_remote_code` defaults to `False` if not specified.
            </Changed>
        **config_kwargs (additional keyword arguments):
            Keyword arguments to be passed to the [`BuilderConfig`]
            and used in the [`DatasetBuilder`].
    Returns:
        [`DatasetBuilder`]
    Example:
    ```py
    >>> from datasets import load_dataset_builder
    >>> ds_builder = load_dataset_builder('rotten_tomatoes')
    >>> ds_builder.info.features
    {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
     'text': Value(dtype='string', id=None)}
    ```
    """
    download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
    if token is not None:
        download_config = download_config.copy() if download_config else DownloadConfig()
        download_config.token = token
    if storage_options is not None:
        download_config = download_config.copy() if download_config else DownloadConfig()
        download_config.storage_options.update(storage_options)
    dataset_module = dataset_module_factory(
        path,
        revision=revision,
        download_config=download_config,
        download_mode=download_mode,
        data_dir=data_dir,
        data_files=data_files,
        cache_dir=cache_dir,
        trust_remote_code=trust_remote_code,
        _require_default_config_name=_require_default_config_name,
        _require_custom_configs=bool(config_kwargs),
    )
    # Get dataset builder class from the processing script
    builder_kwargs = dataset_module.builder_kwargs
    data_dir = builder_kwargs.pop("data_dir", data_dir)
    data_files = builder_kwargs.pop("data_files", data_files)
    config_name = builder_kwargs.pop(
        "config_name", name or dataset_module.builder_configs_parameters.default_config_name
    )
    dataset_name = builder_kwargs.pop("dataset_name", None)
    info = dataset_module.dataset_infos.get(config_name) if dataset_module.dataset_infos else None
    if (
        path in _PACKAGED_DATASETS_MODULES
        and data_files is None
        and dataset_module.builder_configs_parameters.builder_configs[0].data_files is None
    ):
        error_msg = f"Please specify the data files or data directory to load for the {path} dataset builder."
        example_extensions = [
            extension for extension in _EXTENSION_TO_MODULE if _EXTENSION_TO_MODULE[extension] == path
        ]
        if example_extensions:
            error_msg += f'\nFor example `data_files={{"train": "path/to/data/train/*.{example_extensions[0]}"}}`'
        raise ValueError(error_msg)
    builder_cls = get_dataset_builder_class(dataset_module, dataset_name=dataset_name)
    # Instantiate the dataset builder
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=cache_dir,
        dataset_name=dataset_name,
        config_name=config_name,
        data_dir=data_dir,
        data_files=data_files,
        hash=dataset_module.hash,
        info=info,
        features=features,
        token=token,
        storage_options=storage_options,
        **builder_kwargs,
        **config_kwargs,
    )
    builder_instance._use_legacy_cache_dir_if_possible(dataset_module)
    return builder_instance
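Since the builder is returned without any data being downloaded, it can be inspected first and materialized later; the last two calls below mirror what `load_dataset` does after this point. A sketch reusing the docstring's `'rotten_tomatoes'` example:

```py
from datasets import load_dataset_builder

builder = load_dataset_builder("rotten_tomatoes")
# Metadata is available before any download.
print(builder.info.features)
print(builder.info.splits)
# Materialize the dataset with the same two steps load_dataset() performs.
builder.download_and_prepare()
ds = builder.as_dataset(split="train")
```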
 
 | 
	load_dataset_builder 
 | 
	File-Level 
 | 
					
	datasets 
 | 
	54 
 | 
	src/datasets/utils/py_utils.py 
 | 
	def map_nested(
    function: Callable[[Any], Any],
    data_struct: Any,
    dict_only: bool = False,
    map_list: bool = True,
    map_tuple: bool = False,
    map_numpy: bool = False,
    num_proc: Optional[int] = None,
    parallel_min_length: int = 2,
    batched: bool = False,
    batch_size: Optional[int] = 1000,
    types: Optional[tuple] = None,
    disable_tqdm: bool = True,
    desc: Optional[str] = None,
) -> Any:
    """Apply a function recursively to each element of a nested data struct.
    Use multiprocessing if num_proc > 1 and the length of data_struct is greater than or equal to
    `parallel_min_length`.
    <Changed version="2.5.0">
    Before version 2.5.0, multiprocessing was not used if `num_proc` was greater than or equal to ``len(iterable)``.
    Now, if `num_proc` is greater than or equal to ``len(iterable)``, `num_proc` is set to ``len(iterable)`` and
    multiprocessing is used.
    </Changed>
    Args:
        function (`Callable`): Function to be applied to `data_struct`.
        data_struct (`Any`): Data structure to apply `function` to.
        dict_only (`bool`, default `False`): Whether to apply `function` recursively only to `dict` values in
            `data_struct`.
        map_list (`bool`, default `True`): Whether to also apply `function` recursively to `list` elements (besides
            `dict` values).
        map_tuple (`bool`, default `False`): Whether to also apply `function` recursively to `tuple` elements (besides
            `dict` values).
        map_numpy (`bool`, default `False`): Whether to also apply `function` recursively to `numpy.array` elements
            (besides `dict` values).
        num_proc (`int`, *optional*): Number of processes.
            The level in the data struct used for multiprocessing is the first level that has smaller sub-structs,
            starting from the root.
        parallel_min_length (`int`, default `2`): Minimum length of `data_struct` required for parallel
            processing.
            <Added version="2.5.0"/>
        batched (`bool`, defaults to `False`):
            Provide batch of items to `function`.
            <Added version="2.19.0"/>
        batch_size (`int`, *optional*, defaults to `1000`):
            Number of items per batch provided to `function` if `batched=True`.
            If `batch_size <= 0` or `batch_size == None`, provide the full iterable as a single batch to `function`.
            <Added version="2.19.0"/>
        types (`tuple`, *optional*): Additional types (besides `dict` values) to apply `function` recursively to their
            elements.
        disable_tqdm (`bool`, default `True`): Whether to disable the tqdm progressbar.
        desc (`str`, *optional*): Prefix for the tqdm progressbar.
    Returns:
        `Any`
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_map_nested.txt 
 | 
	def map_nested(
    function: Callable[[Any], Any],
    data_struct: Any,
    dict_only: bool = False,
    map_list: bool = True,
    map_tuple: bool = False,
    map_numpy: bool = False,
    num_proc: Optional[int] = None,
    parallel_min_length: int = 2,
    batched: bool = False,
    batch_size: Optional[int] = 1000,
    types: Optional[tuple] = None,
    disable_tqdm: bool = True,
    desc: Optional[str] = None,
) -> Any:
    """Apply a function recursively to each element of a nested data struct.
    Use multiprocessing if num_proc > 1 and the length of data_struct is greater than or equal to
    `parallel_min_length`.
    <Changed version="2.5.0">
    Before version 2.5.0, multiprocessing was not used if `num_proc` was greater than or equal to ``len(iterable)``.
    Now, if `num_proc` is greater than or equal to ``len(iterable)``, `num_proc` is set to ``len(iterable)`` and
    multiprocessing is used.
    </Changed>
    Args:
        function (`Callable`): Function to be applied to `data_struct`.
        data_struct (`Any`): Data structure to apply `function` to.
        dict_only (`bool`, default `False`): Whether to apply `function` recursively only to `dict` values in
            `data_struct`.
        map_list (`bool`, default `True`): Whether to also apply `function` recursively to `list` elements (besides
            `dict` values).
        map_tuple (`bool`, default `False`): Whether to also apply `function` recursively to `tuple` elements (besides
            `dict` values).
        map_numpy (`bool`, default `False`): Whether to also apply `function` recursively to `numpy.array` elements
            (besides `dict` values).
        num_proc (`int`, *optional*): Number of processes.
            The level in the data struct used for multiprocessing is the first level that has smaller sub-structs,
            starting from the root.
        parallel_min_length (`int`, default `2`): Minimum length of `data_struct` required for parallel
            processing.
            <Added version="2.5.0"/>
        batched (`bool`, defaults to `False`):
            Provide batch of items to `function`.
            <Added version="2.19.0"/>
        batch_size (`int`, *optional*, defaults to `1000`):
            Number of items per batch provided to `function` if `batched=True`.
            If `batch_size <= 0` or `batch_size == None`, provide the full iterable as a single batch to `function`.
            <Added version="2.19.0"/>
        types (`tuple`, *optional*): Additional types (besides `dict` values) to apply `function` recursively to their
            elements.
        disable_tqdm (`bool`, default `True`): Whether to disable the tqdm progressbar.
        desc (`str`, *optional*): Prefix for the tqdm progressbar.
    Returns:
        `Any`
    """
    if types is None:
        types = []
        if not dict_only:
            if map_list:
                types.append(list)
            if map_tuple:
                types.append(tuple)
            if map_numpy:
                types.append(np.ndarray)
        types = tuple(types)
    # Singleton
    if not isinstance(data_struct, dict) and not isinstance(data_struct, types):
        if batched:
            data_struct = [data_struct]
        mapped = function(data_struct)
        if batched:
            mapped = mapped[0]
        return mapped
    iterable = list(data_struct.values()) if isinstance(data_struct, dict) else data_struct
    if num_proc is None:
        num_proc = 1
    if any(isinstance(v, types) and len(v) > len(iterable) for v in iterable):
        mapped = [
            map_nested(
                function=function,
                data_struct=obj,
                num_proc=num_proc,
                parallel_min_length=parallel_min_length,
                batched=batched,
                batch_size=batch_size,
                types=types,
            )
            for obj in iterable
        ]
    elif num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length:
        if batched:
            if batch_size is None or batch_size <= 0:
                batch_size = max(len(iterable) // num_proc + int(len(iterable) % num_proc > 0), 1)
            iterable = list(iter_batched(iterable, batch_size))
        mapped = [
            _single_map_nested((function, obj, batched, batch_size, types, None, True, None))
            for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc)
        ]
        if batched:
            mapped = [mapped_item for mapped_batch in mapped for mapped_item in mapped_batch]
    else:
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                message=".* is experimental and might be subject to breaking changes in the future\\.$",
                category=UserWarning,
            )
            if batched:
                if batch_size is None or batch_size <= 0:
                    batch_size = len(iterable) // num_proc + int(len(iterable) % num_proc > 0)
                iterable = list(iter_batched(iterable, batch_size))
            mapped = parallel_map(
                function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, _single_map_nested
            )
            if batched:
                mapped = [mapped_item for mapped_batch in mapped for mapped_item in mapped_batch]
    if isinstance(data_struct, dict):
        return dict(zip(data_struct.keys(), mapped))
    else:
        if isinstance(data_struct, list):
            return mapped
        elif isinstance(data_struct, tuple):
            return tuple(mapped)
        else:
            return np.array(mapped)
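Since the docstring above carries no example, here is a minimal sketch of the default recursion behaviour (`map_nested` is an internal utility; the import path follows the module path listed for this row and may change between versions):

```py
from datasets.utils.py_utils import map_nested

data = {"train": [1, 2, 3], "test": (4, 5)}
# dict values and list elements are traversed by default; tuples are only
# traversed when map_tuple=True. Each leaf is passed to the function on its own.
squared = map_nested(lambda x: x * x, data, map_tuple=True)
# -> {"train": [1, 4, 9], "test": (16, 25)}
```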
 
 | 
	map_nested 
 | 
	Repo-Level 
 | 
					
	pylint 
 | 
	15 
 | 
	pylint/extensions/docparams.py 
 | 
	    def check_arguments_in_docstring(
        self,
        doc: Docstring,
        arguments_node: astroid.Arguments,
        warning_node: astroid.NodeNG,
        accept_no_param_doc: bool | None = None,
    ) -> None:
        """Check that all parameters are consistent with the parameters mentioned
        in the parameter documentation (e.g. the Sphinx tags 'param' and 'type').
        * Undocumented parameters except 'self' are noticed.
        * Undocumented parameter types except for 'self' and the ``*<args>``
          and ``**<kwargs>`` parameters are noticed.
        * Parameters mentioned in the parameter documentation that don't or no
          longer exist in the function parameter list are noticed.
        * If the text "For the parameters, see" or "For the other parameters,
          see" (ignoring additional white-space) is mentioned in the docstring,
          missing parameter documentation is tolerated.
        * If there's no Sphinx style, Google style or NumPy style parameter
          documentation at all, i.e. ``:param`` is never mentioned etc., the
          checker assumes that the parameters are documented in another format
          and the absence is tolerated.
        :param doc: Docstring for the function, method or class.
        :type doc: :class:`Docstring`
        :param arguments_node: Arguments node for the function, method or
            class constructor.
        :type arguments_node: :class:`astroid.scoped_nodes.Arguments`
        :param warning_node: The node to assign the warnings to
        :type warning_node: :class:`astroid.scoped_nodes.Node`
        :param accept_no_param_doc: Whether to allow no parameters to be
            documented. If None then this value is read from the configuration.
        :type accept_no_param_doc: bool or None
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_check_arguments_in_docstring.txt 
 | 
	    def check_arguments_in_docstring(
        self,
        doc: Docstring,
        arguments_node: astroid.Arguments,
        warning_node: astroid.NodeNG,
        accept_no_param_doc: bool | None = None,
    ) -> None:
        """Check that all parameters are consistent with the parameters mentioned
        in the parameter documentation (e.g. the Sphinx tags 'param' and 'type').
        * Undocumented parameters except 'self' are noticed.
        * Undocumented parameter types except for 'self' and the ``*<args>``
          and ``**<kwargs>`` parameters are noticed.
        * Parameters mentioned in the parameter documentation that don't or no
          longer exist in the function parameter list are noticed.
        * If the text "For the parameters, see" or "For the other parameters,
          see" (ignoring additional white-space) is mentioned in the docstring,
          missing parameter documentation is tolerated.
        * If there's no Sphinx style, Google style or NumPy style parameter
          documentation at all, i.e. ``:param`` is never mentioned etc., the
          checker assumes that the parameters are documented in another format
          and the absence is tolerated.
        :param doc: Docstring for the function, method or class.
        :type doc: :class:`Docstring`
        :param arguments_node: Arguments node for the function, method or
            class constructor.
        :type arguments_node: :class:`astroid.scoped_nodes.Arguments`
        :param warning_node: The node to assign the warnings to
        :type warning_node: :class:`astroid.scoped_nodes.Node`
        :param accept_no_param_doc: Whether to allow no parameters to be
            documented. If None then this value is read from the configuration.
        :type accept_no_param_doc: bool or None
        """
        # Tolerate missing param or type declarations if there is a link to
        # another method carrying the same name.
        if not doc.doc:
            return
        if accept_no_param_doc is None:
            accept_no_param_doc = self.linter.config.accept_no_param_doc
        tolerate_missing_params = doc.params_documented_elsewhere()
        # Collect the function arguments.
        expected_argument_names = {arg.name for arg in arguments_node.args}
        expected_argument_names.update(
            a.name for a in arguments_node.posonlyargs + arguments_node.kwonlyargs
        )
        not_needed_type_in_docstring = self.not_needed_param_in_docstring.copy()
        expected_but_ignored_argument_names = set()
        ignored_argument_names = self.linter.config.ignored_argument_names
        if ignored_argument_names:
            expected_but_ignored_argument_names = {
                arg
                for arg in expected_argument_names
                if ignored_argument_names.match(arg)
            }
        if arguments_node.vararg is not None:
            expected_argument_names.add(f"*{arguments_node.vararg}")
            not_needed_type_in_docstring.add(f"*{arguments_node.vararg}")
        if arguments_node.kwarg is not None:
            expected_argument_names.add(f"**{arguments_node.kwarg}")
            not_needed_type_in_docstring.add(f"**{arguments_node.kwarg}")
        params_with_doc, params_with_type = doc.match_param_docs()
        # Tolerate no parameter documentation at all.
        if not params_with_doc and not params_with_type and accept_no_param_doc:
            tolerate_missing_params = True
        # This is before the update of params_with_type because this must check only
        # the type documented in a docstring, not the one using pep484
        # See #4117 and #4593
        self._compare_ignored_args(
            params_with_type,
            "useless-type-doc",
            expected_but_ignored_argument_names,
            warning_node,
        )
        params_with_type |= utils.args_with_annotation(arguments_node)
        if not tolerate_missing_params:
            missing_param_doc = (expected_argument_names - params_with_doc) - (
                self.not_needed_param_in_docstring | expected_but_ignored_argument_names
            )
            missing_type_doc = (expected_argument_names - params_with_type) - (
                not_needed_type_in_docstring | expected_but_ignored_argument_names
            )
            if (
                missing_param_doc == expected_argument_names == missing_type_doc
                and len(expected_argument_names) != 0
            ):
                self.add_message(
                    "missing-any-param-doc",
                    args=(warning_node.name,),
                    node=warning_node,
                    confidence=HIGH,
                )
            else:
                self._compare_missing_args(
                    params_with_doc,
                    "missing-param-doc",
                    self.not_needed_param_in_docstring
                    | expected_but_ignored_argument_names,
                    expected_argument_names,
                    warning_node,
                )
                self._compare_missing_args(
                    params_with_type,
                    "missing-type-doc",
                    not_needed_type_in_docstring | expected_but_ignored_argument_names,
                    expected_argument_names,
                    warning_node,
                )
        self._compare_different_args(
            params_with_doc,
            "differing-param-doc",
            self.not_needed_param_in_docstring,
            expected_argument_names,
            warning_node,
        )
        self._compare_different_args(
            params_with_type,
            "differing-type-doc",
            not_needed_type_in_docstring,
            expected_argument_names,
            warning_node,
        )
        self._compare_ignored_args(
            params_with_doc,
            "useless-param-doc",
            expected_but_ignored_argument_names,
            warning_node,
        )
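For illustration, a hypothetical module that the checks above would flag, assuming the docparams extension is enabled (`pylint --load-plugins=pylint.extensions.docparams`); the function and parameter names are made up for this sketch:

```py
def scale(values, factor):
    """Scale every value.

    :param values: Sequence of numbers to scale.
    :param gain: Documented name that is not in the signature.
    """
    return [v * factor for v in values]

# Expected findings include (message ids as used above):
#   missing-param-doc   -> 'factor' is in the signature but undocumented
#   differing-param-doc -> 'gain' is documented but does not exist
#   missing-type-doc    -> no ':type:' entries or annotations are given
```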
 
 | 
	check_arguments_in_docstring 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	6 
 | 
	sympy/physics/continuum_mechanics/cable.py 
 | 
	    def apply_load(self, order, load):
        """
        This method adds a load to the cable.
        Parameters
        ==========
        order : Integer
            The order of the applied load.
                - For point loads, order = -1
                - For distributed load, order = 0
        load : tuple
            * For point loads, load is of the form (label, x, y, magnitude, direction), where:
            label : String or symbol
                The label of the load
            x : Sympifyable
                The x coordinate of the position of the load
            y : Sympifyable
                The y coordinate of the position of the load
            magnitude : Sympifyable
                The magnitude of the load. It must always be positive
            direction : Sympifyable
                The angle, in degrees, that the load vector makes with the horizontal
                in the counter-clockwise direction. It takes the values 0 to 360,
                inclusive.
            * For uniformly distributed load, load is of the form (label, magnitude)
            label : String or symbol
                The label of the load
            magnitude : Sympifyable
                The magnitude of the load. It must always be positive
        Examples
        ========
        For a point load of magnitude 12 units inclined at 30 degrees with the horizontal:
        >>> from sympy.physics.continuum_mechanics.cable import Cable
        >>> c = Cable(('A', 0, 10), ('B', 10, 10))
        >>> c.apply_load(-1, ('Z', 5, 5, 12, 30))
        >>> c.loads
        {'distributed': {}, 'point_load': {'Z': [12, 30]}}
        >>> c.loads_position
        {'Z': [5, 5]}
        For a uniformly distributed load of magnitude 9 units:
        >>> from sympy.physics.continuum_mechanics.cable import Cable
        >>> c = Cable(('A', 0, 10), ('B', 10, 10))
        >>> c.apply_load(0, ('X', 9))
        >>> c.loads
        {'distributed': {'X': 9}, 'point_load': {}}
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_Cable.apply_load.txt 
 | 
	    def apply_load(self, order, load):
        """
        This method adds a load to the cable.
        Parameters
        ==========
        order : Integer
            The order of the applied load.
                - For point loads, order = -1
                - For distributed load, order = 0
        load : tuple
            * For point loads, load is of the form (label, x, y, magnitude, direction), where:
            label : String or symbol
                The label of the load
            x : Sympifyable
                The x coordinate of the position of the load
            y : Sympifyable
                The y coordinate of the position of the load
            magnitude : Sympifyable
                The magnitude of the load. It must always be positive
            direction : Sympifyable
                The angle, in degrees, that the load vector makes with the horizontal
                in the counter-clockwise direction. It takes the values 0 to 360,
                inclusive.
            * For uniformly distributed load, load is of the form (label, magnitude)
            label : String or symbol
                The label of the load
            magnitude : Sympifyable
                The magnitude of the load. It must always be positive
        Examples
        ========
        For a point load of magnitude 12 units inclined at 30 degrees with the horizontal:
        >>> from sympy.physics.continuum_mechanics.cable import Cable
        >>> c = Cable(('A', 0, 10), ('B', 10, 10))
        >>> c.apply_load(-1, ('Z', 5, 5, 12, 30))
        >>> c.loads
        {'distributed': {}, 'point_load': {'Z': [12, 30]}}
        >>> c.loads_position
        {'Z': [5, 5]}
        For a uniformly distributed load of magnitude 9 units:
        >>> from sympy.physics.continuum_mechanics.cable import Cable
        >>> c = Cable(('A', 0, 10), ('B', 10, 10))
        >>> c.apply_load(0, ('X', 9))
        >>> c.loads
        {'distributed': {'X': 9}, 'point_load': {}}
        """
        if order == -1:
            if len(self._loads["distributed"]) != 0:
                raise ValueError("Distributed load already exists")
            label = load[0]
            if label in self._loads["point_load"]:
                raise ValueError("Label already exists")
            x = sympify(load[1])
            y = sympify(load[2])
            if x > self._right_support[0] or x < self._left_support[0]:
                raise ValueError("The load should be positioned between the supports")
            magnitude = sympify(load[3])
            direction = sympify(load[4])
            self._loads["point_load"][label] = [magnitude, direction]
            self._loads_position[label] = [x, y]
        elif order == 0:
            if len(self._loads_position) != 0:
                raise ValueError("Point load(s) already exist")
            label = load[0]
            if label in self._loads["distributed"]:
                raise ValueError("Label already exists")
            magnitude = sympify(load[1])
            self._loads["distributed"][label] = magnitude
        else:
            raise ValueError("Order should be either -1 or 0")
 
 | 
	Cable.apply_load 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	9 
 | 
	sympy/stats/stochastic_process_types.py 
 | 
	    def communication_classes(self) -> tList[tTuple[tList[Basic], Boolean, Integer]]:
        """
        Returns the list of communication classes that partition
        the states of the Markov chain.
        A communication class is defined to be a set of states
        such that every state in that set is reachable from
        every other state in that set. Due to its properties
        this forms a class in the mathematical sense.
        Communication classes are also known as recurrence
        classes.
        Returns
        =======
        classes
            The ``classes`` are a list of tuples. Each
            tuple represents a single communication class
            with its properties. The first element in the
            tuple is the list of states in the class, the
            second element is whether the class is recurrent
            and the third element is the period of the
            communication class.
        Examples
        ========
        >>> from sympy.stats import DiscreteMarkovChain
        >>> from sympy import Matrix
        >>> T = Matrix([[0, 1, 0],
        ...             [1, 0, 0],
        ...             [1, 0, 0]])
        >>> X = DiscreteMarkovChain('X', [1, 2, 3], T)
        >>> classes = X.communication_classes()
        >>> for states, is_recurrent, period in classes:
        ...     states, is_recurrent, period
        ([1, 2], True, 2)
        ([3], False, 1)
        From this we can see that states ``1`` and ``2``
        communicate, are recurrent and have a period
        of 2. We can also see state ``3`` is transient
        with a period of 1.
        Notes
        =====
        The algorithm used is of order ``O(n**2)`` where
        ``n`` is the number of states in the Markov chain.
        It uses Tarjan's algorithm to find the classes
        themselves and then it uses a breadth-first search
        algorithm to find each class's periodicity.
        Most of the algorithm's components approach ``O(n)``
        as the matrix becomes more and more sparse.
        References
        ==========
        .. [1] https://web.archive.org/web/20220207032113/https://www.columbia.edu/~ww2040/4701Sum07/4701-06-Notes-MCII.pdf
        .. [2] https://cecas.clemson.edu/~shierd/Shier/markov.pdf
        .. [3] https://www.proquest.com/openview/4adc6a51d8371be5b0e4c7dff287fc70/1?pq-origsite=gscholar&cbl=2026366&diss=y
        .. [4] https://www.mathworks.com/help/econ/dtmc.classify.html
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_DiscreteMarkovChain.communication_classes.txt 
 | 
	    def communication_classes(self) -> tList[tTuple[tList[Basic], Boolean, Integer]]:
        """
        Returns the list of communication classes that partition
        the states of the Markov chain.
        A communication class is defined to be a set of states
        such that every state in that set is reachable from
        every other state in that set. Due to its properties
        this forms a class in the mathematical sense.
        Communication classes are also known as recurrence
        classes.
        Returns
        =======
        classes
            The ``classes`` are a list of tuples. Each
            tuple represents a single communication class
            with its properties. The first element in the
            tuple is the list of states in the class, the
            second element is whether the class is recurrent
            and the third element is the period of the
            communication class.
        Examples
        ========
        >>> from sympy.stats import DiscreteMarkovChain
        >>> from sympy import Matrix
        >>> T = Matrix([[0, 1, 0],
        ...             [1, 0, 0],
        ...             [1, 0, 0]])
        >>> X = DiscreteMarkovChain('X', [1, 2, 3], T)
        >>> classes = X.communication_classes()
        >>> for states, is_recurrent, period in classes:
        ...     states, is_recurrent, period
        ([1, 2], True, 2)
        ([3], False, 1)
        From this we can see that states ``1`` and ``2``
        communicate, are recurrent and have a period
        of 2. We can also see state ``3`` is transient
        with a period of 1.
        Notes
        =====
        The algorithm used is of order ``O(n**2)`` where
        ``n`` is the number of states in the Markov chain.
        It uses Tarjan's algorithm to find the classes
        themselves and then it uses a breadth-first search
        algorithm to find each class's periodicity.
        Most of the algorithm's components approach ``O(n)``
        as the matrix becomes more and more sparse.
        References
        ==========
        .. [1] https://web.archive.org/web/20220207032113/https://www.columbia.edu/~ww2040/4701Sum07/4701-06-Notes-MCII.pdf
        .. [2] https://cecas.clemson.edu/~shierd/Shier/markov.pdf
        .. [3] https://www.proquest.com/openview/4adc6a51d8371be5b0e4c7dff287fc70/1?pq-origsite=gscholar&cbl=2026366&diss=y
        .. [4] https://www.mathworks.com/help/econ/dtmc.classify.html
        """
        n = self.number_of_states
        T = self.transition_probabilities
        if isinstance(T, MatrixSymbol):
            raise NotImplementedError("Cannot perform the operation with a symbolic matrix.")
        # begin Tarjan's algorithm
        V = Range(n)
        # don't use state names. Rather use state
        # indexes since we use them for matrix
        # indexing here and later onward
        E = [(i, j) for i in V for j in V if T[i, j] != 0]
        classes = strongly_connected_components((V, E))
        # end Tarjan's algorithm
        recurrence = []
        periods = []
        for class_ in classes:
            # begin recurrent check (similar to self._check_trans_probs())
            submatrix = T[class_, class_]  # get the submatrix with those states
            is_recurrent = S.true
            rows = submatrix.tolist()
            for row in rows:
                if (sum(row) - 1) != 0:
                    is_recurrent = S.false
                    break
            recurrence.append(is_recurrent)
            # end recurrent check
            # begin breadth-first search
            non_tree_edge_values: tSet[int] = set()
            visited = {class_[0]}
            newly_visited = {class_[0]}
            level = {class_[0]: 0}
            current_level = 0
            done = False  # imitate a do-while loop
            while not done:  # runs at most len(class_) times
                done = len(visited) == len(class_)
                current_level += 1
                # this loop and the while loop above run a combined len(class_) number of times.
                # so this triple nested loop runs through each of the n states once.
                for i in newly_visited:
                    # the loop below runs len(class_) number of times
                    # complexity is around about O(n * avg(len(class_)))
                    newly_visited = {j for j in class_ if T[i, j] != 0}
                    new_tree_edges = newly_visited.difference(visited)
                    for j in new_tree_edges:
                        level[j] = current_level
                    new_non_tree_edges = newly_visited.intersection(visited)
                    new_non_tree_edge_values = {level[i]-level[j]+1 for j in new_non_tree_edges}
                    non_tree_edge_values = non_tree_edge_values.union(new_non_tree_edge_values)
                    visited = visited.union(new_tree_edges)
            # igcd needs at least 2 arguments
            positive_ntev = {val_e for val_e in non_tree_edge_values if val_e > 0}
            if len(positive_ntev) == 0:
                periods.append(len(class_))
            elif len(positive_ntev) == 1:
                periods.append(positive_ntev.pop())
            else:
                periods.append(igcd(*positive_ntev))
            # end breadth-first search
        # convert back to the user's state names
        classes = [[_sympify(self._state_index[i]) for i in class_] for class_ in classes]
        return list(zip(classes, recurrence, map(Integer,periods)))
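A small complementary check on a two-state periodic chain (the ordering of states inside a class depends on the strongly-connected-components routine, so the sketch compares contents rather than order):

```py
from sympy import Matrix
from sympy.stats import DiscreteMarkovChain

T = Matrix([[0, 1],
            [1, 0]])
Y = DiscreteMarkovChain('Y', [0, 1], T)
states, is_recurrent, period = Y.communication_classes()[0]
assert sorted(states) == [0, 1]   # the two states communicate
assert bool(is_recurrent) is True
assert period == 2                # the chain alternates between the states
```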
 
 | 
	DiscreteMarkovChain.communication_classes 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	15 
 | 
	sympy/physics/control/lti.py 
 | 
	    def doit(self, cancel=False, expand=False, **hints):
        """
        Returns the resultant transfer function or state space obtained by
        feedback connection of transfer functions or state space objects.
        Examples
        ========
        >>> from sympy.abc import s
        >>> from sympy import Matrix
        >>> from sympy.physics.control.lti import TransferFunction, Feedback, StateSpace
        >>> plant = TransferFunction(3*s**2 + 7*s - 3, s**2 - 4*s + 2, s)
        >>> controller = TransferFunction(5*s - 10, s + 7, s)
        >>> F1 = Feedback(plant, controller)
        >>> F1.doit()
        TransferFunction((s + 7)*(s**2 - 4*s + 2)*(3*s**2 + 7*s - 3), ((s + 7)*(s**2 - 4*s + 2) + (5*s - 10)*(3*s**2 + 7*s - 3))*(s**2 - 4*s + 2), s)
        >>> G = TransferFunction(2*s**2 + 5*s + 1, s**2 + 2*s + 3, s)
        >>> F2 = Feedback(G, TransferFunction(1, 1, s))
        >>> F2.doit()
        TransferFunction((s**2 + 2*s + 3)*(2*s**2 + 5*s + 1), (s**2 + 2*s + 3)*(3*s**2 + 7*s + 4), s)
        Use kwarg ``expand=True`` to expand the resultant transfer function.
        Use ``cancel=True`` to cancel out the common terms in numerator and
        denominator.
        >>> F2.doit(cancel=True, expand=True)
        TransferFunction(2*s**2 + 5*s + 1, 3*s**2 + 7*s + 4, s)
        >>> F2.doit(expand=True)
        TransferFunction(2*s**4 + 9*s**3 + 17*s**2 + 17*s + 3, 3*s**4 + 13*s**3 + 27*s**2 + 29*s + 12, s)
        If the connection contains any ``StateSpace`` object then ``doit()``
        will return the equivalent ``StateSpace`` object.
        >>> A1 = Matrix([[-1.5, -2], [1, 0]])
        >>> B1 = Matrix([0.5, 0])
        >>> C1 = Matrix([[0, 1]])
        >>> A2 = Matrix([[0, 1], [-5, -2]])
        >>> B2 = Matrix([0, 3])
        >>> C2 = Matrix([[0, 1]])
        >>> ss1 = StateSpace(A1, B1, C1)
        >>> ss2 = StateSpace(A2, B2, C2)
        >>> F3 = Feedback(ss1, ss2)
        >>> F3.doit()
        StateSpace(Matrix([
        [-1.5, -2,  0, -0.5],
        [   1,  0,  0,    0],
        [   0,  0,  0,    1],
        [   0,  3, -5,   -2]]), Matrix([
        [0.5],
        [  0],
        [  0],
        [  0]]), Matrix([[0, 1, 0, 0]]), Matrix([[0]]))
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_Feedback.doit.txt 
 | 
	    def doit(self, cancel=False, expand=False, **hints):
        """
        Returns the resultant transfer function or state space obtained by
        feedback connection of transfer functions or state space objects.
        Examples
        ========
        >>> from sympy.abc import s
        >>> from sympy import Matrix
        >>> from sympy.physics.control.lti import TransferFunction, Feedback, StateSpace
        >>> plant = TransferFunction(3*s**2 + 7*s - 3, s**2 - 4*s + 2, s)
        >>> controller = TransferFunction(5*s - 10, s + 7, s)
        >>> F1 = Feedback(plant, controller)
        >>> F1.doit()
        TransferFunction((s + 7)*(s**2 - 4*s + 2)*(3*s**2 + 7*s - 3), ((s + 7)*(s**2 - 4*s + 2) + (5*s - 10)*(3*s**2 + 7*s - 3))*(s**2 - 4*s + 2), s)
        >>> G = TransferFunction(2*s**2 + 5*s + 1, s**2 + 2*s + 3, s)
        >>> F2 = Feedback(G, TransferFunction(1, 1, s))
        >>> F2.doit()
        TransferFunction((s**2 + 2*s + 3)*(2*s**2 + 5*s + 1), (s**2 + 2*s + 3)*(3*s**2 + 7*s + 4), s)
        Use kwarg ``expand=True`` to expand the resultant transfer function.
        Use ``cancel=True`` to cancel out the common terms in numerator and
        denominator.
        >>> F2.doit(cancel=True, expand=True)
        TransferFunction(2*s**2 + 5*s + 1, 3*s**2 + 7*s + 4, s)
        >>> F2.doit(expand=True)
        TransferFunction(2*s**4 + 9*s**3 + 17*s**2 + 17*s + 3, 3*s**4 + 13*s**3 + 27*s**2 + 29*s + 12, s)
        If the connection contains any ``StateSpace`` object then ``doit()``
        will return the equivalent ``StateSpace`` object.
        >>> A1 = Matrix([[-1.5, -2], [1, 0]])
        >>> B1 = Matrix([0.5, 0])
        >>> C1 = Matrix([[0, 1]])
        >>> A2 = Matrix([[0, 1], [-5, -2]])
        >>> B2 = Matrix([0, 3])
        >>> C2 = Matrix([[0, 1]])
        >>> ss1 = StateSpace(A1, B1, C1)
        >>> ss2 = StateSpace(A2, B2, C2)
        >>> F3 = Feedback(ss1, ss2)
        >>> F3.doit()
        StateSpace(Matrix([
        [-1.5, -2,  0, -0.5],
        [   1,  0,  0,    0],
        [   0,  0,  0,    1],
        [   0,  3, -5,   -2]]), Matrix([
        [0.5],
        [  0],
        [  0],
        [  0]]), Matrix([[0, 1, 0, 0]]), Matrix([[0]]))
        """
        if self.is_StateSpace_object:
            sys1_ss = self.sys1.doit().rewrite(StateSpace)
            sys2_ss = self.sys2.doit().rewrite(StateSpace)
            A1, B1, C1, D1 = sys1_ss.A, sys1_ss.B, sys1_ss.C, sys1_ss.D
            A2, B2, C2, D2 = sys2_ss.A, sys2_ss.B, sys2_ss.C, sys2_ss.D
            # Create identity matrices
            I_inputs = eye(self.num_inputs)
            I_outputs = eye(self.num_outputs)
            # Compute F and its inverse
            F = I_inputs - self.sign * D2 * D1
            E = F.inv()
            # Compute intermediate matrices
            E_D2 = E * D2
            E_C2 = E * C2
            T1 = I_outputs + self.sign * D1 * E_D2
            T2 = I_inputs + self.sign * E_D2 * D1
            A = Matrix.vstack(
                Matrix.hstack(A1 + self.sign * B1 * E_D2 * C1, self.sign * B1 * E_C2),
                Matrix.hstack(B2 * T1 * C1, A2 + self.sign * B2 * D1 * E_C2)
            )
            B = Matrix.vstack(B1 * T2, B2 * D1 * T2)
            C = Matrix.hstack(T1 * C1, self.sign * D1 * E_C2)
            D = D1 * T2
            return StateSpace(A, B, C, D)
        arg_list = list(self.sys1.args) if isinstance(self.sys1, Series) else [self.sys1]
        # F_n and F_d are resultant TFs of num and den of Feedback.
        F_n, unit = self.sys1.doit(), TransferFunction(1, 1, self.sys1.var)
        if self.sign == -1:
            F_d = Parallel(unit, Series(self.sys2, *arg_list)).doit()
        else:
            F_d = Parallel(unit, -Series(self.sys2, *arg_list)).doit()
        _resultant_tf = TransferFunction(F_n.num * F_d.den, F_n.den * F_d.num, F_n.var)
        if cancel:
            _resultant_tf = _resultant_tf.simplify()
        if expand:
            _resultant_tf = _resultant_tf.expand()
        return _resultant_tf
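In the transfer-function branch above, with `sys1` playing the role of the forward path $G(s)$ and `sys2` the feedback path $H(s)$, the resultant is the standard closed-loop expression

$$T(s) = \frac{G(s)}{1 + G(s)\,H(s)} \quad\text{for the default negative feedback (sign == -1)}, \qquad T(s) = \frac{G(s)}{1 - G(s)\,H(s)} \quad\text{for positive feedback.}$$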
 
 | 
	Feedback.doit 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	17 
 | 
	sympy/integrals/integrals.py 
 | 
	    def as_sum(self, n=None, method="midpoint", evaluate=True):
        """
        Approximates a definite integral by a sum.
        Parameters
        ==========
        n :
            The number of subintervals to use, optional.
        method :
            One of: 'left', 'right', 'midpoint', 'trapezoid'.
        evaluate : bool
            If False, returns an unevaluated Sum expression. The default
            is True, evaluate the sum.
        Notes
        =====
        These methods of approximate integration are described in [1].
        Examples
        ========
        >>> from sympy import Integral, sin, sqrt
        >>> from sympy.abc import x, n
        >>> e = Integral(sin(x), (x, 3, 7))
        >>> e
        Integral(sin(x), (x, 3, 7))
        For demonstration purposes, this interval will only be split into 2
        regions, bounded by [3, 5] and [5, 7].
        The left-hand rule uses function evaluations at the left of each
        interval:
        >>> e.as_sum(2, 'left')
        2*sin(5) + 2*sin(3)
        The midpoint rule uses evaluations at the center of each interval:
        >>> e.as_sum(2, 'midpoint')
        2*sin(4) + 2*sin(6)
        The right-hand rule uses function evaluations at the right of each
        interval:
        >>> e.as_sum(2, 'right')
        2*sin(5) + 2*sin(7)
        The trapezoid rule uses function evaluations on both sides of the
        intervals. This is equivalent to taking the average of the left and
        right hand rule results:
        >>> s = e.as_sum(2, 'trapezoid')
        >>> s
        2*sin(5) + sin(3) + sin(7)
        >>> (e.as_sum(2, 'left') + e.as_sum(2, 'right'))/2 == s
        True
        Here, the discontinuity at x = 0 can be avoided by using the
        midpoint or right-hand method:
        >>> e = Integral(1/sqrt(x), (x, 0, 1))
        >>> e.as_sum(5).n(4)
        1.730
        >>> e.as_sum(10).n(4)
        1.809
        >>> e.doit().n(4)  # the actual value is 2
        2.000
        The left- or trapezoid method will encounter the discontinuity and
        return infinity:
        >>> e.as_sum(5, 'left')
        zoo
        The number of intervals can be symbolic. If omitted, a dummy symbol
        will be used for it.
        >>> e = Integral(x**2, (x, 0, 2))
        >>> e.as_sum(n, 'right').expand()
        8/3 + 4/n + 4/(3*n**2)
        This shows that the midpoint rule is more accurate, as its error
        term decays as the square of n:
        >>> e.as_sum(method='midpoint').expand()
        8/3 - 2/(3*_n**2)
        A symbolic sum is returned with evaluate=False:
        >>> e.as_sum(n, 'midpoint', evaluate=False)
        2*Sum((2*_k/n - 1/n)**2, (_k, 1, n))/n
        See Also
        ========
        Integral.doit : Perform the integration using any hints
        References
        ==========
        .. [1] https://en.wikipedia.org/wiki/Riemann_sum#Riemann_summation_methods
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_Integral.as_sum.txt 
 | 
	    def as_sum(self, n=None, method="midpoint", evaluate=True):
        """
        Approximates a definite integral by a sum.
        Parameters
        ==========
        n :
            The number of subintervals to use, optional.
        method :
            One of: 'left', 'right', 'midpoint', 'trapezoid'.
        evaluate : bool
            If False, returns an unevaluated Sum expression. The default
            is True, evaluate the sum.
        Notes
        =====
        These methods of approximate integration are described in [1].
        Examples
        ========
        >>> from sympy import Integral, sin, sqrt
        >>> from sympy.abc import x, n
        >>> e = Integral(sin(x), (x, 3, 7))
        >>> e
        Integral(sin(x), (x, 3, 7))
        For demonstration purposes, this interval will only be split into 2
        regions, bounded by [3, 5] and [5, 7].
        The left-hand rule uses function evaluations at the left of each
        interval:
        >>> e.as_sum(2, 'left')
        2*sin(5) + 2*sin(3)
        The midpoint rule uses evaluations at the center of each interval:
        >>> e.as_sum(2, 'midpoint')
        2*sin(4) + 2*sin(6)
        The right-hand rule uses function evaluations at the right of each
        interval:
        >>> e.as_sum(2, 'right')
        2*sin(5) + 2*sin(7)
        The trapezoid rule uses function evaluations on both sides of the
        intervals. This is equivalent to taking the average of the left and
        right hand rule results:
        >>> s = e.as_sum(2, 'trapezoid')
        >>> s
        2*sin(5) + sin(3) + sin(7)
        >>> (e.as_sum(2, 'left') + e.as_sum(2, 'right'))/2 == s
        True
        Here, the discontinuity at x = 0 can be avoided by using the
        midpoint or right-hand method:
        >>> e = Integral(1/sqrt(x), (x, 0, 1))
        >>> e.as_sum(5).n(4)
        1.730
        >>> e.as_sum(10).n(4)
        1.809
        >>> e.doit().n(4)  # the actual value is 2
        2.000
        The left- or trapezoid method will encounter the discontinuity and
        return infinity:
        >>> e.as_sum(5, 'left')
        zoo
        The number of intervals can be symbolic. If omitted, a dummy symbol
        will be used for it.
        >>> e = Integral(x**2, (x, 0, 2))
        >>> e.as_sum(n, 'right').expand()
        8/3 + 4/n + 4/(3*n**2)
        This shows that the midpoint rule is more accurate, as its error
        term decays as the square of n:
        >>> e.as_sum(method='midpoint').expand()
        8/3 - 2/(3*_n**2)
        A symbolic sum is returned with evaluate=False:
        >>> e.as_sum(n, 'midpoint', evaluate=False)
        2*Sum((2*_k/n - 1/n)**2, (_k, 1, n))/n
        See Also
        ========
        Integral.doit : Perform the integration using any hints
        References
        ==========
        .. [1] https://en.wikipedia.org/wiki/Riemann_sum#Riemann_summation_methods
        """
        from sympy.concrete.summations import Sum
        limits = self.limits
        if len(limits) > 1:
            raise NotImplementedError(
                "Multidimensional midpoint rule not implemented yet")
        else:
            limit = limits[0]
            if (len(limit) != 3 or limit[1].is_finite is False or
                limit[2].is_finite is False):
                raise ValueError("Expecting a definite integral over "
                                  "a finite interval.")
        if n is None:
            n = Dummy('n', integer=True, positive=True)
        else:
            n = sympify(n)
        if (n.is_positive is False or n.is_integer is False or
            n.is_finite is False):
            raise ValueError("n must be a positive integer, got %s" % n)
        x, a, b = limit
        dx = (b - a)/n
        k = Dummy('k', integer=True, positive=True)
        f = self.function
        if method == "left":
            result = dx*Sum(f.subs(x, a + (k-1)*dx), (k, 1, n))
        elif method == "right":
            result = dx*Sum(f.subs(x, a + k*dx), (k, 1, n))
        elif method == "midpoint":
            result = dx*Sum(f.subs(x, a + k*dx - dx/2), (k, 1, n))
        elif method == "trapezoid":
            result = dx*((f.subs(x, a) + f.subs(x, b))/2 +
                Sum(f.subs(x, a + k*dx), (k, 1, n - 1)))
        else:
            raise ValueError("Unknown method %s" % method)
        return result.doit() if evaluate else result
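A minimal usage sketch (an editorial addition, not part of the dataset row): it compares the four quadrature methods documented above against the exact value of a simple integral, assuming only the as_sum signature shown in the docstring.

from sympy import Integral, Rational
from sympy.abc import x

e = Integral(x**2, (x, 0, 2))
exact = Rational(8, 3)
for method in ("left", "right", "midpoint", "trapezoid"):
    approx = e.as_sum(10, method)                      # evaluated Rational sum
    print(method, approx, (approx - exact).evalf(3))   # error of each rule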
 
 | 
	Integral.as_sum 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	20 
 | 
	sympy/combinatorics/perm_groups.py 
 | 
	    def schreier_sims_incremental(self, base=None, gens=None, slp_dict=False):
        """Extend a sequence of points and generating set to a base and strong
        generating set.
        Parameters
        ==========
        base
            The sequence of points to be extended to a base. Optional
            parameter with default value ``[]``.
        gens
            The generating set to be extended to a strong generating set
            relative to the base obtained. Optional parameter with default
            value ``self.generators``.
        slp_dict
            If `True`, return a dictionary `{g: gens}` for each strong
            generator `g` where `gens` is a list of strong generators
            coming before `g` in `strong_gens`, such that the product
            of the elements of `gens` is equal to `g`.
        Returns
        =======
        (base, strong_gens)
            ``base`` is the base obtained, and ``strong_gens`` is the strong
            generating set relative to it. The original parameters ``base``,
            ``gens`` remain unchanged.
        Examples
        ========
        >>> from sympy.combinatorics.named_groups import AlternatingGroup
        >>> from sympy.combinatorics.testutil import _verify_bsgs
        >>> A = AlternatingGroup(7)
        >>> base = [2, 3]
        >>> seq = [2, 3]
        >>> base, strong_gens = A.schreier_sims_incremental(base=seq)
        >>> _verify_bsgs(A, base, strong_gens)
        True
        >>> base[:2]
        [2, 3]
        Notes
        =====
        This version of the Schreier-Sims algorithm runs in polynomial time.
        There are certain assumptions in the implementation - if the trivial
        group is provided, ``base`` and ``gens`` are returned immediately,
        as any sequence of points is a base for the trivial group. If the
        identity is present in the generators ``gens``, it is removed as
        it is a redundant generator.
        The implementation is described in [1], pp. 90-93.
        See Also
        ========
        schreier_sims, schreier_sims_random
        """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_PermutationGroup.schreier_sims_incremental.txt 
 | 
	    def schreier_sims_incremental(self, base=None, gens=None, slp_dict=False):
        """Extend a sequence of points and generating set to a base and strong
        generating set.
        Parameters
        ==========
        base
            The sequence of points to be extended to a base. Optional
            parameter with default value ``[]``.
        gens
            The generating set to be extended to a strong generating set
            relative to the base obtained. Optional parameter with default
            value ``self.generators``.
        slp_dict
            If `True`, return a dictionary `{g: gens}` for each strong
            generator `g` where `gens` is a list of strong generators
            coming before `g` in `strong_gens`, such that the product
            of the elements of `gens` is equal to `g`.
        Returns
        =======
        (base, strong_gens)
            ``base`` is the base obtained, and ``strong_gens`` is the strong
            generating set relative to it. The original parameters ``base``,
            ``gens`` remain unchanged.
        Examples
        ========
        >>> from sympy.combinatorics.named_groups import AlternatingGroup
        >>> from sympy.combinatorics.testutil import _verify_bsgs
        >>> A = AlternatingGroup(7)
        >>> base = [2, 3]
        >>> seq = [2, 3]
        >>> base, strong_gens = A.schreier_sims_incremental(base=seq)
        >>> _verify_bsgs(A, base, strong_gens)
        True
        >>> base[:2]
        [2, 3]
        Notes
        =====
        This version of the Schreier-Sims algorithm runs in polynomial time.
        There are certain assumptions in the implementation - if the trivial
        group is provided, ``base`` and ``gens`` are returned immediately,
        as any sequence of points is a base for the trivial group. If the
        identity is present in the generators ``gens``, it is removed as
        it is a redundant generator.
        The implementation is described in [1], pp. 90-93.
        See Also
        ========
        schreier_sims, schreier_sims_random
        """
        if base is None:
            base = []
        if gens is None:
            gens = self.generators[:]
        degree = self.degree
        id_af = list(range(degree))
        # handle the trivial group
        if len(gens) == 1 and gens[0].is_Identity:
            if slp_dict:
                return base, gens, {gens[0]: [gens[0]]}
            return base, gens
        # prevent side effects
        _base, _gens = base[:], gens[:]
        # remove the identity as a generator
        _gens = [x for x in _gens if not x.is_Identity]
        # make sure no generator fixes all base points
        for gen in _gens:
            if all(x == gen._array_form[x] for x in _base):
                for new in id_af:
                    if gen._array_form[new] != new:
                        break
                else:
                    assert None  # can this ever happen?
                _base.append(new)
        # distribute generators according to basic stabilizers
        strong_gens_distr = _distribute_gens_by_base(_base, _gens)
        strong_gens_slp = []
        # initialize the basic stabilizers, basic orbits and basic transversals
        orbs = {}
        transversals = {}
        slps = {}
        base_len = len(_base)
        for i in range(base_len):
            transversals[i], slps[i] = _orbit_transversal(degree, strong_gens_distr[i],
                _base[i], pairs=True, af=True, slp=True)
            transversals[i] = dict(transversals[i])
            orbs[i] = list(transversals[i].keys())
        # main loop: amend the stabilizer chain until we have generators
        # for all stabilizers
        i = base_len - 1
        while i >= 0:
            # this flag is used to continue with the main loop from inside
            # a nested loop
            continue_i = False
            # test the generators for being a strong generating set
            db = {}
            for beta, u_beta in list(transversals[i].items()):
                for j, gen in enumerate(strong_gens_distr[i]):
                    gb = gen._array_form[beta]
                    u1 = transversals[i][gb]
                    g1 = _af_rmul(gen._array_form, u_beta)
                    slp = [(i, g) for g in slps[i][beta]]
                    slp = [(i, j)] + slp
                    if g1 != u1:
                        # test if the schreier generator is in the i+1-th
                        # would-be basic stabilizer
                        y = True
                        try:
                            u1_inv = db[gb]
                        except KeyError:
                            u1_inv = db[gb] = _af_invert(u1)
                        schreier_gen = _af_rmul(u1_inv, g1)
                        u1_inv_slp = slps[i][gb][:]
                        u1_inv_slp.reverse()
                        u1_inv_slp = [(i, (g,)) for g in u1_inv_slp]
                        slp = u1_inv_slp + slp
                        h, j, slp = _strip_af(schreier_gen, _base, orbs, transversals, i, slp=slp, slps=slps)
                        if j <= base_len:
                            # new strong generator h at level j
                            y = False
                        elif h:
                            # h fixes all base points
                            y = False
                            moved = 0
                            while h[moved] == moved:
                                moved += 1
                            _base.append(moved)
                            base_len += 1
                            strong_gens_distr.append([])
                        if y is False:
                            # if a new strong generator is found, update the
                            # data structures and start over
                            h = _af_new(h)
                            strong_gens_slp.append((h, slp))
                            for l in range(i + 1, j):
                                strong_gens_distr[l].append(h)
                                transversals[l], slps[l] =\
                                _orbit_transversal(degree, strong_gens_distr[l],
                                    _base[l], pairs=True, af=True, slp=True)
                                transversals[l] = dict(transversals[l])
                                orbs[l] = list(transversals[l].keys())
                            i = j - 1
                            # continue main loop using the flag
                            continue_i = True
                    if continue_i is True:
                        break
                if continue_i is True:
                    break
            if continue_i is True:
                continue
            i -= 1
        strong_gens = _gens[:]
        if slp_dict:
            # create the list of the strong generators strong_gens and
            # rewrite the indices of strong_gens_slp in terms of the
            # elements of strong_gens
            for k, slp in strong_gens_slp:
                strong_gens.append(k)
                for i in range(len(slp)):
                    s = slp[i]
                    if isinstance(s[1], tuple):
                        slp[i] = strong_gens_distr[s[0]][s[1][0]]**-1
                    else:
                        slp[i] = strong_gens_distr[s[0]][s[1]]
            strong_gens_slp = dict(strong_gens_slp)
            # add the original generators
            for g in _gens:
                strong_gens_slp[g] = [g]
            return (_base, strong_gens, strong_gens_slp)
        strong_gens.extend([k for k, _ in strong_gens_slp])
        return _base, strong_gens
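A minimal usage sketch (editorial addition) showing the documented return convention on a small group; SymmetricGroup and _verify_bsgs are standard sympy helpers, and the call is assumed to behave as described in the docstring above.

from sympy.combinatorics.named_groups import SymmetricGroup
from sympy.combinatorics.testutil import _verify_bsgs

G = SymmetricGroup(4)
base, strong_gens = G.schreier_sims_incremental()
print(base)                                   # base points found
print(_verify_bsgs(G, base, strong_gens))     # True if (base, strong_gens) is a valid BSGS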
 
 | 
	PermutationGroup.schreier_sims_incremental 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	30 
 | 
	sympy/calculus/finite_diff.py 
 | 
	def _as_finite_diff(derivative, points=1, x0=None, wrt=None):
    """
    Returns an approximation of a derivative of a function in
    the form of a finite difference formula. The expression is a
    weighted sum of the function at a number of discrete values of
    (one of) the independent variable(s).
    Parameters
    ==========
    derivative: a Derivative instance
    points: sequence or coefficient, optional
        If sequence: discrete values (length >= order+1) of the
        independent variable used for generating the finite
        difference weights.
        If it is a coefficient, it will be used as the step-size
        for generating an equidistant sequence of length order+1
        centered around ``x0``. default: 1 (step-size 1)
    x0: number or Symbol, optional
        the value of the independent variable (``wrt``) at which the
        derivative is to be approximated. Default: same as ``wrt``.
    wrt: Symbol, optional
        "with respect to" the variable for which the (partial)
        derivative is to be approximated for. If not provided it
        is required that the Derivative is ordinary. Default: ``None``.
    Examples
    ========
    >>> from sympy import symbols, Function, exp, sqrt, Symbol
    >>> from sympy.calculus.finite_diff import _as_finite_diff
    >>> x, h = symbols('x h')
    >>> f = Function('f')
    >>> _as_finite_diff(f(x).diff(x))
    -f(x - 1/2) + f(x + 1/2)
    The default step size and number of points are 1 and ``order + 1``
    respectively. We can change the step size by passing a symbol
    as a parameter:
    >>> _as_finite_diff(f(x).diff(x), h)
    -f(-h/2 + x)/h + f(h/2 + x)/h
    We can also specify the discretized values to be used in a sequence:
    >>> _as_finite_diff(f(x).diff(x), [x, x+h, x+2*h])
    -3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h)
    The algorithm is not restricted to use equidistant spacing, nor
    do we need to make the approximation around ``x0``, but we can get
    an expression estimating the derivative at an offset:
    >>> e, sq2 = exp(1), sqrt(2)
    >>> xl = [x-h, x+h, x+e*h]
    >>> _as_finite_diff(f(x).diff(x, 1), xl, x+h*sq2)
    2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/((-h + E*h)*(h + E*h)) +
    (-(-sqrt(2)*h + h)/(2*h) - (-sqrt(2)*h + E*h)/(2*h))*f(-h + x)/(h + E*h) +
    (-(h + sqrt(2)*h)/(2*h) + (-sqrt(2)*h + E*h)/(2*h))*f(h + x)/(-h + E*h)
    Partial derivatives are also supported:
    >>> y = Symbol('y')
    >>> d2fdxdy=f(x,y).diff(x,y)
    >>> _as_finite_diff(d2fdxdy, wrt=x)
    -Derivative(f(x - 1/2, y), y) + Derivative(f(x + 1/2, y), y)
    See also
    ========
    sympy.calculus.finite_diff.apply_finite_diff
    sympy.calculus.finite_diff.finite_diff_weights
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__as_finite_diff.txt 
 | 
	def _as_finite_diff(derivative, points=1, x0=None, wrt=None):
    """
    Returns an approximation of a derivative of a function in
    the form of a finite difference formula. The expression is a
    weighted sum of the function at a number of discrete values of
    (one of) the independent variable(s).
    Parameters
    ==========
    derivative: a Derivative instance
    points: sequence or coefficient, optional
        If sequence: discrete values (length >= order+1) of the
        independent variable used for generating the finite
        difference weights.
        If it is a coefficient, it will be used as the step-size
        for generating an equidistant sequence of length order+1
        centered around ``x0``. default: 1 (step-size 1)
    x0: number or Symbol, optional
        the value of the independent variable (``wrt``) at which the
        derivative is to be approximated. Default: same as ``wrt``.
    wrt: Symbol, optional
        "with respect to" the variable for which the (partial)
        derivative is to be approximated for. If not provided it
        is required that the Derivative is ordinary. Default: ``None``.
    Examples
    ========
    >>> from sympy import symbols, Function, exp, sqrt, Symbol
    >>> from sympy.calculus.finite_diff import _as_finite_diff
    >>> x, h = symbols('x h')
    >>> f = Function('f')
    >>> _as_finite_diff(f(x).diff(x))
    -f(x - 1/2) + f(x + 1/2)
    The default step size and number of points are 1 and ``order + 1``
    respectively. We can change the step size by passing a symbol
    as a parameter:
    >>> _as_finite_diff(f(x).diff(x), h)
    -f(-h/2 + x)/h + f(h/2 + x)/h
    We can also specify the discretized values to be used in a sequence:
    >>> _as_finite_diff(f(x).diff(x), [x, x+h, x+2*h])
    -3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h)
    The algorithm is not restricted to use equidistant spacing, nor
    do we need to make the approximation around ``x0``, but we can get
    an expression estimating the derivative at an offset:
    >>> e, sq2 = exp(1), sqrt(2)
    >>> xl = [x-h, x+h, x+e*h]
    >>> _as_finite_diff(f(x).diff(x, 1), xl, x+h*sq2)
    2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/((-h + E*h)*(h + E*h)) +
    (-(-sqrt(2)*h + h)/(2*h) - (-sqrt(2)*h + E*h)/(2*h))*f(-h + x)/(h + E*h) +
    (-(h + sqrt(2)*h)/(2*h) + (-sqrt(2)*h + E*h)/(2*h))*f(h + x)/(-h + E*h)
    Partial derivatives are also supported:
    >>> y = Symbol('y')
    >>> d2fdxdy=f(x,y).diff(x,y)
    >>> _as_finite_diff(d2fdxdy, wrt=x)
    -Derivative(f(x - 1/2, y), y) + Derivative(f(x + 1/2, y), y)
    See also
    ========
    sympy.calculus.finite_diff.apply_finite_diff
    sympy.calculus.finite_diff.finite_diff_weights
    """
    if derivative.is_Derivative:
        pass
    elif derivative.is_Atom:
        return derivative
    else:
        return derivative.fromiter(
            [_as_finite_diff(ar, points, x0, wrt) for ar
             in derivative.args], **derivative.assumptions0)
    if wrt is None:
        old = None
        for v in derivative.variables:
            if old is v:
                continue
            derivative = _as_finite_diff(derivative, points, x0, v)
            old = v
        return derivative
    order = derivative.variables.count(wrt)
    if x0 is None:
        x0 = wrt
    if not iterable(points):
        if getattr(points, 'is_Function', False) and wrt in points.args:
            points = points.subs(wrt, x0)
        # points is simply the step-size, let's make it a
        # equidistant sequence centered around x0
        if order % 2 == 0:
            # even order => odd number of points, grid point included
            points = [x0 + points*i for i
                      in range(-order//2, order//2 + 1)]
        else:
            # odd order => even number of points, half-way wrt grid point
            points = [x0 + points*S(i)/2 for i
                      in range(-order, order + 1, 2)]
    others = [wrt, 0]
    for v in set(derivative.variables):
        if v == wrt:
            continue
        others += [v, derivative.variables.count(v)]
    if len(points) < order+1:
        raise ValueError("Too few points for order %d" % order)
    return apply_finite_diff(order, points, [
        Derivative(derivative.expr.subs({wrt: x}), *others) for
        x in points], x0)
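A minimal usage sketch (editorial addition): a second derivative approximated with step h, assuming the behavior documented above; the result is the standard central-difference formula (f(x - h) - 2*f(x) + f(x + h))/h**2.

from sympy import symbols, Function
from sympy.calculus.finite_diff import _as_finite_diff

x, h = symbols('x h')
f = Function('f')
print(_as_finite_diff(f(x).diff(x, 2), h))    # central difference for f''(x)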
 
 | 
	_as_finite_diff 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	31 
 | 
	sympy/core/exprtools.py 
 | 
	def _mask_nc(eq, name=None):
    """
    Return ``eq`` with non-commutative objects replaced with Dummy
    symbols. A dictionary that can be used to restore the original
    values is returned: if it is None, the expression is noncommutative
    and cannot be made commutative. The third value returned is a list
    of any non-commutative symbols that appear in the returned equation.
    Explanation
    ===========
    All non-commutative objects other than Symbols are replaced with
    a non-commutative Symbol. Identical objects will be identified
    by identical symbols.
    If there is only 1 non-commutative object in an expression it will
    be replaced with a commutative symbol. Otherwise, the non-commutative
    entities are retained and the calling routine should handle
    replacements in this case since some care must be taken to keep
    track of the ordering of symbols when they occur within Muls.
    Parameters
    ==========
    name : str
        ``name``, if given, is the name that will be used with numbered Dummy
        variables that will replace the non-commutative objects and is mainly
        used for doctesting purposes.
    Examples
    ========
    >>> from sympy.physics.secondquant import Commutator, NO, F, Fd
    >>> from sympy import symbols
    >>> from sympy.core.exprtools import _mask_nc
    >>> from sympy.abc import x, y
    >>> A, B, C = symbols('A,B,C', commutative=False)
    One nc-symbol:
    >>> _mask_nc(A**2 - x**2, 'd')
    (_d0**2 - x**2, {_d0: A}, [])
    Multiple nc-symbols:
    >>> _mask_nc(A**2 - B**2, 'd')
    (A**2 - B**2, {}, [A, B])
    An nc-object with nc-symbols but no others outside of it:
    >>> _mask_nc(1 + x*Commutator(A, B), 'd')
    (_d0*x + 1, {_d0: Commutator(A, B)}, [])
    >>> _mask_nc(NO(Fd(x)*F(y)), 'd')
    (_d0, {_d0: NO(CreateFermion(x)*AnnihilateFermion(y))}, [])
    Multiple nc-objects:
    >>> eq = x*Commutator(A, B) + x*Commutator(A, C)*Commutator(A, B)
    >>> _mask_nc(eq, 'd')
    (x*_d0 + x*_d1*_d0, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1])
    Multiple nc-objects and nc-symbols:
    >>> eq = A*Commutator(A, B) + B*Commutator(A, C)
    >>> _mask_nc(eq, 'd')
    (A*_d0 + B*_d1, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1, A, B])
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__mask_nc.txt 
 | 
	def _mask_nc(eq, name=None):
    """
    Return ``eq`` with non-commutative objects replaced with Dummy
    symbols. A dictionary that can be used to restore the original
    values is returned: if it is None, the expression is noncommutative
    and cannot be made commutative. The third value returned is a list
    of any non-commutative symbols that appear in the returned equation.
    Explanation
    ===========
    All non-commutative objects other than Symbols are replaced with
    a non-commutative Symbol. Identical objects will be identified
    by identical symbols.
    If there is only 1 non-commutative object in an expression it will
    be replaced with a commutative symbol. Otherwise, the non-commutative
    entities are retained and the calling routine should handle
    replacements in this case since some care must be taken to keep
    track of the ordering of symbols when they occur within Muls.
    Parameters
    ==========
    name : str
        ``name``, if given, is the name that will be used with numbered Dummy
        variables that will replace the non-commutative objects and is mainly
        used for doctesting purposes.
    Examples
    ========
    >>> from sympy.physics.secondquant import Commutator, NO, F, Fd
    >>> from sympy import symbols
    >>> from sympy.core.exprtools import _mask_nc
    >>> from sympy.abc import x, y
    >>> A, B, C = symbols('A,B,C', commutative=False)
    One nc-symbol:
    >>> _mask_nc(A**2 - x**2, 'd')
    (_d0**2 - x**2, {_d0: A}, [])
    Multiple nc-symbols:
    >>> _mask_nc(A**2 - B**2, 'd')
    (A**2 - B**2, {}, [A, B])
    An nc-object with nc-symbols but no others outside of it:
    >>> _mask_nc(1 + x*Commutator(A, B), 'd')
    (_d0*x + 1, {_d0: Commutator(A, B)}, [])
    >>> _mask_nc(NO(Fd(x)*F(y)), 'd')
    (_d0, {_d0: NO(CreateFermion(x)*AnnihilateFermion(y))}, [])
    Multiple nc-objects:
    >>> eq = x*Commutator(A, B) + x*Commutator(A, C)*Commutator(A, B)
    >>> _mask_nc(eq, 'd')
    (x*_d0 + x*_d1*_d0, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1])
    Multiple nc-objects and nc-symbols:
    >>> eq = A*Commutator(A, B) + B*Commutator(A, C)
    >>> _mask_nc(eq, 'd')
    (A*_d0 + B*_d1, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1, A, B])
    """
    name = name or 'mask'
    # Make Dummy() append sequential numbers to the name
    def numbered_names():
        i = 0
        while True:
            yield name + str(i)
            i += 1
    names = numbered_names()
    def Dummy(*args, **kwargs):
        from .symbol import Dummy
        return Dummy(next(names), *args, **kwargs)
    expr = eq
    if expr.is_commutative:
        return eq, {}, []
    # identify nc-objects; symbols and other
    rep = []
    nc_obj = set()
    nc_syms = set()
    pot = preorder_traversal(expr, keys=default_sort_key)
    for i, a in enumerate(pot):
        if any(a == r[0] for r in rep):
            pot.skip()
        elif not a.is_commutative:
            if a.is_symbol:
                nc_syms.add(a)
                pot.skip()
            elif not (a.is_Add or a.is_Mul or a.is_Pow):
                nc_obj.add(a)
                pot.skip()
    # If there is only one nc symbol or object, it can be factored regularly
    # but polys is going to complain, so replace it with a Dummy.
    if len(nc_obj) == 1 and not nc_syms:
        rep.append((nc_obj.pop(), Dummy()))
    elif len(nc_syms) == 1 and not nc_obj:
        rep.append((nc_syms.pop(), Dummy()))
    # Any remaining nc-objects will be replaced with an nc-Dummy and
    # identified as an nc-Symbol to watch out for
    nc_obj = sorted(nc_obj, key=default_sort_key)
    for n in nc_obj:
        nc = Dummy(commutative=False)
        rep.append((n, nc))
        nc_syms.add(nc)
    expr = expr.subs(rep)
    nc_syms = list(nc_syms)
    nc_syms.sort(key=default_sort_key)
    return expr, {v: k for k, v in rep}, nc_syms
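A minimal usage sketch (editorial addition): masking a single commutator so that factor() can treat the expression as commutative, then restoring the original object with the returned replacement dictionary; the behavior is assumed to match the docstring above.

from sympy import symbols, factor
from sympy.core.exprtools import _mask_nc
from sympy.physics.secondquant import Commutator

x = symbols('x')
A, B = symbols('A B', commutative=False)

expr = x**2*Commutator(A, B) + x*Commutator(A, B)
masked, rep, nc_syms = _mask_nc(expr, 'd')    # commutator replaced by a commutative Dummy
print(factor(masked).subs(rep))               # x*(x + 1)*Commutator(A, B)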
 
 | 
	_mask_nc 
 | 
	Repo-Level 
 | 
					
	sympy 
 | 
	33 
 | 
	sympy/solvers/simplex.py 
 | 
	def _simplex(A, B, C, D=None, dual=False):
    """Return ``(o, x, y)`` obtained from the two-phase simplex method
    using Bland's rule: ``o`` is the minimum value of primal,
    ``Cx - D``, under constraints ``Ax <= B`` (with ``x >= 0``) and
    the maximum of the dual, ``y^{T}B - D``, under constraints
    ``A^{T}*y >= C^{T}`` (with ``y >= 0``). To compute the dual of
    the system, pass `dual=True` and ``(o, y, x)`` will be returned.
    Note: the nonnegative constraints for ``x`` and ``y`` supersede
    any values of ``A`` and ``B`` that are inconsistent with that
    assumption, so if a constraint of ``x >= -1`` is represented
    in ``A`` and ``B``, no value will be obtained that is negative; if
    a constraint of ``x <= -1`` is represented, an error will be
    raised since no solution is possible.
    This routine relies on the ability to determine whether an
    expression is 0 or not. This is guaranteed if the input contains
    only Float or Rational entries. It will raise a TypeError if
    a relationship does not evaluate to True or False.
    Examples
    ========
    >>> from sympy.solvers.simplex import _simplex
    >>> from sympy import Matrix
    Consider the simple minimization of ``f = x + y + 1`` under the
    constraint that ``y + 2*x >= 4``. This is the "standard form" of
    a minimization.
    In the nonnegative quadrant, this inequality describes an area above
    a triangle with vertices at (0, 4), (0, 0) and (2, 0). The minimum
    of ``f`` occurs at (2, 0). Define A, B, C, D for the standard
    minimization:
    >>> A = Matrix([[2, 1]])
    >>> B = Matrix([4])
    >>> C = Matrix([[1, 1]])
    >>> D = Matrix([-1])
    Confirm that this is the system of interest:
    >>> from sympy.abc import x, y
    >>> X = Matrix([x, y])
    >>> (C*X - D)[0]
    x + y + 1
    >>> [i >= j for i, j in zip(A*X, B)]
    [2*x + y >= 4]
    Since `_simplex` will do a minimization for constraints given as
    ``A*x <= B``, the signs of ``A`` and ``B`` must be negated since
    they currently correspond to a greater-than inequality:
    >>> _simplex(-A, -B, C, D)
    (3, [2, 0], [1/2])
    The dual of minimizing ``f`` is maximizing ``F = c*y - d`` for
    ``a*y <= b`` where ``a``, ``b``, ``c``, ``d`` are derived from the
    transpose of the matrix representation of the standard minimization:
    >>> tr = lambda a, b, c, d: [i.T for i in (a, c, b, d)]
    >>> a, b, c, d = tr(A, B, C, D)
    This time ``a*x <= b`` is the expected inequality for the `_simplex`
    method, but to maximize ``F``, the sign of ``c`` and ``d`` must be
    changed (so that minimizing the negative will give the negative of
    the maximum of ``F``):
    >>> _simplex(a, b, -c, -d)
    (-3, [1/2], [2, 0])
    The negative of ``F`` and the min of ``f`` are the same. The dual
    point `[1/2]` is the value of ``y`` that minimized ``F = c*y - d``
    under constraints ``a*x <= b``:
    >>> y = Matrix(['y'])
    >>> (c*y - d)[0]
    4*y + 1
    >>> [i <= j for i, j in zip(a*y,b)]
    [2*y <= 1, y <= 1]
    In this 1-dimensional dual system, the more restrictive constraint is
    the first which limits ``y`` between 0 and 1/2 and the maximum of
    ``F`` is attained at the nonzero value, hence is ``4*(1/2) + 1 = 3``.
    In this case the values for ``x`` and ``y`` were the same when the
    dual representation was solved. This is not always the case (though
    the value of the function will be the same).
    >>> l = [[1, 1], [-1, 1], [0, 1], [-1, 0]], [5, 1, 2, -1], [[1, 1]], [-1]
    >>> A, B, C, D = [Matrix(i) for i in l]
    >>> _simplex(A, B, -C, -D)
    (-6, [3, 2], [1, 0, 0, 0])
    >>> _simplex(A, B, -C, -D, dual=True)  # [5, 0] != [3, 2]
    (-6, [1, 0, 0, 0], [5, 0])
    In both cases the function has the same value:
    >>> Matrix(C)*Matrix([3, 2]) == Matrix(C)*Matrix([5, 0])
    True
    See Also
    ========
    _lp - poses min/max problem in form compatible with _simplex
    lpmin - minimization which calls _lp
    lpmax - maximization which calls _lp
    References
    ==========
    .. [1] Thomas S. Ferguson, LINEAR PROGRAMMING: A Concise Introduction
           web.tecnico.ulisboa.pt/mcasquilho/acad/or/ftp/FergusonUCLA_lp.pdf
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__simplex.txt 
 | 
	def _simplex(A, B, C, D=None, dual=False):
    """Return ``(o, x, y)`` obtained from the two-phase simplex method
    using Bland's rule: ``o`` is the minimum value of primal,
    ``Cx - D``, under constraints ``Ax <= B`` (with ``x >= 0``) and
    the maximum of the dual, ``y^{T}B - D``, under constraints
    ``A^{T}*y >= C^{T}`` (with ``y >= 0``). To compute the dual of
    the system, pass `dual=True` and ``(o, y, x)`` will be returned.
    Note: the nonnegative constraints for ``x`` and ``y`` supersede
    any values of ``A`` and ``B`` that are inconsistent with that
    assumption, so if a constraint of ``x >= -1`` is represented
    in ``A`` and ``B``, no value will be obtained that is negative; if
    a constraint of ``x <= -1`` is represented, an error will be
    raised since no solution is possible.
    This routine relies on the ability to determine whether an
    expression is 0 or not. This is guaranteed if the input contains
    only Float or Rational entries. It will raise a TypeError if
    a relationship does not evaluate to True or False.
    Examples
    ========
    >>> from sympy.solvers.simplex import _simplex
    >>> from sympy import Matrix
    Consider the simple minimization of ``f = x + y + 1`` under the
    constraint that ``y + 2*x >= 4``. This is the "standard form" of
    a minimization.
    In the nonnegative quadrant, this inequality describes an area above
    a triangle with vertices at (0, 4), (0, 0) and (2, 0). The minimum
    of ``f`` occurs at (2, 0). Define A, B, C, D for the standard
    minimization:
    >>> A = Matrix([[2, 1]])
    >>> B = Matrix([4])
    >>> C = Matrix([[1, 1]])
    >>> D = Matrix([-1])
    Confirm that this is the system of interest:
    >>> from sympy.abc import x, y
    >>> X = Matrix([x, y])
    >>> (C*X - D)[0]
    x + y + 1
    >>> [i >= j for i, j in zip(A*X, B)]
    [2*x + y >= 4]
    Since `_simplex` will do a minimization for constraints given as
    ``A*x <= B``, the signs of ``A`` and ``B`` must be negated since
    they currently correspond to a greater-than inequality:
    >>> _simplex(-A, -B, C, D)
    (3, [2, 0], [1/2])
    The dual of minimizing ``f`` is maximizing ``F = c*y - d`` for
    ``a*y <= b`` where ``a``, ``b``, ``c``, ``d`` are derived from the
    transpose of the matrix representation of the standard minimization:
    >>> tr = lambda a, b, c, d: [i.T for i in (a, c, b, d)]
    >>> a, b, c, d = tr(A, B, C, D)
    This time ``a*x <= b`` is the expected inequality for the `_simplex`
    method, but to maximize ``F``, the sign of ``c`` and ``d`` must be
    changed (so that minimizing the negative will give the negative of
    the maximum of ``F``):
    >>> _simplex(a, b, -c, -d)
    (-3, [1/2], [2, 0])
    The negative of ``F`` and the min of ``f`` are the same. The dual
    point `[1/2]` is the value of ``y`` that minimized ``F = c*y - d``
    under constraints ``a*x <= b``:
    >>> y = Matrix(['y'])
    >>> (c*y - d)[0]
    4*y + 1
    >>> [i <= j for i, j in zip(a*y,b)]
    [2*y <= 1, y <= 1]
    In this 1-dimensional dual system, the more restrictive constraint is
    the first which limits ``y`` between 0 and 1/2 and the maximum of
    ``F`` is attained at the nonzero value, hence is ``4*(1/2) + 1 = 3``.
    In this case the values for ``x`` and ``y`` were the same when the
    dual representation was solved. This is not always the case (though
    the value of the function will be the same).
    >>> l = [[1, 1], [-1, 1], [0, 1], [-1, 0]], [5, 1, 2, -1], [[1, 1]], [-1]
    >>> A, B, C, D = [Matrix(i) for i in l]
    >>> _simplex(A, B, -C, -D)
    (-6, [3, 2], [1, 0, 0, 0])
    >>> _simplex(A, B, -C, -D, dual=True)  # [5, 0] != [3, 2]
    (-6, [1, 0, 0, 0], [5, 0])
    In both cases the function has the same value:
    >>> Matrix(C)*Matrix([3, 2]) == Matrix(C)*Matrix([5, 0])
    True
    See Also
    ========
    _lp - poses min/max problem in form compatible with _simplex
    lpmin - minimization which calls _lp
    lpmax - maximization which calls _lp
    References
    ==========
    .. [1] Thomas S. Ferguson, LINEAR PROGRAMMING: A Concise Introduction
           web.tecnico.ulisboa.pt/mcasquilho/acad/or/ftp/FergusonUCLA_lp.pdf
    """
    A, B, C, D = [Matrix(i) for i in (A, B, C, D or [0])]
    if dual:
        _o, d, p = _simplex(-A.T, C.T, B.T, -D)
        return -_o, d, p
    if A and B:
        M = Matrix([[A, B], [C, D]])
    else:
        if A or B:
            raise ValueError("must give A and B")
        # no constraints given
        M = Matrix([[C, D]])
    n = M.cols - 1
    m = M.rows - 1
    if not all(i.is_Float or i.is_Rational for i in M):
        # with literal Float and Rational we are guaranteed the
        # ability of determining whether an expression is 0 or not
        raise TypeError(filldedent("""
            Only rationals and floats are allowed.
            """
            )
        )
    # x variables have priority over y variables during Bland's rule
    # since False < True
    X = [(False, j) for j in range(n)]
    Y = [(True, i) for i in range(m)]
    # Phase 1: find a feasible solution or determine none exist
    ## keep track of last pivot row and column
    last = None
    while True:
        B = M[:-1, -1]
        A = M[:-1, :-1]
        if all(B[i] >= 0 for i in range(B.rows)):
            # We have found a feasible solution
            break
        # Find k: first row with a negative rightmost entry
        for k in range(B.rows):
            if B[k] < 0:
                break  # use current value of k below
        else:
            pass  # error will raise below
        # Choose pivot column, c
        piv_cols = [_ for _ in range(A.cols) if A[k, _] < 0]
        if not piv_cols:
            raise InfeasibleLPError(filldedent("""
                The constraint set is empty!"""))
        _, c = min((X[i], i) for i in piv_cols) # Bland's rule
        # Choose pivot row, r
        piv_rows = [_ for _ in range(A.rows) if A[_, c] > 0 and B[_] > 0]
        piv_rows.append(k)
        r = _choose_pivot_row(A, B, piv_rows, c, Y)
        # check for oscillation
        if (r, c) == last:
            # Not sure what to do here; it looks like there will be
            # oscillations; see o1 test added at this commit to
            # see a system with no solution and the o2 for one
            # with a solution. In the case of o2, the solution
            # from linprog is the same as the one from lpmin, but
            # the matrices created in the lpmin case are different
            # than those created without replacements in linprog and
            # the matrices in the linprog case lead to oscillations.
            # If the matrices could be re-written in linprog like
            # lpmin does, this behavior could be avoided and then
            # perhaps the oscillating case would only occur when
            # there is no solution. For now, the output is checked
            # before exit if oscillations were detected and an
            # error is raised there if the solution was invalid.
            #
            # cf section 6 of Ferguson for a non-cycling modification
            last = True
            break
        last = r, c
        M = _pivot(M, r, c)
        X[c], Y[r] = Y[r], X[c]
    # Phase 2: from a feasible solution, pivot to optimal
    while True:
        B = M[:-1, -1]
        A = M[:-1, :-1]
        C = M[-1, :-1]
        # Choose a pivot column, c
        piv_cols = [_ for _ in range(n) if C[_] < 0]
        if not piv_cols:
            break
        _, c = min((X[i], i) for i in piv_cols)  # Bland's rule
        # Choose a pivot row, r
        piv_rows = [_ for _ in range(m) if A[_, c] > 0]
        if not piv_rows:
            raise UnboundedLPError(filldedent("""
                Objective function can assume
                arbitrarily large values!"""))
        r = _choose_pivot_row(A, B, piv_rows, c, Y)
        M = _pivot(M, r, c)
        X[c], Y[r] = Y[r], X[c]
    argmax = [None] * n
    argmin_dual = [None] * m
    for i, (v, n) in enumerate(X):
        if v == False:
            argmax[n] = 0
        else:
            argmin_dual[n] = M[-1, i]
    for i, (v, n) in enumerate(Y):
        if v == True:
            argmin_dual[n] = 0
        else:
            argmax[n] = M[i, -1]
    if last and not all(i >= 0 for i in argmax + argmin_dual):
        raise InfeasibleLPError(filldedent("""
            Oscillating system led to invalid solution.
            If you believe there was a valid solution, please
            report this as a bug."""))
    return -M[-1, -1], argmax, argmin_dual
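A minimal usage sketch (editorial addition) reproducing the first doctest scenario above: minimize x + y + 1 subject to 2*x + y >= 4, with the >= constraint negated into the A*x <= B form the routine expects.

from sympy import Matrix
from sympy.solvers.simplex import _simplex

A = Matrix([[2, 1]])    # 2*x + y >= 4, so pass -A, -B below
B = Matrix([4])
C = Matrix([[1, 1]])    # objective C*x - D = x + y + 1
D = Matrix([-1])
print(_simplex(-A, -B, C, D))   # (3, [2, 0], [1/2]), as in the docstring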
 
 | 
	_simplex 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	34 
 | 
	sympy/solvers/inequalities.py 
 | 
	def _solve_inequality(ie, s, linear=False):
    """Return the inequality with s isolated on the left, if possible.
    If the relationship is non-linear, a solution involving And or Or
    may be returned. False or True are returned if the relationship
    is never True or always True, respectively.
    If `linear` is True (default is False) an `s`-dependent expression
    will be isolated on the left, if possible, but it will not be
    solved for `s` unless the expression is linear
    in `s`. Furthermore, only "safe" operations which do not change the
    sense of the relationship are applied: no division by an unsigned
    value is attempted unless the relationship involves Eq or Ne and
    no division by a value not known to be nonzero is ever attempted.
    Examples
    ========
    >>> from sympy import Eq, Symbol
    >>> from sympy.solvers.inequalities import _solve_inequality as f
    >>> from sympy.abc import x, y
    For linear expressions, the symbol can be isolated:
    >>> f(x - 2 < 0, x)
    x < 2
    >>> f(-x - 6 < x, x)
    x > -3
    Sometimes nonlinear relationships will be False
    >>> f(x**2 + 4 < 0, x)
    False
    Or they may involve more than one region of values:
    >>> f(x**2 - 4 < 0, x)
    (-2 < x) & (x < 2)
    To restrict the solution to a relational, set linear=True
    and only the x-dependent portion will be isolated on the left:
    >>> f(x**2 - 4 < 0, x, linear=True)
    x**2 < 4
    Division of only nonzero quantities is allowed, so x cannot
    be isolated by dividing by y:
    >>> y.is_nonzero is None  # it is unknown whether it is 0 or not
    True
    >>> f(x*y < 1, x)
    x*y < 1
    And while an equality (or inequality) still holds after dividing by a
    non-zero quantity
    >>> nz = Symbol('nz', nonzero=True)
    >>> f(Eq(x*nz, 1), x)
    Eq(x, 1/nz)
    the sign must be known for other inequalities involving > or <:
    >>> f(x*nz <= 1, x)
    nz*x <= 1
    >>> p = Symbol('p', positive=True)
    >>> f(x*p <= 1, x)
    x <= 1/p
    When there are denominators in the original expression that
    are removed by expansion, conditions for them will be returned
    as part of the result:
    >>> f(x < x*(2/x - 1), x)
    (x < 1) & Ne(x, 0)
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests__solve_inequality.txt 
 | 
	def _solve_inequality(ie, s, linear=False):
    """Return the inequality with s isolated on the left, if possible.
    If the relationship is non-linear, a solution involving And or Or
    may be returned. False or True are returned if the relationship
    is never True or always True, respectively.
    If `linear` is True (default is False) an `s`-dependent expression
    will be isolated on the left, if possible, but it will not be
    solved for `s` unless the expression is linear
    in `s`. Furthermore, only "safe" operations which do not change the
    sense of the relationship are applied: no division by an unsigned
    value is attempted unless the relationship involves Eq or Ne and
    no division by a value not known to be nonzero is ever attempted.
    Examples
    ========
    >>> from sympy import Eq, Symbol
    >>> from sympy.solvers.inequalities import _solve_inequality as f
    >>> from sympy.abc import x, y
    For linear expressions, the symbol can be isolated:
    >>> f(x - 2 < 0, x)
    x < 2
    >>> f(-x - 6 < x, x)
    x > -3
    Sometimes nonlinear relationships will be False
    >>> f(x**2 + 4 < 0, x)
    False
    Or they may involve more than one region of values:
    >>> f(x**2 - 4 < 0, x)
    (-2 < x) & (x < 2)
    To restrict the solution to a relational, set linear=True
    and only the x-dependent portion will be isolated on the left:
    >>> f(x**2 - 4 < 0, x, linear=True)
    x**2 < 4
    Division of only nonzero quantities is allowed, so x cannot
    be isolated by dividing by y:
    >>> y.is_nonzero is None  # it is unknown whether it is 0 or not
    True
    >>> f(x*y < 1, x)
    x*y < 1
    And while an equality (or inequality) still holds after dividing by a
    non-zero quantity
    >>> nz = Symbol('nz', nonzero=True)
    >>> f(Eq(x*nz, 1), x)
    Eq(x, 1/nz)
    the sign must be known for other inequalities involving > or <:
    >>> f(x*nz <= 1, x)
    nz*x <= 1
    >>> p = Symbol('p', positive=True)
    >>> f(x*p <= 1, x)
    x <= 1/p
    When there are denominators in the original expression that
    are removed by expansion, conditions for them will be returned
    as part of the result:
    >>> f(x < x*(2/x - 1), x)
    (x < 1) & Ne(x, 0)
    """
    from sympy.solvers.solvers import denoms
    if s not in ie.free_symbols:
        return ie
    if ie.rhs == s:
        ie = ie.reversed
    if ie.lhs == s and s not in ie.rhs.free_symbols:
        return ie
    def classify(ie, s, i):
        # return True or False if ie evaluates when substituting s with
        # i else None (if unevaluated) or NaN (when there is an error
        # in evaluating)
        try:
            v = ie.subs(s, i)
            if v is S.NaN:
                return v
            elif v not in (True, False):
                return
            return v
        except TypeError:
            return S.NaN
    rv = None
    oo = S.Infinity
    expr = ie.lhs - ie.rhs
    try:
        p = Poly(expr, s)
        if p.degree() == 0:
            rv = ie.func(p.as_expr(), 0)
        elif not linear and p.degree() > 1:
            # handle in except clause
            raise NotImplementedError
    except (PolynomialError, NotImplementedError):
        if not linear:
            try:
                rv = reduce_rational_inequalities([[ie]], s)
            except PolynomialError:
                rv = solve_univariate_inequality(ie, s)
            # remove restrictions wrt +/-oo that may have been
            # applied when using sets to simplify the relationship
            okoo = classify(ie, s, oo)
            if okoo is S.true and classify(rv, s, oo) is S.false:
                rv = rv.subs(s < oo, True)
            oknoo = classify(ie, s, -oo)
            if (oknoo is S.true and
                    classify(rv, s, -oo) is S.false):
                rv = rv.subs(-oo < s, True)
                rv = rv.subs(s > -oo, True)
            if rv is S.true:
                rv = (s <= oo) if okoo is S.true else (s < oo)
                if oknoo is not S.true:
                    rv = And(-oo < s, rv)
        else:
            p = Poly(expr)
    conds = []
    if rv is None:
        e = p.as_expr()  # this is in expanded form
        # Do a safe inversion of e, moving non-s terms
        # to the rhs and dividing by a nonzero factor if
        # the relational is Eq/Ne; for other relationals
        # the sign must also be positive or negative
        rhs = 0
        b, ax = e.as_independent(s, as_Add=True)
        e -= b
        rhs -= b
        ef = factor_terms(e)
        a, e = ef.as_independent(s, as_Add=False)
        if (a.is_zero != False or  # don't divide by potential 0
                a.is_negative ==
                a.is_positive is None and  # if sign is not known then
                ie.rel_op not in ('!=', '==')): # reject if not Eq/Ne
            e = ef
            a = S.One
        rhs /= a
        if a.is_positive:
            rv = ie.func(e, rhs)
        else:
            rv = ie.reversed.func(e, rhs)
        # return conditions under which the value is
        # valid, too.
        beginning_denoms = denoms(ie.lhs) | denoms(ie.rhs)
        current_denoms = denoms(rv)
        for d in beginning_denoms - current_denoms:
            c = _solve_inequality(Eq(d, 0), s, linear=linear)
            if isinstance(c, Eq) and c.lhs == s:
                if classify(rv, s, c.rhs) is S.true:
                    # rv is permitting this value but it shouldn't
                    conds.append(~c)
        for i in (-oo, oo):
            if (classify(rv, s, i) is S.true and
                    classify(ie, s, i) is not S.true):
                conds.append(s < i if i is oo else i < s)
    conds.append(rv)
    return And(*conds)
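A minimal usage sketch (editorial addition): one linear, one nonlinear, and one sign-dependent case, assuming the behavior documented above.

from sympy import Symbol
from sympy.solvers.inequalities import _solve_inequality
from sympy.abc import x

print(_solve_inequality(2*x - 3 < 7, x))      # x < 5
print(_solve_inequality(x**2 - 9 < 0, x))     # (-3 < x) & (x < 3)
p = Symbol('p', positive=True)
print(_solve_inequality(p*x <= 2, x))         # x <= 2/p, since p is known positive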
 
 | 
	_solve_inequality 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	37 
 | 
	sympy/assumptions/ask.py 
 | 
	def ask(proposition, assumptions=True, context=global_assumptions):
    """
    Function to evaluate the proposition with assumptions.
    Explanation
    ===========
    This function evaluates the proposition to ``True`` or ``False`` if
    the truth value can be determined. If not, it returns ``None``.
    It should be discerned from :func:`~.refine` which, when applied to a
    proposition, simplifies the argument to symbolic ``Boolean`` instead of
    Python built-in ``True``, ``False`` or ``None``.
    **Syntax**
        * ask(proposition)
            Evaluate the *proposition* in global assumption context.
        * ask(proposition, assumptions)
            Evaluate the *proposition* with respect to *assumptions* in
            global assumption context.
    Parameters
    ==========
    proposition : Boolean
        Proposition which will be evaluated to boolean value. If this is
        not ``AppliedPredicate``, it will be wrapped by ``Q.is_true``.
    assumptions : Boolean, optional
        Local assumptions to evaluate the *proposition*.
    context : AssumptionsContext, optional
        Default assumptions to evaluate the *proposition*. By default,
        this is ``sympy.assumptions.global_assumptions`` variable.
    Returns
    =======
    ``True``, ``False``, or ``None``
    Raises
    ======
    TypeError : *proposition* or *assumptions* is not valid logical expression.
    ValueError : assumptions are inconsistent.
    Examples
    ========
    >>> from sympy import ask, Q, pi
    >>> from sympy.abc import x, y
    >>> ask(Q.rational(pi))
    False
    >>> ask(Q.even(x*y), Q.even(x) & Q.integer(y))
    True
    >>> ask(Q.prime(4*x), Q.integer(x))
    False
    If the truth value cannot be determined, ``None`` will be returned.
    >>> print(ask(Q.odd(3*x))) # cannot determine unless we know x
    None
    ``ValueError`` is raised if assumptions are inconsistent.
    >>> ask(Q.integer(x), Q.even(x) & Q.odd(x))
    Traceback (most recent call last):
      ...
    ValueError: inconsistent assumptions Q.even(x) & Q.odd(x)
    Notes
    =====
    Relations in assumptions are not implemented (yet), so the following
    will not give a meaningful result.
    >>> ask(Q.positive(x), x > 0)
    It is however a work in progress.
    See Also
    ========
    sympy.assumptions.refine.refine : Simplification using assumptions.
        Proposition is not reduced to ``None`` if the truth value cannot
        be determined.
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_ask.txt 
 | 
	def ask(proposition, assumptions=True, context=global_assumptions):
    """
    Function to evaluate the proposition with assumptions.
    Explanation
    ===========
    This function evaluates the proposition to ``True`` or ``False`` if
    the truth value can be determined. If not, it returns ``None``.
    It should be discerned from :func:`~.refine` which, when applied to a
    proposition, simplifies the argument to symbolic ``Boolean`` instead of
    Python built-in ``True``, ``False`` or ``None``.
    **Syntax**
        * ask(proposition)
            Evaluate the *proposition* in global assumption context.
        * ask(proposition, assumptions)
            Evaluate the *proposition* with respect to *assumptions* in
            global assumption context.
    Parameters
    ==========
    proposition : Boolean
        Proposition which will be evaluated to boolean value. If this is
        not ``AppliedPredicate``, it will be wrapped by ``Q.is_true``.
    assumptions : Boolean, optional
        Local assumptions to evaluate the *proposition*.
    context : AssumptionsContext, optional
        Default assumptions to evaluate the *proposition*. By default,
        this is ``sympy.assumptions.global_assumptions`` variable.
    Returns
    =======
    ``True``, ``False``, or ``None``
    Raises
    ======
    TypeError : *proposition* or *assumptions* is not valid logical expression.
    ValueError : assumptions are inconsistent.
    Examples
    ========
    >>> from sympy import ask, Q, pi
    >>> from sympy.abc import x, y
    >>> ask(Q.rational(pi))
    False
    >>> ask(Q.even(x*y), Q.even(x) & Q.integer(y))
    True
    >>> ask(Q.prime(4*x), Q.integer(x))
    False
    If the truth value cannot be determined, ``None`` will be returned.
    >>> print(ask(Q.odd(3*x))) # cannot determine unless we know x
    None
    ``ValueError`` is raised if assumptions are inconsistent.
    >>> ask(Q.integer(x), Q.even(x) & Q.odd(x))
    Traceback (most recent call last):
      ...
    ValueError: inconsistent assumptions Q.even(x) & Q.odd(x)
    Notes
    =====
    Relations in assumptions are not implemented (yet), so the following
    will not give a meaningful result.
    >>> ask(Q.positive(x), x > 0)
    It is however a work in progress.
    See Also
    ========
    sympy.assumptions.refine.refine : Simplification using assumptions.
        Proposition is not reduced to ``None`` if the truth value cannot
        be determined.
    """
    from sympy.assumptions.satask import satask
    from sympy.assumptions.lra_satask import lra_satask
    from sympy.logic.algorithms.lra_theory import UnhandledInput
    proposition = sympify(proposition)
    assumptions = sympify(assumptions)
    if isinstance(proposition, Predicate) or proposition.kind is not BooleanKind:
        raise TypeError("proposition must be a valid logical expression")
    if isinstance(assumptions, Predicate) or assumptions.kind is not BooleanKind:
        raise TypeError("assumptions must be a valid logical expression")
    binrelpreds = {Eq: Q.eq, Ne: Q.ne, Gt: Q.gt, Lt: Q.lt, Ge: Q.ge, Le: Q.le}
    if isinstance(proposition, AppliedPredicate):
        key, args = proposition.function, proposition.arguments
    elif proposition.func in binrelpreds:
        key, args = binrelpreds[type(proposition)], proposition.args
    else:
        key, args = Q.is_true, (proposition,)
    # convert local and global assumptions to CNF
    assump_cnf = CNF.from_prop(assumptions)
    assump_cnf.extend(context)
    # extract the relevant facts from assumptions with respect to args
    local_facts = _extract_all_facts(assump_cnf, args)
    # convert default facts and assumed facts to encoded CNF
    known_facts_cnf = get_all_known_facts()
    enc_cnf = EncodedCNF()
    enc_cnf.from_cnf(CNF(known_facts_cnf))
    enc_cnf.add_from_cnf(local_facts)
    # check the satisfiability of given assumptions
    if local_facts.clauses and satisfiable(enc_cnf) is False:
        raise ValueError("inconsistent assumptions %s" % assumptions)
    # quick computation for single fact
    res = _ask_single_fact(key, local_facts)
    if res is not None:
        return res
    # direct resolution method, no logic
    res = key(*args)._eval_ask(assumptions)
    if res is not None:
        return bool(res)
    # using satask (still costly)
    res = satask(proposition, assumptions=assumptions, context=context)
    if res is not None:
        return res
    try:
        res = lra_satask(proposition, assumptions=assumptions, context=context)
    except UnhandledInput:
        return None
    return res
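A minimal usage sketch (editorial addition): a few queries with and without local assumptions, assuming the ask() behavior documented above.

from sympy import ask, Q
from sympy.abc import x, y

print(ask(Q.integer(x + y), Q.integer(x) & Q.integer(y)))   # True
print(ask(Q.positive(x), Q.negative(x)))                    # False
print(ask(Q.prime(x)))                                      # None (undetermined)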
 
 | 
	ask 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	38 
 | 
	sympy/matrices/sparsetools.py 
 | 
	def banded(*args, **kwargs):
    """Returns a SparseMatrix from the given dictionary describing
    the diagonals of the matrix. The keys are positive for upper
    diagonals and negative for those below the main diagonal. The
    values may be:
    * expressions or single-argument functions,
    * lists or tuples of values,
    * matrices
    Unless dimensions are given, the size of the returned matrix will
    be large enough to contain the largest non-zero value provided.
    kwargs
    ======
    rows : rows of the resulting matrix; computed if
           not given.
    cols : columns of the resulting matrix; computed if
           not given.
    Examples
    ========
    >>> from sympy import banded, ones, Matrix
    >>> from sympy.abc import x
    If explicit values are given in tuples,
    the matrix will autosize to contain all values, otherwise
    a single value is filled onto the entire diagonal:
    >>> banded({1: (1, 2, 3), -1: (4, 5, 6), 0: x})
    Matrix([
    [x, 1, 0, 0],
    [4, x, 2, 0],
    [0, 5, x, 3],
    [0, 0, 6, x]])
    A function accepting a single argument can be used to fill the
    diagonal as a function of diagonal index (which starts at 0).
    The size (or shape) of the matrix must be given to obtain more
    than a 1x1 matrix:
    >>> s = lambda d: (1 + d)**2
    >>> banded(5, {0: s, 2: s, -2: 2})
    Matrix([
    [1, 0, 1,  0,  0],
    [0, 4, 0,  4,  0],
    [2, 0, 9,  0,  9],
    [0, 2, 0, 16,  0],
    [0, 0, 2,  0, 25]])
    The diagonal of matrices placed on a diagonal will coincide
    with the indicated diagonal:
    >>> vert = Matrix([1, 2, 3])
    >>> banded({0: vert}, cols=3)
    Matrix([
    [1, 0, 0],
    [2, 1, 0],
    [3, 2, 1],
    [0, 3, 2],
    [0, 0, 3]])
    >>> banded(4, {0: ones(2)})
    Matrix([
    [1, 1, 0, 0],
    [1, 1, 0, 0],
    [0, 0, 1, 1],
    [0, 0, 1, 1]])
    Errors are raised if the designated size will not hold
    all values an integral number of times. Here, the rows
    are designated as odd (but an even number is required to
    hold the off-diagonal 2x2 ones):
    >>> banded({0: 2, 1: ones(2)}, rows=5)
    Traceback (most recent call last):
    ...
    ValueError:
    sequence does not fit an integral number of times in the matrix
    And here, an even number of rows is given...but the square
    matrix has an even number of columns, too. As we saw
    in the previous example, an odd number is required:
    >>> banded(4, {0: 2, 1: ones(2)})  # trying to make 4x4 and cols must be odd
    Traceback (most recent call last):
    ...
    ValueError:
    sequence does not fit an integral number of times in the matrix
    A way around having to count rows is to enclose matrix elements
    in a tuple and indicate the desired number of them to the right:
    >>> banded({0: 2, 2: (ones(2),)*3})
    Matrix([
    [2, 0, 1, 1, 0, 0, 0, 0],
    [0, 2, 1, 1, 0, 0, 0, 0],
    [0, 0, 2, 0, 1, 1, 0, 0],
    [0, 0, 0, 2, 1, 1, 0, 0],
    [0, 0, 0, 0, 2, 0, 1, 1],
    [0, 0, 0, 0, 0, 2, 1, 1]])
    An error will be raised if more than one value
    is written to a given entry. Here, the ones overlap
    with the main diagonal if they are placed on the
    first diagonal:
    >>> banded({0: (2,)*5, 1: (ones(2),)*3})
    Traceback (most recent call last):
    ...
    ValueError: collision at (1, 1)
    By placing a 0 at the bottom left of the 2x2 matrix of
    ones, the collision is avoided:
    >>> u2 = Matrix([
    ... [1, 1],
    ... [0, 1]])
    >>> banded({0: [2]*5, 1: [u2]*3})
    Matrix([
    [2, 1, 1, 0, 0, 0, 0],
    [0, 2, 1, 0, 0, 0, 0],
    [0, 0, 2, 1, 1, 0, 0],
    [0, 0, 0, 2, 1, 0, 0],
    [0, 0, 0, 0, 2, 1, 1],
    [0, 0, 0, 0, 0, 0, 1]])
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_banded.txt 
 | 
	def banded(*args, **kwargs):
    """Returns a SparseMatrix from the given dictionary describing
    the diagonals of the matrix. The keys are positive for upper
    diagonals and negative for those below the main diagonal. The
    values may be:
    * expressions or single-argument functions,
    * lists or tuples of values,
    * matrices
    Unless dimensions are given, the size of the returned matrix will
    be large enough to contain the largest non-zero value provided.
    kwargs
    ======
    rows : rows of the resulting matrix; computed if
           not given.
    cols : columns of the resulting matrix; computed if
           not given.
    Examples
    ========
    >>> from sympy import banded, ones, Matrix
    >>> from sympy.abc import x
    If explicit values are given in tuples,
    the matrix will autosize to contain all values; otherwise
    a single value is filled onto the entire diagonal:
    >>> banded({1: (1, 2, 3), -1: (4, 5, 6), 0: x})
    Matrix([
    [x, 1, 0, 0],
    [4, x, 2, 0],
    [0, 5, x, 3],
    [0, 0, 6, x]])
    A function accepting a single argument can be used to fill the
    diagonal as a function of diagonal index (which starts at 0).
    The size (or shape) of the matrix must be given to obtain more
    than a 1x1 matrix:
    >>> s = lambda d: (1 + d)**2
    >>> banded(5, {0: s, 2: s, -2: 2})
    Matrix([
    [1, 0, 1,  0,  0],
    [0, 4, 0,  4,  0],
    [2, 0, 9,  0,  9],
    [0, 2, 0, 16,  0],
    [0, 0, 2,  0, 25]])
    The diagonal of matrices placed on a diagonal will coincide
    with the indicated diagonal:
    >>> vert = Matrix([1, 2, 3])
    >>> banded({0: vert}, cols=3)
    Matrix([
    [1, 0, 0],
    [2, 1, 0],
    [3, 2, 1],
    [0, 3, 2],
    [0, 0, 3]])
    >>> banded(4, {0: ones(2)})
    Matrix([
    [1, 1, 0, 0],
    [1, 1, 0, 0],
    [0, 0, 1, 1],
    [0, 0, 1, 1]])
    Errors are raised if the designated size will not hold
    all values an integral number of times. Here, the rows
    are designated as odd (but an even number is required to
    hold the off-diagonal 2x2 ones):
    >>> banded({0: 2, 1: ones(2)}, rows=5)
    Traceback (most recent call last):
    ...
    ValueError:
    sequence does not fit an integral number of times in the matrix
    And here, an even number of rows is given...but the square
    matrix then has an even number of columns, too, whereas the
    off-diagonal 2x2 blocks require an odd number of columns:
    >>> banded(4, {0: 2, 1: ones(2)})  # trying to make 4x4 and cols must be odd
    Traceback (most recent call last):
    ...
    ValueError:
    sequence does not fit an integral number of times in the matrix
    A way around having to count rows is to enclose matrix elements
    in a tuple and indicate the desired number of them to the right:
    >>> banded({0: 2, 2: (ones(2),)*3})
    Matrix([
    [2, 0, 1, 1, 0, 0, 0, 0],
    [0, 2, 1, 1, 0, 0, 0, 0],
    [0, 0, 2, 0, 1, 1, 0, 0],
    [0, 0, 0, 2, 1, 1, 0, 0],
    [0, 0, 0, 0, 2, 0, 1, 1],
    [0, 0, 0, 0, 0, 2, 1, 1]])
    An error will be raised if more than one value
    is written to a given entry. Here, the ones overlap
    with the main diagonal if they are placed on the
    first diagonal:
    >>> banded({0: (2,)*5, 1: (ones(2),)*3})
    Traceback (most recent call last):
    ...
    ValueError: collision at (1, 1)
    By placing a 0 at the bottom left of the 2x2 matrix of
    ones, the collision is avoided:
    >>> u2 = Matrix([
    ... [1, 1],
    ... [0, 1]])
    >>> banded({0: [2]*5, 1: [u2]*3})
    Matrix([
    [2, 1, 1, 0, 0, 0, 0],
    [0, 2, 1, 0, 0, 0, 0],
    [0, 0, 2, 1, 1, 0, 0],
    [0, 0, 0, 2, 1, 0, 0],
    [0, 0, 0, 0, 2, 1, 1],
    [0, 0, 0, 0, 0, 0, 1]])
    """
    try:
        if len(args) not in (1, 2, 3):
            raise TypeError
        if not isinstance(args[-1], (dict, Dict)):
            raise TypeError
        if len(args) == 1:
            rows = kwargs.get('rows', None)
            cols = kwargs.get('cols', None)
            if rows is not None:
                rows = as_int(rows)
            if cols is not None:
                cols = as_int(cols)
        elif len(args) == 2:
            rows = cols = as_int(args[0])
        else:
            rows, cols = map(as_int, args[:2])
        # fails with ValueError if any keys are not ints
        _ = all(as_int(k) for k in args[-1])
    except (ValueError, TypeError):
        raise TypeError(filldedent(
            '''unrecognized input to banded:
            expecting [[row,] col,] {int: value}'''))
    def rc(d):
        # return row,col coord of diagonal start
        r = -d if d < 0 else 0
        c = 0 if r else d
        return r, c
    smat = {}
    undone = []
    tba = Dummy()
    # first handle objects with size
    for d, v in args[-1].items():
        r, c = rc(d)
        # note: only list and tuple are recognized since this
        # will allow other Basic objects like Tuple
        # into the matrix if so desired
        if isinstance(v, (list, tuple)):
            extra = 0
            for i, vi in enumerate(v):
                i += extra
                if is_sequence(vi):
                    vi = SparseMatrix(vi)
                    smat[r + i, c + i] = vi
                    extra += min(vi.shape) - 1
                else:
                    smat[r + i, c + i] = vi
        elif is_sequence(v):
            v = SparseMatrix(v)
            rv, cv = v.shape
            if rows and cols:
                nr, xr = divmod(rows - r, rv)
                nc, xc = divmod(cols - c, cv)
                x = xr or xc
                do = min(nr, nc)
            elif rows:
                do, x = divmod(rows - r, rv)
            elif cols:
                do, x = divmod(cols - c, cv)
            else:
                do = 1
                x = 0
            if x:
                raise ValueError(filldedent('''
                    sequence does not fit an integral number of times
                    in the matrix'''))
            j = min(v.shape)
            for i in range(do):
                smat[r, c] = v
                r += j
                c += j
        elif v:
            smat[r, c] = tba
            undone.append((d, v))
    s = SparseMatrix(None, smat)  # to expand matrices
    smat = s.todok()
    # check for dim errors here
    if rows is not None and rows < s.rows:
        raise ValueError('Designated rows %s < needed %s' % (rows, s.rows))
    if cols is not None and cols < s.cols:
        raise ValueError('Designated cols %s < needed %s' % (cols, s.cols))
    if rows is cols is None:
        rows = s.rows
        cols = s.cols
    elif rows is not None and cols is None:
        cols = max(rows, s.cols)
    elif cols is not None and rows is None:
        rows = max(cols, s.rows)
    def update(i, j, v):
        # update smat and make sure there are
        # no collisions
        if v:
            if (i, j) in smat and smat[i, j] not in (tba, v):
                raise ValueError('collision at %s' % ((i, j),))
            smat[i, j] = v
    if undone:
        for d, vi in undone:
            r, c = rc(d)
            v = vi if callable(vi) else lambda _: vi
            i = 0
            while r + i < rows and c + i < cols:
                update(r + i, c + i, v(i))
                i += 1
    return SparseMatrix(rows, cols, smat)
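
A minimal usage sketch (added for illustration; not one of the upstream doctests): when
both ``rows`` and ``cols`` are supplied, a scalar placed on a diagonal is repeated only
while it stays inside the requested shape.

>>> banded({0: 7}, rows=2, cols=4)
Matrix([
[7, 0, 0, 0],
[0, 7, 0, 0]])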
 
 | 
	banded 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	39 
 | 
	sympy/combinatorics/tensor_can.py 
 | 
	def canonicalize(g, dummies, msym, *v):
    """
    canonicalize tensor formed by tensors
    Parameters
    ==========
    g : permutation representing the tensor
    dummies : list representing the dummy indices
      it can be a list of dummy indices of the same type
      or a list of lists of dummy indices, one list for each
      type of index;
      the dummy indices must come after the free indices,
      and put in order contravariant, covariant
      [d0, -d0, d1,-d1,...]
    msym :  symmetry of the metric(s)
        it can be an integer or a list;
        in the first case it is the symmetry of the dummy index metric;
        in the second case it is the list of the symmetries of the
        index metric for each type
    v : list, (base_i, gens_i, n_i, sym_i) for tensors of type `i`
    base_i, gens_i : BSGS for tensors of this type.
        The BSGS should have minimal base under lexicographic ordering;
        if not, an attempt is made to get the minimal BSGS;
        in case of failure,
        canonicalize_naive is used, which is much slower.
    n_i :    number of tensors of type `i`.
    sym_i :  symmetry under exchange of component tensors of type `i`.
        Both for msym and sym_i the cases are
            * None  no symmetry
            * 0     commuting
            * 1     anticommuting
    Returns
    =======
    0 if the tensor is zero, else return the array form of
    the permutation representing the canonical form of the tensor.
    Algorithm
    =========
    First one uses canonical_free to get the minimum tensor under
    lexicographic order, using only the slot symmetries.
    If the component tensors do not have a minimal BSGS, an attempt
    is made to find one; if the attempt fails canonicalize_naive
    is used instead.
    Compute the residual slot symmetry keeping fixed the free indices
    using tensor_gens(base, gens, list_free_indices, sym).
    Reduce the problem eliminating the free indices.
    Then use double_coset_can_rep and lift back the result reintroducing
    the free indices.
    Examples
    ========
    one type of index with commuting metric;
    `A_{a b}` and `B_{a b}` antisymmetric and commuting
    `T = A_{d0 d1} * B^{d0}{}_{d2} * B^{d2 d1}`
    `ord = [d0,-d0,d1,-d1,d2,-d2]` order of the indices
    g = [1, 3, 0, 5, 4, 2, 6, 7]
    `T_c = 0`
    >>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize, bsgs_direct_product
    >>> from sympy.combinatorics import Permutation
    >>> base2a, gens2a = get_symmetric_group_sgs(2, 1)
    >>> t0 = (base2a, gens2a, 1, 0)
    >>> t1 = (base2a, gens2a, 2, 0)
    >>> g = Permutation([1, 3, 0, 5, 4, 2, 6, 7])
    >>> canonicalize(g, range(6), 0, t0, t1)
    0
    same as above, but with `B_{a b}` anticommuting
    `T_c = -A^{d0 d1} * B_{d0}{}^{d2} * B_{d1 d2}`
    can = [0,2,1,4,3,5,7,6]
    >>> t1 = (base2a, gens2a, 2, 1)
    >>> canonicalize(g, range(6), 0, t0, t1)
    [0, 2, 1, 4, 3, 5, 7, 6]
    two types of indices `[a,b,c,d,e,f]` and `[m,n]`, in this order,
    both with commuting metric
    `f^{a b c}` antisymmetric, commuting
    `A_{m a}` no symmetry, commuting
    `T = f^c{}_{d a} * f^f{}_{e b} * A_m{}^d * A^{m b} * A_n{}^a * A^{n e}`
    ord = [c,f,a,-a,b,-b,d,-d,e,-e,m,-m,n,-n]
    g = [0,7,3, 1,9,5, 11,6, 10,4, 13,2, 12,8, 14,15]
    The canonical tensor is
    `T_c = -f^{c a b} * f^{f d e} * A^m{}_a * A_{m d} * A^n{}_b * A_{n e}`
    can = [0,2,4, 1,6,8, 10,3, 11,7, 12,5, 13,9, 15,14]
    >>> base_f, gens_f = get_symmetric_group_sgs(3, 1)
    >>> base1, gens1 = get_symmetric_group_sgs(1)
    >>> base_A, gens_A = bsgs_direct_product(base1, gens1, base1, gens1)
    >>> t0 = (base_f, gens_f, 2, 0)
    >>> t1 = (base_A, gens_A, 4, 0)
    >>> dummies = [range(2, 10), range(10, 14)]
    >>> g = Permutation([0, 7, 3, 1, 9, 5, 11, 6, 10, 4, 13, 2, 12, 8, 14, 15])
    >>> canonicalize(g, dummies, [0, 0], t0, t1)
    [0, 2, 4, 1, 6, 8, 10, 3, 11, 7, 12, 5, 13, 9, 15, 14]
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_canonicalize.txt 
 | 
	def canonicalize(g, dummies, msym, *v):
    """
    canonicalize tensor formed by tensors
    Parameters
    ==========
    g : permutation representing the tensor
    dummies : list representing the dummy indices
      it can be a list of dummy indices of the same type
      or a list of lists of dummy indices, one list for each
      type of index;
      the dummy indices must come after the free indices,
      and put in order contravariant, covariant
      [d0, -d0, d1,-d1,...]
    msym :  symmetry of the metric(s)
        it can be an integer or a list;
        in the first case it is the symmetry of the dummy index metric;
        in the second case it is the list of the symmetries of the
        index metric for each type
    v : list, (base_i, gens_i, n_i, sym_i) for tensors of type `i`
    base_i, gens_i : BSGS for tensors of this type.
        The BSGS should have minimal base under lexicographic ordering;
        if not, an attempt is made to get the minimal BSGS;
        in case of failure,
        canonicalize_naive is used, which is much slower.
    n_i :    number of tensors of type `i`.
    sym_i :  symmetry under exchange of component tensors of type `i`.
        Both for msym and sym_i the cases are
            * None  no symmetry
            * 0     commuting
            * 1     anticommuting
    Returns
    =======
    0 if the tensor is zero, else return the array form of
    the permutation representing the canonical form of the tensor.
    Algorithm
    =========
    First one uses canonical_free to get the minimum tensor under
    lexicographic order, using only the slot symmetries.
    If the component tensors do not have a minimal BSGS, an attempt
    is made to find one; if the attempt fails canonicalize_naive
    is used instead.
    Compute the residual slot symmetry keeping fixed the free indices
    using tensor_gens(base, gens, list_free_indices, sym).
    Reduce the problem eliminating the free indices.
    Then use double_coset_can_rep and lift back the result reintroducing
    the free indices.
    Examples
    ========
    one type of index with commuting metric;
    `A_{a b}` and `B_{a b}` antisymmetric and commuting
    `T = A_{d0 d1} * B^{d0}{}_{d2} * B^{d2 d1}`
    `ord = [d0,-d0,d1,-d1,d2,-d2]` order of the indices
    g = [1, 3, 0, 5, 4, 2, 6, 7]
    `T_c = 0`
    >>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize, bsgs_direct_product
    >>> from sympy.combinatorics import Permutation
    >>> base2a, gens2a = get_symmetric_group_sgs(2, 1)
    >>> t0 = (base2a, gens2a, 1, 0)
    >>> t1 = (base2a, gens2a, 2, 0)
    >>> g = Permutation([1, 3, 0, 5, 4, 2, 6, 7])
    >>> canonicalize(g, range(6), 0, t0, t1)
    0
    same as above, but with `B_{a b}` anticommuting
    `T_c = -A^{d0 d1} * B_{d0}{}^{d2} * B_{d1 d2}`
    can = [0,2,1,4,3,5,7,6]
    >>> t1 = (base2a, gens2a, 2, 1)
    >>> canonicalize(g, range(6), 0, t0, t1)
    [0, 2, 1, 4, 3, 5, 7, 6]
    two types of indices `[a,b,c,d,e,f]` and `[m,n]`, in this order,
    both with commuting metric
    `f^{a b c}` antisymmetric, commuting
    `A_{m a}` no symmetry, commuting
    `T = f^c{}_{d a} * f^f{}_{e b} * A_m{}^d * A^{m b} * A_n{}^a * A^{n e}`
    ord = [c,f,a,-a,b,-b,d,-d,e,-e,m,-m,n,-n]
    g = [0,7,3, 1,9,5, 11,6, 10,4, 13,2, 12,8, 14,15]
    The canonical tensor is
    `T_c = -f^{c a b} * f^{f d e} * A^m{}_a * A_{m d} * A^n{}_b * A_{n e}`
    can = [0,2,4, 1,6,8, 10,3, 11,7, 12,5, 13,9, 15,14]
    >>> base_f, gens_f = get_symmetric_group_sgs(3, 1)
    >>> base1, gens1 = get_symmetric_group_sgs(1)
    >>> base_A, gens_A = bsgs_direct_product(base1, gens1, base1, gens1)
    >>> t0 = (base_f, gens_f, 2, 0)
    >>> t1 = (base_A, gens_A, 4, 0)
    >>> dummies = [range(2, 10), range(10, 14)]
    >>> g = Permutation([0, 7, 3, 1, 9, 5, 11, 6, 10, 4, 13, 2, 12, 8, 14, 15])
    >>> canonicalize(g, dummies, [0, 0], t0, t1)
    [0, 2, 4, 1, 6, 8, 10, 3, 11, 7, 12, 5, 13, 9, 15, 14]
    """
    from sympy.combinatorics.testutil import canonicalize_naive
    if not isinstance(msym, list):
        if msym not in (0, 1, None):
            raise ValueError('msym must be 0, 1 or None')
        num_types = 1
    else:
        num_types = len(msym)
        if not all(msymx in (0, 1, None) for msymx in msym):
            raise ValueError('msym entries must be 0, 1 or None')
        if len(dummies) != num_types:
            raise ValueError(
                'dummies and msym must have the same number of elements')
    size = g.size
    num_tensors = 0
    v1 = []
    for base_i, gens_i, n_i, sym_i in v:
        # check that the BSGS is minimal;
        # this property is used in double_coset_can_rep;
        # if it is not minimal use canonicalize_naive
        if not _is_minimal_bsgs(base_i, gens_i):
            mbsgs = get_minimal_bsgs(base_i, gens_i)
            if not mbsgs:
                can = canonicalize_naive(g, dummies, msym, *v)
                return can
            base_i, gens_i = mbsgs
        v1.append((base_i, gens_i, [[]] * n_i, sym_i))
        num_tensors += n_i
    if num_types == 1 and not isinstance(msym, list):
        dummies = [dummies]
        msym = [msym]
    flat_dummies = []
    for dumx in dummies:
        flat_dummies.extend(dumx)
    if flat_dummies and flat_dummies != list(range(flat_dummies[0], flat_dummies[-1] + 1)):
        raise ValueError('dummies is not valid')
    # slot symmetry of the tensor
    size1, sbase, sgens = gens_products(*v1)
    if size != size1:
        raise ValueError(
            'g has size %d, generators have size %d' % (size, size1))
    free = [i for i in range(size - 2) if i not in flat_dummies]
    num_free = len(free)
    # g1 minimal tensor under slot symmetry
    g1 = canonical_free(sbase, sgens, g, num_free)
    if not flat_dummies:
        return g1
    # save the sign of g1
    sign = 0 if g1[-1] == size - 1 else 1
    # the free indices are kept fixed.
    # Determine free_i, the list of slots of tensors which are fixed
    # since they are occupied by free indices.
    start = 0
    for i, (base_i, gens_i, n_i, sym_i) in enumerate(v):
        free_i = []
        len_tens = gens_i[0].size - 2
        # for each component tensor get a list of fixed slots
        for j in range(n_i):
            # get the elements corresponding to the component tensor
            h = g1[start:(start + len_tens)]
            fr = []
            # get the positions of the fixed elements in h
            for k in free:
                if k in h:
                    fr.append(h.index(k))
            free_i.append(fr)
            start += len_tens
        v1[i] = (base_i, gens_i, free_i, sym_i)
    # BSGS of the tensor with fixed free indices
    # if tensor_gens fails in gens_product, use canonicalize_naive
    size, sbase, sgens = gens_products(*v1)
    # reduce the permutations getting rid of the free indices
    pos_free = [g1.index(x) for x in range(num_free)]
    size_red = size - num_free
    g1_red = [x - num_free for x in g1 if x in flat_dummies]
    if sign:
        g1_red.extend([size_red - 1, size_red - 2])
    else:
        g1_red.extend([size_red - 2, size_red - 1])
    map_slots = _get_map_slots(size, pos_free)
    sbase_red = [map_slots[i] for i in sbase if i not in pos_free]
    sgens_red = [_af_new([map_slots[i] for i in y._array_form if i not in pos_free]) for y in sgens]
    dummies_red = [[x - num_free for x in y] for y in dummies]
    transv_red = get_transversals(sbase_red, sgens_red)
    g1_red = _af_new(g1_red)
    g2 = double_coset_can_rep(
        dummies_red, msym, sbase_red, sgens_red, transv_red, g1_red)
    if g2 == 0:
        return 0
    # lift to the case with the free indices
    g3 = _lift_sgens(size, pos_free, free, g2)
    return g3
 
 | 
	canonicalize 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	43 
 | 
	sympy/combinatorics/coset_table.py 
 | 
	def coset_enumeration_r(fp_grp, Y, max_cosets=None, draft=None,
                                    incomplete=False, modified=False):
    """
    This is the easier of the two implemented methods of coset enumeration
    and is often called the HLT method, after Haselgrove, Leech and Trotter.
    The idea is that ``scan_and_fill`` makes new definitions whenever a scan
    is incomplete so that the scan can complete; this way the gaps in the
    scan of each relator or subgroup generator are filled in, which is why
    it is called the relator-based method.
    An instance of `CosetTable` for `fp_grp` can be passed as the keyword
    argument `draft` in which case the coset enumeration will start with
    that instance and attempt to complete it.
    When `incomplete` is `True` and the function is unable to complete for
    some reason, the partially complete table will be returned.
    # TODO: complete the docstring
    See Also
    ========
    scan_and_fill,
    Examples
    ========
    >>> from sympy.combinatorics.free_groups import free_group
    >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r
    >>> F, x, y = free_group("x, y")
    # Example 5.1 from [1]
    >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
    >>> C = coset_enumeration_r(f, [x])
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [0, 0, 1, 2]
    [1, 1, 2, 0]
    [2, 2, 0, 1]
    >>> C.p
    [0, 1, 2, 1, 1]
    # Example from exercises Q2 [1]
    >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3])
    >>> C = coset_enumeration_r(f, [])
    >>> C.compress(); C.standardize()
    >>> C.table
    [[1, 2, 3, 4],
    [5, 0, 6, 7],
    [0, 5, 7, 6],
    [7, 6, 5, 0],
    [6, 7, 0, 5],
    [2, 1, 4, 3],
    [3, 4, 2, 1],
    [4, 3, 1, 2]]
    # Example 5.2
    >>> f = FpGroup(F, [x**2, y**3, (x*y)**3])
    >>> Y = [x*y]
    >>> C = coset_enumeration_r(f, Y)
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [1, 1, 2, 1]
    [0, 0, 0, 2]
    [3, 3, 1, 0]
    [2, 2, 3, 3]
    # Example 5.3
    >>> f = FpGroup(F, [x**2*y**2, x**3*y**5])
    >>> Y = []
    >>> C = coset_enumeration_r(f, Y)
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [1, 3, 1, 3]
    [2, 0, 2, 0]
    [3, 1, 3, 1]
    [0, 2, 0, 2]
    # Example 5.4
    >>> F, a, b, c, d, e = free_group("a, b, c, d, e")
    >>> f = FpGroup(F, [a*b*c**-1, b*c*d**-1, c*d*e**-1, d*e*a**-1, e*a*b**-1])
    >>> Y = [a]
    >>> C = coset_enumeration_r(f, Y)
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    # example of "compress" method
    >>> C.compress()
    >>> C.table
    [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    # Exercises Pg. 161, Q2.
    >>> F, x, y = free_group("x, y")
    >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3])
    >>> Y = []
    >>> C = coset_enumeration_r(f, Y)
    >>> C.compress()
    >>> C.standardize()
    >>> C.table
    [[1, 2, 3, 4],
    [5, 0, 6, 7],
    [0, 5, 7, 6],
    [7, 6, 5, 0],
    [6, 7, 0, 5],
    [2, 1, 4, 3],
    [3, 4, 2, 1],
    [4, 3, 1, 2]]
    # John J. Cannon; Lucien A. Dimino; George Havas; Jane M. Watson
    # Mathematics of Computation, Vol. 27, No. 123. (Jul., 1973), pp. 463-490
    # from 1973chwd.pdf
    # Table 1. Ex. 1
    >>> F, r, s, t = free_group("r, s, t")
    >>> E1 = FpGroup(F, [t**-1*r*t*r**-2, r**-1*s*r*s**-2, s**-1*t*s*t**-2])
    >>> C = coset_enumeration_r(E1, [r])
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [0, 0, 0, 0, 0, 0]
    Ex. 2
    >>> F, a, b = free_group("a, b")
    >>> Cox = FpGroup(F, [a**6, b**6, (a*b)**2, (a**2*b**2)**2, (a**3*b**3)**5])
    >>> C = coset_enumeration_r(Cox, [a])
    >>> index = 0
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         index += 1
    >>> index
    500
    # Ex. 3
    >>> F, a, b = free_group("a, b")
    >>> B_2_4 = FpGroup(F, [a**4, b**4, (a*b)**4, (a**-1*b)**4, (a**2*b)**4, \
            (a*b**2)**4, (a**2*b**2)**4, (a**-1*b*a*b)**4, (a*b**-1*a*b)**4])
    >>> C = coset_enumeration_r(B_2_4, [a])
    >>> index = 0
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         index += 1
    >>> index
    1024
    References
    ==========
    .. [1] Holt, D., Eick, B., O'Brien, E.
           "Handbook of computational group theory"
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_coset_enumeration_r.txt 
 | 
	def coset_enumeration_r(fp_grp, Y, max_cosets=None, draft=None,
                                    incomplete=False, modified=False):
    """
    This is the easier of the two implemented methods of coset enumeration
    and is often called the HLT method, after Haselgrove, Leech and Trotter.
    The idea is that ``scan_and_fill`` makes new definitions whenever a scan
    is incomplete so that the scan can complete; this way the gaps in the
    scan of each relator or subgroup generator are filled in, which is why
    it is called the relator-based method.
    An instance of `CosetTable` for `fp_grp` can be passed as the keyword
    argument `draft` in which case the coset enumeration will start with
    that instance and attempt to complete it.
    When `incomplete` is `True` and the function is unable to complete for
    some reason, the partially complete table will be returned.
    # TODO: complete the docstring
    See Also
    ========
    scan_and_fill,
    Examples
    ========
    >>> from sympy.combinatorics.free_groups import free_group
    >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r
    >>> F, x, y = free_group("x, y")
    # Example 5.1 from [1]
    >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
    >>> C = coset_enumeration_r(f, [x])
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [0, 0, 1, 2]
    [1, 1, 2, 0]
    [2, 2, 0, 1]
    >>> C.p
    [0, 1, 2, 1, 1]
    # Example from exercises Q2 [1]
    >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3])
    >>> C = coset_enumeration_r(f, [])
    >>> C.compress(); C.standardize()
    >>> C.table
    [[1, 2, 3, 4],
    [5, 0, 6, 7],
    [0, 5, 7, 6],
    [7, 6, 5, 0],
    [6, 7, 0, 5],
    [2, 1, 4, 3],
    [3, 4, 2, 1],
    [4, 3, 1, 2]]
    # Example 5.2
    >>> f = FpGroup(F, [x**2, y**3, (x*y)**3])
    >>> Y = [x*y]
    >>> C = coset_enumeration_r(f, Y)
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [1, 1, 2, 1]
    [0, 0, 0, 2]
    [3, 3, 1, 0]
    [2, 2, 3, 3]
    # Example 5.3
    >>> f = FpGroup(F, [x**2*y**2, x**3*y**5])
    >>> Y = []
    >>> C = coset_enumeration_r(f, Y)
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [1, 3, 1, 3]
    [2, 0, 2, 0]
    [3, 1, 3, 1]
    [0, 2, 0, 2]
    # Example 5.4
    >>> F, a, b, c, d, e = free_group("a, b, c, d, e")
    >>> f = FpGroup(F, [a*b*c**-1, b*c*d**-1, c*d*e**-1, d*e*a**-1, e*a*b**-1])
    >>> Y = [a]
    >>> C = coset_enumeration_r(f, Y)
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    # example of "compress" method
    >>> C.compress()
    >>> C.table
    [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    # Exercises Pg. 161, Q2.
    >>> F, x, y = free_group("x, y")
    >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3])
    >>> Y = []
    >>> C = coset_enumeration_r(f, Y)
    >>> C.compress()
    >>> C.standardize()
    >>> C.table
    [[1, 2, 3, 4],
    [5, 0, 6, 7],
    [0, 5, 7, 6],
    [7, 6, 5, 0],
    [6, 7, 0, 5],
    [2, 1, 4, 3],
    [3, 4, 2, 1],
    [4, 3, 1, 2]]
    # John J. Cannon; Lucien A. Dimino; George Havas; Jane M. Watson
    # Mathematics of Computation, Vol. 27, No. 123. (Jul., 1973), pp. 463-490
    # from 1973chwd.pdf
    # Table 1. Ex. 1
    >>> F, r, s, t = free_group("r, s, t")
    >>> E1 = FpGroup(F, [t**-1*r*t*r**-2, r**-1*s*r*s**-2, s**-1*t*s*t**-2])
    >>> C = coset_enumeration_r(E1, [r])
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [0, 0, 0, 0, 0, 0]
    Ex. 2
    >>> F, a, b = free_group("a, b")
    >>> Cox = FpGroup(F, [a**6, b**6, (a*b)**2, (a**2*b**2)**2, (a**3*b**3)**5])
    >>> C = coset_enumeration_r(Cox, [a])
    >>> index = 0
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         index += 1
    >>> index
    500
    # Ex. 3
    >>> F, a, b = free_group("a, b")
    >>> B_2_4 = FpGroup(F, [a**4, b**4, (a*b)**4, (a**-1*b)**4, (a**2*b)**4, \
            (a*b**2)**4, (a**2*b**2)**4, (a**-1*b*a*b)**4, (a*b**-1*a*b)**4])
    >>> C = coset_enumeration_r(B_2_4, [a])
    >>> index = 0
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         index += 1
    >>> index
    1024
    References
    ==========
    .. [1] Holt, D., Eick, B., O'Brien, E.
           "Handbook of computational group theory"
    """
    # 1. Initialize a coset table C for < X|R >
    C = CosetTable(fp_grp, Y, max_cosets=max_cosets)
    # Define coset table methods.
    if modified:
        _scan_and_fill = C.modified_scan_and_fill
        _define = C.modified_define
    else:
        _scan_and_fill = C.scan_and_fill
        _define = C.define
    if draft:
        C.table = draft.table[:]
        C.p = draft.p[:]
    R = fp_grp.relators
    A_dict = C.A_dict
    p = C.p
    for i in range(len(Y)):
        if modified:
            _scan_and_fill(0, Y[i], C._grp.generators[i])
        else:
            _scan_and_fill(0, Y[i])
    alpha = 0
    while alpha < C.n:
        if p[alpha] == alpha:
            try:
                for w in R:
                    if modified:
                        _scan_and_fill(alpha, w, C._grp.identity)
                    else:
                        _scan_and_fill(alpha, w)
                    # if alpha was eliminated during the scan then break
                    if p[alpha] < alpha:
                        break
                if p[alpha] == alpha:
                    for x in A_dict:
                        if C.table[alpha][A_dict[x]] is None:
                            _define(alpha, x)
            except ValueError as e:
                if incomplete:
                    return C
                raise e
        alpha += 1
    return C
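
A hedged sketch of the ``max_cosets``, ``incomplete`` and ``draft`` keywords described
above (an illustrative example added here, not an upstream doctest): a capped run returns
its partial table instead of raising, and that table can then be handed back as ``draft``
to finish the enumeration.

>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
>>> C = coset_enumeration_r(f, [x], max_cosets=2, incomplete=True)
>>> any(None in row for row in C.table)  # the capped table is only partially filled
True
>>> D = coset_enumeration_r(f, [x], draft=C)
>>> any(None in D.table[i] for i in range(len(D.p)) if D.p[i] == i)  # live rows now complete
False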
 
 | 
	coset_enumeration_r 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	44 
 | 
	sympy/simplify/cse_main.py 
 | 
	def cse(exprs, symbols=None, optimizations=None, postprocess=None,
        order='canonical', ignore=(), list=True):
    """ Perform common subexpression elimination on an expression.
    Parameters
    ==========
    exprs : list of SymPy expressions, or a single SymPy expression
        The expressions to reduce.
    symbols : infinite iterator yielding unique Symbols
        The symbols used to label the common subexpressions which are pulled
        out. The ``numbered_symbols`` generator is useful. The default is a
        stream of symbols of the form "x0", "x1", etc. This must be an
        infinite iterator.
    optimizations : list of (callable, callable) pairs
        The (preprocessor, postprocessor) pairs of external optimization
        functions. Optionally 'basic' can be passed for a set of predefined
        basic optimizations. Such 'basic' optimizations were used by default
        in the old implementation; however, they can be really slow on larger
        expressions. Now, no pre or post optimizations are made by default.
    postprocess : a function which accepts the two return values of cse and
        returns the desired form of output from cse, e.g. if you want the
        replacements reversed the function might be the following lambda:
        lambda r, e: (list(reversed(r)), e)
    order : string, 'none' or 'canonical'
        The order by which Mul and Add arguments are processed. If set to
        'canonical', arguments will be canonically ordered. If set to 'none',
        ordering will be faster but dependent on expression hashes, and thus
        machine-dependent and variable. For large expressions where speed is a
        concern, use the setting order='none'.
    ignore : iterable of Symbols
        Substitutions containing any Symbol from ``ignore`` will be ignored.
    list : bool, (default True)
        If True (the default), return the reduced expressions in a list;
        otherwise return them with the same type as the input.
    Returns
    =======
    replacements : list of (Symbol, expression) pairs
        All of the common subexpressions that were replaced. Subexpressions
        earlier in this list might show up in subexpressions later in this
        list.
    reduced_exprs : list of SymPy expressions
        The reduced expressions with all of the replacements above.
    Examples
    ========
    >>> from sympy import cse, SparseMatrix
    >>> from sympy.abc import x, y, z, w
    >>> cse(((w + x + y + z)*(w + y + z))/(w + x)**3)
    ([(x0, y + z), (x1, w + x)], [(w + x0)*(x0 + x1)/x1**3])
    List of expressions with recursive substitutions:
    >>> m = SparseMatrix([x + y, x + y + z])
    >>> cse([(x+y)**2, x + y + z, y + z, x + z + y, m])
    ([(x0, x + y), (x1, x0 + z)], [x0**2, x1, y + z, x1, Matrix([
    [x0],
    [x1]])])
    Note: the type and mutability of input matrices is retained.
    >>> isinstance(_[1][-1], SparseMatrix)
    True
    The user may disallow substitutions containing certain symbols:
    >>> cse([y**2*(x + 1), 3*y**2*(x + 1)], ignore=(y,))
    ([(x0, x + 1)], [x0*y**2, 3*x0*y**2])
    The default return value for the reduced expression(s) is a list, even if there is only
    one expression. The `list` flag preserves the type of the input in the output:
    >>> cse(x)
    ([], [x])
    >>> cse(x, list=False)
    ([], x)
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_cse.txt 
 | 
	def cse(exprs, symbols=None, optimizations=None, postprocess=None,
        order='canonical', ignore=(), list=True):
    """ Perform common subexpression elimination on an expression.
    Parameters
    ==========
    exprs : list of SymPy expressions, or a single SymPy expression
        The expressions to reduce.
    symbols : infinite iterator yielding unique Symbols
        The symbols used to label the common subexpressions which are pulled
        out. The ``numbered_symbols`` generator is useful. The default is a
        stream of symbols of the form "x0", "x1", etc. This must be an
        infinite iterator.
    optimizations : list of (callable, callable) pairs
        The (preprocessor, postprocessor) pairs of external optimization
        functions. Optionally 'basic' can be passed for a set of predefined
        basic optimizations. Such 'basic' optimizations were used by default
        in the old implementation; however, they can be really slow on larger
        expressions. Now, no pre or post optimizations are made by default.
    postprocess : a function which accepts the two return values of cse and
        returns the desired form of output from cse, e.g. if you want the
        replacements reversed the function might be the following lambda:
        lambda r, e: (list(reversed(r)), e)
    order : string, 'none' or 'canonical'
        The order by which Mul and Add arguments are processed. If set to
        'canonical', arguments will be canonically ordered. If set to 'none',
        ordering will be faster but dependent on expression hashes, and thus
        machine-dependent and variable. For large expressions where speed is a
        concern, use the setting order='none'.
    ignore : iterable of Symbols
        Substitutions containing any Symbol from ``ignore`` will be ignored.
    list : bool, (default True)
        If True (the default), return the reduced expressions in a list;
        otherwise return them with the same type as the input.
    Returns
    =======
    replacements : list of (Symbol, expression) pairs
        All of the common subexpressions that were replaced. Subexpressions
        earlier in this list might show up in subexpressions later in this
        list.
    reduced_exprs : list of SymPy expressions
        The reduced expressions with all of the replacements above.
    Examples
    ========
    >>> from sympy import cse, SparseMatrix
    >>> from sympy.abc import x, y, z, w
    >>> cse(((w + x + y + z)*(w + y + z))/(w + x)**3)
    ([(x0, y + z), (x1, w + x)], [(w + x0)*(x0 + x1)/x1**3])
    List of expressions with recursive substitutions:
    >>> m = SparseMatrix([x + y, x + y + z])
    >>> cse([(x+y)**2, x + y + z, y + z, x + z + y, m])
    ([(x0, x + y), (x1, x0 + z)], [x0**2, x1, y + z, x1, Matrix([
    [x0],
    [x1]])])
    Note: the type and mutability of input matrices is retained.
    >>> isinstance(_[1][-1], SparseMatrix)
    True
    The user may disallow substitutions containing certain symbols:
    >>> cse([y**2*(x + 1), 3*y**2*(x + 1)], ignore=(y,))
    ([(x0, x + 1)], [x0*y**2, 3*x0*y**2])
    The default return value for the reduced expression(s) is a list, even if there is only
    one expression. The `list` flag preserves the type of the input in the output:
    >>> cse(x)
    ([], [x])
    >>> cse(x, list=False)
    ([], x)
    """
    if not list:
        return _cse_homogeneous(exprs,
            symbols=symbols, optimizations=optimizations,
            postprocess=postprocess, order=order, ignore=ignore)
    if isinstance(exprs, (int, float)):
        exprs = sympify(exprs)
    # Handle the case if just one expression was passed.
    if isinstance(exprs, (Basic, MatrixBase)):
        exprs = [exprs]
    copy = exprs
    temp = []
    for e in exprs:
        if isinstance(e, (Matrix, ImmutableMatrix)):
            temp.append(Tuple(*e.flat()))
        elif isinstance(e, (SparseMatrix, ImmutableSparseMatrix)):
            temp.append(Tuple(*e.todok().items()))
        else:
            temp.append(e)
    exprs = temp
    del temp
    if optimizations is None:
        optimizations = []
    elif optimizations == 'basic':
        optimizations = basic_optimizations
    # Preprocess the expressions to give us better optimization opportunities.
    reduced_exprs = [preprocess_for_cse(e, optimizations) for e in exprs]
    if symbols is None:
        symbols = numbered_symbols(cls=Symbol)
    else:
        # In case we get passed an iterable with an __iter__ method instead of
        # an actual iterator.
        symbols = iter(symbols)
    # Find other optimization opportunities.
    opt_subs = opt_cse(reduced_exprs, order)
    # Main CSE algorithm.
    replacements, reduced_exprs = tree_cse(reduced_exprs, symbols, opt_subs,
                                           order, ignore)
    # Postprocess the expressions to return the expressions to canonical form.
    exprs = copy
    replacements = [(sym, postprocess_for_cse(subtree, optimizations))
                    for sym, subtree in replacements]
    reduced_exprs = [postprocess_for_cse(e, optimizations)
                     for e in reduced_exprs]
    # Get the matrices back
    for i, e in enumerate(exprs):
        if isinstance(e, (Matrix, ImmutableMatrix)):
            reduced_exprs[i] = Matrix(e.rows, e.cols, reduced_exprs[i])
            if isinstance(e, ImmutableMatrix):
                reduced_exprs[i] = reduced_exprs[i].as_immutable()
        elif isinstance(e, (SparseMatrix, ImmutableSparseMatrix)):
            m = SparseMatrix(e.rows, e.cols, {})
            for k, v in reduced_exprs[i]:
                m[k] = v
            if isinstance(e, ImmutableSparseMatrix):
                m = m.as_immutable()
            reduced_exprs[i] = m
    if postprocess is None:
        return replacements, reduced_exprs
    return postprocess(replacements, reduced_exprs)
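
A short illustration of the ``postprocess`` hook (an added sketch, not an upstream
doctest), reversing the replacement list from the first docstring example:

>>> from sympy import cse
>>> from sympy.abc import w, x, y, z
>>> rev = lambda r, e: (list(reversed(r)), e)
>>> cse(((w + x + y + z)*(w + y + z))/(w + x)**3, postprocess=rev)
([(x1, w + x), (x0, y + z)], [(w + x0)*(x0 + x1)/x1**3])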
 
 | 
	cse 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	45 
 | 
	sympy/polys/matrices/dense.py 
 | 
	def ddm_irref_den(a, K):
    """a  <--  rref(a); return (den, pivots)
    Compute the fraction-free reduced row echelon form (RREF) of $a$. Modifies
    $a$ in place and returns a tuple containing the denominator of the RREF and
    a list of the pivot columns.
    Explanation
    ===========
    The algorithm used is the fraction-free version of Gauss-Jordan elimination
    described as FFGJ in [1]_. Here it is modified to handle zero or missing
    pivots and to avoid redundant arithmetic.
    The domain $K$ must support exact division (``K.exquo``) but does not need
    to be a field. This method is suitable for most exact rings and fields like
    :ref:`ZZ`, :ref:`QQ` and :ref:`QQ(a)`. In the case of :ref:`QQ` or
    :ref:`K(x)` it might be more efficient to clear denominators and use
    :ref:`ZZ` or :ref:`K[x]` instead.
    For inexact domains like :ref:`RR` and :ref:`CC` use ``ddm_irref`` instead.
    Examples
    ========
    >>> from sympy.polys.matrices.dense import ddm_irref_den
    >>> from sympy import ZZ, Matrix
    >>> M = [[ZZ(1), ZZ(2), ZZ(3)], [ZZ(4), ZZ(5), ZZ(6)]]
    >>> den, pivots = ddm_irref_den(M, ZZ)
    >>> M
    [[-3, 0, 3], [0, -3, -6]]
    >>> den
    -3
    >>> pivots
    [0, 1]
    >>> Matrix(M).rref()[0]
    Matrix([
    [1, 0, -1],
    [0, 1,  2]])
    See Also
    ========
    ddm_irref
        A version of this routine that uses field division.
    sdm_irref
        A sparse version of :func:`ddm_irref`.
    sdm_rref_den
        A sparse version of :func:`ddm_irref_den`.
    sympy.polys.matrices.domainmatrix.DomainMatrix.rref_den
        Higher level interface.
    References
    ==========
    .. [1] Fraction-free algorithms for linear and polynomial equations.
        George C. Nakos , Peter R. Turner , Robert M. Williams.
        https://dl.acm.org/doi/10.1145/271130.271133
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_ddm_irref_den.txt 
 | 
	def ddm_irref_den(a, K):
    """a  <--  rref(a); return (den, pivots)
    Compute the fraction-free reduced row echelon form (RREF) of $a$. Modifies
    $a$ in place and returns a tuple containing the denominator of the RREF and
    a list of the pivot columns.
    Explanation
    ===========
    The algorithm used is the fraction-free version of Gauss-Jordan elimination
    described as FFGJ in [1]_. Here it is modified to handle zero or missing
    pivots and to avoid redundant arithmetic.
    The domain $K$ must support exact division (``K.exquo``) but does not need
    to be a field. This method is suitable for most exact rings and fields like
    :ref:`ZZ`, :ref:`QQ` and :ref:`QQ(a)`. In the case of :ref:`QQ` or
    :ref:`K(x)` it might be more efficient to clear denominators and use
    :ref:`ZZ` or :ref:`K[x]` instead.
    For inexact domains like :ref:`RR` and :ref:`CC` use ``ddm_irref`` instead.
    Examples
    ========
    >>> from sympy.polys.matrices.dense import ddm_irref_den
    >>> from sympy import ZZ, Matrix
    >>> M = [[ZZ(1), ZZ(2), ZZ(3)], [ZZ(4), ZZ(5), ZZ(6)]]
    >>> den, pivots = ddm_irref_den(M, ZZ)
    >>> M
    [[-3, 0, 3], [0, -3, -6]]
    >>> den
    -3
    >>> pivots
    [0, 1]
    >>> Matrix(M).rref()[0]
    Matrix([
    [1, 0, -1],
    [0, 1,  2]])
    See Also
    ========
    ddm_irref
        A version of this routine that uses field division.
    sdm_irref
        A sparse version of :func:`ddm_irref`.
    sdm_rref_den
        A sparse version of :func:`ddm_irref_den`.
    sympy.polys.matrices.domainmatrix.DomainMatrix.rref_den
        Higher level interface.
    References
    ==========
    .. [1] Fraction-free algorithms for linear and polynomial equations.
        George C. Nakos , Peter R. Turner , Robert M. Williams.
        https://dl.acm.org/doi/10.1145/271130.271133
    """
    #
    # A simpler presentation of this algorithm is given in [1]:
    #
    # Given an n x n matrix A and n x 1 matrix b:
    #
    #   for i in range(n):
    #       if i != 0:
    #           d = a[i-1][i-1]
    #       for j in range(n):
    #           if j == i:
    #               continue
    #           b[j] = a[i][i]*b[j] - a[j][i]*b[i]
    #           for k in range(n):
    #               a[j][k] = a[i][i]*a[j][k] - a[j][i]*a[i][k]
    #               if i != 0:
    #                   a[j][k] /= d
    #
    # Our version here is a bit more complicated because:
    #
    #  1. We use row-swaps to avoid zero pivots.
    #  2. We allow for some columns to be missing pivots.
    #  3. We avoid a lot of redundant arithmetic.
    #
    # TODO: Use a non-trivial pivoting strategy. Even just row swapping makes a
    # big difference to performance if e.g. the upper-left entry of the matrix
    # is a huge polynomial.
    # a is (m x n)
    m = len(a)
    if not m:
        return K.one, []
    n = len(a[0])
    d = None
    pivots = []
    no_pivots = []
    # i, j will be the row and column indices of the current pivot
    i = 0
    for j in range(n):
        # next pivot?
        aij = a[i][j]
        # swap rows if zero
        if not aij:
            for ip in range(i+1, m):
                aij = a[ip][j]
                # row-swap
                if aij:
                    a[i], a[ip] = a[ip], a[i]
                    break
            else:
                # go to next column
                no_pivots.append(j)
                continue
        # Now aij is the pivot and i,j are the row and column. We need to clear
        # the column above and below but we also need to keep track of the
        # denominator of the RREF which means also multiplying everything above
        # and to the left by the current pivot aij and dividing by d (which we
        # multiplied everything by in the previous iteration so this is an
        # exact division).
        #
        # First handle the upper left corner which is usually already diagonal
        # with all diagonal entries equal to the current denominator but there
        # can be other non-zero entries in any column that has no pivot.
        # Update previous pivots in the matrix
        if pivots:
            pivot_val = aij * a[0][pivots[0]]
            # Divide out the common factor
            if d is not None:
                pivot_val = K.exquo(pivot_val, d)
            # Could defer this until the end but it is pretty cheap and
            # helps when debugging.
            for ip, jp in enumerate(pivots):
                a[ip][jp] = pivot_val
        # Update columns without pivots
        for jnp in no_pivots:
            for ip in range(i):
                aijp = a[ip][jnp]
                if aijp:
                    aijp *= aij
                    if d is not None:
                        aijp = K.exquo(aijp, d)
                    a[ip][jnp] = aijp
        # Eliminate above, below and to the right as in ordinary division-free
        # Gauss-Jordan elimination except also dividing out d from every entry.
        for jp, aj in enumerate(a):
            # Skip the current row
            if jp == i:
                continue
            # Eliminate to the right in all rows
            for kp in range(j+1, n):
                ajk = aij * aj[kp] - aj[j] * a[i][kp]
                if d is not None:
                    ajk = K.exquo(ajk, d)
                aj[kp] = ajk
            # Set to zero above and below the pivot
            aj[j] = K.zero
        # next row
        pivots.append(j)
        i += 1
        # no more rows left?
        if i >= m:
            break
        if not K.is_one(aij):
            d = aij
        else:
            d = None
    if not pivots:
        denom = K.one
    else:
        denom = a[0][pivots[0]]
    return denom, pivots
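
A second, hedged example (added here; not part of the upstream docstring) showing the
zero/missing-pivot handling mentioned in the comments: the first column has no pivot,
so the only pivot is column 1 and the row below it is zeroed out.

>>> M = [[ZZ(0), ZZ(2)], [ZZ(0), ZZ(4)]]
>>> den, pivots = ddm_irref_den(M, ZZ)
>>> M
[[0, 2], [0, 0]]
>>> den
2
>>> pivots
[1]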
 
 | 
	ddm_irref_den 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	46 
 | 
	sympy/core/sorting.py 
 | 
	def default_sort_key(item, order=None):
    """Return a key that can be used for sorting.
    The key has the structure:
    (class_key, (len(args), args), exponent.sort_key(), coefficient)
    This key is supplied by the sort_key routine of Basic objects when
    ``item`` is a Basic object or an object (other than a string) that
    sympifies to a Basic object. Otherwise, this function produces the
    key.
    The ``order`` argument is passed along to the sort_key routine and is
    used to determine how the terms *within* an expression are ordered.
    (See examples below) ``order`` options are: 'lex', 'grlex', 'grevlex',
    and reversed values of the same (e.g. 'rev-lex'). The default order
    value is None (which translates to 'lex').
    Examples
    ========
    >>> from sympy import S, I, default_sort_key, sin, cos, sqrt
    >>> from sympy.core.function import UndefinedFunction
    >>> from sympy.abc import x
    The following are equivalent ways of getting the key for an object:
    >>> x.sort_key() == default_sort_key(x)
    True
    Here are some examples of the key that is produced:
    >>> default_sort_key(UndefinedFunction('f'))
    ((0, 0, 'UndefinedFunction'), (1, ('f',)), ((1, 0, 'Number'),
        (0, ()), (), 1), 1)
    >>> default_sort_key('1')
    ((0, 0, 'str'), (1, ('1',)), ((1, 0, 'Number'), (0, ()), (), 1), 1)
    >>> default_sort_key(S.One)
    ((1, 0, 'Number'), (0, ()), (), 1)
    >>> default_sort_key(2)
    ((1, 0, 'Number'), (0, ()), (), 2)
    While sort_key is a method only defined for SymPy objects,
    default_sort_key will accept anything as an argument so it is
    more robust as a sorting key. For the following, using key=
    lambda i: i.sort_key() would fail because 2 does not have a sort_key
    method; that's why default_sort_key is used. Note that it also
    handles sympification of non-string items like ints:
    >>> a = [2, I, -I]
    >>> sorted(a, key=default_sort_key)
    [2, -I, I]
    The returned key can be used anywhere that a key can be specified for
    a function, e.g. sort, min, max, etc...:
    >>> a.sort(key=default_sort_key); a[0]
    2
    >>> min(a, key=default_sort_key)
    2
    Notes
    =====
    The key returned is useful for getting items into a canonical order
    that will be the same across platforms. It is not directly useful for
    sorting lists of expressions:
    >>> a, b = x, 1/x
    Since ``a`` has only 1 term, its value of sort_key is unaffected by
    ``order``:
    >>> a.sort_key() == a.sort_key('rev-lex')
    True
    If ``a`` and ``b`` are combined then the key will differ because there
    are terms that can be ordered:
    >>> eq = a + b
    >>> eq.sort_key() == eq.sort_key('rev-lex')
    False
    >>> eq.as_ordered_terms()
    [x, 1/x]
    >>> eq.as_ordered_terms('rev-lex')
    [1/x, x]
    But since the keys for each of these terms are independent of ``order``'s
    value, they do not sort differently when they appear separately in a list:
    >>> sorted(eq.args, key=default_sort_key)
    [1/x, x]
    >>> sorted(eq.args, key=lambda i: default_sort_key(i, order='rev-lex'))
    [1/x, x]
    The order of terms obtained when using these keys is the order that would
    be obtained if those terms were *factors* in a product.
    Although it is useful for quickly putting expressions in canonical order,
    it does not sort expressions based on their complexity as defined by the
    number of operations, powers of variables, and so on:
    >>> sorted([sin(x)*cos(x), sin(x)], key=default_sort_key)
    [sin(x)*cos(x), sin(x)]
    >>> sorted([x, x**2, sqrt(x), x**3], key=default_sort_key)
    [sqrt(x), x, x**2, x**3]
    See Also
    ========
    ordered, sympy.core.expr.Expr.as_ordered_factors, sympy.core.expr.Expr.as_ordered_terms
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_default_sort_key.txt 
 | 
	def default_sort_key(item, order=None):
    """Return a key that can be used for sorting.
    The key has the structure:
    (class_key, (len(args), args), exponent.sort_key(), coefficient)
    This key is supplied by the sort_key routine of Basic objects when
    ``item`` is a Basic object or an object (other than a string) that
    sympifies to a Basic object. Otherwise, this function produces the
    key.
    The ``order`` argument is passed along to the sort_key routine and is
    used to determine how the terms *within* an expression are ordered.
    (See examples below) ``order`` options are: 'lex', 'grlex', 'grevlex',
    and reversed values of the same (e.g. 'rev-lex'). The default order
    value is None (which translates to 'lex').
    Examples
    ========
    >>> from sympy import S, I, default_sort_key, sin, cos, sqrt
    >>> from sympy.core.function import UndefinedFunction
    >>> from sympy.abc import x
    The following are equivalent ways of getting the key for an object:
    >>> x.sort_key() == default_sort_key(x)
    True
    Here are some examples of the key that is produced:
    >>> default_sort_key(UndefinedFunction('f'))
    ((0, 0, 'UndefinedFunction'), (1, ('f',)), ((1, 0, 'Number'),
        (0, ()), (), 1), 1)
    >>> default_sort_key('1')
    ((0, 0, 'str'), (1, ('1',)), ((1, 0, 'Number'), (0, ()), (), 1), 1)
    >>> default_sort_key(S.One)
    ((1, 0, 'Number'), (0, ()), (), 1)
    >>> default_sort_key(2)
    ((1, 0, 'Number'), (0, ()), (), 2)
    While sort_key is a method only defined for SymPy objects,
    default_sort_key will accept anything as an argument so it is
    more robust as a sorting key. For the following, using key=
    lambda i: i.sort_key() would fail because 2 does not have a sort_key
    method; that's why default_sort_key is used. Note that it also
    handles sympification of non-string items like ints:
    >>> a = [2, I, -I]
    >>> sorted(a, key=default_sort_key)
    [2, -I, I]
    The returned key can be used anywhere that a key can be specified for
    a function, e.g. sort, min, max, etc...:
    >>> a.sort(key=default_sort_key); a[0]
    2
    >>> min(a, key=default_sort_key)
    2
    Notes
    =====
    The key returned is useful for getting items into a canonical order
    that will be the same across platforms. It is not directly useful for
    sorting lists of expressions:
    >>> a, b = x, 1/x
    Since ``a`` has only 1 term, its value of sort_key is unaffected by
    ``order``:
    >>> a.sort_key() == a.sort_key('rev-lex')
    True
    If ``a`` and ``b`` are combined then the key will differ because there
    are terms that can be ordered:
    >>> eq = a + b
    >>> eq.sort_key() == eq.sort_key('rev-lex')
    False
    >>> eq.as_ordered_terms()
    [x, 1/x]
    >>> eq.as_ordered_terms('rev-lex')
    [1/x, x]
    But since the keys for each of these terms are independent of ``order``'s
    value, they do not sort differently when they appear separately in a list:
    >>> sorted(eq.args, key=default_sort_key)
    [1/x, x]
    >>> sorted(eq.args, key=lambda i: default_sort_key(i, order='rev-lex'))
    [1/x, x]
    The order of terms obtained when using these keys is the order that would
    be obtained if those terms were *factors* in a product.
    Although it is useful for quickly putting expressions in canonical order,
    it does not sort expressions based on their complexity as defined by the
    number of operations, powers of variables, and so on:
    >>> sorted([sin(x)*cos(x), sin(x)], key=default_sort_key)
    [sin(x)*cos(x), sin(x)]
    >>> sorted([x, x**2, sqrt(x), x**3], key=default_sort_key)
    [sqrt(x), x, x**2, x**3]
    See Also
    ========
    ordered, sympy.core.expr.Expr.as_ordered_factors, sympy.core.expr.Expr.as_ordered_terms
    """
    from .basic import Basic
    from .singleton import S
    if isinstance(item, Basic):
        return item.sort_key(order=order)
    if iterable(item, exclude=str):
        if isinstance(item, dict):
            args = item.items()
            unordered = True
        elif isinstance(item, set):
            args = item
            unordered = True
        else:
            # e.g. tuple, list
            args = list(item)
            unordered = False
        args = [default_sort_key(arg, order=order) for arg in args]
        if unordered:
            # e.g. dict, set
            args = sorted(args)
        cls_index, args = 10, (len(args), tuple(args))
    else:
        if not isinstance(item, str):
            try:
                item = sympify(item, strict=True)
            except SympifyError:
                # e.g. lambda x: x
                pass
            else:
                if isinstance(item, Basic):
                    # e.g int -> Integer
                    return default_sort_key(item)
                # e.g. UndefinedFunction
        # e.g. str
        cls_index, args = 0, (1, (str(item),))
    return (cls_index, 0, item.__class__.__name__
            ), args, S.One.sort_key(), S.One
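
A small added example (not an upstream doctest) of the non-Basic branch above: strings
get class index 0, iterables get class index 10, and the items of an iterable are keyed
recursively, so the tuples sort after the string and among themselves by their elements.

>>> sorted(['a', (2, 1), (1, 2)], key=default_sort_key)
['a', (1, 2), (2, 1)]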
 
 | 
	default_sort_key 
 | 
	Repo-Level 
 | 
					
	sympy 
 | 
	47 
 | 
	sympy/solvers/diophantine/diophantine.py 
 | 
	def diophantine(eq, param=symbols("t", integer=True), syms=None,
                permute=False):
    """
    Simplify the solution procedure of the diophantine equation ``eq`` by
    converting it into a product of terms which should equal zero.
    Explanation
    ===========
    For example, when solving `x^2 - y^2 = 0`, this is treated as
    `(x + y)(x - y) = 0` and `x + y = 0` and `x - y = 0` are solved
    independently and combined. Each term is solved by calling
    ``diop_solve()``. (Although it is possible to call ``diop_solve()``
    directly, one must be careful to pass an equation in the correct
    form and to interpret the output correctly; ``diophantine()`` is
    the public-facing function to use in general.)
    Output of ``diophantine()`` is a set of tuples. The elements of the
    tuple are the solutions for each variable in the equation and
    are arranged according to the alphabetic ordering of the variables.
    e.g. For an equation with two variables, `a` and `b`, the first
    element of the tuple is the solution for `a` and the second for `b`.
    Usage
    =====
    ``diophantine(eq, t, syms)``: Solve the diophantine
    equation ``eq``.
    ``t`` is the optional parameter to be used by ``diop_solve()``.
    ``syms`` is an optional list of symbols which determines the
    order of the elements in the returned tuple.
    By default, only the base solution is returned. If ``permute`` is set to
    True then permutations of the base solution and/or permutations of the
    signs of the values will be returned when applicable.
    Details
    =======
    ``eq`` should be an expression which is assumed to be zero.
    ``t`` is the parameter to be used in the solution.
    Examples
    ========
    >>> from sympy import diophantine
    >>> from sympy.abc import a, b
    >>> eq = a**4 + b**4 - (2**4 + 3**4)
    >>> diophantine(eq)
    {(2, 3)}
    >>> diophantine(eq, permute=True)
    {(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)}
    >>> from sympy.abc import x, y, z
    >>> diophantine(x**2 - y**2)
    {(t_0, -t_0), (t_0, t_0)}
    >>> diophantine(x*(2*x + 3*y - z))
    {(0, n1, n2), (t_0, t_1, 2*t_0 + 3*t_1)}
    >>> diophantine(x**2 + 3*x*y + 4*x)
    {(0, n1), (-3*t_0 - 4, t_0)}
    See Also
    ========
    diop_solve
    sympy.utilities.iterables.permute_signs
    sympy.utilities.iterables.signed_permutations
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_diophantine.txt 
 | 
	def diophantine(eq, param=symbols("t", integer=True), syms=None,
                permute=False):
    """
    Simplify the solution procedure of diophantine equation ``eq`` by
    converting it into a product of terms which should equal zero.
    Explanation
    ===========
    For example, when solving `x^2 - y^2 = 0`, this is treated as
    `(x + y)(x - y) = 0` and `x + y = 0` and `x - y = 0` are solved
    independently and combined. Each term is solved by calling
    ``diop_solve()``. (Although it is possible to call ``diop_solve()``
    directly, one must be careful to pass an equation in the correct
    form and to interpret the output correctly; ``diophantine()`` is
    the public-facing function to use in general.)
    Output of ``diophantine()`` is a set of tuples. The elements of the
    tuple are the solutions for each variable in the equation and
    are arranged according to the alphabetic ordering of the variables.
    e.g. For an equation with two variables, `a` and `b`, the first
    element of the tuple is the solution for `a` and the second for `b`.
    Usage
    =====
    ``diophantine(eq, t, syms)``: Solve the diophantine
    equation ``eq``.
    ``t`` is the optional parameter to be used by ``diop_solve()``.
    ``syms`` is an optional list of symbols which determines the
    order of the elements in the returned tuple.
    By default, only the base solution is returned. If ``permute`` is set to
    True then permutations of the base solution and/or permutations of the
    signs of the values will be returned when applicable.
    Details
    =======
    ``eq`` should be an expression which is assumed to be zero.
    ``t`` is the parameter to be used in the solution.
    Examples
    ========
    >>> from sympy import diophantine
    >>> from sympy.abc import a, b
    >>> eq = a**4 + b**4 - (2**4 + 3**4)
    >>> diophantine(eq)
    {(2, 3)}
    >>> diophantine(eq, permute=True)
    {(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)}
    >>> from sympy.abc import x, y, z
    >>> diophantine(x**2 - y**2)
    {(t_0, -t_0), (t_0, t_0)}
    >>> diophantine(x*(2*x + 3*y - z))
    {(0, n1, n2), (t_0, t_1, 2*t_0 + 3*t_1)}
    >>> diophantine(x**2 + 3*x*y + 4*x)
    {(0, n1), (-3*t_0 - 4, t_0)}
    See Also
    ========
    diop_solve
    sympy.utilities.iterables.permute_signs
    sympy.utilities.iterables.signed_permutations
    """
    eq = _sympify(eq)
    if isinstance(eq, Eq):
        eq = eq.lhs - eq.rhs
    try:
        var = list(eq.expand(force=True).free_symbols)
        var.sort(key=default_sort_key)
        if syms:
            if not is_sequence(syms):
                raise TypeError(
                    'syms should be given as a sequence, e.g. a list')
            syms = [i for i in syms if i in var]
            if syms != var:
                dict_sym_index = dict(zip(syms, range(len(syms))))
                return {tuple([t[dict_sym_index[i]] for i in var])
                            for t in diophantine(eq, param, permute=permute)}
        n, d = eq.as_numer_denom()
        if n.is_number:
            return set()
        if not d.is_number:
            dsol = diophantine(d)
            good = diophantine(n) - dsol
            return {s for s in good if _mexpand(d.subs(zip(var, s)))}
        eq = factor_terms(n)
        assert not eq.is_number
        eq = eq.as_independent(*var, as_Add=False)[1]
        p = Poly(eq)
        assert not any(g.is_number for g in p.gens)
        eq = p.as_expr()
        assert eq.is_polynomial()
    except (GeneratorsNeeded, AssertionError):
        raise TypeError(filldedent('''
    Equation should be a polynomial with Rational coefficients.'''))
    # permute only sign
    do_permute_signs = False
    # permute sign and values
    do_permute_signs_var = False
    # permute few signs
    permute_few_signs = False
    try:
        # if we know that factoring should not be attempted, skip
        # the factoring step
        v, c, t = classify_diop(eq)
        # check for permute sign
        if permute:
            len_var = len(v)
            permute_signs_for = [
                GeneralSumOfSquares.name,
                GeneralSumOfEvenPowers.name]
            permute_signs_check = [
                HomogeneousTernaryQuadratic.name,
                HomogeneousTernaryQuadraticNormal.name,
                BinaryQuadratic.name]
            if t in permute_signs_for:
                do_permute_signs_var = True
            elif t in permute_signs_check:
                # if all the variables in eq have even powers
                # then do_permute_sign = True
                if len_var == 3:
                    var_mul = list(subsets(v, 2))
                    # here var_mul is like [(x, y), (x, z), (y, z)]
                    xy_coeff = True
                    x_coeff = True
                    var1_mul_var2 = (a[0]*a[1] for a in var_mul)
                    # if coeff(y*z), coeff(y*x), coeff(x*z) is not 0 then
                    # `xy_coeff` => True and do_permute_sign => False.
                    # Means no permuted solution.
                    for v1_mul_v2 in var1_mul_var2:
                        try:
                            coeff = c[v1_mul_v2]
                        except KeyError:
                            coeff = 0
                        xy_coeff = bool(xy_coeff) and bool(coeff)
                    var_mul = list(subsets(v, 1))
                    # here var_mul is like [(x,), (y, )]
                    for v1 in var_mul:
                        try:
                            coeff = c[v1[0]]
                        except KeyError:
                            coeff = 0
                        x_coeff = bool(x_coeff) and bool(coeff)
                    if not any((xy_coeff, x_coeff)):
                        # means only x**2, y**2, z**2, const is present
                        do_permute_signs = True
                    elif not x_coeff:
                        permute_few_signs = True
                elif len_var == 2:
                    var_mul = list(subsets(v, 2))
                    # here var_mul is like [(x, y)]
                    xy_coeff = True
                    x_coeff = True
                    var1_mul_var2 = (x[0]*x[1] for x in var_mul)
                    for v1_mul_v2 in var1_mul_var2:
                        try:
                            coeff = c[v1_mul_v2]
                        except KeyError:
                            coeff = 0
                        xy_coeff = bool(xy_coeff) and bool(coeff)
                    var_mul = list(subsets(v, 1))
                    # here var_mul is like [(x,), (y, )]
                    for v1 in var_mul:
                        try:
                            coeff = c[v1[0]]
                        except KeyError:
                            coeff = 0
                        x_coeff = bool(x_coeff) and bool(coeff)
                    if not any((xy_coeff, x_coeff)):
                        # means only x**2, y**2 and const is present
                        # so we can get more soln by permuting this soln.
                        do_permute_signs = True
                    elif not x_coeff:
                        # when coeff(x), coeff(y) is not present then signs of
                        #  x, y can be permuted such that their sign are same
                        # as sign of x*y.
                        # e.g 1. (x_val,y_val)=> (x_val,y_val), (-x_val,-y_val)
                        # 2. (-x_vall, y_val)=> (-x_val,y_val), (x_val,-y_val)
                        permute_few_signs = True
        if t == 'general_sum_of_squares':
            # trying to factor such expressions will sometimes hang
            terms = [(eq, 1)]
        else:
            raise TypeError
    except (TypeError, NotImplementedError):
        fl = factor_list(eq)
        if fl[0].is_Rational and fl[0] != 1:
            return diophantine(eq/fl[0], param=param, syms=syms, permute=permute)
        terms = fl[1]
    sols = set()
    for term in terms:
        base, _ = term
        var_t, _, eq_type = classify_diop(base, _dict=False)
        _, base = signsimp(base, evaluate=False).as_coeff_Mul()
        solution = diop_solve(base, param)
        if eq_type in [
                Linear.name,
                HomogeneousTernaryQuadratic.name,
                HomogeneousTernaryQuadraticNormal.name,
                GeneralPythagorean.name]:
            sols.add(merge_solution(var, var_t, solution))
        elif eq_type in [
                BinaryQuadratic.name,
                GeneralSumOfSquares.name,
                GeneralSumOfEvenPowers.name,
                Univariate.name]:
            sols.update(merge_solution(var, var_t, sol) for sol in solution)
        else:
            raise NotImplementedError('unhandled type: %s' % eq_type)
    # remove null merge results
    if () in sols:
        sols.remove(())
    null = tuple([0]*len(var))
    # if there is no solution, return trivial solution
    if not sols and eq.subs(zip(var, null)).is_zero:
        if all(check_assumptions(val, **s.assumptions0) is not False for val, s in zip(null, var)):
            sols.add(null)
    final_soln = set()
    for sol in sols:
        if all(int_valued(s) for s in sol):
            if do_permute_signs:
                permuted_sign = set(permute_signs(sol))
                final_soln.update(permuted_sign)
            elif permute_few_signs:
                lst = list(permute_signs(sol))
                lst = list(filter(lambda x: x[0]*x[1] == sol[1]*sol[0], lst))
                permuted_sign = set(lst)
                final_soln.update(permuted_sign)
            elif do_permute_signs_var:
                permuted_sign_var = set(signed_permutations(sol))
                final_soln.update(permuted_sign_var)
            else:
                final_soln.add(sol)
        else:
                final_soln.add(sol)
    return final_soln
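A small illustrative sketch, not part of the row above: it assumes a standard
SymPy installation and shows the ``syms`` keyword mentioned in the docstring,
which reorders the solution tuples instead of using the default alphabetic
ordering of the variables.
from sympy import symbols, diophantine

x, y = symbols('x y', integer=True)
# Default ordering is alphabetic (x, y); passing syms reorders the tuples.
print(diophantine(x**2 - y**2))
print(diophantine(x**2 - y**2, syms=(y, x)))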
 
 | 
	diophantine 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	49 
 | 
	sympy/ntheory/egyptian_fraction.py 
 | 
	def egyptian_fraction(r, algorithm="Greedy"):
    """
    Return the list of denominators of an Egyptian fraction
    expansion [1]_ of the given rational `r`.
    Parameters
    ==========
    r : Rational or (p, q)
        a positive rational number, ``p/q``.
    algorithm : { "Greedy", "Graham Jewett", "Takenouchi", "Golomb" }, optional
        Denotes the algorithm to be used (the default is "Greedy").
    Examples
    ========
    >>> from sympy import Rational
    >>> from sympy.ntheory.egyptian_fraction import egyptian_fraction
    >>> egyptian_fraction(Rational(3, 7))
    [3, 11, 231]
    >>> egyptian_fraction((3, 7), "Graham Jewett")
    [7, 8, 9, 56, 57, 72, 3192]
    >>> egyptian_fraction((3, 7), "Takenouchi")
    [4, 7, 28]
    >>> egyptian_fraction((3, 7), "Golomb")
    [3, 15, 35]
    >>> egyptian_fraction((11, 5), "Golomb")
    [1, 2, 3, 4, 9, 234, 1118, 2580]
    See Also
    ========
    sympy.core.numbers.Rational
    Notes
    =====
    Currently the following algorithms are supported:
    1) Greedy Algorithm
       Also called the Fibonacci-Sylvester algorithm [2]_.
       At each step, extract the largest unit fraction less
       than the target and replace the target with the remainder.
       It has some distinct properties:
       a) Given `p/q` in lowest terms, generates an expansion of maximum
          length `p`. Even as the numerators get large, the number of
          terms is seldom more than a handful.
       b) Uses minimal memory.
       c) The terms can blow up (standard examples of this are 5/121 and
          31/311).  The denominator is at most squared at each step
          (doubly-exponential growth) and typically exhibits
          singly-exponential growth.
    2) Graham Jewett Algorithm
       The algorithm suggested by the result of Graham and Jewett.
       Note that this has a tendency to blow up: the length of the
       resulting expansion is always ``2**(x/gcd(x, y)) - 1``.  See [3]_.
    3) Takenouchi Algorithm
       The algorithm suggested by Takenouchi (1921).
       Differs from the Graham-Jewett algorithm only in the handling
       of duplicates.  See [3]_.
    4) Golomb's Algorithm
       A method given by Golomb (1962), using modular arithmetic and
       inverses.  It yields the same results as a method using continued
       fractions proposed by Bleicher (1972).  See [4]_.
    If the given rational is greater than or equal to 1, a greedy algorithm
    of summing the harmonic sequence 1/1 + 1/2 + 1/3 + ... is used, taking
    all the unit fractions of this sequence until adding one more would be
    greater than the given number.  This list of denominators is prefixed
    to the result from the requested algorithm used on the remainder.  For
    example, if r is 8/3, using the Greedy algorithm, we get [1, 2, 3, 4,
    5, 6, 7, 14, 420], where the beginning of the sequence, [1, 2, 3, 4, 5,
    6, 7] is part of the harmonic sequence summing to 363/140, leaving a
    remainder of 31/420, which yields [14, 420] by the Greedy algorithm.
    The result of egyptian_fraction(Rational(8, 3), "Golomb") is [1, 2, 3,
    4, 5, 6, 7, 14, 574, 2788, 6460, 11590, 33062, 113820], and so on.
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Egyptian_fraction
    .. [2] https://en.wikipedia.org/wiki/Greedy_algorithm_for_Egyptian_fractions
    .. [3] https://www.ics.uci.edu/~eppstein/numth/egypt/conflict.html
    .. [4] https://web.archive.org/web/20180413004012/https://ami.ektf.hu/uploads/papers/finalpdf/AMI_42_from129to134.pdf
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_egyptian_fraction.txt 
 | 
	def egyptian_fraction(r, algorithm="Greedy"):
    """
    Return the list of denominators of an Egyptian fraction
    expansion [1]_ of the given rational `r`.
    Parameters
    ==========
    r : Rational or (p, q)
        a positive rational number, ``p/q``.
    algorithm : { "Greedy", "Graham Jewett", "Takenouchi", "Golomb" }, optional
        Denotes the algorithm to be used (the default is "Greedy").
    Examples
    ========
    >>> from sympy import Rational
    >>> from sympy.ntheory.egyptian_fraction import egyptian_fraction
    >>> egyptian_fraction(Rational(3, 7))
    [3, 11, 231]
    >>> egyptian_fraction((3, 7), "Graham Jewett")
    [7, 8, 9, 56, 57, 72, 3192]
    >>> egyptian_fraction((3, 7), "Takenouchi")
    [4, 7, 28]
    >>> egyptian_fraction((3, 7), "Golomb")
    [3, 15, 35]
    >>> egyptian_fraction((11, 5), "Golomb")
    [1, 2, 3, 4, 9, 234, 1118, 2580]
    See Also
    ========
    sympy.core.numbers.Rational
    Notes
    =====
    Currently the following algorithms are supported:
    1) Greedy Algorithm
       Also called the Fibonacci-Sylvester algorithm [2]_.
       At each step, extract the largest unit fraction less
       than the target and replace the target with the remainder.
       It has some distinct properties:
       a) Given `p/q` in lowest terms, generates an expansion of maximum
          length `p`. Even as the numerators get large, the number of
          terms is seldom more than a handful.
       b) Uses minimal memory.
       c) The terms can blow up (standard examples of this are 5/121 and
          31/311).  The denominator is at most squared at each step
          (doubly-exponential growth) and typically exhibits
          singly-exponential growth.
    2) Graham Jewett Algorithm
       The algorithm suggested by the result of Graham and Jewett.
       Note that this has a tendency to blow up: the length of the
       resulting expansion is always ``2**(x/gcd(x, y)) - 1``.  See [3]_.
    3) Takenouchi Algorithm
       The algorithm suggested by Takenouchi (1921).
       Differs from the Graham-Jewett algorithm only in the handling
       of duplicates.  See [3]_.
    4) Golomb's Algorithm
       A method given by Golomb (1962), using modular arithmetic and
       inverses.  It yields the same results as a method using continued
       fractions proposed by Bleicher (1972).  See [4]_.
    If the given rational is greater than or equal to 1, a greedy algorithm
    of summing the harmonic sequence 1/1 + 1/2 + 1/3 + ... is used, taking
    all the unit fractions of this sequence until adding one more would be
    greater than the given number.  This list of denominators is prefixed
    to the result from the requested algorithm used on the remainder.  For
    example, if r is 8/3, using the Greedy algorithm, we get [1, 2, 3, 4,
    5, 6, 7, 14, 420], where the beginning of the sequence, [1, 2, 3, 4, 5,
    6, 7] is part of the harmonic sequence summing to 363/140, leaving a
    remainder of 31/420, which yields [14, 420] by the Greedy algorithm.
    The result of egyptian_fraction(Rational(8, 3), "Golomb") is [1, 2, 3,
    4, 5, 6, 7, 14, 574, 2788, 6460, 11590, 33062, 113820], and so on.
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Egyptian_fraction
    .. [2] https://en.wikipedia.org/wiki/Greedy_algorithm_for_Egyptian_fractions
    .. [3] https://www.ics.uci.edu/~eppstein/numth/egypt/conflict.html
    .. [4] https://web.archive.org/web/20180413004012/https://ami.ektf.hu/uploads/papers/finalpdf/AMI_42_from129to134.pdf
    """
    if not isinstance(r, Rational):
        if isinstance(r, (Tuple, tuple)) and len(r) == 2:
            r = Rational(*r)
        else:
            raise ValueError("Value must be a Rational or tuple of ints")
    if r <= 0:
        raise ValueError("Value must be positive")
    # common cases that all methods agree on
    x, y = r.as_numer_denom()
    if y == 1 and x == 2:
        return [Integer(i) for i in [1, 2, 3, 6]]
    if x == y + 1:
        return [S.One, y]
    prefix, rem = egypt_harmonic(r)
    if rem == 0:
        return prefix
    # work in Python ints
    x, y = rem.p, rem.q
    # assert x < y and gcd(x, y) = 1
    if algorithm == "Greedy":
        postfix = egypt_greedy(x, y)
    elif algorithm == "Graham Jewett":
        postfix = egypt_graham_jewett(x, y)
    elif algorithm == "Takenouchi":
        postfix = egypt_takenouchi(x, y)
    elif algorithm == "Golomb":
        postfix = egypt_golomb(x, y)
    else:
        raise ValueError("Entered invalid algorithm")
    return prefix + [Integer(i) for i in postfix]
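A hedged usage sketch (assumes SymPy is importable), separate from the row
above: it runs the same rational through each supported algorithm, which is
essentially the comparison the docstring's examples make one call at a time.
from sympy import Rational
from sympy.ntheory.egyptian_fraction import egyptian_fraction

r = Rational(3, 7)
# The four algorithms generally return different (but equally valid) expansions.
for algo in ("Greedy", "Graham Jewett", "Takenouchi", "Golomb"):
    print(algo, egyptian_fraction(r, algo))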
 
 | 
	egyptian_fraction 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	52 
 | 
	sympy/physics/secondquant.py 
 | 
	def evaluate_deltas(e):
    """
    We evaluate KroneckerDelta symbols in the expression assuming Einstein summation.
    Explanation
    ===========
    If one index is repeated it is summed over and in effect substituted with
    the other one. If both indices are repeated we substitute according to what
    is the preferred index.  This is determined by
    KroneckerDelta.preferred_index and KroneckerDelta.killable_index.
    In case there are no possible substitutions or if a substitution would
    imply a loss of information, nothing is done.
    In case an index appears in more than one KroneckerDelta, the resulting
    substitution depends on the order of the factors.  Since the ordering is platform
    dependent, the literal expression resulting from this function may be hard to
    predict.
    Examples
    ========
    We assume the following:
    >>> from sympy import symbols, Function, Dummy, KroneckerDelta
    >>> from sympy.physics.secondquant import evaluate_deltas
    >>> i,j = symbols('i j', below_fermi=True, cls=Dummy)
    >>> a,b = symbols('a b', above_fermi=True, cls=Dummy)
    >>> p,q = symbols('p q', cls=Dummy)
    >>> f = Function('f')
    >>> t = Function('t')
    The order of preference for these indices according to KroneckerDelta is
    (a, b, i, j, p, q).
    Trivial cases:
    >>> evaluate_deltas(KroneckerDelta(i,j)*f(i))       # d_ij f(i) -> f(j)
    f(_j)
    >>> evaluate_deltas(KroneckerDelta(i,j)*f(j))       # d_ij f(j) -> f(i)
    f(_i)
    >>> evaluate_deltas(KroneckerDelta(i,p)*f(p))       # d_ip f(p) -> f(i)
    f(_i)
    >>> evaluate_deltas(KroneckerDelta(q,p)*f(p))       # d_qp f(p) -> f(q)
    f(_q)
    >>> evaluate_deltas(KroneckerDelta(q,p)*f(q))       # d_qp f(q) -> f(p)
    f(_p)
    More interesting cases:
    >>> evaluate_deltas(KroneckerDelta(i,p)*t(a,i)*f(p,q))
    f(_i, _q)*t(_a, _i)
    >>> evaluate_deltas(KroneckerDelta(a,p)*t(a,i)*f(p,q))
    f(_a, _q)*t(_a, _i)
    >>> evaluate_deltas(KroneckerDelta(p,q)*f(p,q))
    f(_p, _p)
    Finally, here are some cases where nothing is done, because that would
    imply a loss of information:
    >>> evaluate_deltas(KroneckerDelta(i,p)*f(q))
    f(_q)*KroneckerDelta(_i, _p)
    >>> evaluate_deltas(KroneckerDelta(i,p)*f(i))
    f(_i)*KroneckerDelta(_i, _p)
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_evaluate_deltas.txt 
 | 
	def evaluate_deltas(e):
    """
    We evaluate KroneckerDelta symbols in the expression assuming Einstein summation.
    Explanation
    ===========
    If one index is repeated it is summed over and in effect substituted with
    the other one. If both indices are repeated we substitute according to what
    is the preferred index.  This is determined by
    KroneckerDelta.preferred_index and KroneckerDelta.killable_index.
    In case there are no possible substitutions or if a substitution would
    imply a loss of information, nothing is done.
    In case an index appears in more than one KroneckerDelta, the resulting
    substitution depends on the order of the factors.  Since the ordering is platform
    dependent, the literal expression resulting from this function may be hard to
    predict.
    Examples
    ========
    We assume the following:
    >>> from sympy import symbols, Function, Dummy, KroneckerDelta
    >>> from sympy.physics.secondquant import evaluate_deltas
    >>> i,j = symbols('i j', below_fermi=True, cls=Dummy)
    >>> a,b = symbols('a b', above_fermi=True, cls=Dummy)
    >>> p,q = symbols('p q', cls=Dummy)
    >>> f = Function('f')
    >>> t = Function('t')
    The order of preference for these indices according to KroneckerDelta is
    (a, b, i, j, p, q).
    Trivial cases:
    >>> evaluate_deltas(KroneckerDelta(i,j)*f(i))       # d_ij f(i) -> f(j)
    f(_j)
    >>> evaluate_deltas(KroneckerDelta(i,j)*f(j))       # d_ij f(j) -> f(i)
    f(_i)
    >>> evaluate_deltas(KroneckerDelta(i,p)*f(p))       # d_ip f(p) -> f(i)
    f(_i)
    >>> evaluate_deltas(KroneckerDelta(q,p)*f(p))       # d_qp f(p) -> f(q)
    f(_q)
    >>> evaluate_deltas(KroneckerDelta(q,p)*f(q))       # d_qp f(q) -> f(p)
    f(_p)
    More interesting cases:
    >>> evaluate_deltas(KroneckerDelta(i,p)*t(a,i)*f(p,q))
    f(_i, _q)*t(_a, _i)
    >>> evaluate_deltas(KroneckerDelta(a,p)*t(a,i)*f(p,q))
    f(_a, _q)*t(_a, _i)
    >>> evaluate_deltas(KroneckerDelta(p,q)*f(p,q))
    f(_p, _p)
    Finally, here are some cases where nothing is done, because that would
    imply a loss of information:
    >>> evaluate_deltas(KroneckerDelta(i,p)*f(q))
    f(_q)*KroneckerDelta(_i, _p)
    >>> evaluate_deltas(KroneckerDelta(i,p)*f(i))
    f(_i)*KroneckerDelta(_i, _p)
    """
    # We treat Deltas only in mul objects
    # for general function objects we don't evaluate KroneckerDeltas in arguments,
    # but here we hard code exceptions to this rule
    accepted_functions = (
        Add,
    )
    if isinstance(e, accepted_functions):
        return e.func(*[evaluate_deltas(arg) for arg in e.args])
    elif isinstance(e, Mul):
        # find all occurrences of delta function and count each index present in
        # expression.
        deltas = []
        indices = {}
        for i in e.args:
            for s in i.free_symbols:
                if s in indices:
                    indices[s] += 1
                else:
                    indices[s] = 0  # geek counting simplifies logic below
            if isinstance(i, KroneckerDelta):
                deltas.append(i)
        for d in deltas:
            # If we do something, and there are more deltas, we should recurse
            # to treat the resulting expression properly
            if d.killable_index.is_Symbol and indices[d.killable_index]:
                e = e.subs(d.killable_index, d.preferred_index)
                if len(deltas) > 1:
                    return evaluate_deltas(e)
            elif (d.preferred_index.is_Symbol and indices[d.preferred_index]
                  and d.indices_contain_equal_information):
                e = e.subs(d.preferred_index, d.killable_index)
                if len(deltas) > 1:
                    return evaluate_deltas(e)
            else:
                pass
        return e
    # nothing to do, maybe we hit a Symbol or a number
    else:
        return e
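A minimal sketch, not from the source file above: it assumes the imports shown
in the docstring and illustrates that evaluate_deltas recurses into the
arguments of an Add, so each summand is simplified independently.
from sympy import symbols, Function, Dummy, KroneckerDelta
from sympy.physics.secondquant import evaluate_deltas

i, j = symbols('i j', below_fermi=True, cls=Dummy)
f = Function('f')
# The Add node dispatches to its arguments; each Mul is handled on its own.
expr = KroneckerDelta(i, j)*f(i) + KroneckerDelta(i, j)*f(j)
print(evaluate_deltas(expr))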
 
 | 
	evaluate_deltas 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	55 
 | 
	sympy/calculus/finite_diff.py 
 | 
	def finite_diff_weights(order, x_list, x0=S.One):
    """
    Calculates the finite difference weights for an arbitrarily spaced
    one-dimensional grid (``x_list``) for derivatives at ``x0`` of order
    0, 1, ..., up to ``order`` using a recursive formula. Order of accuracy
    is at least ``len(x_list) - order``, if ``x_list`` is defined correctly.
    Parameters
    ==========
    order: int
        Up to what derivative order weights should be calculated.
        0 corresponds to interpolation.
    x_list: sequence
        Sequence of (unique) values for the independent variable.
        It is useful (but not necessary) to order ``x_list`` from
        nearest to furthest from ``x0``; see examples below.
    x0: Number or Symbol
        Root or value of the independent variable for which the finite
        difference weights should be generated. Default is ``S.One``.
    Returns
    =======
    list
        A list of sublists, each corresponding to coefficients for
        increasing derivative order, and each containing lists of
        coefficients for increasing subsets of x_list.
    Examples
    ========
    >>> from sympy import finite_diff_weights, S
    >>> res = finite_diff_weights(1, [-S(1)/2, S(1)/2, S(3)/2, S(5)/2], 0)
    >>> res
    [[[1, 0, 0, 0],
      [1/2, 1/2, 0, 0],
      [3/8, 3/4, -1/8, 0],
      [5/16, 15/16, -5/16, 1/16]],
     [[0, 0, 0, 0],
      [-1, 1, 0, 0],
      [-1, 1, 0, 0],
      [-23/24, 7/8, 1/8, -1/24]]]
    >>> res[0][-1]  # FD weights for 0th derivative, using full x_list
    [5/16, 15/16, -5/16, 1/16]
    >>> res[1][-1]  # FD weights for 1st derivative
    [-23/24, 7/8, 1/8, -1/24]
    >>> res[1][-2]  # FD weights for 1st derivative, using x_list[:-1]
    [-1, 1, 0, 0]
    >>> res[1][-1][0]  # FD weight for 1st deriv. for x_list[0]
    -23/24
    >>> res[1][-1][1]  # FD weight for 1st deriv. for x_list[1], etc.
    7/8
    Each sublist contains the most accurate formula at the end.
    Note, that in the above example ``res[1][1]`` is the same as ``res[1][2]``.
    Since res[1][2] has an order of accuracy of
    ``len(x_list[:3]) - order = 3 - 1 = 2``, the same is true for ``res[1][1]``!
    >>> res = finite_diff_weights(1, [S(0), S(1), -S(1), S(2), -S(2)], 0)[1]
    >>> res
    [[0, 0, 0, 0, 0],
     [-1, 1, 0, 0, 0],
     [0, 1/2, -1/2, 0, 0],
     [-1/2, 1, -1/3, -1/6, 0],
     [0, 2/3, -2/3, -1/12, 1/12]]
    >>> res[0]  # no approximation possible, using x_list[0] only
    [0, 0, 0, 0, 0]
    >>> res[1]  # classic forward step approximation
    [-1, 1, 0, 0, 0]
    >>> res[2]  # classic centered approximation
    [0, 1/2, -1/2, 0, 0]
    >>> res[3:]  # higher order approximations
    [[-1/2, 1, -1/3, -1/6, 0], [0, 2/3, -2/3, -1/12, 1/12]]
    Let us compare this to a differently defined ``x_list``. Pay attention to
    ``foo[i][k]`` corresponding to the gridpoint defined by ``x_list[k]``.
    >>> foo = finite_diff_weights(1, [-S(2), -S(1), S(0), S(1), S(2)], 0)[1]
    >>> foo
    [[0, 0, 0, 0, 0],
     [-1, 1, 0, 0, 0],
     [1/2, -2, 3/2, 0, 0],
     [1/6, -1, 1/2, 1/3, 0],
     [1/12, -2/3, 0, 2/3, -1/12]]
    >>> foo[1]  # not the same and of lower accuracy as res[1]!
    [-1, 1, 0, 0, 0]
    >>> foo[2]  # classic double backward step approximation
    [1/2, -2, 3/2, 0, 0]
    >>> foo[4]  # the same as res[4]
    [1/12, -2/3, 0, 2/3, -1/12]
    Note that, unless you plan on using approximations based on subsets of
    ``x_list``, the order of gridpoints does not matter.
    The capability to generate weights at arbitrary points can be
    used e.g. to minimize Runge's phenomenon by using Chebyshev nodes:
    >>> from sympy import cos, symbols, pi, simplify
    >>> N, (h, x) = 4, symbols('h x')
    >>> x_list = [x+h*cos(i*pi/(N)) for i in range(N,-1,-1)] # chebyshev nodes
    >>> print(x_list)
    [-h + x, -sqrt(2)*h/2 + x, x, sqrt(2)*h/2 + x, h + x]
    >>> mycoeffs = finite_diff_weights(1, x_list, 0)[1][4]
    >>> [simplify(c) for c in  mycoeffs] #doctest: +NORMALIZE_WHITESPACE
    [(h**3/2 + h**2*x - 3*h*x**2 - 4*x**3)/h**4,
    (-sqrt(2)*h**3 - 4*h**2*x + 3*sqrt(2)*h*x**2 + 8*x**3)/h**4,
    (6*h**2*x - 8*x**3)/h**4,
    (sqrt(2)*h**3 - 4*h**2*x - 3*sqrt(2)*h*x**2 + 8*x**3)/h**4,
    (-h**3/2 + h**2*x + 3*h*x**2 - 4*x**3)/h**4]
    Notes
    =====
    If weights for a finite difference approximation of the 3rd order
    derivative are wanted, weights for the 0th, 1st and 2nd order are
    calculated "for free", as are formulae using subsets of ``x_list``.
    This is something one can take advantage of to save computational cost.
    Be aware that one should define ``x_list`` from nearest to furthest from
    ``x0``. If not, subsets of ``x_list`` will yield poorer approximations,
    which might not grant an order of accuracy of ``len(x_list) - order``.
    See also
    ========
    sympy.calculus.finite_diff.apply_finite_diff
    References
    ==========
    .. [1] Generation of Finite Difference Formulas on Arbitrarily Spaced
            Grids, Bengt Fornberg; Mathematics of computation; 51; 184;
            (1988); 699-706; doi:10.1090/S0025-5718-1988-0935077-0
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_finite_diff_weights.txt 
 | 
	def finite_diff_weights(order, x_list, x0=S.One):
    """
    Calculates the finite difference weights for an arbitrarily spaced
    one-dimensional grid (``x_list``) for derivatives at ``x0`` of order
    0, 1, ..., up to ``order`` using a recursive formula. Order of accuracy
    is at least ``len(x_list) - order``, if ``x_list`` is defined correctly.
    Parameters
    ==========
    order: int
        Up to what derivative order weights should be calculated.
        0 corresponds to interpolation.
    x_list: sequence
        Sequence of (unique) values for the independent variable.
        It is useful (but not necessary) to order ``x_list`` from
        nearest to furthest from ``x0``; see examples below.
    x0: Number or Symbol
        Root or value of the independent variable for which the finite
        difference weights should be generated. Default is ``S.One``.
    Returns
    =======
    list
        A list of sublists, each corresponding to coefficients for
        increasing derivative order, and each containing lists of
        coefficients for increasing subsets of x_list.
    Examples
    ========
    >>> from sympy import finite_diff_weights, S
    >>> res = finite_diff_weights(1, [-S(1)/2, S(1)/2, S(3)/2, S(5)/2], 0)
    >>> res
    [[[1, 0, 0, 0],
      [1/2, 1/2, 0, 0],
      [3/8, 3/4, -1/8, 0],
      [5/16, 15/16, -5/16, 1/16]],
     [[0, 0, 0, 0],
      [-1, 1, 0, 0],
      [-1, 1, 0, 0],
      [-23/24, 7/8, 1/8, -1/24]]]
    >>> res[0][-1]  # FD weights for 0th derivative, using full x_list
    [5/16, 15/16, -5/16, 1/16]
    >>> res[1][-1]  # FD weights for 1st derivative
    [-23/24, 7/8, 1/8, -1/24]
    >>> res[1][-2]  # FD weights for 1st derivative, using x_list[:-1]
    [-1, 1, 0, 0]
    >>> res[1][-1][0]  # FD weight for 1st deriv. for x_list[0]
    -23/24
    >>> res[1][-1][1]  # FD weight for 1st deriv. for x_list[1], etc.
    7/8
    Each sublist contains the most accurate formula at the end.
    Note, that in the above example ``res[1][1]`` is the same as ``res[1][2]``.
    Since res[1][2] has an order of accuracy of
    ``len(x_list[:3]) - order = 3 - 1 = 2``, the same is true for ``res[1][1]``!
    >>> res = finite_diff_weights(1, [S(0), S(1), -S(1), S(2), -S(2)], 0)[1]
    >>> res
    [[0, 0, 0, 0, 0],
     [-1, 1, 0, 0, 0],
     [0, 1/2, -1/2, 0, 0],
     [-1/2, 1, -1/3, -1/6, 0],
     [0, 2/3, -2/3, -1/12, 1/12]]
    >>> res[0]  # no approximation possible, using x_list[0] only
    [0, 0, 0, 0, 0]
    >>> res[1]  # classic forward step approximation
    [-1, 1, 0, 0, 0]
    >>> res[2]  # classic centered approximation
    [0, 1/2, -1/2, 0, 0]
    >>> res[3:]  # higher order approximations
    [[-1/2, 1, -1/3, -1/6, 0], [0, 2/3, -2/3, -1/12, 1/12]]
    Let us compare this to a differently defined ``x_list``. Pay attention to
    ``foo[i][k]`` corresponding to the gridpoint defined by ``x_list[k]``.
    >>> foo = finite_diff_weights(1, [-S(2), -S(1), S(0), S(1), S(2)], 0)[1]
    >>> foo
    [[0, 0, 0, 0, 0],
     [-1, 1, 0, 0, 0],
     [1/2, -2, 3/2, 0, 0],
     [1/6, -1, 1/2, 1/3, 0],
     [1/12, -2/3, 0, 2/3, -1/12]]
    >>> foo[1]  # not the same and of lower accuracy as res[1]!
    [-1, 1, 0, 0, 0]
    >>> foo[2]  # classic double backward step approximation
    [1/2, -2, 3/2, 0, 0]
    >>> foo[4]  # the same as res[4]
    [1/12, -2/3, 0, 2/3, -1/12]
    Note that, unless you plan on using approximations based on subsets of
    ``x_list``, the order of gridpoints does not matter.
    The capability to generate weights at arbitrary points can be
    used e.g. to minimize Runge's phenomenon by using Chebyshev nodes:
    >>> from sympy import cos, symbols, pi, simplify
    >>> N, (h, x) = 4, symbols('h x')
    >>> x_list = [x+h*cos(i*pi/(N)) for i in range(N,-1,-1)] # chebyshev nodes
    >>> print(x_list)
    [-h + x, -sqrt(2)*h/2 + x, x, sqrt(2)*h/2 + x, h + x]
    >>> mycoeffs = finite_diff_weights(1, x_list, 0)[1][4]
    >>> [simplify(c) for c in  mycoeffs] #doctest: +NORMALIZE_WHITESPACE
    [(h**3/2 + h**2*x - 3*h*x**2 - 4*x**3)/h**4,
    (-sqrt(2)*h**3 - 4*h**2*x + 3*sqrt(2)*h*x**2 + 8*x**3)/h**4,
    (6*h**2*x - 8*x**3)/h**4,
    (sqrt(2)*h**3 - 4*h**2*x - 3*sqrt(2)*h*x**2 + 8*x**3)/h**4,
    (-h**3/2 + h**2*x + 3*h*x**2 - 4*x**3)/h**4]
    Notes
    =====
    If weights for a finite difference approximation of the 3rd order
    derivative are wanted, weights for the 0th, 1st and 2nd order are
    calculated "for free", as are formulae using subsets of ``x_list``.
    This is something one can take advantage of to save computational cost.
    Be aware that one should define ``x_list`` from nearest to furthest from
    ``x0``. If not, subsets of ``x_list`` will yield poorer approximations,
    which might not grant an order of accuracy of ``len(x_list) - order``.
    See also
    ========
    sympy.calculus.finite_diff.apply_finite_diff
    References
    ==========
    .. [1] Generation of Finite Difference Formulas on Arbitrarily Spaced
            Grids, Bengt Fornberg; Mathematics of computation; 51; 184;
            (1988); 699-706; doi:10.1090/S0025-5718-1988-0935077-0
    """
    # The notation below closely corresponds to the one used in the paper.
    order = S(order)
    if not order.is_number:
        raise ValueError("Cannot handle symbolic order.")
    if order < 0:
        raise ValueError("Negative derivative order illegal.")
    if int(order) != order:
        raise ValueError("Non-integer order illegal")
    M = order
    N = len(x_list) - 1
    delta = [[[0 for nu in range(N+1)] for n in range(N+1)] for
             m in range(M+1)]
    delta[0][0][0] = S.One
    c1 = S.One
    for n in range(1, N+1):
        c2 = S.One
        for nu in range(n):
            c3 = x_list[n] - x_list[nu]
            c2 = c2 * c3
            if n <= M:
                delta[n][n-1][nu] = 0
            for m in range(min(n, M)+1):
                delta[m][n][nu] = (x_list[n]-x0)*delta[m][n-1][nu] -\
                    m*delta[m-1][n-1][nu]
                delta[m][n][nu] /= c3
        for m in range(min(n, M)+1):
            delta[m][n][n] = c1/c2*(m*delta[m-1][n-1][n-1] -
                                    (x_list[n-1]-x0)*delta[m][n-1][n-1])
        c1 = c2
    return delta
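A short sketch, separate from the row above (assumes SymPy): it takes the most
accurate first-derivative weights returned for a three-point centered stencil
and assembles the corresponding approximation, which should reduce to the
familiar central-difference formula.
from sympy import S, symbols, Function, finite_diff_weights

h = symbols('h', positive=True)
f = Function('f')
x_list = [-h, S.Zero, h]                              # centered three-point stencil
weights = finite_diff_weights(1, x_list, 0)[1][-1]    # 1st derivative, full stencil
approx = sum(w*f(xi) for w, xi in zip(weights, x_list))
print(approx)   # expected: f(h)/(2*h) - f(-h)/(2*h)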
 
 | 
	finite_diff_weights 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	58 
 | 
	sympy/utilities/iterables.py 
 | 
	def generate_bell(n):
    """Return permutations of [0, 1, ..., n - 1] such that each permutation
    differs from the last by the exchange of a single pair of neighbors.
    The ``n!`` permutations are returned as an iterator. In order to obtain
    the next permutation from a random starting permutation, use the
    ``next_trotterjohnson`` method of the Permutation class (which generates
    the same sequence in a different manner).
    Examples
    ========
    >>> from itertools import permutations
    >>> from sympy.utilities.iterables import generate_bell
    >>> from sympy import zeros, Matrix
    This is the sort of permutation used in the ringing of physical bells,
    and does not produce permutations in lexicographical order. Rather, the
    permutations differ from each other by exactly one inversion, and the
    position at which the swapping occurs varies periodically in a simple
    fashion. Consider the first few permutations of 4 elements generated
    by ``permutations`` and ``generate_bell``:
    >>> list(permutations(range(4)))[:5]
    [(0, 1, 2, 3), (0, 1, 3, 2), (0, 2, 1, 3), (0, 2, 3, 1), (0, 3, 1, 2)]
    >>> list(generate_bell(4))[:5]
    [(0, 1, 2, 3), (0, 1, 3, 2), (0, 3, 1, 2), (3, 0, 1, 2), (3, 0, 2, 1)]
    Notice how the 2nd and 3rd lexicographical permutations have 3 elements
    out of place whereas each "bell" permutation always has only two
    elements out of place relative to the previous permutation (and so the
    signature (+/-1) of a permutation is opposite of the signature of the
    previous permutation).
    How the position of inversion varies across the elements can be seen
    by tracing out where the largest number appears in the permutations:
    >>> m = zeros(4, 24)
    >>> for i, p in enumerate(generate_bell(4)):
    ...     m[:, i] = Matrix([j - 3 for j in list(p)])  # make largest zero
    >>> m.print_nonzero('X')
    [XXX  XXXXXX  XXXXXX  XXX]
    [XX XX XXXX XX XXXX XX XX]
    [X XXXX XX XXXX XX XXXX X]
    [ XXXXXX  XXXXXX  XXXXXX ]
    See Also
    ========
    sympy.combinatorics.permutations.Permutation.next_trotterjohnson
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Method_ringing
    .. [2] https://stackoverflow.com/questions/4856615/recursive-permutation/4857018
    .. [3] https://web.archive.org/web/20160313023044/http://programminggeeks.com/bell-algorithm-for-permutation/
    .. [4] https://en.wikipedia.org/wiki/Steinhaus%E2%80%93Johnson%E2%80%93Trotter_algorithm
    .. [5] Generating involutions, derangements, and relatives by ECO
           Vincent Vajnovszki, DMTCS vol 1 issue 12, 2010
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_generate_bell.txt 
 | 
	def generate_bell(n):
    """Return permutations of [0, 1, ..., n - 1] such that each permutation
    differs from the last by the exchange of a single pair of neighbors.
    The ``n!`` permutations are returned as an iterator. In order to obtain
    the next permutation from a random starting permutation, use the
    ``next_trotterjohnson`` method of the Permutation class (which generates
    the same sequence in a different manner).
    Examples
    ========
    >>> from itertools import permutations
    >>> from sympy.utilities.iterables import generate_bell
    >>> from sympy import zeros, Matrix
    This is the sort of permutation used in the ringing of physical bells,
    and does not produce permutations in lexicographical order. Rather, the
    permutations differ from each other by exactly one inversion, and the
    position at which the swapping occurs varies periodically in a simple
    fashion. Consider the first few permutations of 4 elements generated
    by ``permutations`` and ``generate_bell``:
    >>> list(permutations(range(4)))[:5]
    [(0, 1, 2, 3), (0, 1, 3, 2), (0, 2, 1, 3), (0, 2, 3, 1), (0, 3, 1, 2)]
    >>> list(generate_bell(4))[:5]
    [(0, 1, 2, 3), (0, 1, 3, 2), (0, 3, 1, 2), (3, 0, 1, 2), (3, 0, 2, 1)]
    Notice how the 2nd and 3rd lexicographical permutations have 3 elements
    out of place whereas each "bell" permutation always has only two
    elements out of place relative to the previous permutation (and so the
    signature (+/-1) of a permutation is opposite of the signature of the
    previous permutation).
    How the position of inversion varies across the elements can be seen
    by tracing out where the largest number appears in the permutations:
    >>> m = zeros(4, 24)
    >>> for i, p in enumerate(generate_bell(4)):
    ...     m[:, i] = Matrix([j - 3 for j in list(p)])  # make largest zero
    >>> m.print_nonzero('X')
    [XXX  XXXXXX  XXXXXX  XXX]
    [XX XX XXXX XX XXXX XX XX]
    [X XXXX XX XXXX XX XXXX X]
    [ XXXXXX  XXXXXX  XXXXXX ]
    See Also
    ========
    sympy.combinatorics.permutations.Permutation.next_trotterjohnson
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Method_ringing
    .. [2] https://stackoverflow.com/questions/4856615/recursive-permutation/4857018
    .. [3] https://web.archive.org/web/20160313023044/http://programminggeeks.com/bell-algorithm-for-permutation/
    .. [4] https://en.wikipedia.org/wiki/Steinhaus%E2%80%93Johnson%E2%80%93Trotter_algorithm
    .. [5] Generating involutions, derangements, and relatives by ECO
           Vincent Vajnovszki, DMTCS vol 1 issue 12, 2010
    """
    n = as_int(n)
    if n < 1:
        raise ValueError('n must be a positive integer')
    if n == 1:
        yield (0,)
    elif n == 2:
        yield (0, 1)
        yield (1, 0)
    elif n == 3:
        yield from [(0, 1, 2), (0, 2, 1), (2, 0, 1), (2, 1, 0), (1, 2, 0), (1, 0, 2)]
    else:
        m = n - 1
        op = [0] + [-1]*m
        l = list(range(n))
        while True:
            yield tuple(l)
            # find biggest element with op
            big = None, -1  # idx, value
            for i in range(n):
                if op[i] and l[i] > big[1]:
                    big = i, l[i]
            i, _ = big
            if i is None:
                break  # there are no ops left
            # swap it with neighbor in the indicated direction
            j = i + op[i]
            l[i], l[j] = l[j], l[i]
            op[i], op[j] = op[j], op[i]
            # if it landed at the end or if the neighbor in the same
            # direction is bigger then turn off op
            if j == 0 or j == m or l[j + op[j]] > l[j]:
                op[j] = 0
            # any element bigger to the left gets +1 op
            for i in range(j):
                if l[i] > l[j]:
                    op[i] = 1
            # any element bigger to the right gets -1 op
            for i in range(j + 1, n):
                if l[i] > l[j]:
                    op[i] = -1
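A hedged verification sketch (assumes SymPy's iterables module as above): it
checks the property the docstring emphasises, that consecutive permutations
differ only by a swap of two neighbouring elements.
from sympy.utilities.iterables import generate_bell

perms = list(generate_bell(4))
for p, q in zip(perms, perms[1:]):
    changed = [k for k in range(4) if p[k] != q[k]]
    # Exactly two positions change, and they are adjacent.
    assert len(changed) == 2 and changed[1] == changed[0] + 1
print(len(perms))   # all 4! = 24 permutations are produced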
 
 | 
	generate_bell 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	59 
 | 
	sympy/physics/quantum/identitysearch.py 
 | 
	def generate_gate_rules(gate_seq, return_as_muls=False):
    """Returns a set of gate rules.  Each gate rules is represented
    as a 2-tuple of tuples or Muls.  An empty tuple represents an arbitrary
    scalar value.
    This function uses the four operations (LL, LR, RL, RR)
    to generate the gate rules.
    A gate rule is an expression such as ABC = D or AB = CD, where
    A, B, C, and D are gates.  Each value on either side of the
    equal sign represents a circuit.  The four operations allow
    one to find a set of equivalent circuits from a gate identity.
    The letters denoting the operation tell the user what
    activities to perform on each expression.  The first letter
    indicates which side of the equal sign to focus on.  The
    second letter indicates which gate to focus on given the
    side.  Once this information is determined, the inverse
    of the gate is multiplied on both circuits to create a new
    gate rule.
    For example, given the identity ABCD = 1, an LL operation
    means look at the left-hand circuit and multiply both sides on the
    left by the inverse of the leftmost gate A.  If A is Hermitian, the
    inverse of A is still A.  The resulting new rule is BCD = A.
    The following is a summary of the four operations.  Assume
    that in the examples, all gates are Hermitian.
        LL : left circuit, left multiply
             ABCD = E -> AABCD = AE -> BCD = AE
        LR : left circuit, right multiply
             ABCD = E -> ABCDD = ED -> ABC = ED
        RL : right circuit, left multiply
             ABC = ED -> EABC = EED -> EABC = D
        RR : right circuit, right multiply
             AB = CD -> ABD = CDD -> ABD = C
    The number of gate rules generated is n*(n+1), where n
    is the number of gates in the sequence (unproven).
    Parameters
    ==========
    gate_seq : Gate tuple, Mul, or Number
        A variable length tuple or Mul of Gates whose product is equal to
        a scalar matrix
    return_as_muls : bool
        True to return a set of Muls; False to return a set of tuples
    Examples
    ========
    Find the gate rules of the current circuit using tuples:
    >>> from sympy.physics.quantum.identitysearch import generate_gate_rules
    >>> from sympy.physics.quantum.gate import X, Y, Z
    >>> x = X(0); y = Y(0); z = Z(0)
    >>> generate_gate_rules((x, x))
    {((X(0),), (X(0),)), ((X(0), X(0)), ())}
    >>> generate_gate_rules((x, y, z))
    {((), (X(0), Z(0), Y(0))), ((), (Y(0), X(0), Z(0))),
     ((), (Z(0), Y(0), X(0))), ((X(0),), (Z(0), Y(0))),
     ((Y(0),), (X(0), Z(0))), ((Z(0),), (Y(0), X(0))),
     ((X(0), Y(0)), (Z(0),)), ((Y(0), Z(0)), (X(0),)),
     ((Z(0), X(0)), (Y(0),)), ((X(0), Y(0), Z(0)), ()),
     ((Y(0), Z(0), X(0)), ()), ((Z(0), X(0), Y(0)), ())}
    Find the gate rules of the current circuit using Muls:
    >>> generate_gate_rules(x*x, return_as_muls=True)
    {(1, 1)}
    >>> generate_gate_rules(x*y*z, return_as_muls=True)
    {(1, X(0)*Z(0)*Y(0)), (1, Y(0)*X(0)*Z(0)),
     (1, Z(0)*Y(0)*X(0)), (X(0)*Y(0), Z(0)),
     (Y(0)*Z(0), X(0)), (Z(0)*X(0), Y(0)),
     (X(0)*Y(0)*Z(0), 1), (Y(0)*Z(0)*X(0), 1),
     (Z(0)*X(0)*Y(0), 1), (X(0), Z(0)*Y(0)),
     (Y(0), X(0)*Z(0)), (Z(0), Y(0)*X(0))}
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_generate_gate_rules.txt 
 | 
	def generate_gate_rules(gate_seq, return_as_muls=False):
    """Returns a set of gate rules.  Each gate rules is represented
    as a 2-tuple of tuples or Muls.  An empty tuple represents an arbitrary
    scalar value.
    This function uses the four operations (LL, LR, RL, RR)
    to generate the gate rules.
    A gate rule is an expression such as ABC = D or AB = CD, where
    A, B, C, and D are gates.  Each value on either side of the
    equal sign represents a circuit.  The four operations allow
    one to find a set of equivalent circuits from a gate identity.
    The letters denoting the operation tell the user what
    activities to perform on each expression.  The first letter
    indicates which side of the equal sign to focus on.  The
    second letter indicates which gate to focus on given the
    side.  Once this information is determined, the inverse
    of the gate is multiplied on both circuits to create a new
    gate rule.
    For example, given the identity ABCD = 1, an LL operation
    means look at the left-hand circuit and multiply both sides on the
    left by the inverse of the leftmost gate A.  If A is Hermitian, the
    inverse of A is still A.  The resulting new rule is BCD = A.
    The following is a summary of the four operations.  Assume
    that in the examples, all gates are Hermitian.
        LL : left circuit, left multiply
             ABCD = E -> AABCD = AE -> BCD = AE
        LR : left circuit, right multiply
             ABCD = E -> ABCDD = ED -> ABC = ED
        RL : right circuit, left multiply
             ABC = ED -> EABC = EED -> EABC = D
        RR : right circuit, right multiply
             AB = CD -> ABD = CDD -> ABD = C
    The number of gate rules generated is n*(n+1), where n
    is the number of gates in the sequence (unproven).
    Parameters
    ==========
    gate_seq : Gate tuple, Mul, or Number
        A variable length tuple or Mul of Gates whose product is equal to
        a scalar matrix
    return_as_muls : bool
        True to return a set of Muls; False to return a set of tuples
    Examples
    ========
    Find the gate rules of the current circuit using tuples:
    >>> from sympy.physics.quantum.identitysearch import generate_gate_rules
    >>> from sympy.physics.quantum.gate import X, Y, Z
    >>> x = X(0); y = Y(0); z = Z(0)
    >>> generate_gate_rules((x, x))
    {((X(0),), (X(0),)), ((X(0), X(0)), ())}
    >>> generate_gate_rules((x, y, z))
    {((), (X(0), Z(0), Y(0))), ((), (Y(0), X(0), Z(0))),
     ((), (Z(0), Y(0), X(0))), ((X(0),), (Z(0), Y(0))),
     ((Y(0),), (X(0), Z(0))), ((Z(0),), (Y(0), X(0))),
     ((X(0), Y(0)), (Z(0),)), ((Y(0), Z(0)), (X(0),)),
     ((Z(0), X(0)), (Y(0),)), ((X(0), Y(0), Z(0)), ()),
     ((Y(0), Z(0), X(0)), ()), ((Z(0), X(0), Y(0)), ())}
    Find the gate rules of the current circuit using Muls:
    >>> generate_gate_rules(x*x, return_as_muls=True)
    {(1, 1)}
    >>> generate_gate_rules(x*y*z, return_as_muls=True)
    {(1, X(0)*Z(0)*Y(0)), (1, Y(0)*X(0)*Z(0)),
     (1, Z(0)*Y(0)*X(0)), (X(0)*Y(0), Z(0)),
     (Y(0)*Z(0), X(0)), (Z(0)*X(0), Y(0)),
     (X(0)*Y(0)*Z(0), 1), (Y(0)*Z(0)*X(0), 1),
     (Z(0)*X(0)*Y(0), 1), (X(0), Z(0)*Y(0)),
     (Y(0), X(0)*Z(0)), (Z(0), Y(0)*X(0))}
    """
    if isinstance(gate_seq, Number):
        if return_as_muls:
            return {(S.One, S.One)}
        else:
            return {((), ())}
    elif isinstance(gate_seq, Mul):
        gate_seq = gate_seq.args
    # Each item in queue is a 3-tuple:
    #     i)   first item is the left side of an equality
    #    ii)   second item is the right side of an equality
    #   iii)   third item is the number of operations performed
    # The argument, gate_seq, will start on the left side, and
    # the right side will be empty, implying the presence of an
    # identity.
    queue = deque()
    # A set of gate rules
    rules = set()
    # Maximum number of operations to perform
    max_ops = len(gate_seq)
    def process_new_rule(new_rule, ops):
        if new_rule is not None:
            new_left, new_right = new_rule
            if new_rule not in rules and (new_right, new_left) not in rules:
                rules.add(new_rule)
            # If haven't reached the max limit on operations
            if ops + 1 < max_ops:
                queue.append(new_rule + (ops + 1,))
    queue.append((gate_seq, (), 0))
    rules.add((gate_seq, ()))
    while len(queue) > 0:
        left, right, ops = queue.popleft()
        # Do a LL
        new_rule = ll_op(left, right)
        process_new_rule(new_rule, ops)
        # Do a LR
        new_rule = lr_op(left, right)
        process_new_rule(new_rule, ops)
        # Do a RL
        new_rule = rl_op(left, right)
        process_new_rule(new_rule, ops)
        # Do a RR
        new_rule = rr_op(left, right)
        process_new_rule(new_rule, ops)
    if return_as_muls:
        # Convert each rule as tuples into a rule as muls
        mul_rules = set()
        for rule in rules:
            left, right = rule
            mul_rules.add((Mul(*left), Mul(*right)))
        rules = mul_rules
    return rules
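An illustrative sketch, separate from the row above (assumes
sympy.physics.quantum is available): it counts the rules produced for a
three-gate sequence, which matches the n*(n+1) count stated in the docstring.
from sympy.physics.quantum.gate import X, Y, Z
from sympy.physics.quantum.identitysearch import generate_gate_rules

x, y, z = X(0), Y(0), Z(0)
rules = generate_gate_rules((x, y, z))
print(len(rules))   # 3*(3+1) = 12, as in the docstring example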
 
 | 
	generate_gate_rules 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	60 
 | 
	sympy/tensor/index_methods.py 
 | 
	def get_contraction_structure(expr):
    """Determine dummy indices of ``expr`` and describe its structure
    By *dummy* we mean indices that are summation indices.
    The structure of the expression is determined and described as follows:
    1) A conforming summation of Indexed objects is described with a dict where
       the keys are summation indices and the corresponding values are sets
       containing all terms for which the summation applies.  All Add objects
       in the SymPy expression tree are described like this.
    2) For all nodes in the SymPy expression tree that are *not* of type Add, the
       following applies:
       If a node discovers contractions in one of its arguments, the node
       itself will be stored as a key in the dict.  For that key, the
       corresponding value is a list of dicts, each of which is the result of a
       recursive call to get_contraction_structure().  The list contains only
       dicts for the non-trivial deeper contractions, omitting dicts with None
       as the one and only key.
    .. Note:: The presence of expressions among the dictionary keys indicates
       multiple levels of index contractions.  A nested dict displays nested
       contractions and may itself contain dicts from a deeper level.  In
       practical calculations the summation in the deepest nested level must be
       calculated first so that the outer expression can access the resulting
       indexed object.
    Examples
    ========
    >>> from sympy.tensor.index_methods import get_contraction_structure
    >>> from sympy import default_sort_key
    >>> from sympy.tensor import IndexedBase, Idx
    >>> x, y, A = map(IndexedBase, ['x', 'y', 'A'])
    >>> i, j, k, l = map(Idx, ['i', 'j', 'k', 'l'])
    >>> get_contraction_structure(x[i]*y[i] + A[j, j])
    {(i,): {x[i]*y[i]}, (j,): {A[j, j]}}
    >>> get_contraction_structure(x[i]*y[j])
    {None: {x[i]*y[j]}}
    A multiplication of contracted factors results in nested dicts representing
    the internal contractions.
    >>> d = get_contraction_structure(x[i, i]*y[j, j])
    >>> sorted(d.keys(), key=default_sort_key)
    [None, x[i, i]*y[j, j]]
    In this case, the product has no contractions:
    >>> d[None]
    {x[i, i]*y[j, j]}
    Factors are contracted "first":
    >>> sorted(d[x[i, i]*y[j, j]], key=default_sort_key)
    [{(i,): {x[i, i]}}, {(j,): {y[j, j]}}]
    A parenthesized Add object is also returned as a nested dictionary.  The
    term containing the parenthesis is a Mul with a contraction among the
    arguments, so it will be found as a key in the result.  It stores the
    dictionary resulting from a recursive call on the Add expression.
    >>> d = get_contraction_structure(x[i]*(y[i] + A[i, j]*x[j]))
    >>> sorted(d.keys(), key=default_sort_key)
    [(A[i, j]*x[j] + y[i])*x[i], (i,)]
    >>> d[(i,)]
    {(A[i, j]*x[j] + y[i])*x[i]}
    >>> d[x[i]*(A[i, j]*x[j] + y[i])]
    [{None: {y[i]}, (j,): {A[i, j]*x[j]}}]
    Powers with contractions in either base or exponent will also be found as
    keys in the dictionary, mapping to a list of results from recursive calls:
    >>> d = get_contraction_structure(A[j, j]**A[i, i])
    >>> d[None]
    {A[j, j]**A[i, i]}
    >>> nested_contractions = d[A[j, j]**A[i, i]]
    >>> nested_contractions[0]
    {(j,): {A[j, j]}}
    >>> nested_contractions[1]
    {(i,): {A[i, i]}}
    The description of the contraction structure may appear complicated when
    represented with a string in the above examples, but it is easy to iterate
    over:
    >>> from sympy import Expr
    >>> for key in d:
    ...     if isinstance(key, Expr):
    ...         continue
    ...     for term in d[key]:
    ...         if term in d:
    ...             # treat deepest contraction first
    ...             pass
    ...     # treat outermost contractions here
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_get_contraction_structure.txt 
 | 
	def get_contraction_structure(expr):
    """Determine dummy indices of ``expr`` and describe its structure
    By *dummy* we mean indices that are summation indices.
    The structure of the expression is determined and described as follows:
    1) A conforming summation of Indexed objects is described with a dict where
       the keys are summation indices and the corresponding values are sets
       containing all terms for which the summation applies.  All Add objects
       in the SymPy expression tree are described like this.
    2) For all nodes in the SymPy expression tree that are *not* of type Add, the
       following applies:
       If a node discovers contractions in one of its arguments, the node
       itself will be stored as a key in the dict.  For that key, the
       corresponding value is a list of dicts, each of which is the result of a
       recursive call to get_contraction_structure().  The list contains only
       dicts for the non-trivial deeper contractions, omitting dicts with None
       as the one and only key.
    .. Note:: The presence of expressions among the dictionary keys indicates
       multiple levels of index contractions.  A nested dict displays nested
       contractions and may itself contain dicts from a deeper level.  In
       practical calculations the summation in the deepest nested level must be
       calculated first so that the outer expression can access the resulting
       indexed object.
    Examples
    ========
    >>> from sympy.tensor.index_methods import get_contraction_structure
    >>> from sympy import default_sort_key
    >>> from sympy.tensor import IndexedBase, Idx
    >>> x, y, A = map(IndexedBase, ['x', 'y', 'A'])
    >>> i, j, k, l = map(Idx, ['i', 'j', 'k', 'l'])
    >>> get_contraction_structure(x[i]*y[i] + A[j, j])
    {(i,): {x[i]*y[i]}, (j,): {A[j, j]}}
    >>> get_contraction_structure(x[i]*y[j])
    {None: {x[i]*y[j]}}
    A multiplication of contracted factors results in nested dicts representing
    the internal contractions.
    >>> d = get_contraction_structure(x[i, i]*y[j, j])
    >>> sorted(d.keys(), key=default_sort_key)
    [None, x[i, i]*y[j, j]]
    In this case, the product has no contractions:
    >>> d[None]
    {x[i, i]*y[j, j]}
    Factors are contracted "first":
    >>> sorted(d[x[i, i]*y[j, j]], key=default_sort_key)
    [{(i,): {x[i, i]}}, {(j,): {y[j, j]}}]
    A parenthesized Add object is also returned as a nested dictionary.  The
    term containing the parenthesis is a Mul with a contraction among the
    arguments, so it will be found as a key in the result.  It stores the
    dictionary resulting from a recursive call on the Add expression.
    >>> d = get_contraction_structure(x[i]*(y[i] + A[i, j]*x[j]))
    >>> sorted(d.keys(), key=default_sort_key)
    [(A[i, j]*x[j] + y[i])*x[i], (i,)]
    >>> d[(i,)]
    {(A[i, j]*x[j] + y[i])*x[i]}
    >>> d[x[i]*(A[i, j]*x[j] + y[i])]
    [{None: {y[i]}, (j,): {A[i, j]*x[j]}}]
    Powers with contractions in either base or exponent will also be found as
    keys in the dictionary, mapping to a list of results from recursive calls:
    >>> d = get_contraction_structure(A[j, j]**A[i, i])
    >>> d[None]
    {A[j, j]**A[i, i]}
    >>> nested_contractions = d[A[j, j]**A[i, i]]
    >>> nested_contractions[0]
    {(j,): {A[j, j]}}
    >>> nested_contractions[1]
    {(i,): {A[i, i]}}
    The description of the contraction structure may appear complicated when
    represented with a string in the above examples, but it is easy to iterate
    over:
    >>> from sympy import Expr
    >>> for key in d:
    ...     if isinstance(key, Expr):
    ...         continue
    ...     for term in d[key]:
    ...         if term in d:
    ...             # treat deepest contraction first
    ...             pass
    ...     # treat outermost contractions here
    """
    # We call ourself recursively to inspect sub expressions.
    if isinstance(expr, Indexed):
        junk, key = _remove_repeated(expr.indices)
        return {key or None: {expr}}
    elif expr.is_Atom:
        return {None: {expr}}
    elif expr.is_Mul:
        junk, junk, key = _get_indices_Mul(expr, return_dummies=True)
        result = {key or None: {expr}}
        # recurse on every factor
        nested = []
        for fac in expr.args:
            facd = get_contraction_structure(fac)
            if not (None in facd and len(facd) == 1):
                nested.append(facd)
        if nested:
            result[expr] = nested
        return result
    elif expr.is_Pow or isinstance(expr, exp):
        # recurse in base and exp separately.  If either has internal
        # contractions we must include ourselves as a key in the returned dict
        b, e = expr.as_base_exp()
        dbase = get_contraction_structure(b)
        dexp = get_contraction_structure(e)
        dicts = []
        for d in dbase, dexp:
            if not (None in d and len(d) == 1):
                dicts.append(d)
        result = {None: {expr}}
        if dicts:
            result[expr] = dicts
        return result
    elif expr.is_Add:
        # Note: we just collect all terms with identical summation indices. We
        # do nothing to identify equivalent terms here, as this would require
        # substitutions or pattern matching in expressions of unknown
        # complexity.
        result = {}
        for term in expr.args:
            # recurse on every term
            d = get_contraction_structure(term)
            for key in d:
                if key in result:
                    result[key] |= d[key]
                else:
                    result[key] = d[key]
        return result
    elif isinstance(expr, Piecewise):
        # FIXME:  No support for Piecewise yet
        return {None: expr}
    elif isinstance(expr, Function):
        # Collect non-trivial contraction structures in each argument
        # We do not report repeated indices in separate arguments as a
        # contraction
        deeplist = []
        for arg in expr.args:
            deep = get_contraction_structure(arg)
            if not (None in deep and len(deep) == 1):
                deeplist.append(deep)
        d = {None: {expr}}
        if deeplist:
            d[expr] = deeplist
        return d
    # this test is expensive, so it should be at the end
    elif not expr.has(Indexed):
        return {None: {expr}}
    raise NotImplementedError(
        "FIXME: No specialized handling of type %s" % type(expr))
 
 | 
	get_contraction_structure 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	61 
 | 
	sympy/physics/vector/functions.py 
 | 
	def get_motion_params(frame, **kwargs):
    """
    Returns the three motion parameters - (acceleration, velocity, and
    position) as vectorial functions of time in the given frame.
    If a higher order differential function is provided, the lower order
    functions are used as boundary conditions. For example, given the
    acceleration, the velocity and position parameters are taken as
    boundary conditions.
    The values of time at which the boundary conditions are specified
    are taken from timevalue1 (for the position boundary condition) and
    timevalue2 (for the velocity boundary condition).
    If any of the boundary conditions are not provided, they are taken
    to be zero by default (zero vectors, in case of vectorial inputs). If
    the boundary conditions are also functions of time, they are converted
    to constants by substituting the time values in the dynamicsymbols._t
    time Symbol.
    This function can also be used for calculating rotational motion
    parameters. Have a look at the Parameters and Examples for more clarity.
    Parameters
    ==========
    frame : ReferenceFrame
        The frame to express the motion parameters in
    acceleration : Vector
        Acceleration of the object/frame as a function of time
    velocity : Vector
        Velocity as function of time or as boundary condition
        of velocity at time = timevalue2
    position : Vector
        Position as function of time or as boundary condition
        of position at time = timevalue1
    timevalue1 : sympyfiable
        Value of time for position boundary condition
    timevalue2 : sympyfiable
        Value of time for velocity boundary condition
    Examples
    ========
    >>> from sympy.physics.vector import ReferenceFrame, get_motion_params, dynamicsymbols
    >>> from sympy.physics.vector import init_vprinting
    >>> init_vprinting(pretty_print=False)
    >>> from sympy import symbols
    >>> R = ReferenceFrame('R')
    >>> v1, v2, v3 = dynamicsymbols('v1 v2 v3')
    >>> v = v1*R.x + v2*R.y + v3*R.z
    >>> get_motion_params(R, position = v)
    (v1''*R.x + v2''*R.y + v3''*R.z, v1'*R.x + v2'*R.y + v3'*R.z, v1*R.x + v2*R.y + v3*R.z)
    >>> a, b, c = symbols('a b c')
    >>> v = a*R.x + b*R.y + c*R.z
    >>> get_motion_params(R, velocity = v)
    (0, a*R.x + b*R.y + c*R.z, a*t*R.x + b*t*R.y + c*t*R.z)
    >>> parameters = get_motion_params(R, acceleration = v)
    >>> parameters[1]
    a*t*R.x + b*t*R.y + c*t*R.z
    >>> parameters[2]
    a*t**2/2*R.x + b*t**2/2*R.y + c*t**2/2*R.z
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_get_motion_params.txt 
 | 
	def get_motion_params(frame, **kwargs):
    """
    Returns the three motion parameters - (acceleration, velocity, and
    position) as vectorial functions of time in the given frame.
    If a higher order differential function is provided, the lower order
    functions are used as boundary conditions. For example, given the
    acceleration, the velocity and position parameters are taken as
    boundary conditions.
    The values of time at which the boundary conditions are specified
    are taken from timevalue1 (for the position boundary condition) and
    timevalue2 (for the velocity boundary condition).
    If any of the boundary conditions are not provided, they are taken
    to be zero by default (zero vectors, in case of vectorial inputs). If
    the boundary conditions are also functions of time, they are converted
    to constants by substituting the time values in the dynamicsymbols._t
    time Symbol.
    This function can also be used for calculating rotational motion
    parameters. Have a look at the Parameters and Examples for more clarity.
    Parameters
    ==========
    frame : ReferenceFrame
        The frame to express the motion parameters in
    acceleration : Vector
        Acceleration of the object/frame as a function of time
    velocity : Vector
        Velocity as function of time or as boundary condition
        of velocity at time = timevalue2
    position : Vector
        Position as function of time or as boundary condition
        of position at time = timevalue1
    timevalue1 : sympyfiable
        Value of time for position boundary condition
    timevalue2 : sympyfiable
        Value of time for velocity boundary condition
    Examples
    ========
    >>> from sympy.physics.vector import ReferenceFrame, get_motion_params, dynamicsymbols
    >>> from sympy.physics.vector import init_vprinting
    >>> init_vprinting(pretty_print=False)
    >>> from sympy import symbols
    >>> R = ReferenceFrame('R')
    >>> v1, v2, v3 = dynamicsymbols('v1 v2 v3')
    >>> v = v1*R.x + v2*R.y + v3*R.z
    >>> get_motion_params(R, position = v)
    (v1''*R.x + v2''*R.y + v3''*R.z, v1'*R.x + v2'*R.y + v3'*R.z, v1*R.x + v2*R.y + v3*R.z)
    >>> a, b, c = symbols('a b c')
    >>> v = a*R.x + b*R.y + c*R.z
    >>> get_motion_params(R, velocity = v)
    (0, a*R.x + b*R.y + c*R.z, a*t*R.x + b*t*R.y + c*t*R.z)
    >>> parameters = get_motion_params(R, acceleration = v)
    >>> parameters[1]
    a*t*R.x + b*t*R.y + c*t*R.z
    >>> parameters[2]
    a*t**2/2*R.x + b*t**2/2*R.y + c*t**2/2*R.z
    """
    def _process_vector_differential(vectdiff, condition, variable, ordinate,
                                     frame):
        """
        Helper function for get_motion methods. Finds derivative of vectdiff
        wrt variable, and its integral using the specified boundary condition
        at value of variable = ordinate.
        Returns a tuple of - (derivative, function and integral) wrt vectdiff
        """
        # Make sure boundary condition is independent of 'variable'
        if condition != 0:
            condition = express(condition, frame, variables=True)
        # Special case of vectdiff == 0
        if vectdiff == Vector(0):
            return (0, 0, condition)
        # Express vectdiff completely in condition's frame to give vectdiff1
        vectdiff1 = express(vectdiff, frame)
        # Find derivative of vectdiff
        vectdiff2 = time_derivative(vectdiff, frame)
        # Integrate and use boundary condition
        vectdiff0 = Vector(0)
        lims = (variable, ordinate, variable)
        for dim in frame:
            function1 = vectdiff1.dot(dim)
            abscissa = dim.dot(condition).subs({variable: ordinate})
            # Indefinite integral of 'function1' wrt 'variable', using
            # the given initial condition (ordinate, abscissa).
            vectdiff0 += (integrate(function1, lims) + abscissa) * dim
        # Return tuple
        return (vectdiff2, vectdiff, vectdiff0)
    _check_frame(frame)
    # Decide mode of operation based on user's input
    if 'acceleration' in kwargs:
        mode = 2
    elif 'velocity' in kwargs:
        mode = 1
    else:
        mode = 0
    # All the possible parameters in kwargs
    # Not all are required for every case
    # If not specified, set to default values(may or may not be used in
    # calculations)
    conditions = ['acceleration', 'velocity', 'position',
                  'timevalue', 'timevalue1', 'timevalue2']
    for i, x in enumerate(conditions):
        if x not in kwargs:
            if i < 3:
                kwargs[x] = Vector(0)
            else:
                kwargs[x] = S.Zero
        elif i < 3:
            _check_vector(kwargs[x])
        else:
            kwargs[x] = sympify(kwargs[x])
    if mode == 2:
        vel = _process_vector_differential(kwargs['acceleration'],
                                           kwargs['velocity'],
                                           dynamicsymbols._t,
                                           kwargs['timevalue2'], frame)[2]
        pos = _process_vector_differential(vel, kwargs['position'],
                                           dynamicsymbols._t,
                                           kwargs['timevalue1'], frame)[2]
        return (kwargs['acceleration'], vel, pos)
    elif mode == 1:
        return _process_vector_differential(kwargs['velocity'],
                                            kwargs['position'],
                                            dynamicsymbols._t,
                                            kwargs['timevalue1'], frame)
    else:
        vel = time_derivative(kwargs['position'], frame)
        acc = time_derivative(vel, frame)
        return (acc, vel, kwargs['position'])
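
A short usage sketch (illustrative symbols only, not taken from the SymPy test suite) for the acceleration mode, where velocity and position enter as boundary conditions at timevalue2 and timevalue1 respectively:

from sympy import symbols
from sympy.physics.vector import ReferenceFrame, get_motion_params

a0, v0, x0 = symbols('a0 v0 x0')
N = ReferenceFrame('N')

# Constant acceleration along N.x; v0 and x0 are the boundary values at t = 0.
acc, vel, pos = get_motion_params(
    N,
    acceleration=a0*N.x,
    velocity=v0*N.x,     # boundary condition at timevalue2
    position=x0*N.x,     # boundary condition at timevalue1
    timevalue1=0,
    timevalue2=0,
)
# Expected (up to term ordering):
#   acc = a0*N.x
#   vel = (a0*t + v0)*N.x
#   pos = (a0*t**2/2 + v0*t + x0)*N.x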
 
 | 
	get_motion_params 
 | 
	Repo-Level 
 | 
					
	sympy 
 | 
	63 
 | 
	sympy/integrals/heurisch.py 
 | 
	def heurisch(f, x, rewrite=False, hints=None, mappings=None, retries=3,
             degree_offset=0, unnecessary_permutations=None,
             _try_heurisch=None):
    """
    Compute indefinite integral using heuristic Risch algorithm.
    Explanation
    ===========
    This is a heuristic approach to indefinite integration in finite
    terms using the extended heuristic (parallel) Risch algorithm, based
    on Manuel Bronstein's "Poor Man's Integrator".
    The algorithm supports various classes of functions including
    transcendental elementary or special functions like Airy,
    Bessel, Whittaker and Lambert.
    Note that this algorithm is not a decision procedure. If it isn't
    able to compute the antiderivative for a given function, then this is
    not a proof that such a function does not exist.  One should use the
    recursive Risch algorithm in such a case.  It's an open question if
    this algorithm can be made a full decision procedure.
    This is an internal integrator procedure. You should use the top-level
    'integrate' function in most cases, as this procedure needs some
    preprocessing steps and otherwise may fail.
    Specification
    =============
     heurisch(f, x, rewrite=False, hints=None)
       where
         f : expression
         x : symbol
         rewrite -> force rewrite 'f' in terms of 'tan' and 'tanh'
         hints   -> a list of functions that may appear in the antiderivative
          - hints = None          --> no suggestions at all
          - hints = [ ]           --> try to figure out
          - hints = [f1, ..., fn] --> we know better
    Examples
    ========
    >>> from sympy import tan
    >>> from sympy.integrals.heurisch import heurisch
    >>> from sympy.abc import x, y
    >>> heurisch(y*tan(x), x)
    y*log(tan(x)**2 + 1)/2
    See Manuel Bronstein's "Poor Man's Integrator":
    References
    ==========
    .. [1] https://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/index.html
    For more information on the implemented algorithm refer to:
    .. [2] K. Geddes, L. Stefanus, On the Risch-Norman Integration
       Method and its Implementation in Maple, Proceedings of
       ISSAC'89, ACM Press, 212-217.
    .. [3] J. H. Davenport, On the Parallel Risch Algorithm (I),
       Proceedings of EUROCAM'82, LNCS 144, Springer, 144-157.
    .. [4] J. H. Davenport, On the Parallel Risch Algorithm (III):
       Use of Tangents, SIGSAM Bulletin 16 (1982), 3-6.
    .. [5] J. H. Davenport, B. M. Trager, On the Parallel Risch
       Algorithm (II), ACM Transactions on Mathematical
       Software 11 (1985), 356-362.
    See Also
    ========
    sympy.integrals.integrals.Integral.doit
    sympy.integrals.integrals.Integral
    sympy.integrals.heurisch.components
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_heurisch.txt 
 | 
	def heurisch(f, x, rewrite=False, hints=None, mappings=None, retries=3,
             degree_offset=0, unnecessary_permutations=None,
             _try_heurisch=None):
    """
    Compute indefinite integral using heuristic Risch algorithm.
    Explanation
    ===========
    This is a heuristic approach to indefinite integration in finite
    terms using the extended heuristic (parallel) Risch algorithm, based
    on Manuel Bronstein's "Poor Man's Integrator".
    The algorithm supports various classes of functions including
    transcendental elementary or special functions like Airy,
    Bessel, Whittaker and Lambert.
    Note that this algorithm is not a decision procedure. If it isn't
    able to compute the antiderivative for a given function, then this is
    not a proof that such a function does not exist.  One should use the
    recursive Risch algorithm in such a case.  It's an open question if
    this algorithm can be made a full decision procedure.
    This is an internal integrator procedure. You should use the top-level
    'integrate' function in most cases, as this procedure needs some
    preprocessing steps and otherwise may fail.
    Specification
    =============
     heurisch(f, x, rewrite=False, hints=None)
       where
         f : expression
         x : symbol
         rewrite -> force rewrite 'f' in terms of 'tan' and 'tanh'
         hints   -> a list of functions that may appear in the antiderivative
          - hints = None          --> no suggestions at all
          - hints = [ ]           --> try to figure out
          - hints = [f1, ..., fn] --> we know better
    Examples
    ========
    >>> from sympy import tan
    >>> from sympy.integrals.heurisch import heurisch
    >>> from sympy.abc import x, y
    >>> heurisch(y*tan(x), x)
    y*log(tan(x)**2 + 1)/2
    See Manuel Bronstein's "Poor Man's Integrator":
    References
    ==========
    .. [1] https://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/index.html
    For more information on the implemented algorithm refer to:
    .. [2] K. Geddes, L. Stefanus, On the Risch-Norman Integration
       Method and its Implementation in Maple, Proceedings of
       ISSAC'89, ACM Press, 212-217.
    .. [3] J. H. Davenport, On the Parallel Risch Algorithm (I),
       Proceedings of EUROCAM'82, LNCS 144, Springer, 144-157.
    .. [4] J. H. Davenport, On the Parallel Risch Algorithm (III):
       Use of Tangents, SIGSAM Bulletin 16 (1982), 3-6.
    .. [5] J. H. Davenport, B. M. Trager, On the Parallel Risch
       Algorithm (II), ACM Transactions on Mathematical
       Software 11 (1985), 356-362.
    See Also
    ========
    sympy.integrals.integrals.Integral.doit
    sympy.integrals.integrals.Integral
    sympy.integrals.heurisch.components
    """
    f = sympify(f)
    # There are some functions that Heurisch cannot currently handle,
    # so do not even try.
    # Set _try_heurisch=True to skip this check
    if _try_heurisch is not True:
        if f.has(Abs, re, im, sign, Heaviside, DiracDelta, floor, ceiling, arg):
            return
    if not f.has_free(x):
        return f*x
    if not f.is_Add:
        indep, f = f.as_independent(x)
    else:
        indep = S.One
    rewritables = {
        (sin, cos, cot): tan,
        (sinh, cosh, coth): tanh,
    }
    if rewrite:
        for candidates, rule in rewritables.items():
            f = f.rewrite(candidates, rule)
    else:
        for candidates in rewritables.keys():
            if f.has(*candidates):
                break
        else:
            rewrite = True
    terms = components(f, x)
    dcache = DiffCache(x)
    if hints is not None:
        if not hints:
            a = Wild('a', exclude=[x])
            b = Wild('b', exclude=[x])
            c = Wild('c', exclude=[x])
            for g in set(terms):  # using copy of terms
                if g.is_Function:
                    if isinstance(g, li):
                        M = g.args[0].match(a*x**b)
                        if M is not None:
                            terms.add( x*(li(M[a]*x**M[b]) - (M[a]*x**M[b])**(-1/M[b])*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) )
                            #terms.add( x*(li(M[a]*x**M[b]) - (x**M[b])**(-1/M[b])*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) )
                            #terms.add( x*(li(M[a]*x**M[b]) - x*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) )
                            #terms.add( li(M[a]*x**M[b]) - Ei((M[b]+1)*log(M[a]*x**M[b])/M[b]) )
                    elif isinstance(g, exp):
                        M = g.args[0].match(a*x**2)
                        if M is not None:
                            if M[a].is_positive:
                                terms.add(erfi(sqrt(M[a])*x))
                            else: # M[a].is_negative or unknown
                                terms.add(erf(sqrt(-M[a])*x))
                        M = g.args[0].match(a*x**2 + b*x + c)
                        if M is not None:
                            if M[a].is_positive:
                                terms.add(sqrt(pi/4*(-M[a]))*exp(M[c] - M[b]**2/(4*M[a]))*
                                          erfi(sqrt(M[a])*x + M[b]/(2*sqrt(M[a]))))
                            elif M[a].is_negative:
                                terms.add(sqrt(pi/4*(-M[a]))*exp(M[c] - M[b]**2/(4*M[a]))*
                                          erf(sqrt(-M[a])*x - M[b]/(2*sqrt(-M[a]))))
                        M = g.args[0].match(a*log(x)**2)
                        if M is not None:
                            if M[a].is_positive:
                                terms.add(erfi(sqrt(M[a])*log(x) + 1/(2*sqrt(M[a]))))
                            if M[a].is_negative:
                                terms.add(erf(sqrt(-M[a])*log(x) - 1/(2*sqrt(-M[a]))))
                elif g.is_Pow:
                    if g.exp.is_Rational and g.exp.q == 2:
                        M = g.base.match(a*x**2 + b)
                        if M is not None and M[b].is_positive:
                            if M[a].is_positive:
                                terms.add(asinh(sqrt(M[a]/M[b])*x))
                            elif M[a].is_negative:
                                terms.add(asin(sqrt(-M[a]/M[b])*x))
                        M = g.base.match(a*x**2 - b)
                        if M is not None and M[b].is_positive:
                            if M[a].is_positive:
                                dF = 1/sqrt(M[a]*x**2 - M[b])
                                F = log(2*sqrt(M[a])*sqrt(M[a]*x**2 - M[b]) + 2*M[a]*x)/sqrt(M[a])
                                dcache.cache[F] = dF  # hack: F.diff(x) doesn't automatically simplify to f
                                terms.add(F)
                            elif M[a].is_negative:
                                terms.add(-M[b]/2*sqrt(-M[a])*
                                           atan(sqrt(-M[a])*x/sqrt(M[a]*x**2 - M[b])))
        else:
            terms |= set(hints)
    for g in set(terms):  # using copy of terms
        terms |= components(dcache.get_diff(g), x)
    # XXX: The commented line below makes heurisch more deterministic wrt
    # PYTHONHASHSEED and the iteration order of sets. There are other places
    # where sets are iterated over but this one is possibly the most important.
    # Theoretically the order here should not matter but different orderings
    # can expose potential bugs in the different code paths so potentially it
    # is better to keep the non-determinism.
    #
    # terms = list(ordered(terms))
    # TODO: caching is significant factor for why permutations work at all. Change this.
    V = _symbols('x', len(terms))
    # sort mapping expressions from largest to smallest (last is always x).
    mapping = list(reversed(list(zip(*ordered(                          #
        [(a[0].as_independent(x)[1], a) for a in zip(terms, V)])))[1])) #
    rev_mapping = {v: k for k, v in mapping}                            #
    if mappings is None:                                                #
        # optimizing the number of permutations of mapping              #
        assert mapping[-1][0] == x # if not, find it and correct this comment
        unnecessary_permutations = [mapping.pop(-1)]
        # permute types of objects
        types = defaultdict(list)
        for i in mapping:
            e, _ = i
            types[type(e)].append(i)
        mapping = [types[i] for i in types]
        def _iter_mappings():
            for i in permutations(mapping):
                # make the expression of a given type be ordered
                yield [j for i in i for j in ordered(i)]
        mappings = _iter_mappings()
    else:
        unnecessary_permutations = unnecessary_permutations or []
    def _substitute(expr):
        return expr.subs(mapping)
    for mapping in mappings:
        mapping = list(mapping)
        mapping = mapping + unnecessary_permutations
        diffs = [ _substitute(dcache.get_diff(g)) for g in terms ]
        denoms = [ g.as_numer_denom()[1] for g in diffs ]
        if all(h.is_polynomial(*V) for h in denoms) and _substitute(f).is_rational_function(*V):
            denom = reduce(lambda p, q: lcm(p, q, *V), denoms)
            break
    else:
        if not rewrite:
            result = heurisch(f, x, rewrite=True, hints=hints,
                unnecessary_permutations=unnecessary_permutations)
            if result is not None:
                return indep*result
        return None
    numers = [ cancel(denom*g) for g in diffs ]
    def _derivation(h):
        return Add(*[ d * h.diff(v) for d, v in zip(numers, V) ])
    def _deflation(p):
        for y in V:
            if not p.has(y):
                continue
            if _derivation(p) is not S.Zero:
                c, q = p.as_poly(y).primitive()
                return _deflation(c)*gcd(q, q.diff(y)).as_expr()
        return p
    def _splitter(p):
        for y in V:
            if not p.has(y):
                continue
            if _derivation(y) is not S.Zero:
                c, q = p.as_poly(y).primitive()
                q = q.as_expr()
                h = gcd(q, _derivation(q), y)
                s = quo(h, gcd(q, q.diff(y), y), y)
                c_split = _splitter(c)
                if s.as_poly(y).degree() == 0:
                    return (c_split[0], q * c_split[1])
                q_split = _splitter(cancel(q / s))
                return (c_split[0]*q_split[0]*s, c_split[1]*q_split[1])
        return (S.One, p)
    special = {}
    for term in terms:
        if term.is_Function:
            if isinstance(term, tan):
                special[1 + _substitute(term)**2] = False
            elif isinstance(term, tanh):
                special[1 + _substitute(term)] = False
                special[1 - _substitute(term)] = False
            elif isinstance(term, LambertW):
                special[_substitute(term)] = True
    F = _substitute(f)
    P, Q = F.as_numer_denom()
    u_split = _splitter(denom)
    v_split = _splitter(Q)
    polys = set(list(v_split) + [ u_split[0] ] + list(special.keys()))
    s = u_split[0] * Mul(*[ k for k, v in special.items() if v ])
    polified = [ p.as_poly(*V) for p in [s, P, Q] ]
    if None in polified:
        return None
    #--- definitions for _integrate
    a, b, c = [ p.total_degree() for p in polified ]
    poly_denom = (s * v_split[0] * _deflation(v_split[1])).as_expr()
    def _exponent(g):
        if g.is_Pow:
            if g.exp.is_Rational and g.exp.q != 1:
                if g.exp.p > 0:
                    return g.exp.p + g.exp.q - 1
                else:
                    return abs(g.exp.p + g.exp.q)
            else:
                return 1
        elif not g.is_Atom and g.args:
            return max(_exponent(h) for h in g.args)
        else:
            return 1
    A, B = _exponent(f), a + max(b, c)
    if A > 1 and B > 1:
        monoms = tuple(ordered(itermonomials(V, A + B - 1 + degree_offset)))
    else:
        monoms = tuple(ordered(itermonomials(V, A + B + degree_offset)))
    poly_coeffs = _symbols('A', len(monoms))
    poly_part = Add(*[ poly_coeffs[i]*monomial
        for i, monomial in enumerate(monoms) ])
    reducibles = set()
    for poly in ordered(polys):
        coeff, factors = factor_list(poly, *V)
        reducibles.add(coeff)
        reducibles.update(fact for fact, mul in factors)
    def _integrate(field=None):
        atans = set()
        pairs = set()
        if field == 'Q':
            irreducibles = set(reducibles)
        else:
            setV = set(V)
            irreducibles = set()
            for poly in ordered(reducibles):
                zV = setV & set(iterfreeargs(poly))
                for z in ordered(zV):
                    s = set(root_factors(poly, z, filter=field))
                    irreducibles |= s
                    break
        log_part, atan_part = [], []
        for poly in ordered(irreducibles):
            m = collect(poly, I, evaluate=False)
            y = m.get(I, S.Zero)
            if y:
                x = m.get(S.One, S.Zero)
                if x.has(I) or y.has(I):
                    continue  # nontrivial x + I*y
                pairs.add((x, y))
                irreducibles.remove(poly)
        while pairs:
            x, y = pairs.pop()
            if (x, -y) in pairs:
                pairs.remove((x, -y))
                # Choosing b with no minus sign
                if y.could_extract_minus_sign():
                    y = -y
                irreducibles.add(x*x + y*y)
                atans.add(atan(x/y))
            else:
                irreducibles.add(x + I*y)
        B = _symbols('B', len(irreducibles))
        C = _symbols('C', len(atans))
        # Note: the ordering matters here
        for poly, b in reversed(list(zip(ordered(irreducibles), B))):
            if poly.has(*V):
                poly_coeffs.append(b)
                log_part.append(b * log(poly))
        for poly, c in reversed(list(zip(ordered(atans), C))):
            if poly.has(*V):
                poly_coeffs.append(c)
                atan_part.append(c * poly)
        # TODO: Currently it's better to use symbolic expressions here instead
        # of rational functions, because it's simpler and FracElement doesn't
        # give big speed improvement yet. This is because cancellation is slow
        # due to slow polynomial GCD algorithms. If this gets improved then
        # revise this code.
        candidate = poly_part/poly_denom + Add(*log_part) + Add(*atan_part)
        h = F - _derivation(candidate) / denom
        raw_numer = h.as_numer_denom()[0]
        # Rewrite raw_numer as a polynomial in K[coeffs][V] where K is a field
        # that we have to determine. We can't use simply atoms() because log(3),
        # sqrt(y) and similar expressions can appear, leading to non-trivial
        # domains.
        syms = set(poly_coeffs) | set(V)
        non_syms = set()
        def find_non_syms(expr):
            if expr.is_Integer or expr.is_Rational:
                pass # ignore trivial numbers
            elif expr in syms:
                pass # ignore variables
            elif not expr.has_free(*syms):
                non_syms.add(expr)
            elif expr.is_Add or expr.is_Mul or expr.is_Pow:
                list(map(find_non_syms, expr.args))
            else:
                # TODO: Non-polynomial expression. This should have been
                # filtered out at an earlier stage.
                raise PolynomialError
        try:
            find_non_syms(raw_numer)
        except PolynomialError:
            return None
        else:
            ground, _ = construct_domain(non_syms, field=True)
        coeff_ring = PolyRing(poly_coeffs, ground)
        ring = PolyRing(V, coeff_ring)
        try:
            numer = ring.from_expr(raw_numer)
        except ValueError:
            raise PolynomialError
        solution = solve_lin_sys(numer.coeffs(), coeff_ring, _raw=False)
        if solution is None:
            return None
        else:
            return candidate.xreplace(solution).xreplace(
                dict(zip(poly_coeffs, [S.Zero]*len(poly_coeffs))))
    if all(isinstance(_, Symbol) for _ in V):
        more_free = F.free_symbols - set(V)
    else:
        Fd = F.as_dummy()
        more_free = Fd.xreplace(dict(zip(V, (Dummy() for _ in V)))
            ).free_symbols & Fd.free_symbols
    if not more_free:
        # all free generators are identified in V
        solution = _integrate('Q')
        if solution is None:
            solution = _integrate()
    else:
        solution = _integrate()
    if solution is not None:
        antideriv = solution.subs(rev_mapping)
        antideriv = cancel(antideriv).expand()
        if antideriv.is_Add:
            antideriv = antideriv.as_independent(x)[1]
        return indep*antideriv
    else:
        if retries >= 0:
            result = heurisch(f, x, mappings=mappings, rewrite=rewrite, hints=hints, retries=retries - 1, unnecessary_permutations=unnecessary_permutations)
            if result is not None:
                return indep*result
        return None
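
A small sketch exercising heurisch() directly (normally one calls the top-level integrate()); the expected outputs in the comments follow from the hints handling above and may differ across SymPy versions:

from sympy import exp, tan
from sympy.abc import x, y
from sympy.integrals.heurisch import heurisch

# Documented example: the x-independent factor y is pulled out first.
print(heurisch(y*tan(x), x))              # y*log(tan(x)**2 + 1)/2

# hints=None (the default) means "no suggestions at all": the erf candidate
# is never added, so the heuristic gives up and returns None.
print(heurisch(exp(-x**2), x))            # None (expected)

# hints=[] lets heurisch look for candidate special functions itself; the
# a*x**2 pattern match above then contributes an erf term.
print(heurisch(exp(-x**2), x, hints=[]))  # sqrt(pi)*erf(x)/2 (expected)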
 
 | 
	heurisch 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	64 
 | 
	sympy/integrals/integrals.py 
 | 
	def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs):
    """integrate(f, var, ...)
    .. deprecated:: 1.6
       Using ``integrate()`` with :class:`~.Poly` is deprecated. Use
       :meth:`.Poly.integrate` instead. See :ref:`deprecated-integrate-poly`.
    Explanation
    ===========
    Compute a definite or indefinite integral with respect to one or more
    variables using the Risch-Norman algorithm and table lookup. This procedure is
    able to handle elementary algebraic and transcendental functions
    and also a huge class of special functions, including Airy,
    Bessel, Whittaker and Lambert.
    var can be:
    - a symbol                   -- indefinite integration
    - a tuple (symbol, a)        -- indefinite integration with result
                                    given with ``a`` replacing ``symbol``
    - a tuple (symbol, a, b)     -- definite integration
    Several variables can be specified, in which case the result is
    multiple integration. (If var is omitted and the integrand is
    univariate, the indefinite integral in that variable will be performed.)
    Indefinite integrals are returned without terms that are independent
    of the integration variables. (see examples)
    Definite improper integrals often entail delicate convergence
    conditions. Pass conds='piecewise', 'separate' or 'none' to have
    these returned, respectively, as a Piecewise function, as a separate
    result (i.e. result will be a tuple), or not at all (default is
    'piecewise').
    **Strategy**
    SymPy uses various approaches to definite integration. One method is to
    find an antiderivative for the integrand, and then use the fundamental
    theorem of calculus. Various functions are implemented to integrate
    polynomial, rational and trigonometric functions, and integrands
    containing DiracDelta terms.
    SymPy also implements part of the Risch algorithm, which is a decision
    procedure for integrating elementary functions, i.e., the algorithm can
    either find an elementary antiderivative, or prove that one does not
    exist.  There is also a (very successful, albeit somewhat slow) general
    implementation of the heuristic Risch algorithm.  This algorithm will
    eventually be phased out as more of the full Risch algorithm is
    implemented. See the docstring of Integral._eval_integral() for more
    details on computing the antiderivative using algebraic methods.
    The option risch=True can be used to use only the (full) Risch algorithm.
    This is useful if you want to know if an elementary function has an
    elementary antiderivative.  If the indefinite Integral returned by this
    function is an instance of NonElementaryIntegral, that means that the
    Risch algorithm has proven that integral to be non-elementary.  Note that
    by default, additional methods (such as the Meijer G method outlined
    below) are tried on these integrals, as they may be expressible in terms
    of special functions, so if you only care about elementary answers, use
    risch=True.  Also note that an unevaluated Integral returned by this
    function is not necessarily a NonElementaryIntegral, even with risch=True,
    as it may just be an indication that the particular part of the Risch
    algorithm needed to integrate that function is not yet implemented.
    Another family of strategies comes from re-writing the integrand in
    terms of so-called Meijer G-functions. Indefinite integrals of a
    single G-function can always be computed, and the definite integral
    of a product of two G-functions can be computed from zero to
    infinity. Various strategies are implemented to rewrite integrands
    as G-functions, and use this information to compute integrals (see
    the ``meijerint`` module).
    The option manual=True can be used to use only an algorithm that tries
    to mimic integration by hand. This algorithm does not handle as many
    integrands as the other algorithms implemented but may return results in
    a more familiar form. The ``manualintegrate`` module has functions that
    return the steps used (see the module docstring for more information).
    In general, the algebraic methods work best for computing
    antiderivatives of (possibly complicated) combinations of elementary
    functions. The G-function methods work best for computing definite
    integrals from zero to infinity of moderately complicated
    combinations of special functions, or indefinite integrals of very
    simple combinations of special functions.
    The strategy employed by the integration code is as follows:
    - If computing a definite integral, and both limits are real,
      and at least one limit is +- oo, try the G-function method of
      definite integration first.
    - Try to find an antiderivative, using all available methods, ordered
      by performance (that is try fastest method first, slowest last; in
      particular polynomial integration is tried first, Meijer
      G-functions second to last, and heuristic Risch last).
    - If still not successful, try G-functions irrespective of the
      limits.
    The option meijerg=True, False, None can be used to, respectively:
    always use G-function methods and no others, never use G-function
    methods, or use all available methods (in order as described above).
    It defaults to None.
    Examples
    ========
    >>> from sympy import integrate, log, exp, oo
    >>> from sympy.abc import a, x, y
    >>> integrate(x*y, x)
    x**2*y/2
    >>> integrate(log(x), x)
    x*log(x) - x
    >>> integrate(log(x), (x, 1, a))
    a*log(a) - a + 1
    >>> integrate(x)
    x**2/2
    Terms that are independent of x are dropped by indefinite integration:
    >>> from sympy import sqrt
    >>> integrate(sqrt(1 + x), (x, 0, x))
    2*(x + 1)**(3/2)/3 - 2/3
    >>> integrate(sqrt(1 + x), x)
    2*(x + 1)**(3/2)/3
    >>> integrate(x*y)
    Traceback (most recent call last):
    ...
    ValueError: specify integration variables to integrate x*y
    Note that ``integrate(x)`` syntax is meant only for convenience
    in interactive sessions and should be avoided in library code.
    >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise'
    Piecewise((gamma(a + 1), re(a) > -1),
        (Integral(x**a*exp(-x), (x, 0, oo)), True))
    >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none')
    gamma(a + 1)
    >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate')
    (gamma(a + 1), re(a) > -1)
    See Also
    ========
    Integral, Integral.doit
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_integrate.txt 
 | 
	def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs):
    """integrate(f, var, ...)
    .. deprecated:: 1.6
       Using ``integrate()`` with :class:`~.Poly` is deprecated. Use
       :meth:`.Poly.integrate` instead. See :ref:`deprecated-integrate-poly`.
    Explanation
    ===========
    Compute a definite or indefinite integral with respect to one or more
    variables using the Risch-Norman algorithm and table lookup. This procedure is
    able to handle elementary algebraic and transcendental functions
    and also a huge class of special functions, including Airy,
    Bessel, Whittaker and Lambert.
    var can be:
    - a symbol                   -- indefinite integration
    - a tuple (symbol, a)        -- indefinite integration with result
                                    given with ``a`` replacing ``symbol``
    - a tuple (symbol, a, b)     -- definite integration
    Several variables can be specified, in which case the result is
    multiple integration. (If var is omitted and the integrand is
    univariate, the indefinite integral in that variable will be performed.)
    Indefinite integrals are returned without terms that are independent
    of the integration variables. (see examples)
    Definite improper integrals often entail delicate convergence
    conditions. Pass conds='piecewise', 'separate' or 'none' to have
    these returned, respectively, as a Piecewise function, as a separate
    result (i.e. result will be a tuple), or not at all (default is
    'piecewise').
    **Strategy**
    SymPy uses various approaches to definite integration. One method is to
    find an antiderivative for the integrand, and then use the fundamental
    theorem of calculus. Various functions are implemented to integrate
    polynomial, rational and trigonometric functions, and integrands
    containing DiracDelta terms.
    SymPy also implements part of the Risch algorithm, which is a decision
    procedure for integrating elementary functions, i.e., the algorithm can
    either find an elementary antiderivative, or prove that one does not
    exist.  There is also a (very successful, albeit somewhat slow) general
    implementation of the heuristic Risch algorithm.  This algorithm will
    eventually be phased out as more of the full Risch algorithm is
    implemented. See the docstring of Integral._eval_integral() for more
    details on computing the antiderivative using algebraic methods.
    The option risch=True can be used to use only the (full) Risch algorithm.
    This is useful if you want to know if an elementary function has an
    elementary antiderivative.  If the indefinite Integral returned by this
    function is an instance of NonElementaryIntegral, that means that the
    Risch algorithm has proven that integral to be non-elementary.  Note that
    by default, additional methods (such as the Meijer G method outlined
    below) are tried on these integrals, as they may be expressible in terms
    of special functions, so if you only care about elementary answers, use
    risch=True.  Also note that an unevaluated Integral returned by this
    function is not necessarily a NonElementaryIntegral, even with risch=True,
    as it may just be an indication that the particular part of the Risch
    algorithm needed to integrate that function is not yet implemented.
    Another family of strategies comes from re-writing the integrand in
    terms of so-called Meijer G-functions. Indefinite integrals of a
    single G-function can always be computed, and the definite integral
    of a product of two G-functions can be computed from zero to
    infinity. Various strategies are implemented to rewrite integrands
    as G-functions, and use this information to compute integrals (see
    the ``meijerint`` module).
    The option manual=True can be used to use only an algorithm that tries
    to mimic integration by hand. This algorithm does not handle as many
    integrands as the other algorithms implemented but may return results in
    a more familiar form. The ``manualintegrate`` module has functions that
    return the steps used (see the module docstring for more information).
    In general, the algebraic methods work best for computing
    antiderivatives of (possibly complicated) combinations of elementary
    functions. The G-function methods work best for computing definite
    integrals from zero to infinity of moderately complicated
    combinations of special functions, or indefinite integrals of very
    simple combinations of special functions.
    The strategy employed by the integration code is as follows:
    - If computing a definite integral, and both limits are real,
      and at least one limit is +- oo, try the G-function method of
      definite integration first.
    - Try to find an antiderivative, using all available methods, ordered
      by performance (that is try fastest method first, slowest last; in
      particular polynomial integration is tried first, Meijer
      G-functions second to last, and heuristic Risch last).
    - If still not successful, try G-functions irrespective of the
      limits.
    The option meijerg=True, False, None can be used to, respectively:
    always use G-function methods and no others, never use G-function
    methods, or use all available methods (in order as described above).
    It defaults to None.
    Examples
    ========
    >>> from sympy import integrate, log, exp, oo
    >>> from sympy.abc import a, x, y
    >>> integrate(x*y, x)
    x**2*y/2
    >>> integrate(log(x), x)
    x*log(x) - x
    >>> integrate(log(x), (x, 1, a))
    a*log(a) - a + 1
    >>> integrate(x)
    x**2/2
    Terms that are independent of x are dropped by indefinite integration:
    >>> from sympy import sqrt
    >>> integrate(sqrt(1 + x), (x, 0, x))
    2*(x + 1)**(3/2)/3 - 2/3
    >>> integrate(sqrt(1 + x), x)
    2*(x + 1)**(3/2)/3
    >>> integrate(x*y)
    Traceback (most recent call last):
    ...
    ValueError: specify integration variables to integrate x*y
    Note that ``integrate(x)`` syntax is meant only for convenience
    in interactive sessions and should be avoided in library code.
    >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise'
    Piecewise((gamma(a + 1), re(a) > -1),
        (Integral(x**a*exp(-x), (x, 0, oo)), True))
    >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none')
    gamma(a + 1)
    >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate')
    (gamma(a + 1), re(a) > -1)
    See Also
    ========
    Integral, Integral.doit
    """
    doit_flags = {
        'deep': False,
        'meijerg': meijerg,
        'conds': conds,
        'risch': risch,
        'heurisch': heurisch,
        'manual': manual
        }
    integral = Integral(*args, **kwargs)
    if isinstance(integral, Integral):
        return integral.doit(**doit_flags)
    else:
        new_args = [a.doit(**doit_flags) if isinstance(a, Integral) else a
            for a in integral.args]
        return integral.func(*new_args)
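
Two further hedged examples of the options described in the docstring (outputs shown as comments; exact printing may vary by SymPy version):

from sympy import integrate, exp
from sympy.abc import x, y

# Multiple integration: supply one limit tuple per variable.
print(integrate(x*y, (x, 0, 1), (y, 0, 1)))    # 1/4

# risch=True restricts integration to the full Risch algorithm; for
# exp(-x**2) it proves there is no elementary antiderivative, so the result
# is an unevaluated NonElementaryIntegral (printed like a plain Integral).
result = integrate(exp(-x**2), x, risch=True)
print(type(result).__name__, result)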
 
 | 
	integrate 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	66 
 | 
	sympy/geometry/util.py 
 | 
	def intersection(*entities, pairwise=False, **kwargs):
    """The intersection of a collection of GeometryEntity instances.
    Parameters
    ==========
    entities : sequence of GeometryEntity
    pairwise (keyword argument) : Can be either True or False
    Returns
    =======
    intersection : list of GeometryEntity
    Raises
    ======
    NotImplementedError
        When unable to calculate intersection.
    Notes
    =====
    The intersection of any geometrical entity with itself should return
    a list with one item: the entity in question.
    An intersection requires two or more entities. If only a single
    entity is given then the function will return an empty list.
    It is possible for `intersection` to miss intersections that one
    knows exist because the required quantities were not fully
    simplified internally.
    Reals should be converted to Rationals, e.g. Rational(str(real_num))
    or else failures due to floating point issues may result.
    Case 1: When the keyword argument 'pairwise' is False (default value):
    In this case, the function returns a list of intersections common to
    all entities.
    Case 2: When the keyword argument 'pairwise' is True:
    In this case, the function returns a list of intersections that occur
    between any pair of entities.
    See Also
    ========
    sympy.geometry.entity.GeometryEntity.intersection
    Examples
    ========
    >>> from sympy import Ray, Circle, intersection
    >>> c = Circle((0, 1), 1)
    >>> intersection(c, c.center)
    []
    >>> right = Ray((0, 0), (1, 0))
    >>> up = Ray((0, 0), (0, 1))
    >>> intersection(c, right, up)
    [Point2D(0, 0)]
    >>> intersection(c, right, up, pairwise=True)
    [Point2D(0, 0), Point2D(0, 2)]
    >>> left = Ray((1, 0), (0, 0))
    >>> intersection(right, left)
    [Segment2D(Point2D(0, 0), Point2D(1, 0))]
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_intersection.txt 
 | 
	def intersection(*entities, pairwise=False, **kwargs):
    """The intersection of a collection of GeometryEntity instances.
    Parameters
    ==========
    entities : sequence of GeometryEntity
    pairwise (keyword argument) : Can be either True or False
    Returns
    =======
    intersection : list of GeometryEntity
    Raises
    ======
    NotImplementedError
        When unable to calculate intersection.
    Notes
    =====
    The intersection of any geometrical entity with itself should return
    a list with one item: the entity in question.
    An intersection requires two or more entities. If only a single
    entity is given then the function will return an empty list.
    It is possible for `intersection` to miss intersections that one
    knows exist because the required quantities were not fully
    simplified internally.
    Reals should be converted to Rationals, e.g. Rational(str(real_num))
    or else failures due to floating point issues may result.
    Case 1: When the keyword argument 'pairwise' is False (default value):
    In this case, the function returns a list of intersections common to
    all entities.
    Case 2: When the keyword argument 'pairwise' is True:
    In this case, the function returns a list of intersections that occur
    between any pair of entities.
    See Also
    ========
    sympy.geometry.entity.GeometryEntity.intersection
    Examples
    ========
    >>> from sympy import Ray, Circle, intersection
    >>> c = Circle((0, 1), 1)
    >>> intersection(c, c.center)
    []
    >>> right = Ray((0, 0), (1, 0))
    >>> up = Ray((0, 0), (0, 1))
    >>> intersection(c, right, up)
    [Point2D(0, 0)]
    >>> intersection(c, right, up, pairwise=True)
    [Point2D(0, 0), Point2D(0, 2)]
    >>> left = Ray((1, 0), (0, 0))
    >>> intersection(right, left)
    [Segment2D(Point2D(0, 0), Point2D(1, 0))]
    """
    if len(entities) <= 1:
        return []
    entities = list(entities)
    prec = None
    for i, e in enumerate(entities):
        if not isinstance(e, GeometryEntity):
            # entities may be an immutable tuple
            e = Point(e)
        # convert to exact Rationals
        d = {}
        for f in e.atoms(Float):
            prec = f._prec if prec is None else min(f._prec, prec)
            d.setdefault(f, nsimplify(f, rational=True))
        entities[i] = e.xreplace(d)
    if not pairwise:
        # find the intersection common to all objects
        res = entities[0].intersection(entities[1])
        for entity in entities[2:]:
            newres = []
            for x in res:
                newres.extend(x.intersection(entity))
            res = newres
    else:
        # find all pairwise intersections
        ans = []
        for j in range(len(entities)):
            for k in range(j + 1, len(entities)):
                ans.extend(intersection(entities[j], entities[k]))
        res = list(ordered(set(ans)))
    # convert back to Floats
    if prec is not None:
        p = prec_to_dps(prec)
        res = [i.n(p) for i in res]
    return res
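
A small sketch (not from the docstring above) contrasting the default all-entities behaviour with pairwise=True:

from sympy import Line, Segment, intersection

l1 = Line((0, 0), (1, 1))        # the line y = x
l2 = Line((0, 1), (1, 0))        # the line y = 1 - x
s = Segment((0, 0), (2, 0))      # a segment on the x-axis

# Default: only intersections common to *all* entities; the single crossing
# point of l1 and l2, (1/2, 1/2), does not lie on s, so nothing survives.
print(intersection(l1, l2, s))                  # []

# pairwise=True: collect the intersections of every pair of entities.
print(intersection(l1, l2, s, pairwise=True))
# e.g. [Point2D(0, 0), Point2D(1/2, 1/2), Point2D(1, 0)] (order set by ordered())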
 
 | 
	intersection 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	70 
 | 
	sympy/utilities/lambdify.py 
 | 
	def lambdify(args, expr, modules=None, printer=None, use_imps=True,
             dummify=False, cse=False, docstring_limit=1000):
    """Convert a SymPy expression into a function that allows for fast
    numeric evaluation.
    .. warning::
       This function uses ``exec``, and thus should not be used on
       unsanitized input.
    .. deprecated:: 1.7
       Passing a set for the *args* parameter is deprecated as sets are
       unordered. Use an ordered iterable such as a list or tuple.
    Explanation
    ===========
    For example, to convert the SymPy expression ``sin(x) + cos(x)`` to an
    equivalent NumPy function that numerically evaluates it:
    >>> from sympy import sin, cos, symbols, lambdify
    >>> import numpy as np
    >>> x = symbols('x')
    >>> expr = sin(x) + cos(x)
    >>> expr
    sin(x) + cos(x)
    >>> f = lambdify(x, expr, 'numpy')
    >>> a = np.array([1, 2])
    >>> f(a)
    [1.38177329 0.49315059]
    The primary purpose of this function is to provide a bridge from SymPy
    expressions to numerical libraries such as NumPy, SciPy, NumExpr, mpmath,
    and tensorflow. In general, SymPy functions do not work with objects from
    other libraries, such as NumPy arrays, and functions from numeric
    libraries like NumPy or mpmath do not work on SymPy expressions.
    ``lambdify`` bridges the two by converting a SymPy expression to an
    equivalent numeric function.
    The basic workflow with ``lambdify`` is to first create a SymPy expression
    representing whatever mathematical function you wish to evaluate. This
    should be done using only SymPy functions and expressions. Then, use
    ``lambdify`` to convert this to an equivalent function for numerical
    evaluation. For instance, above we created ``expr`` using the SymPy symbol
    ``x`` and SymPy functions ``sin`` and ``cos``, then converted it to an
    equivalent NumPy function ``f``, and called it on a NumPy array ``a``.
    Parameters
    ==========
    args : List[Symbol]
        A variable or a list of variables whose nesting represents the
        nesting of the arguments that will be passed to the function.
        Variables can be symbols, undefined functions, or matrix symbols.
        >>> from sympy import Eq
        >>> from sympy.abc import x, y, z
        The list of variables should match the structure of how the
        arguments will be passed to the function. Simply enclose the
        parameters as they will be passed in a list.
        To call a function like ``f(x)`` then ``[x]``
        should be the first argument to ``lambdify``; for this
        case a single ``x`` can also be used:
        >>> f = lambdify(x, x + 1)
        >>> f(1)
        2
        >>> f = lambdify([x], x + 1)
        >>> f(1)
        2
        To call a function like ``f(x, y)`` then ``[x, y]`` will
        be the first argument of the ``lambdify``:
        >>> f = lambdify([x, y], x + y)
        >>> f(1, 1)
        2
        To call a function with a single 3-element tuple like
        ``f((x, y, z))`` then ``[(x, y, z)]`` will be the first
        argument of the ``lambdify``:
        >>> f = lambdify([(x, y, z)], Eq(z**2, x**2 + y**2))
        >>> f((3, 4, 5))
        True
        If two args will be passed and the first is a scalar but
        the second is a tuple with two arguments then the items
        in the list should match that structure:
        >>> f = lambdify([x, (y, z)], x + y + z)
        >>> f(1, (2, 3))
        6
    expr : Expr
        An expression, list of expressions, or matrix to be evaluated.
        Lists may be nested.
        If the expression is a list, the output will also be a list.
        >>> f = lambdify(x, [x, [x + 1, x + 2]])
        >>> f(1)
        [1, [2, 3]]
        If it is a matrix, an array will be returned (for the NumPy module).
        >>> from sympy import Matrix
        >>> f = lambdify(x, Matrix([x, x + 1]))
        >>> f(1)
        [[1]
        [2]]
        Note that the argument order here (variables then expression) is used
        to emulate the Python ``lambda`` keyword. ``lambdify(x, expr)`` works
        (roughly) like ``lambda x: expr``
        (see :ref:`lambdify-how-it-works` below).
    modules : str, optional
        Specifies the numeric library to use.
        If not specified, *modules* defaults to:
        - ``["scipy", "numpy"]`` if SciPy is installed
        - ``["numpy"]`` if only NumPy is installed
        - ``["math", "mpmath", "sympy"]`` if neither is installed.
        That is, SymPy functions are replaced as far as possible by
        either ``scipy`` or ``numpy`` functions if available, and Python's
        standard library ``math``, or ``mpmath`` functions otherwise.
        *modules* can be one of the following types:
        - The strings ``"math"``, ``"mpmath"``, ``"numpy"``, ``"numexpr"``,
          ``"scipy"``, ``"sympy"``, or ``"tensorflow"`` or ``"jax"``. This uses the
          corresponding printer and namespace mapping for that module.
        - A module (e.g., ``math``). This uses the global namespace of the
          module. If the module is one of the above known modules, it will
          also use the corresponding printer and namespace mapping
          (i.e., ``modules=numpy`` is equivalent to ``modules="numpy"``).
        - A dictionary that maps names of SymPy functions to arbitrary
          functions
          (e.g., ``{'sin': custom_sin}``).
        - A list that contains a mix of the arguments above, with higher
          priority given to entries appearing first
          (e.g., to use the NumPy module but override the ``sin`` function
          with a custom version, you can use
          ``[{'sin': custom_sin}, 'numpy']``).
    dummify : bool, optional
        Whether or not the variables in the provided expression that are not
        valid Python identifiers are substituted with dummy symbols.
        This allows for undefined functions like ``Function('f')(t)`` to be
        supplied as arguments. By default, the variables are only dummified
        if they are not valid Python identifiers.
        Set ``dummify=True`` to replace all arguments with dummy symbols
        (if ``args`` is not a string) - for example, to ensure that the
        arguments do not redefine any built-in names.
    cse : bool, or callable, optional
        Large expressions can be computed more efficiently when
        common subexpressions are identified and precomputed before
        being used multiple times. Finding the subexpressions will make
        creation of the 'lambdify' function slower, however.
        When ``True``, ``sympy.simplify.cse`` is used, otherwise (the default)
        the user may pass a function matching the ``cse`` signature.
    docstring_limit : int or None
        When lambdifying large expressions, a significant proportion of the time
        spent inside ``lambdify`` is spent producing a string representation of
        the expression for use in the automatically generated docstring of the
        returned function. For expressions containing hundreds or more nodes the
        resulting docstring often becomes so long and dense that it is difficult
        to read. To reduce the runtime of lambdify, the rendering of the full
        expression inside the docstring can be disabled.
        When ``None``, the full expression is rendered in the docstring. When
        ``0`` or a negative ``int``, an ellipsis is rendered in the docstring
        instead of the expression. When a strictly positive ``int``, if the
        number of nodes in the expression exceeds ``docstring_limit`` an
        ellipsis is rendered in the docstring, otherwise a string representation
        of the expression is rendered as normal. The default is ``1000``.
    Examples
    ========
    >>> from sympy.utilities.lambdify import implemented_function
    >>> from sympy import sqrt, sin, Matrix
    >>> from sympy import Function
    >>> from sympy.abc import w, x, y, z
    >>> f = lambdify(x, x**2)
    >>> f(2)
    4
    >>> f = lambdify((x, y, z), [z, y, x])
    >>> f(1,2,3)
    [3, 2, 1]
    >>> f = lambdify(x, sqrt(x))
    >>> f(4)
    2.0
    >>> f = lambdify((x, y), sin(x*y)**2)
    >>> f(0, 5)
    0.0
    >>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
    >>> row(1, 2)
    Matrix([[1, 3]])
    ``lambdify`` can be used to translate SymPy expressions into mpmath
    functions. This may be preferable to using ``evalf`` (which uses mpmath on
    the backend) in some cases.
    >>> f = lambdify(x, sin(x), 'mpmath')
    >>> f(1)
    0.8414709848078965
    Tuple arguments are handled and the lambdified function should
    be called with the same type of arguments as were used to create
    the function:
    >>> f = lambdify((x, (y, z)), x + y)
    >>> f(1, (2, 4))
    3
    The ``flatten`` function can be used to always work with flattened
    arguments:
    >>> from sympy.utilities.iterables import flatten
    >>> args = w, (x, (y, z))
    >>> vals = 1, (2, (3, 4))
    >>> f = lambdify(flatten(args), w + x + y + z)
    >>> f(*flatten(vals))
    10
    Functions present in ``expr`` can also carry their own numerical
    implementations, in a callable attached to the ``_imp_`` attribute. This
    can be used with undefined functions using the ``implemented_function``
    factory:
    >>> f = implemented_function(Function('f'), lambda x: x+1)
    >>> func = lambdify(x, f(x))
    >>> func(4)
    5
    ``lambdify`` always prefers ``_imp_`` implementations to implementations
    in other namespaces, unless the ``use_imps`` input parameter is False.
    Usage with Tensorflow:
    >>> import tensorflow as tf
    >>> from sympy import Max, sin, lambdify
    >>> from sympy.abc import x
    >>> f = Max(x, sin(x))
    >>> func = lambdify(x, f, 'tensorflow')
    After tensorflow v2, eager execution is enabled by default.
    If you want results that are compatible across tensorflow v1 and v2,
    as in this tutorial, run this line:
    >>> tf.compat.v1.enable_eager_execution()
    With eager execution enabled, you can get the result out immediately,
    just as with numpy.
    If you pass tensorflow objects, you may get an ``EagerTensor``
    object instead of a plain value.
    >>> result = func(tf.constant(1.0))
    >>> print(result)
    tf.Tensor(1.0, shape=(), dtype=float32)
    >>> print(result.__class__)
    <class 'tensorflow.python.framework.ops.EagerTensor'>
    You can use ``.numpy()`` to get the numpy value of the tensor.
    >>> result.numpy()
    1.0
    >>> var = tf.Variable(2.0)
    >>> result = func(var) # also works for tf.Variable and tf.Placeholder
    >>> result.numpy()
    2.0
    And it works with any shape array.
    >>> tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    >>> result = func(tensor)
    >>> result.numpy()
    [[1. 2.]
     [3. 4.]]
    Notes
    =====
    - For functions involving large array calculations, numexpr can provide a
      significant speedup over numpy. Please note that the available functions
      for numexpr are more limited than numpy but can be expanded with
      ``implemented_function`` and user defined subclasses of Function. If
      specified, ``numexpr`` must be the only item in *modules*. The official list
      of numexpr functions can be found at:
      https://numexpr.readthedocs.io/en/latest/user_guide.html#supported-functions
    - In the above examples, the generated functions can accept scalar
      values or numpy arrays as arguments.  However, in some cases
      the generated function relies on the input being a numpy array:
      >>> import numpy
      >>> from sympy import Piecewise
      >>> from sympy.testing.pytest import ignore_warnings
      >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "numpy")
      >>> with ignore_warnings(RuntimeWarning):
      ...     f(numpy.array([-1, 0, 1, 2]))
      [-1.   0.   1.   0.5]
      >>> f(0)
      Traceback (most recent call last):
          ...
      ZeroDivisionError: division by zero
      In such cases, the input should be wrapped in a numpy array:
      >>> with ignore_warnings(RuntimeWarning):
      ...     float(f(numpy.array([0])))
      0.0
      Or if numpy functionality is not required another module can be used:
      >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "math")
      >>> f(0)
      0
    .. _lambdify-how-it-works:
    How it works
    ============
    When using this function, it helps a great deal to have an idea of what it
    is doing. At its core, lambdify is nothing more than a namespace
    translation, on top of a special printer that makes some corner cases work
    properly.
    To understand lambdify, first we must properly understand how Python
    namespaces work. Say we had two files. One called ``sin_cos_sympy.py``,
    with
    .. code:: python
        # sin_cos_sympy.py
        from sympy.functions.elementary.trigonometric import (cos, sin)
        def sin_cos(x):
            return sin(x) + cos(x)
    and one called ``sin_cos_numpy.py`` with
    .. code:: python
        # sin_cos_numpy.py
        from numpy import sin, cos
        def sin_cos(x):
            return sin(x) + cos(x)
    The two files define an identical function ``sin_cos``. However, in the
    first file, ``sin`` and ``cos`` are defined as the SymPy ``sin`` and
    ``cos``. In the second, they are defined as the NumPy versions.
    If we were to import the first file and use the ``sin_cos`` function, we
    would get something like
    >>> from sin_cos_sympy import sin_cos # doctest: +SKIP
    >>> sin_cos(1) # doctest: +SKIP
    cos(1) + sin(1)
    On the other hand, if we imported ``sin_cos`` from the second file, we
    would get
    >>> from sin_cos_numpy import sin_cos # doctest: +SKIP
    >>> sin_cos(1) # doctest: +SKIP
    1.38177329068
    In the first case we got a symbolic output, because it used the symbolic
    ``sin`` and ``cos`` functions from SymPy. In the second, we got a numeric
    result, because ``sin_cos`` used the numeric ``sin`` and ``cos`` functions
    from NumPy. But notice that the versions of ``sin`` and ``cos`` that were
    used were not inherent to the ``sin_cos`` function definition. Both
    ``sin_cos`` definitions are exactly the same. Rather, it was based on the
    names defined in the module where the ``sin_cos`` function was defined.
    The key point here is that when a function in Python references a name that
    is not defined in the function, that name is looked up in the "global"
    namespace of the module where that function is defined.
    Now, in Python, we can emulate this behavior without actually writing a
    file to disk using the ``exec`` function. ``exec`` takes a string
    containing a block of Python code, and a dictionary that should contain
    the global variables of the module. It then executes the code "in" that
    dictionary, as if it were the module globals. The following is equivalent
    to the ``sin_cos`` defined in ``sin_cos_sympy.py``:
    >>> import sympy
    >>> module_dictionary = {'sin': sympy.sin, 'cos': sympy.cos}
    >>> exec('''
    ... def sin_cos(x):
    ...     return sin(x) + cos(x)
    ... ''', module_dictionary)
    >>> sin_cos = module_dictionary['sin_cos']
    >>> sin_cos(1)
    cos(1) + sin(1)
    and similarly with ``sin_cos_numpy``:
    >>> import numpy
    >>> module_dictionary = {'sin': numpy.sin, 'cos': numpy.cos}
    >>> exec('''
    ... def sin_cos(x):
    ...     return sin(x) + cos(x)
    ... ''', module_dictionary)
    >>> sin_cos = module_dictionary['sin_cos']
    >>> sin_cos(1)
    1.38177329068
    So now we can get an idea of how ``lambdify`` works. The name "lambdify"
    comes from the fact that we can think of something like ``lambdify(x,
    sin(x) + cos(x), 'numpy')`` as ``lambda x: sin(x) + cos(x)``, where
    ``sin`` and ``cos`` come from the ``numpy`` namespace. This is also why
    the symbols argument is first in ``lambdify``, as opposed to most SymPy
    functions where it comes after the expression: to better mimic the
    ``lambda`` keyword.
    ``lambdify`` takes the input expression (like ``sin(x) + cos(x)``) and
    1. Converts it to a string
    2. Creates a module globals dictionary based on the modules that are
       passed in (by default, it uses the NumPy module)
    3. Creates the string ``"def func({vars}): return {expr}"``, where ``{vars}`` is the
       list of variables separated by commas, and ``{expr}`` is the string
       created in step 1., then ``exec``s that string with the module globals
       namespace and returns ``func``.
    In fact, functions returned by ``lambdify`` support inspection. So you can
    see exactly how they are defined by using ``inspect.getsource``, or ``??`` if you
    are using IPython or the Jupyter notebook.
    >>> f = lambdify(x, sin(x) + cos(x))
    >>> import inspect
    >>> print(inspect.getsource(f))
    def _lambdifygenerated(x):
        return sin(x) + cos(x)
    This shows us the source code of the function, but not the namespace it
    was defined in. We can inspect that by looking at the ``__globals__``
    attribute of ``f``:
    >>> f.__globals__['sin']
    <ufunc 'sin'>
    >>> f.__globals__['cos']
    <ufunc 'cos'>
    >>> f.__globals__['sin'] is numpy.sin
    True
    This shows us that ``sin`` and ``cos`` in the namespace of ``f`` will be
    ``numpy.sin`` and ``numpy.cos``.
    Note that there are some convenience layers in each of these steps, but at
    the core, this is how ``lambdify`` works. Step 1 is done using the
    ``LambdaPrinter`` printers defined in the printing module (see
    :mod:`sympy.printing.lambdarepr`). This allows different SymPy expressions
    to define how they should be converted to a string for different modules.
    You can change which printer ``lambdify`` uses by passing a custom printer
    in to the ``printer`` argument.
    Step 2 is augmented by certain translations. There are default
    translations for each module, but you can provide your own by passing a
    list to the ``modules`` argument. For instance,
    >>> def mysin(x):
    ...     print('taking the sin of', x)
    ...     return numpy.sin(x)
    ...
    >>> f = lambdify(x, sin(x), [{'sin': mysin}, 'numpy'])
    >>> f(1)
    taking the sin of 1
    0.8414709848078965
    The globals dictionary is generated from the list by merging the
    dictionary ``{'sin': mysin}`` and the module dictionary for NumPy. The
    merging is done so that earlier items take precedence, which is why
    ``mysin`` is used above instead of ``numpy.sin``.
    If you want to modify the way ``lambdify`` works for a given function, it
    is usually easiest to do so by modifying the globals dictionary as such.
    In more complicated cases, it may be necessary to create and pass in a
    custom printer.
    Finally, step 3 is augmented with certain convenience operations, such as
    the addition of a docstring.
    Understanding how ``lambdify`` works can make it easier to avoid certain
    gotchas when using it. For instance, a common mistake is to create a
    lambdified function for one module (say, NumPy), and pass it objects from
    another (say, a SymPy expression).
    For instance, say we create
    >>> from sympy.abc import x
    >>> f = lambdify(x, x + 1, 'numpy')
    Now if we pass in a NumPy array, we get that array plus 1
    >>> import numpy
    >>> a = numpy.array([1, 2])
    >>> f(a)
    [2 3]
    But what happens if you make the mistake of passing in a SymPy expression
    instead of a NumPy array:
    >>> f(x + 1)
    x + 2
    This worked, but it was only by accident. Now take a different lambdified
    function:
    >>> from sympy import sin
    >>> g = lambdify(x, x + sin(x), 'numpy')
    This works as expected on NumPy arrays:
    >>> g(a)
    [1.84147098 2.90929743]
    But if we try to pass in a SymPy expression, it fails
    >>> g(x + 1)
    Traceback (most recent call last):
    ...
    TypeError: loop of ufunc does not support argument 0 of type Add which has
               no callable sin method
    Now, let's look at what happened. The reason this fails is that ``g``
    calls ``numpy.sin`` on the input expression, and ``numpy.sin`` does not
    know how to operate on a SymPy object. **As a general rule, NumPy
    functions do not know how to operate on SymPy expressions, and SymPy
    functions do not know how to operate on NumPy arrays. This is why lambdify
    exists: to provide a bridge between SymPy and NumPy.**
    However, why is it that ``f`` did work? That's because ``f`` does not call
    any functions, it only adds 1. So the resulting function that is created,
    ``def _lambdifygenerated(x): return x + 1`` does not depend on the globals
    namespace it is defined in. Thus it works, but only by accident. A future
    version of ``lambdify`` may remove this behavior.
    Be aware that certain implementation details described here may change in
    future versions of SymPy. The API of passing in custom modules and
    printers will not change, but the details of how a lambda function is
    created may change. However, the basic idea will remain the same, and
    understanding it will be helpful to understanding the behavior of
    lambdify.
    **In general: you should create lambdified functions for one module (say,
    NumPy), and only pass it input types that are compatible with that module
    (say, NumPy arrays).** Remember that by default, if the ``modules``
    argument is not provided, ``lambdify`` creates functions using the NumPy
    and SciPy namespaces.
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_lambdify.txt 
 | 
	def lambdify(args, expr, modules=None, printer=None, use_imps=True,
             dummify=False, cse=False, docstring_limit=1000):
    """Convert a SymPy expression into a function that allows for fast
    numeric evaluation.
    .. warning::
       This function uses ``exec``, and thus should not be used on
       unsanitized input.
    .. deprecated:: 1.7
       Passing a set for the *args* parameter is deprecated as sets are
       unordered. Use an ordered iterable such as a list or tuple.
    Explanation
    ===========
    For example, to convert the SymPy expression ``sin(x) + cos(x)`` to an
    equivalent NumPy function that numerically evaluates it:
    >>> from sympy import sin, cos, symbols, lambdify
    >>> import numpy as np
    >>> x = symbols('x')
    >>> expr = sin(x) + cos(x)
    >>> expr
    sin(x) + cos(x)
    >>> f = lambdify(x, expr, 'numpy')
    >>> a = np.array([1, 2])
    >>> f(a)
    [1.38177329 0.49315059]
    The primary purpose of this function is to provide a bridge from SymPy
    expressions to numerical libraries such as NumPy, SciPy, NumExpr, mpmath,
    and tensorflow. In general, SymPy functions do not work with objects from
    other libraries, such as NumPy arrays, and functions from numeric
    libraries like NumPy or mpmath do not work on SymPy expressions.
    ``lambdify`` bridges the two by converting a SymPy expression to an
    equivalent numeric function.
    The basic workflow with ``lambdify`` is to first create a SymPy expression
    representing whatever mathematical function you wish to evaluate. This
    should be done using only SymPy functions and expressions. Then, use
    ``lambdify`` to convert this to an equivalent function for numerical
    evaluation. For instance, above we created ``expr`` using the SymPy symbol
    ``x`` and SymPy functions ``sin`` and ``cos``, then converted it to an
    equivalent NumPy function ``f``, and called it on a NumPy array ``a``.
    Parameters
    ==========
    args : List[Symbol]
        A variable or a list of variables whose nesting represents the
        nesting of the arguments that will be passed to the function.
        Variables can be symbols, undefined functions, or matrix symbols.
        >>> from sympy import Eq
        >>> from sympy.abc import x, y, z
        The list of variables should match the structure of how the
        arguments will be passed to the function. Simply enclose the
        parameters as they will be passed in a list.
        To call a function like ``f(x)`` then ``[x]``
        should be the first argument to ``lambdify``; for this
        case a single ``x`` can also be used:
        >>> f = lambdify(x, x + 1)
        >>> f(1)
        2
        >>> f = lambdify([x], x + 1)
        >>> f(1)
        2
        To call a function like ``f(x, y)`` then ``[x, y]`` will
        be the first argument of the ``lambdify``:
        >>> f = lambdify([x, y], x + y)
        >>> f(1, 1)
        2
        To call a function with a single 3-element tuple like
        ``f((x, y, z))`` then ``[(x, y, z)]`` will be the first
        argument of the ``lambdify``:
        >>> f = lambdify([(x, y, z)], Eq(z**2, x**2 + y**2))
        >>> f((3, 4, 5))
        True
        If two args will be passed and the first is a scalar but
        the second is a tuple with two arguments then the items
        in the list should match that structure:
        >>> f = lambdify([x, (y, z)], x + y + z)
        >>> f(1, (2, 3))
        6
    expr : Expr
        An expression, list of expressions, or matrix to be evaluated.
        Lists may be nested.
        If the expression is a list, the output will also be a list.
        >>> f = lambdify(x, [x, [x + 1, x + 2]])
        >>> f(1)
        [1, [2, 3]]
        If it is a matrix, an array will be returned (for the NumPy module).
        >>> from sympy import Matrix
        >>> f = lambdify(x, Matrix([x, x + 1]))
        >>> f(1)
        [[1]
        [2]]
        Note that the argument order here (variables then expression) is used
        to emulate the Python ``lambda`` keyword. ``lambdify(x, expr)`` works
        (roughly) like ``lambda x: expr``
        (see :ref:`lambdify-how-it-works` below).
    modules : str, optional
        Specifies the numeric library to use.
        If not specified, *modules* defaults to:
        - ``["scipy", "numpy"]`` if SciPy is installed
        - ``["numpy"]`` if only NumPy is installed
        - ``["math", "mpmath", "sympy"]`` if neither is installed.
        That is, SymPy functions are replaced as far as possible by
        either ``scipy`` or ``numpy`` functions if available, and Python's
        standard library ``math``, or ``mpmath`` functions otherwise.
        *modules* can be one of the following types:
        - The strings ``"math"``, ``"mpmath"``, ``"numpy"``, ``"numexpr"``,
          ``"scipy"``, ``"sympy"``, or ``"tensorflow"`` or ``"jax"``. This uses the
          corresponding printer and namespace mapping for that module.
        - A module (e.g., ``math``). This uses the global namespace of the
          module. If the module is one of the above known modules, it will
          also use the corresponding printer and namespace mapping
          (i.e., ``modules=numpy`` is equivalent to ``modules="numpy"``).
        - A dictionary that maps names of SymPy functions to arbitrary
          functions
          (e.g., ``{'sin': custom_sin}``).
        - A list that contains a mix of the arguments above, with higher
          priority given to entries appearing first
          (e.g., to use the NumPy module but override the ``sin`` function
          with a custom version, you can use
          ``[{'sin': custom_sin}, 'numpy']``).
    dummify : bool, optional
        Whether or not the variables in the provided expression that are not
        valid Python identifiers are substituted with dummy symbols.
        This allows for undefined functions like ``Function('f')(t)`` to be
        supplied as arguments. By default, the variables are only dummified
        if they are not valid Python identifiers.
        Set ``dummify=True`` to replace all arguments with dummy symbols
        (if ``args`` is not a string) - for example, to ensure that the
        arguments do not redefine any built-in names.
    cse : bool, or callable, optional
        Large expressions can be computed more efficiently when
        common subexpressions are identified and precomputed before
        being used multiple times. Finding the subexpressions will make
        creation of the 'lambdify' function slower, however.
        When ``True``, ``sympy.simplify.cse`` is used, otherwise (the default)
        the user may pass a function matching the ``cse`` signature.
    docstring_limit : int or None
        When lambdifying large expressions, a significant proportion of the time
        spent inside ``lambdify`` is spent producing a string representation of
        the expression for use in the automatically generated docstring of the
        returned function. For expressions containing hundreds or more nodes the
        resulting docstring often becomes so long and dense that it is difficult
        to read. To reduce the runtime of lambdify, the rendering of the full
        expression inside the docstring can be disabled.
        When ``None``, the full expression is rendered in the docstring. When
        ``0`` or a negative ``int``, an ellipsis is rendered in the docstring
        instead of the expression. When a strictly positive ``int``, if the
        number of nodes in the expression exceeds ``docstring_limit`` an
        ellipsis is rendered in the docstring, otherwise a string representation
        of the expression is rendered as normal. The default is ``1000``.
    Examples
    ========
    >>> from sympy.utilities.lambdify import implemented_function
    >>> from sympy import sqrt, sin, Matrix
    >>> from sympy import Function
    >>> from sympy.abc import w, x, y, z
    >>> f = lambdify(x, x**2)
    >>> f(2)
    4
    >>> f = lambdify((x, y, z), [z, y, x])
    >>> f(1,2,3)
    [3, 2, 1]
    >>> f = lambdify(x, sqrt(x))
    >>> f(4)
    2.0
    >>> f = lambdify((x, y), sin(x*y)**2)
    >>> f(0, 5)
    0.0
    >>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
    >>> row(1, 2)
    Matrix([[1, 3]])
    ``lambdify`` can be used to translate SymPy expressions into mpmath
    functions. This may be preferable to using ``evalf`` (which uses mpmath on
    the backend) in some cases.
    >>> f = lambdify(x, sin(x), 'mpmath')
    >>> f(1)
    0.8414709848078965
    Tuple arguments are handled and the lambdified function should
    be called with the same type of arguments as were used to create
    the function:
    >>> f = lambdify((x, (y, z)), x + y)
    >>> f(1, (2, 4))
    3
    The ``flatten`` function can be used to always work with flattened
    arguments:
    >>> from sympy.utilities.iterables import flatten
    >>> args = w, (x, (y, z))
    >>> vals = 1, (2, (3, 4))
    >>> f = lambdify(flatten(args), w + x + y + z)
    >>> f(*flatten(vals))
    10
    Functions present in ``expr`` can also carry their own numerical
    implementations, in a callable attached to the ``_imp_`` attribute. This
    can be used with undefined functions using the ``implemented_function``
    factory:
    >>> f = implemented_function(Function('f'), lambda x: x+1)
    >>> func = lambdify(x, f(x))
    >>> func(4)
    5
    ``lambdify`` always prefers ``_imp_`` implementations to implementations
    in other namespaces, unless the ``use_imps`` input parameter is False.
    Usage with Tensorflow:
    >>> import tensorflow as tf
    >>> from sympy import Max, sin, lambdify
    >>> from sympy.abc import x
    >>> f = Max(x, sin(x))
    >>> func = lambdify(x, f, 'tensorflow')
    After tensorflow v2, eager execution is enabled by default.
    If you want results that are compatible across tensorflow v1 and v2,
    as in this tutorial, run this line:
    >>> tf.compat.v1.enable_eager_execution()
    With eager execution enabled, you can get the result out immediately,
    just as with numpy.
    If you pass tensorflow objects, you may get an ``EagerTensor``
    object instead of a plain value.
    >>> result = func(tf.constant(1.0))
    >>> print(result)
    tf.Tensor(1.0, shape=(), dtype=float32)
    >>> print(result.__class__)
    <class 'tensorflow.python.framework.ops.EagerTensor'>
    You can use ``.numpy()`` to get the numpy value of the tensor.
    >>> result.numpy()
    1.0
    >>> var = tf.Variable(2.0)
    >>> result = func(var) # also works for tf.Variable and tf.Placeholder
    >>> result.numpy()
    2.0
    And it works with any shape array.
    >>> tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    >>> result = func(tensor)
    >>> result.numpy()
    [[1. 2.]
     [3. 4.]]
    Notes
    =====
    - For functions involving large array calculations, numexpr can provide a
      significant speedup over numpy. Please note that the available functions
      for numexpr are more limited than numpy but can be expanded with
      ``implemented_function`` and user defined subclasses of Function. If
      specified, ``numexpr`` must be the only item in *modules*. The official list
      of numexpr functions can be found at:
      https://numexpr.readthedocs.io/en/latest/user_guide.html#supported-functions
    - In the above examples, the generated functions can accept scalar
      values or numpy arrays as arguments.  However, in some cases
      the generated function relies on the input being a numpy array:
      >>> import numpy
      >>> from sympy import Piecewise
      >>> from sympy.testing.pytest import ignore_warnings
      >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "numpy")
      >>> with ignore_warnings(RuntimeWarning):
      ...     f(numpy.array([-1, 0, 1, 2]))
      [-1.   0.   1.   0.5]
      >>> f(0)
      Traceback (most recent call last):
          ...
      ZeroDivisionError: division by zero
      In such cases, the input should be wrapped in a numpy array:
      >>> with ignore_warnings(RuntimeWarning):
      ...     float(f(numpy.array([0])))
      0.0
      Or if numpy functionality is not required another module can be used:
      >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "math")
      >>> f(0)
      0
    .. _lambdify-how-it-works:
    How it works
    ============
    When using this function, it helps a great deal to have an idea of what it
    is doing. At its core, lambdify is nothing more than a namespace
    translation, on top of a special printer that makes some corner cases work
    properly.
    To understand lambdify, first we must properly understand how Python
    namespaces work. Say we had two files. One called ``sin_cos_sympy.py``,
    with
    .. code:: python
        # sin_cos_sympy.py
        from sympy.functions.elementary.trigonometric import (cos, sin)
        def sin_cos(x):
            return sin(x) + cos(x)
    and one called ``sin_cos_numpy.py`` with
    .. code:: python
        # sin_cos_numpy.py
        from numpy import sin, cos
        def sin_cos(x):
            return sin(x) + cos(x)
    The two files define an identical function ``sin_cos``. However, in the
    first file, ``sin`` and ``cos`` are defined as the SymPy ``sin`` and
    ``cos``. In the second, they are defined as the NumPy versions.
    If we were to import the first file and use the ``sin_cos`` function, we
    would get something like
    >>> from sin_cos_sympy import sin_cos # doctest: +SKIP
    >>> sin_cos(1) # doctest: +SKIP
    cos(1) + sin(1)
    On the other hand, if we imported ``sin_cos`` from the second file, we
    would get
    >>> from sin_cos_numpy import sin_cos # doctest: +SKIP
    >>> sin_cos(1) # doctest: +SKIP
    1.38177329068
    In the first case we got a symbolic output, because it used the symbolic
    ``sin`` and ``cos`` functions from SymPy. In the second, we got a numeric
    result, because ``sin_cos`` used the numeric ``sin`` and ``cos`` functions
    from NumPy. But notice that the versions of ``sin`` and ``cos`` that were
    used were not inherent to the ``sin_cos`` function definition. Both
    ``sin_cos`` definitions are exactly the same. Rather, it was based on the
    names defined in the module where the ``sin_cos`` function was defined.
    The key point here is that when a function in Python references a name that
    is not defined in the function, that name is looked up in the "global"
    namespace of the module where that function is defined.
    Now, in Python, we can emulate this behavior without actually writing a
    file to disk using the ``exec`` function. ``exec`` takes a string
    containing a block of Python code, and a dictionary that should contain
    the global variables of the module. It then executes the code "in" that
    dictionary, as if it were the module globals. The following is equivalent
    to the ``sin_cos`` defined in ``sin_cos_sympy.py``:
    >>> import sympy
    >>> module_dictionary = {'sin': sympy.sin, 'cos': sympy.cos}
    >>> exec('''
    ... def sin_cos(x):
    ...     return sin(x) + cos(x)
    ... ''', module_dictionary)
    >>> sin_cos = module_dictionary['sin_cos']
    >>> sin_cos(1)
    cos(1) + sin(1)
    and similarly with ``sin_cos_numpy``:
    >>> import numpy
    >>> module_dictionary = {'sin': numpy.sin, 'cos': numpy.cos}
    >>> exec('''
    ... def sin_cos(x):
    ...     return sin(x) + cos(x)
    ... ''', module_dictionary)
    >>> sin_cos = module_dictionary['sin_cos']
    >>> sin_cos(1)
    1.38177329068
    So now we can get an idea of how ``lambdify`` works. The name "lambdify"
    comes from the fact that we can think of something like ``lambdify(x,
    sin(x) + cos(x), 'numpy')`` as ``lambda x: sin(x) + cos(x)``, where
    ``sin`` and ``cos`` come from the ``numpy`` namespace. This is also why
    the symbols argument is first in ``lambdify``, as opposed to most SymPy
    functions where it comes after the expression: to better mimic the
    ``lambda`` keyword.
    ``lambdify`` takes the input expression (like ``sin(x) + cos(x)``) and
    1. Converts it to a string
    2. Creates a module globals dictionary based on the modules that are
       passed in (by default, it uses the NumPy module)
    3. Creates the string ``"def func({vars}): return {expr}"``, where ``{vars}`` is the
       list of variables separated by commas, and ``{expr}`` is the string
       created in step 1., then ``exec``s that string with the module globals
       namespace and returns ``func``.
    In fact, functions returned by ``lambdify`` support inspection. So you can
    see exactly how they are defined by using ``inspect.getsource``, or ``??`` if you
    are using IPython or the Jupyter notebook.
    >>> f = lambdify(x, sin(x) + cos(x))
    >>> import inspect
    >>> print(inspect.getsource(f))
    def _lambdifygenerated(x):
        return sin(x) + cos(x)
    This shows us the source code of the function, but not the namespace it
    was defined in. We can inspect that by looking at the ``__globals__``
    attribute of ``f``:
    >>> f.__globals__['sin']
    <ufunc 'sin'>
    >>> f.__globals__['cos']
    <ufunc 'cos'>
    >>> f.__globals__['sin'] is numpy.sin
    True
    This shows us that ``sin`` and ``cos`` in the namespace of ``f`` will be
    ``numpy.sin`` and ``numpy.cos``.
    Note that there are some convenience layers in each of these steps, but at
    the core, this is how ``lambdify`` works. Step 1 is done using the
    ``LambdaPrinter`` printers defined in the printing module (see
    :mod:`sympy.printing.lambdarepr`). This allows different SymPy expressions
    to define how they should be converted to a string for different modules.
    You can change which printer ``lambdify`` uses by passing a custom printer
    in to the ``printer`` argument.
    Step 2 is augmented by certain translations. There are default
    translations for each module, but you can provide your own by passing a
    list to the ``modules`` argument. For instance,
    >>> def mysin(x):
    ...     print('taking the sin of', x)
    ...     return numpy.sin(x)
    ...
    >>> f = lambdify(x, sin(x), [{'sin': mysin}, 'numpy'])
    >>> f(1)
    taking the sin of 1
    0.8414709848078965
    The globals dictionary is generated from the list by merging the
    dictionary ``{'sin': mysin}`` and the module dictionary for NumPy. The
    merging is done so that earlier items take precedence, which is why
    ``mysin`` is used above instead of ``numpy.sin``.
    If you want to modify the way ``lambdify`` works for a given function, it
    is usually easiest to do so by modifying the globals dictionary as such.
    In more complicated cases, it may be necessary to create and pass in a
    custom printer.
    Finally, step 3 is augmented with certain convenience operations, such as
    the addition of a docstring.
    Understanding how ``lambdify`` works can make it easier to avoid certain
    gotchas when using it. For instance, a common mistake is to create a
    lambdified function for one module (say, NumPy), and pass it objects from
    another (say, a SymPy expression).
    For instance, say we create
    >>> from sympy.abc import x
    >>> f = lambdify(x, x + 1, 'numpy')
    Now if we pass in a NumPy array, we get that array plus 1
    >>> import numpy
    >>> a = numpy.array([1, 2])
    >>> f(a)
    [2 3]
    But what happens if you make the mistake of passing in a SymPy expression
    instead of a NumPy array:
    >>> f(x + 1)
    x + 2
    This worked, but it was only by accident. Now take a different lambdified
    function:
    >>> from sympy import sin
    >>> g = lambdify(x, x + sin(x), 'numpy')
    This works as expected on NumPy arrays:
    >>> g(a)
    [1.84147098 2.90929743]
    But if we try to pass in a SymPy expression, it fails
    >>> g(x + 1)
    Traceback (most recent call last):
    ...
    TypeError: loop of ufunc does not support argument 0 of type Add which has
               no callable sin method
    Now, let's look at what happened. The reason this fails is that ``g``
    calls ``numpy.sin`` on the input expression, and ``numpy.sin`` does not
    know how to operate on a SymPy object. **As a general rule, NumPy
    functions do not know how to operate on SymPy expressions, and SymPy
    functions do not know how to operate on NumPy arrays. This is why lambdify
    exists: to provide a bridge between SymPy and NumPy.**
    However, why is it that ``f`` did work? That's because ``f`` does not call
    any functions, it only adds 1. So the resulting function that is created,
    ``def _lambdifygenerated(x): return x + 1`` does not depend on the globals
    namespace it is defined in. Thus it works, but only by accident. A future
    version of ``lambdify`` may remove this behavior.
    Be aware that certain implementation details described here may change in
    future versions of SymPy. The API of passing in custom modules and
    printers will not change, but the details of how a lambda function is
    created may change. However, the basic idea will remain the same, and
    understanding it will be helpful to understanding the behavior of
    lambdify.
    **In general: you should create lambdified functions for one module (say,
    NumPy), and only pass it input types that are compatible with that module
    (say, NumPy arrays).** Remember that by default, if the ``modules``
    argument is not provided, ``lambdify`` creates functions using the NumPy
    and SciPy namespaces.
    """
    from sympy.core.symbol import Symbol
    from sympy.core.expr import Expr
    # If the user hasn't specified any modules, use what is available.
    if modules is None:
        try:
            _import("scipy")
        except ImportError:
            try:
                _import("numpy")
            except ImportError:
                # Use either numpy (if available) or python.math where possible.
                # XXX: This leads to different behaviour on different systems and
                #      might be the reason for irreproducible errors.
                modules = ["math", "mpmath", "sympy"]
            else:
                modules = ["numpy"]
        else:
            modules = ["numpy", "scipy"]
    # Get the needed namespaces.
    namespaces = []
    # First find any function implementations
    if use_imps:
        namespaces.append(_imp_namespace(expr))
    # Check for dict before iterating
    if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
        namespaces.append(modules)
    else:
        # consistency check
        if _module_present('numexpr', modules) and len(modules) > 1:
            raise TypeError("numexpr must be the only item in 'modules'")
        namespaces += list(modules)
    # fill namespace with first having highest priority
    namespace = {}
    for m in namespaces[::-1]:
        buf = _get_namespace(m)
        namespace.update(buf)
    if hasattr(expr, "atoms"):
        # Try to extract symbols from the expression.
        # Move on if expr.atoms is not implemented.
        syms = expr.atoms(Symbol)
        for term in syms:
            namespace.update({str(term): term})
    if printer is None:
        if _module_present('mpmath', namespaces):
            from sympy.printing.pycode import MpmathPrinter as Printer # type: ignore
        elif _module_present('scipy', namespaces):
            from sympy.printing.numpy import SciPyPrinter as Printer # type: ignore
        elif _module_present('numpy', namespaces):
            from sympy.printing.numpy import NumPyPrinter as Printer # type: ignore
        elif _module_present('cupy', namespaces):
            from sympy.printing.numpy import CuPyPrinter as Printer # type: ignore
        elif _module_present('jax', namespaces):
            from sympy.printing.numpy import JaxPrinter as Printer # type: ignore
        elif _module_present('numexpr', namespaces):
            from sympy.printing.lambdarepr import NumExprPrinter as Printer # type: ignore
        elif _module_present('tensorflow', namespaces):
            from sympy.printing.tensorflow import TensorflowPrinter as Printer # type: ignore
        elif _module_present('sympy', namespaces):
            from sympy.printing.pycode import SymPyPrinter as Printer # type: ignore
        else:
            from sympy.printing.pycode import PythonCodePrinter as Printer # type: ignore
        user_functions = {}
        for m in namespaces[::-1]:
            if isinstance(m, dict):
                for k in m:
                    user_functions[k] = k
        printer = Printer({'fully_qualified_modules': False, 'inline': True,
                           'allow_unknown_functions': True,
                           'user_functions': user_functions})
    if isinstance(args, set):
        sympy_deprecation_warning(
            """
Passing the function arguments to lambdify() as a set is deprecated. This
leads to unpredictable results since sets are unordered. Instead, use a list
or tuple for the function arguments.
            """,
            deprecated_since_version="1.6.3",
            active_deprecations_target="deprecated-lambdify-arguments-set",
                )
    # Get the names of the args, for creating a docstring
    iterable_args = (args,) if isinstance(args, Expr) else args
    names = []
    # Grab the callers frame, for getting the names by inspection (if needed)
    callers_local_vars = inspect.currentframe().f_back.f_locals.items() # type: ignore
    for n, var in enumerate(iterable_args):
        if hasattr(var, 'name'):
            names.append(var.name)
        else:
            # It's an iterable. Try to get name by inspection of calling frame.
            name_list = [var_name for var_name, var_val in callers_local_vars
                    if var_val is var]
            if len(name_list) == 1:
                names.append(name_list[0])
            else:
                # Cannot infer name with certainty. arg_# will have to do.
                names.append('arg_' + str(n))
    # Create the function definition code and execute it
    funcname = '_lambdifygenerated'
    if _module_present('tensorflow', namespaces):
        funcprinter = _TensorflowEvaluatorPrinter(printer, dummify)
    else:
        funcprinter = _EvaluatorPrinter(printer, dummify)
    if cse == True:
        from sympy.simplify.cse_main import cse as _cse
        cses, _expr = _cse(expr, list=False)
    elif callable(cse):
        cses, _expr = cse(expr)
    else:
        cses, _expr = (), expr
    funcstr = funcprinter.doprint(funcname, iterable_args, _expr, cses=cses)
    # Collect the module imports from the code printers.
    imp_mod_lines = []
    for mod, keys in (getattr(printer, 'module_imports', None) or {}).items():
        for k in keys:
            if k not in namespace:
                ln = "from %s import %s" % (mod, k)
                try:
                    exec(ln, {}, namespace)
                except ImportError:
                    # Tensorflow 2.0 has issues with importing a specific
                    # function from its submodule.
                    # https://github.com/tensorflow/tensorflow/issues/33022
                    ln = "%s = %s.%s" % (k, mod, k)
                    exec(ln, {}, namespace)
                imp_mod_lines.append(ln)
    # Provide lambda expression with builtins, and compatible implementation of range
    namespace.update({'builtins':builtins, 'range':range})
    funclocals = {}
    global _lambdify_generated_counter
    filename = '<lambdifygenerated-%s>' % _lambdify_generated_counter
    _lambdify_generated_counter += 1
    c = compile(funcstr, filename, 'exec')
    exec(c, namespace, funclocals)
    # mtime has to be None or else linecache.checkcache will remove it
    linecache.cache[filename] = (len(funcstr), None, funcstr.splitlines(True), filename) # type: ignore
    func = funclocals[funcname]
    # Apply the docstring
    sig = "func({})".format(", ".join(str(i) for i in names))
    sig = textwrap.fill(sig, subsequent_indent=' '*8)
    if _too_large_for_docstring(expr, docstring_limit):
        expr_str = "EXPRESSION REDACTED DUE TO LENGTH, (see lambdify's `docstring_limit`)"
        src_str = "SOURCE CODE REDACTED DUE TO LENGTH, (see lambdify's `docstring_limit`)"
    else:
        expr_str = str(expr)
        if len(expr_str) > 78:
            expr_str = textwrap.wrap(expr_str, 75)[0] + '...'
        src_str = funcstr
    func.__doc__ = (
        "Created with lambdify. Signature:\n\n"
        "{sig}\n\n"
        "Expression:\n\n"
        "{expr}\n\n"
        "Source code:\n\n"
        "{src}\n\n"
        "Imported modules:\n\n"
        "{imp_mods}"
        ).format(sig=sig, expr=expr_str, src=src_str, imp_mods='\n'.join(imp_mod_lines))
    return func
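A hedged sketch of the ``docstring_limit`` handling implemented just above: with a limit of ``0`` the generated docstring should carry the REDACTED notice instead of the expression (this assumes ``_too_large_for_docstring``, not shown here, treats ``0`` as always too large, as the parameter description states):

from sympy import lambdify, symbols

x = symbols('x')
big = sum(x**i for i in range(50))      # an expression with many nodes
f = lambdify(x, big, docstring_limit=0)
print('REDACTED DUE TO LENGTH' in f.__doc__)   # expected: True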
 
 | 
	lambdify 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	71 
 | 
	sympy/crypto/crypto.py 
 | 
	def lfsr_connection_polynomial(s):
    """
    This function computes the LFSR connection polynomial.
    Parameters
    ==========
    s
        A sequence of elements of even length, with entries in a finite
        field.
    Returns
    =======
    C(x)
        The connection polynomial of a minimal LFSR yielding s.
        This implements the algorithm in section 3 of J. L. Massey's
        article [M]_.
    Examples
    ========
    >>> from sympy.crypto.crypto import (
    ...     lfsr_sequence, lfsr_connection_polynomial)
    >>> from sympy.polys.domains import FF
    >>> F = FF(2)
    >>> fill = [F(1), F(1), F(0), F(1)]
    >>> key = [F(1), F(0), F(0), F(1)]
    >>> s = lfsr_sequence(key, fill, 20)
    >>> lfsr_connection_polynomial(s)
    x**4 + x + 1
    >>> fill = [F(1), F(0), F(0), F(1)]
    >>> key = [F(1), F(1), F(0), F(1)]
    >>> s = lfsr_sequence(key, fill, 20)
    >>> lfsr_connection_polynomial(s)
    x**3 + 1
    >>> fill = [F(1), F(0), F(1)]
    >>> key = [F(1), F(1), F(0)]
    >>> s = lfsr_sequence(key, fill, 20)
    >>> lfsr_connection_polynomial(s)
    x**3 + x**2 + 1
    >>> fill = [F(1), F(0), F(1)]
    >>> key = [F(1), F(0), F(1)]
    >>> s = lfsr_sequence(key, fill, 20)
    >>> lfsr_connection_polynomial(s)
    x**3 + x + 1
    References
    ==========
    .. [M] James L. Massey, "Shift-Register Synthesis and BCH Decoding."
        IEEE Trans. on Information Theory, vol. 15(1), pp. 122-127,
        Jan 1969.
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_lfsr_connection_polynomial.txt 
 | 
	def lfsr_connection_polynomial(s):
    """
    This function computes the LFSR connection polynomial.
    Parameters
    ==========
    s
        A sequence of elements of even length, with entries in a finite
        field.
    Returns
    =======
    C(x)
        The connection polynomial of a minimal LFSR yielding s.
        This implements the algorithm in section 3 of J. L. Massey's
        article [M]_.
    Examples
    ========
    >>> from sympy.crypto.crypto import (
    ...     lfsr_sequence, lfsr_connection_polynomial)
    >>> from sympy.polys.domains import FF
    >>> F = FF(2)
    >>> fill = [F(1), F(1), F(0), F(1)]
    >>> key = [F(1), F(0), F(0), F(1)]
    >>> s = lfsr_sequence(key, fill, 20)
    >>> lfsr_connection_polynomial(s)
    x**4 + x + 1
    >>> fill = [F(1), F(0), F(0), F(1)]
    >>> key = [F(1), F(1), F(0), F(1)]
    >>> s = lfsr_sequence(key, fill, 20)
    >>> lfsr_connection_polynomial(s)
    x**3 + 1
    >>> fill = [F(1), F(0), F(1)]
    >>> key = [F(1), F(1), F(0)]
    >>> s = lfsr_sequence(key, fill, 20)
    >>> lfsr_connection_polynomial(s)
    x**3 + x**2 + 1
    >>> fill = [F(1), F(0), F(1)]
    >>> key = [F(1), F(0), F(1)]
    >>> s = lfsr_sequence(key, fill, 20)
    >>> lfsr_connection_polynomial(s)
    x**3 + x + 1
    References
    ==========
    .. [M] James L. Massey, "Shift-Register Synthesis and BCH Decoding."
        IEEE Trans. on Information Theory, vol. 15(1), pp. 122-127,
        Jan 1969.
    """
    # Initialization:
    p = s[0].modulus()
    x = Symbol("x")
    C = 1*x**0
    B = 1*x**0
    m = 1
    b = 1*x**0
    L = 0
    N = 0
    while N < len(s):
        if L > 0:
            dC = Poly(C).degree()
            r = min(L + 1, dC + 1)
            coeffsC = [C.subs(x, 0)] + [C.coeff(x**i)
                for i in range(1, dC + 1)]
            d = (int(s[N]) + sum(coeffsC[i]*int(s[N - i])
                for i in range(1, r))) % p
        if L == 0:
            d = int(s[N])*x**0
        if d == 0:
            m += 1
            N += 1
        if d > 0:
            if 2*L > N:
                C = (C - d*((b**(p - 2)) % p)*x**m*B).expand()
                m += 1
                N += 1
            else:
                T = C
                C = (C - d*((b**(p - 2)) % p)*x**m*B).expand()
                L = N + 1 - L
                m = 1
                b = d
                B = T
                N += 1
    dC = Poly(C).degree()
    coeffsC = [C.subs(x, 0)] + [C.coeff(x**i) for i in range(1, dC + 1)]
    return sum(coeffsC[i] % p*x**i for i in range(dC + 1)
        if coeffsC[i] is not None)
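As a hedged companion to the doctests above: the degree of the returned connection polynomial is the linear complexity found by the Berlekamp-Massey procedure, which can be read off with ``Poly``.

from sympy import Poly, Symbol
from sympy.crypto.crypto import lfsr_connection_polynomial, lfsr_sequence
from sympy.polys.domains import FF

F = FF(2)
key = [F(1), F(0), F(0), F(1)]
fill = [F(1), F(1), F(0), F(1)]
s = lfsr_sequence(key, fill, 20)
C = lfsr_connection_polynomial(s)
print(C, Poly(C, Symbol('x')).degree())   # x**4 + x + 1 4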
 
 | 
	lfsr_connection_polynomial 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	72 
 | 
	sympy/utilities/iterables.py 
 | 
	def multiset_partitions(multiset, m=None):
    """
    Return unique partitions of the given multiset (in list form).
    If ``m`` is None, all partitions will be returned, otherwise only
    partitions with ``m`` parts will be returned.
    If ``multiset`` is an integer, a range [0, 1, ..., multiset - 1]
    will be supplied.
    Examples
    ========
    >>> from sympy.utilities.iterables import multiset_partitions
    >>> list(multiset_partitions([1, 2, 3, 4], 2))
    [[[1, 2, 3], [4]], [[1, 2, 4], [3]], [[1, 2], [3, 4]],
    [[1, 3, 4], [2]], [[1, 3], [2, 4]], [[1, 4], [2, 3]],
    [[1], [2, 3, 4]]]
    >>> list(multiset_partitions([1, 2, 3, 4], 1))
    [[[1, 2, 3, 4]]]
    Only unique partitions are returned and these will be returned in a
    canonical order regardless of the order of the input:
    >>> a = [1, 2, 2, 1]
    >>> ans = list(multiset_partitions(a, 2))
    >>> a.sort()
    >>> list(multiset_partitions(a, 2)) == ans
    True
    >>> a = range(3, 1, -1)
    >>> (list(multiset_partitions(a)) ==
    ...  list(multiset_partitions(sorted(a))))
    True
    If m is omitted then all partitions will be returned:
    >>> list(multiset_partitions([1, 1, 2]))
    [[[1, 1, 2]], [[1, 1], [2]], [[1, 2], [1]], [[1], [1], [2]]]
    >>> list(multiset_partitions([1]*3))
    [[[1, 1, 1]], [[1], [1, 1]], [[1], [1], [1]]]
    Counting
    ========
    The number of partitions of a set is given by the bell number:
    >>> from sympy import bell
    >>> len(list(multiset_partitions(5))) == bell(5) == 52
    True
    The number of partitions of length k from a set of size n is given by the
    Stirling Number of the 2nd kind:
    >>> from sympy.functions.combinatorial.numbers import stirling
    >>> stirling(5, 2) == len(list(multiset_partitions(5, 2))) == 15
    True
    These comments on counting apply to *sets*, not multisets.
    Notes
    =====
    When all the elements are the same in the multiset, the order
    of the returned partitions is determined by the ``partitions``
    routine. If one is counting partitions then it is better to use
    the ``nT`` function.
    See Also
    ========
    partitions
    sympy.combinatorics.partitions.Partition
    sympy.combinatorics.partitions.IntegerPartition
    sympy.functions.combinatorial.numbers.nT
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_multiset_partitions.txt 
 | 
	def multiset_partitions(multiset, m=None):
    """
    Return unique partitions of the given multiset (in list form).
    If ``m`` is None, all partitions will be returned, otherwise only
    partitions with ``m`` parts will be returned.
    If ``multiset`` is an integer, a range [0, 1, ..., multiset - 1]
    will be supplied.
    Examples
    ========
    >>> from sympy.utilities.iterables import multiset_partitions
    >>> list(multiset_partitions([1, 2, 3, 4], 2))
    [[[1, 2, 3], [4]], [[1, 2, 4], [3]], [[1, 2], [3, 4]],
    [[1, 3, 4], [2]], [[1, 3], [2, 4]], [[1, 4], [2, 3]],
    [[1], [2, 3, 4]]]
    >>> list(multiset_partitions([1, 2, 3, 4], 1))
    [[[1, 2, 3, 4]]]
    Only unique partitions are returned and these will be returned in a
    canonical order regardless of the order of the input:
    >>> a = [1, 2, 2, 1]
    >>> ans = list(multiset_partitions(a, 2))
    >>> a.sort()
    >>> list(multiset_partitions(a, 2)) == ans
    True
    >>> a = range(3, 1, -1)
    >>> (list(multiset_partitions(a)) ==
    ...  list(multiset_partitions(sorted(a))))
    True
    If m is omitted then all partitions will be returned:
    >>> list(multiset_partitions([1, 1, 2]))
    [[[1, 1, 2]], [[1, 1], [2]], [[1, 2], [1]], [[1], [1], [2]]]
    >>> list(multiset_partitions([1]*3))
    [[[1, 1, 1]], [[1], [1, 1]], [[1], [1], [1]]]
    Counting
    ========
    The number of partitions of a set is given by the bell number:
    >>> from sympy import bell
    >>> len(list(multiset_partitions(5))) == bell(5) == 52
    True
    The number of partitions of length k from a set of size n is given by the
    Stirling Number of the 2nd kind:
    >>> from sympy.functions.combinatorial.numbers import stirling
    >>> stirling(5, 2) == len(list(multiset_partitions(5, 2))) == 15
    True
    These comments on counting apply to *sets*, not multisets.
    Notes
    =====
    When all the elements are the same in the multiset, the order
    of the returned partitions is determined by the ``partitions``
    routine. If one is counting partitions then it is better to use
    the ``nT`` function.
    See Also
    ========
    partitions
    sympy.combinatorics.partitions.Partition
    sympy.combinatorics.partitions.IntegerPartition
    sympy.functions.combinatorial.numbers.nT
    """
    # This function looks at the supplied input and dispatches to
    # several special-case routines as they apply.
    if isinstance(multiset, int):
        n = multiset
        if m and m > n:
            return
        multiset = list(range(n))
        if m == 1:
            yield [multiset[:]]
            return
        # If m is not None, it can sometimes be faster to use
        # MultisetPartitionTraverser.enum_range() even for inputs
        # which are sets.  Since the _set_partitions code is quite
        # fast, this is only advantageous when the overall set
        # partitions outnumber those with the desired number of parts
        # by a large factor.  (At least 60.)  Such a switch is not
        # currently implemented.
        for nc, q in _set_partitions(n):
            if m is None or nc == m:
                rv = [[] for i in range(nc)]
                for i in range(n):
                    rv[q[i]].append(multiset[i])
                yield rv
        return
    if len(multiset) == 1 and isinstance(multiset, str):
        multiset = [multiset]
    if not has_variety(multiset):
        # Only one component, repeated n times.  The resulting
        # partitions correspond to partitions of integer n.
        n = len(multiset)
        if m and m > n:
            return
        if m == 1:
            yield [multiset[:]]
            return
        x = multiset[:1]
        for size, p in partitions(n, m, size=True):
            if m is None or size == m:
                rv = []
                for k in sorted(p):
                    rv.extend([x*k]*p[k])
                yield rv
    else:
        from sympy.core.sorting import ordered
        multiset = list(ordered(multiset))
        n = len(multiset)
        if m and m > n:
            return
        if m == 1:
            yield [multiset[:]]
            return
        # Split the information of the multiset into two lists -
        # one of the elements themselves, and one (of the same length)
        # giving the number of repeats for the corresponding element.
        elements, multiplicities = zip(*group(multiset, False))
        if len(elements) < len(multiset):
            # General case - multiset with more than one distinct element
            # and at least one element repeated more than once.
            if m:
                mpt = MultisetPartitionTraverser()
                for state in mpt.enum_range(multiplicities, m-1, m):
                    yield list_visitor(state, elements)
            else:
                for state in multiset_partitions_taocp(multiplicities):
                    yield list_visitor(state, elements)
        else:
            # Set partitions case - no repeated elements. Pretty much
            # same as int argument case above, with same possible, but
            # currently unimplemented optimization for some cases when
            # m is not None
            for nc, q in _set_partitions(n):
                if m is None or nc == m:
                    rv = [[] for i in range(nc)]
                    for i in range(n):
                        rv[q[i]].append(i)
                    yield [[multiset[j] for j in i] for i in rv]
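A minimal sketch (assuming SymPy is installed) exercising the three dispatch paths the comments above describe: an integer argument, a multiset with no variety, and the general mixed multiset. Outputs agree with the doctests where they overlap.

from sympy.utilities.iterables import multiset_partitions

# Integer input: 2-part partitions of the set {0, 1, 2} (the _set_partitions path).
print(list(multiset_partitions(3, 2)))
# No variety: all elements identical, reduces to integer partitions of 3.
print(list(multiset_partitions([1, 1, 1], 2)))
# General case: distinct elements with repetition, handled by the traverser.
print(list(multiset_partitions([1, 1, 2], 2)))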
 
 | 
	multiset_partitions 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	73 
 | 
	sympy/utilities/enumerative.py 
 | 
	def multiset_partitions_taocp(multiplicities):
    """Enumerates partitions of a multiset.
    Parameters
    ==========
    multiplicities
         list of integer multiplicities of the components of the multiset.
    Yields
    ======
    state
        Internal data structure which encodes a particular partition.
        This output is then usually processed by a visitor function
        which combines the information from this data structure with
        the components themselves to produce an actual partition.
        Unless they wish to create their own visitor function, users will
        have little need to look inside this data structure.  But, for
        reference, it is a 3-element list with components:
        f
            is a frame array, which is used to divide pstack into parts.
        lpart
            points to the base of the topmost part.
        pstack
            is an array of PartComponent objects.
        The ``state`` output offers a peek into the internal data
        structures of the enumeration function.  The client should
        treat this as read-only; any modification of the data
        structure will cause unpredictable (and almost certainly
        incorrect) results.  Also, the components of ``state`` are
        modified in place at each iteration.  Hence, the visitor must
        be called at each loop iteration.  Accumulating the ``state``
        instances and processing them later will not work.
    Examples
    ========
    >>> from sympy.utilities.enumerative import list_visitor
    >>> from sympy.utilities.enumerative import multiset_partitions_taocp
    >>> # variables components and multiplicities represent the multiset 'abb'
    >>> components = 'ab'
    >>> multiplicities = [1, 2]
    >>> states = multiset_partitions_taocp(multiplicities)
    >>> list(list_visitor(state, components) for state in states)
    [[['a', 'b', 'b']],
    [['a', 'b'], ['b']],
    [['a'], ['b', 'b']],
    [['a'], ['b'], ['b']]]
    See Also
    ========
    sympy.utilities.iterables.multiset_partitions: Takes a multiset
        as input and directly yields multiset partitions.  It
        dispatches to a number of functions, including this one, for
        implementation.  Most users will find it more convenient to
        use than multiset_partitions_taocp.
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_multiset_partitions_taocp.txt 
 | 
	def multiset_partitions_taocp(multiplicities):
    """Enumerates partitions of a multiset.
    Parameters
    ==========
    multiplicities
         list of integer multiplicities of the components of the multiset.
    Yields
    ======
    state
        Internal data structure which encodes a particular partition.
        This output is then usually processed by a visitor function
        which combines the information from this data structure with
        the components themselves to produce an actual partition.
        Unless they wish to create their own visitor function, users will
        have little need to look inside this data structure.  But, for
        reference, it is a 3-element list with components:
        f
            is a frame array, which is used to divide pstack into parts.
        lpart
            points to the base of the topmost part.
        pstack
            is an array of PartComponent objects.
        The ``state`` output offers a peek into the internal data
        structures of the enumeration function.  The client should
        treat this as read-only; any modification of the data
        structure will cause unpredictable (and almost certainly
        incorrect) results.  Also, the components of ``state`` are
        modified in place at each iteration.  Hence, the visitor must
        be called at each loop iteration.  Accumulating the ``state``
        instances and processing them later will not work.
    Examples
    ========
    >>> from sympy.utilities.enumerative import list_visitor
    >>> from sympy.utilities.enumerative import multiset_partitions_taocp
    >>> # variables components and multiplicities represent the multiset 'abb'
    >>> components = 'ab'
    >>> multiplicities = [1, 2]
    >>> states = multiset_partitions_taocp(multiplicities)
    >>> list(list_visitor(state, components) for state in states)
    [[['a', 'b', 'b']],
    [['a', 'b'], ['b']],
    [['a'], ['b', 'b']],
    [['a'], ['b'], ['b']]]
    See Also
    ========
    sympy.utilities.iterables.multiset_partitions: Takes a multiset
        as input and directly yields multiset partitions.  It
        dispatches to a number of functions, including this one, for
        implementation.  Most users will find it more convenient to
        use than multiset_partitions_taocp.
    """
    # Important variables.
    # m is the number of components, i.e., number of distinct elements
    m = len(multiplicities)
    # n is the cardinality, total number of elements whether or not distinct
    n = sum(multiplicities)
    # The main data structure, f segments pstack into parts.  See
    # list_visitor() for example code indicating how this internal
    # state corresponds to a partition.
    # Note: allocation of space for stack is conservative.  Knuth's
    # exercise 7.2.1.5.68 gives some indication of how to tighten this
    # bound, but this is not implemented.
    pstack = [PartComponent() for i in range(n * m + 1)]
    f = [0] * (n + 1)
    # Step M1 in Knuth (Initialize)
    # Initial state - entire multiset in one part.
    for j in range(m):
        ps = pstack[j]
        ps.c = j
        ps.u = multiplicities[j]
        ps.v = multiplicities[j]
    # Other variables
    f[0] = 0
    a = 0
    lpart = 0
    f[1] = m
    b = m  # in general, current stack frame is from a to b - 1
    while True:
        while True:
            # Step M2 (Subtract v from u)
            k = b
            x = False
            for j in range(a, b):
                pstack[k].u = pstack[j].u - pstack[j].v
                if pstack[k].u == 0:
                    x = True
                elif not x:
                    pstack[k].c = pstack[j].c
                    pstack[k].v = min(pstack[j].v, pstack[k].u)
                    x = pstack[k].u < pstack[j].v
                    k = k + 1
                else:  # x is True
                    pstack[k].c = pstack[j].c
                    pstack[k].v = pstack[k].u
                    k = k + 1
                # Note: x is True iff v has changed
            # Step M3 (Push if nonzero.)
            if k > b:
                a = b
                b = k
                lpart = lpart + 1
                f[lpart + 1] = b
                # Return to M2
            else:
                break  # Continue to M4
        # M4  Visit a partition
        state = [f, lpart, pstack]
        yield state
        # M5 (Decrease v)
        while True:
            j = b-1
            while (pstack[j].v == 0):
                j = j - 1
            if j == a and pstack[j].v == 1:
                # M6 (Backtrack)
                if lpart == 0:
                    return
                lpart = lpart - 1
                b = a
                a = f[lpart]
                # Return to M5
            else:
                pstack[j].v = pstack[j].v - 1
                for k in range(j + 1, b):
                    pstack[k].v = pstack[k].u
                break  # GOTO M2
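Since, as the docstring stresses, the components of ``state`` are mutated in place on every iteration, each state must be converted by a visitor inside the loop. A small sketch of the intended pattern, reusing the names from the example above:

from sympy.utilities.enumerative import multiset_partitions_taocp, list_visitor

components = 'ab'
multiplicities = [1, 2]   # the multiset 'abb'

# Convert each state as soon as it is yielded; storing the raw states and
# visiting them after the loop would only show the final, mutated structure.
parts = [list_visitor(state, components)
         for state in multiset_partitions_taocp(multiplicities)]
print(parts)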
 
 | 
	multiset_partitions_taocp 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	74 
 | 
	sympy/codegen/algorithms.py 
 | 
	def newtons_method(expr, wrt, atol=1e-12, delta=None, *, rtol=4e-16, debug=False,
                   itermax=None, counter=None, delta_fn=lambda e, x: -e/e.diff(x),
                   cse=False, handle_nan=None,
                   bounds=None):
    """ Generates an AST for Newton-Raphson method (a root-finding algorithm).
    Explanation
    ===========
    Returns an abstract syntax tree (AST) based on ``sympy.codegen.ast`` for Newton's
    method of root-finding.
    Parameters
    ==========
    expr : expression
    wrt : Symbol
        With respect to, i.e. what is the variable.
    atol : number or expression
        Absolute tolerance (stopping criterion)
    rtol : number or expression
        Relative tolerance (stopping criterion)
    delta : Symbol
        Will be a ``Dummy`` if ``None``.
    debug : bool
        Whether to print convergence information during iterations
    itermax : number or expr
        Maximum number of iterations.
    counter : Symbol
        Will be a ``Dummy`` if ``None``.
    delta_fn: Callable[[Expr, Symbol], Expr]
        Computes the step; the default is Newton's method. For example, for Halley's method
        use delta_fn=lambda e, x: -2*e*e.diff(x)/(2*e.diff(x)**2 - e*e.diff(x, 2))
    cse: bool
        Perform common sub-expression elimination on delta expression
    handle_nan: Token
        How to handle occurrence of not-a-number (NaN).
    bounds: Optional[tuple[Expr, Expr]]
        Perform optimization within bounds
    Examples
    ========
    >>> from sympy import symbols, cos
    >>> from sympy.codegen.ast import Assignment
    >>> from sympy.codegen.algorithms import newtons_method
    >>> x, dx, atol = symbols('x dx atol')
    >>> expr = cos(x) - x**3
    >>> algo = newtons_method(expr, x, atol=atol, delta=dx)
    >>> algo.has(Assignment(dx, -expr/expr.diff(x)))
    True
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Newton%27s_method
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_newtons_method.txt 
 | 
	def newtons_method(expr, wrt, atol=1e-12, delta=None, *, rtol=4e-16, debug=False,
                   itermax=None, counter=None, delta_fn=lambda e, x: -e/e.diff(x),
                   cse=False, handle_nan=None,
                   bounds=None):
    """ Generates an AST for Newton-Raphson method (a root-finding algorithm).
    Explanation
    ===========
    Returns an abstract syntax tree (AST) based on ``sympy.codegen.ast`` for Newton's
    method of root-finding.
    Parameters
    ==========
    expr : expression
    wrt : Symbol
        With respect to, i.e. what is the variable.
    atol : number or expression
        Absolute tolerance (stopping criterion)
    rtol : number or expression
        Relative tolerance (stopping criterion)
    delta : Symbol
        Will be a ``Dummy`` if ``None``.
    debug : bool
        Whether to print convergence information during iterations
    itermax : number or expr
        Maximum number of iterations.
    counter : Symbol
        Will be a ``Dummy`` if ``None``.
    delta_fn: Callable[[Expr, Symbol], Expr]
        Computes the step; the default is Newton's method. For example, for Halley's method
        use delta_fn=lambda e, x: -2*e*e.diff(x)/(2*e.diff(x)**2 - e*e.diff(x, 2))
    cse: bool
        Perform common sub-expression elimination on delta expression
    handle_nan: Token
        How to handle occurrence of not-a-number (NaN).
    bounds: Optional[tuple[Expr, Expr]]
        Perform optimization within bounds
    Examples
    ========
    >>> from sympy import symbols, cos
    >>> from sympy.codegen.ast import Assignment
    >>> from sympy.codegen.algorithms import newtons_method
    >>> x, dx, atol = symbols('x dx atol')
    >>> expr = cos(x) - x**3
    >>> algo = newtons_method(expr, x, atol=atol, delta=dx)
    >>> algo.has(Assignment(dx, -expr/expr.diff(x)))
    True
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Newton%27s_method
    """
    if delta is None:
        delta = Dummy()
        Wrapper = Scope
        name_d = 'delta'
    else:
        Wrapper = lambda x: x
        name_d = delta.name
    delta_expr = delta_fn(expr, wrt)
    if cse:
        from sympy.simplify.cse_main import cse
        cses, (red,) = cse([delta_expr.factor()])
        whl_bdy = [Assignment(dum, sub_e) for dum, sub_e in cses]
        whl_bdy += [Assignment(delta, red)]
    else:
        whl_bdy = [Assignment(delta, delta_expr)]
    if handle_nan is not None:
        whl_bdy += [While(isnan(delta), CodeBlock(handle_nan, break_))]
    whl_bdy += [AddAugmentedAssignment(wrt, delta)]
    if bounds is not None:
        whl_bdy += [Assignment(wrt, Min(Max(wrt, bounds[0]), bounds[1]))]
    if debug:
        prnt = Print([wrt, delta], r"{}=%12.5g {}=%12.5g\n".format(wrt.name, name_d))
        whl_bdy += [prnt]
    req = Gt(Abs(delta), atol + rtol*Abs(wrt))
    declars = [Declaration(Variable(delta, type=real, value=oo))]
    if itermax is not None:
        counter = counter or Dummy(integer=True)
        v_counter = Variable.deduced(counter, 0)
        declars.append(Declaration(v_counter))
        whl_bdy.append(AddAugmentedAssignment(counter, 1))
        req = And(req, Lt(counter, itermax))
    whl = While(req, CodeBlock(*whl_bdy))
    blck = declars
    if debug:
        blck.append(Print([wrt], r"{}=%12.5g\n".format(wrt.name)))
    blck += [whl]
    return Wrapper(CodeBlock(*blck))
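As a hedged illustration of the ``delta_fn`` hook mentioned in the docstring, the sketch below generates the AST with Halley's update step instead of the Newton default; the names follow the doctest above.

from sympy import symbols, cos
from sympy.codegen.ast import Assignment
from sympy.codegen.algorithms import newtons_method

x, dx, atol = symbols('x dx atol')
expr = cos(x) - x**3

# Halley's step, taken from the delta_fn note in the docstring.
halley = lambda e, v: -2*e*e.diff(v)/(2*e.diff(v)**2 - e*e.diff(v, 2))
algo = newtons_method(expr, x, atol=atol, delta=dx, delta_fn=halley)
# The generated AST should contain the Halley update assignment.
print(algo.has(Assignment(dx, halley(expr, x))))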
 
 | 
	newtons_method 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	75 
 | 
	sympy/utilities/iterables.py 
 | 
	def partitions(n, m=None, k=None, size=False):
    """Generate all partitions of positive integer, n.
    Each partition is represented as a dictionary, mapping an integer
    to the number of copies of that integer in the partition.  For example,
    the first partition of 4 returned is {4: 1}, "4: one of them".
    Parameters
    ==========
    n : int
    m : int, optional
        limits number of parts in partition (mnemonic: m, maximum parts)
    k : int, optional
        limits the numbers that are kept in the partition (mnemonic: k, keys)
    size : bool, default: False
        If ``True``, (M, P) is returned where M is the sum of the
        multiplicities and P is the generated partition.
        If ``False``, only the generated partition is returned.
    Examples
    ========
    >>> from sympy.utilities.iterables import partitions
    The numbers appearing in the partition (the keys of the returned dict)
    are limited with k:
    >>> for p in partitions(6, k=2):  # doctest: +SKIP
    ...     print(p)
    {2: 3}
    {1: 2, 2: 2}
    {1: 4, 2: 1}
    {1: 6}
    The maximum number of parts in the partition (the sum of the values in
    the returned dict) is limited with m (default value, None, gives
    partitions from 1 through n):
    >>> for p in partitions(6, m=2):  # doctest: +SKIP
    ...     print(p)
    ...
    {6: 1}
    {1: 1, 5: 1}
    {2: 1, 4: 1}
    {3: 2}
    References
    ==========
    .. [1] modified from Tim Peters' version to allow for k and m values:
           https://code.activestate.com/recipes/218332-generator-for-integer-partitions/
    See Also
    ========
    sympy.combinatorics.partitions.Partition
    sympy.combinatorics.partitions.IntegerPartition
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_partitions.txt 
 | 
	def partitions(n, m=None, k=None, size=False):
    """Generate all partitions of positive integer, n.
    Each partition is represented as a dictionary, mapping an integer
    to the number of copies of that integer in the partition.  For example,
    the first partition of 4 returned is {4: 1}, "4: one of them".
    Parameters
    ==========
    n : int
    m : int, optional
        limits number of parts in partition (mnemonic: m, maximum parts)
    k : int, optional
        limits the numbers that are kept in the partition (mnemonic: k, keys)
    size : bool, default: False
        If ``True``, (M, P) is returned where M is the sum of the
        multiplicities and P is the generated partition.
        If ``False``, only the generated partition is returned.
    Examples
    ========
    >>> from sympy.utilities.iterables import partitions
    The numbers appearing in the partition (the keys of the returned dict)
    are limited with k:
    >>> for p in partitions(6, k=2):  # doctest: +SKIP
    ...     print(p)
    {2: 3}
    {1: 2, 2: 2}
    {1: 4, 2: 1}
    {1: 6}
    The maximum number of parts in the partition (the sum of the values in
    the returned dict) is limited with m (default value, None, gives
    partitions from 1 through n):
    >>> for p in partitions(6, m=2):  # doctest: +SKIP
    ...     print(p)
    ...
    {6: 1}
    {1: 1, 5: 1}
    {2: 1, 4: 1}
    {3: 2}
    References
    ==========
    .. [1] modified from Tim Peters' version to allow for k and m values:
           https://code.activestate.com/recipes/218332-generator-for-integer-partitions/
    See Also
    ========
    sympy.combinatorics.partitions.Partition
    sympy.combinatorics.partitions.IntegerPartition
    """
    if (n <= 0 or
        m is not None and m < 1 or
        k is not None and k < 1 or
        m and k and m*k < n):
        # the empty set is the only way to handle these inputs
        # and returning {} to represent it is consistent with
        # the counting convention, e.g. nT(0) == 1.
        if size:
            yield 0, {}
        else:
            yield {}
        return
    if m is None:
        m = n
    else:
        m = min(m, n)
    k = min(k or n, n)
    n, m, k = as_int(n), as_int(m), as_int(k)
    q, r = divmod(n, k)
    ms = {k: q}
    keys = [k]  # ms.keys(), from largest to smallest
    if r:
        ms[r] = 1
        keys.append(r)
    room = m - q - bool(r)
    if size:
        yield sum(ms.values()), ms.copy()
    else:
        yield ms.copy()
    while keys != [1]:
        # Reuse any 1's.
        if keys[-1] == 1:
            del keys[-1]
            reuse = ms.pop(1)
            room += reuse
        else:
            reuse = 0
        while 1:
            # Let i be the smallest key larger than 1.  Reuse one
            # instance of i.
            i = keys[-1]
            newcount = ms[i] = ms[i] - 1
            reuse += i
            if newcount == 0:
                del keys[-1], ms[i]
            room += 1
            # Break the remainder into pieces of size i-1.
            i -= 1
            q, r = divmod(reuse, i)
            need = q + bool(r)
            if need > room:
                if not keys:
                    return
                continue
            ms[i] = q
            keys.append(i)
            if r:
                ms[r] = 1
                keys.append(r)
            break
        room -= need
        if size:
            yield sum(ms.values()), ms.copy()
        else:
            yield ms.copy()
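A short sketch of the ``size`` flag described in the parameters above: with ``size=True`` every yield is the pair (M, P), where M is the number of parts and P is the partition dictionary.

from sympy.utilities.iterables import partitions

for nparts, part in partitions(6, m=2, size=True):
    print(nparts, part)   # first pair is 1 {6: 1}; the rest are the two-part partitions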
 
 | 
	partitions 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	77 
 | 
	sympy/calculus/util.py 
 | 
	def periodicity(f, symbol, check=False):
    """
    Tests the given function for periodicity in the given symbol.
    Parameters
    ==========
    f : :py:class:`~.Expr`
        The concerned function.
    symbol : :py:class:`~.Symbol`
        The variable for which the period is to be determined.
    check : bool, optional
        The flag to verify whether the value being returned is a period or not.
    Returns
    =======
    period
        The period of the function is returned.
        ``None`` is returned when the function is aperiodic or has a complex period.
        The value of $0$ is returned as the period of a constant function.
    Raises
    ======
    NotImplementedError
        The value of the period computed cannot be verified.
    Notes
    =====
    Currently, we do not support functions with a complex period.
    The period of functions having complex periodic values such
    as ``exp``, ``sinh`` is evaluated to ``None``.
    The value returned might not be the "fundamental" period of the given
    function, i.e., it may not be the smallest periodic value of the function.
    The verification of the period through the ``check`` flag is not reliable
    due to internal simplification of the given expression. Hence, it is set
    to ``False`` by default.
    Examples
    ========
    >>> from sympy import periodicity, Symbol, sin, cos, tan, exp
    >>> x = Symbol('x')
    >>> f = sin(x) + sin(2*x) + sin(3*x)
    >>> periodicity(f, x)
    2*pi
    >>> periodicity(sin(x)*cos(x), x)
    pi
    >>> periodicity(exp(tan(2*x) - 1), x)
    pi/2
    >>> periodicity(sin(4*x)**cos(2*x), x)
    pi
    >>> periodicity(exp(x), x)
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_periodicity.txt 
 | 
	def periodicity(f, symbol, check=False):
    """
    Tests the given function for periodicity in the given symbol.
    Parameters
    ==========
    f : :py:class:`~.Expr`
        The concerned function.
    symbol : :py:class:`~.Symbol`
        The variable for which the period is to be determined.
    check : bool, optional
        The flag to verify whether the value being returned is a period or not.
    Returns
    =======
    period
        The period of the function is returned.
        ``None`` is returned when the function is aperiodic or has a complex period.
        The value of $0$ is returned as the period of a constant function.
    Raises
    ======
    NotImplementedError
        The value of the period computed cannot be verified.
    Notes
    =====
    Currently, we do not support functions with a complex period.
    The period of functions having complex periodic values such
    as ``exp``, ``sinh`` is evaluated to ``None``.
    The value returned might not be the "fundamental" period of the given
    function, i.e., it may not be the smallest periodic value of the function.
    The verification of the period through the ``check`` flag is not reliable
    due to internal simplification of the given expression. Hence, it is set
    to ``False`` by default.
    Examples
    ========
    >>> from sympy import periodicity, Symbol, sin, cos, tan, exp
    >>> x = Symbol('x')
    >>> f = sin(x) + sin(2*x) + sin(3*x)
    >>> periodicity(f, x)
    2*pi
    >>> periodicity(sin(x)*cos(x), x)
    pi
    >>> periodicity(exp(tan(2*x) - 1), x)
    pi/2
    >>> periodicity(sin(4*x)**cos(2*x), x)
    pi
    >>> periodicity(exp(x), x)
    """
    if symbol.kind is not NumberKind:
        raise NotImplementedError("Cannot use symbol of kind %s" % symbol.kind)
    temp = Dummy('x', real=True)
    f = f.subs(symbol, temp)
    symbol = temp
    def _check(orig_f, period):
        '''Return the checked period or raise an error.'''
        new_f = orig_f.subs(symbol, symbol + period)
        if new_f.equals(orig_f):
            return period
        else:
            raise NotImplementedError(filldedent('''
                The period of the given function cannot be verified.
                When `%s` was replaced with `%s + %s` in `%s`, the result
                was `%s` which was not recognized as being the same as
                the original function.
                So either the period was wrong or the two forms were
                not recognized as being equal.
                Set check=False to obtain the value.''' %
                (symbol, symbol, period, orig_f, new_f)))
    orig_f = f
    period = None
    if isinstance(f, Relational):
        f = f.lhs - f.rhs
    f = f.simplify()
    if symbol not in f.free_symbols:
        return S.Zero
    if isinstance(f, TrigonometricFunction):
        try:
            period = f.period(symbol)
        except NotImplementedError:
            pass
    if isinstance(f, Abs):
        arg = f.args[0]
        if isinstance(arg, (sec, csc, cos)):
            # all but tan and cot might have a period
            # that is half as large
            # so recast as sin
            arg = sin(arg.args[0])
        period = periodicity(arg, symbol)
        if period is not None and isinstance(arg, sin):
            # the argument of Abs was a trigonometric other than
            # cot or tan; test to see if the half-period
            # is valid. Abs(arg) has behaviour equivalent to
            # orig_f, so use that for test:
            orig_f = Abs(arg)
            try:
                return _check(orig_f, period/2)
            except NotImplementedError as err:
                if check:
                    raise NotImplementedError(err)
            # else let new orig_f and period be
            # checked below
    if isinstance(f, exp) or (f.is_Pow and f.base == S.Exp1):
        f = Pow(S.Exp1, expand_mul(f.exp))
        if im(f) != 0:
            period_real = periodicity(re(f), symbol)
            period_imag = periodicity(im(f), symbol)
            if period_real is not None and period_imag is not None:
                period = lcim([period_real, period_imag])
    if f.is_Pow and f.base != S.Exp1:
        base, expo = f.args
        base_has_sym = base.has(symbol)
        expo_has_sym = expo.has(symbol)
        if base_has_sym and not expo_has_sym:
            period = periodicity(base, symbol)
        elif expo_has_sym and not base_has_sym:
            period = periodicity(expo, symbol)
        else:
            period = _periodicity(f.args, symbol)
    elif f.is_Mul:
        coeff, g = f.as_independent(symbol, as_Add=False)
        if isinstance(g, TrigonometricFunction) or not equal_valued(coeff, 1):
            period = periodicity(g, symbol)
        else:
            period = _periodicity(g.args, symbol)
    elif f.is_Add:
        k, g = f.as_independent(symbol)
        if k is not S.Zero:
            return periodicity(g, symbol)
        period = _periodicity(g.args, symbol)
    elif isinstance(f, Mod):
        a, n = f.args
        if a == symbol:
            period = n
        elif isinstance(a, TrigonometricFunction):
            period = periodicity(a, symbol)
        #check if 'f' is linear in 'symbol'
        elif (a.is_polynomial(symbol) and degree(a, symbol) == 1 and
            symbol not in n.free_symbols):
                period = Abs(n / a.diff(symbol))
    elif isinstance(f, Piecewise):
        pass  # not handling Piecewise yet as the return type is not favorable
    elif period is None:
        from sympy.solvers.decompogen import compogen, decompogen
        g_s = decompogen(f, symbol)
        num_of_gs = len(g_s)
        if num_of_gs > 1:
            for index, g in enumerate(reversed(g_s)):
                start_index = num_of_gs - 1 - index
                g = compogen(g_s[start_index:], symbol)
                if g not in (orig_f, f): # Fix for issue 12620
                    period = periodicity(g, symbol)
                    if period is not None:
                        break
    if period is not None:
        if check:
            return _check(orig_f, period)
        return period
    return None
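A brief sketch of the return conventions stated above (0 for a function that is constant in the symbol, ``None`` when no real period exists):

from sympy import Symbol, periodicity, exp

x, y = Symbol('x'), Symbol('y')
print(periodicity(y, x))       # 0: y is constant with respect to x
print(periodicity(exp(x), x))  # None: exp has a complex period, which is unsupported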
 
 | 
	periodicity 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	79 
 | 
	sympy/plotting/plot.py 
 | 
	def plot(*args, show=True, **kwargs):
    """Plots a function of a single variable as a curve.
    Parameters
    ==========
    args :
        The first argument is the expression representing the function
        of single variable to be plotted.
        The last argument is a 3-tuple denoting the range of the free
        variable. e.g. ``(x, 0, 5)``
        Typical usage examples are in the following:
        - Plotting a single expression with a single range.
            ``plot(expr, range, **kwargs)``
        - Plotting a single expression with the default range (-10, 10).
            ``plot(expr, **kwargs)``
        - Plotting multiple expressions with a single range.
            ``plot(expr1, expr2, ..., range, **kwargs)``
        - Plotting multiple expressions with multiple ranges.
            ``plot((expr1, range1), (expr2, range2), ..., **kwargs)``
        It is best practice to specify range explicitly because default
        range may change in the future if a more advanced default range
        detection algorithm is implemented.
    show : bool, optional
        The default value is set to ``True``. Set show to ``False`` and
        the function will not display the plot. The returned instance of
        the ``Plot`` class can then be used to save or display the plot
        by calling the ``save()`` and ``show()`` methods respectively.
    line_color : string, or float, or function, optional
        Specifies the color for the plot.
        See ``Plot`` to see how to set color for the plots.
        Note that when ``line_color`` is set, it is applied to all of the
        series simultaneously.
    title : str, optional
        Title of the plot. It is set to the latex representation of
        the expression, if the plot has only one expression.
    label : str, optional
        The label of the expression in the plot. It will be used when
        called with ``legend``. Default is the name of the expression.
        e.g. ``sin(x)``
    xlabel : str or expression, optional
        Label for the x-axis.
    ylabel : str or expression, optional
        Label for the y-axis.
    xscale : 'linear' or 'log', optional
        Sets the scaling of the x-axis.
    yscale : 'linear' or 'log', optional
        Sets the scaling of the y-axis.
    axis_center : (float, float), optional
        Tuple of two floats denoting the coordinates of the center or
        {'center', 'auto'}
    xlim : (float, float), optional
        Denotes the x-axis limits, ``(min, max)``.
    ylim : (float, float), optional
        Denotes the y-axis limits, ``(min, max)``.
    annotations : list, optional
        A list of dictionaries specifying the type of annotation
        required. The keys in the dictionary should be equivalent
        to the arguments of the :external:mod:`matplotlib`'s
        :external:meth:`~matplotlib.axes.Axes.annotate` method.
    markers : list, optional
        A list of dictionaries specifying the type of markers required.
        The keys in the dictionary should be equivalent to the arguments
        of the :external:mod:`matplotlib`'s :external:func:`~matplotlib.pyplot.plot()` function
        along with the marker-related keyword arguments.
    rectangles : list, optional
        A list of dictionaries specifying the dimensions of the
        rectangles to be plotted. The keys in the dictionary should be
        equivalent to the arguments of the :external:mod:`matplotlib`'s
        :external:class:`~matplotlib.patches.Rectangle` class.
    fill : dict, optional
        A dictionary specifying the type of color filling required in
        the plot. The keys in the dictionary should be equivalent to the
        arguments of the :external:mod:`matplotlib`'s
        :external:meth:`~matplotlib.axes.Axes.fill_between` method.
    adaptive : bool, optional
        The default value is set to ``True``. Set adaptive to ``False``
        and specify ``n`` if uniform sampling is required.
        The plotting uses an adaptive algorithm which samples
        recursively to accurately plot. The adaptive algorithm uses a
        random point near the midpoint of two points that has to be
        further sampled. Hence the same plots can appear slightly
        different.
    depth : int, optional
        Recursion depth of the adaptive algorithm. A depth of value
        `n` samples a maximum of `2^{n}` points.
        If the ``adaptive`` flag is set to ``False``, this will be
        ignored.
    n : int, optional
        Used when the ``adaptive`` is set to ``False``. The function
        is uniformly sampled at ``n`` number of points. If the ``adaptive``
        flag is set to ``True``, this will be ignored.
        This keyword argument replaces ``nb_of_points``, which should be
        considered deprecated.
    size : (float, float), optional
        A tuple in the form (width, height) in inches to specify the size of
        the overall figure. The default value is set to ``None``, meaning
        the size will be set by the default backend.
    Examples
    ========
    .. plot::
       :context: close-figs
       :format: doctest
       :include-source: True
       >>> from sympy import symbols
       >>> from sympy.plotting import plot
       >>> x = symbols('x')
    Single Plot
    .. plot::
       :context: close-figs
       :format: doctest
       :include-source: True
       >>> plot(x**2, (x, -5, 5))
       Plot object containing:
       [0]: cartesian line: x**2 for x over (-5.0, 5.0)
    Multiple plots with single range.
    .. plot::
       :context: close-figs
       :format: doctest
       :include-source: True
       >>> plot(x, x**2, x**3, (x, -5, 5))
       Plot object containing:
       [0]: cartesian line: x for x over (-5.0, 5.0)
       [1]: cartesian line: x**2 for x over (-5.0, 5.0)
       [2]: cartesian line: x**3 for x over (-5.0, 5.0)
    Multiple plots with different ranges.
    .. plot::
       :context: close-figs
       :format: doctest
       :include-source: True
       >>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
       Plot object containing:
       [0]: cartesian line: x**2 for x over (-6.0, 6.0)
       [1]: cartesian line: x for x over (-5.0, 5.0)
    No adaptive sampling.
    .. plot::
       :context: close-figs
       :format: doctest
       :include-source: True
       >>> plot(x**2, adaptive=False, n=400)
       Plot object containing:
       [0]: cartesian line: x**2 for x over (-10.0, 10.0)
    See Also
    ========
    Plot, LineOver1DRangeSeries
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_plot.txt 
 | 
	def plot(*args, show=True, **kwargs):
    """Plots a function of a single variable as a curve.
    Parameters
    ==========
    args :
        The first argument is the expression representing the function
        of single variable to be plotted.
        The last argument is a 3-tuple denoting the range of the free
        variable. e.g. ``(x, 0, 5)``
        Typical usage examples are in the following:
        - Plotting a single expression with a single range.
            ``plot(expr, range, **kwargs)``
        - Plotting a single expression with the default range (-10, 10).
            ``plot(expr, **kwargs)``
        - Plotting multiple expressions with a single range.
            ``plot(expr1, expr2, ..., range, **kwargs)``
        - Plotting multiple expressions with multiple ranges.
            ``plot((expr1, range1), (expr2, range2), ..., **kwargs)``
        It is best practice to specify range explicitly because default
        range may change in the future if a more advanced default range
        detection algorithm is implemented.
    show : bool, optional
        The default value is set to ``True``. Set show to ``False`` and
        the function will not display the plot. The returned instance of
        the ``Plot`` class can then be used to save or display the plot
        by calling the ``save()`` and ``show()`` methods respectively.
    line_color : string, or float, or function, optional
        Specifies the color for the plot.
        See ``Plot`` to see how to set color for the plots.
        Note that when ``line_color`` is set, it is applied to all of the
        series simultaneously.
    title : str, optional
        Title of the plot. It is set to the latex representation of
        the expression, if the plot has only one expression.
    label : str, optional
        The label of the expression in the plot. It will be used when
        called with ``legend``. Default is the name of the expression.
        e.g. ``sin(x)``
    xlabel : str or expression, optional
        Label for the x-axis.
    ylabel : str or expression, optional
        Label for the y-axis.
    xscale : 'linear' or 'log', optional
        Sets the scaling of the x-axis.
    yscale : 'linear' or 'log', optional
        Sets the scaling of the y-axis.
    axis_center : (float, float), optional
        Tuple of two floats denoting the coordinates of the center or
        {'center', 'auto'}
    xlim : (float, float), optional
        Denotes the x-axis limits, ``(min, max)``.
    ylim : (float, float), optional
        Denotes the y-axis limits, ``(min, max)``.
    annotations : list, optional
        A list of dictionaries specifying the type of annotation
        required. The keys in the dictionary should be equivalent
        to the arguments of the :external:mod:`matplotlib`'s
        :external:meth:`~matplotlib.axes.Axes.annotate` method.
    markers : list, optional
        A list of dictionaries specifying the type of markers required.
        The keys in the dictionary should be equivalent to the arguments
        of the :external:mod:`matplotlib`'s :external:func:`~matplotlib.pyplot.plot()` function
        along with the marker-related keyword arguments.
    rectangles : list, optional
        A list of dictionaries specifying the dimensions of the
        rectangles to be plotted. The keys in the dictionary should be
        equivalent to the arguments of the :external:mod:`matplotlib`'s
        :external:class:`~matplotlib.patches.Rectangle` class.
    fill : dict, optional
        A dictionary specifying the type of color filling required in
        the plot. The keys in the dictionary should be equivalent to the
        arguments of the :external:mod:`matplotlib`'s
        :external:meth:`~matplotlib.axes.Axes.fill_between` method.
    adaptive : bool, optional
        The default value is set to ``True``. Set adaptive to ``False``
        and specify ``n`` if uniform sampling is required.
        The plotting uses an adaptive algorithm which samples
        recursively to accurately plot. The adaptive algorithm uses a
        random point near the midpoint of two points that has to be
        further sampled. Hence the same plots can appear slightly
        different.
    depth : int, optional
        Recursion depth of the adaptive algorithm. A depth of value
        `n` samples a maximum of `2^{n}` points.
        If the ``adaptive`` flag is set to ``False``, this will be
        ignored.
    n : int, optional
        Used when the ``adaptive`` is set to ``False``. The function
        is uniformly sampled at ``n`` number of points. If the ``adaptive``
        flag is set to ``True``, this will be ignored.
        This keyword argument replaces ``nb_of_points``, which should be
        considered deprecated.
    size : (float, float), optional
        A tuple in the form (width, height) in inches to specify the size of
        the overall figure. The default value is set to ``None``, meaning
        the size will be set by the default backend.
    Examples
    ========
    .. plot::
       :context: close-figs
       :format: doctest
       :include-source: True
       >>> from sympy import symbols
       >>> from sympy.plotting import plot
       >>> x = symbols('x')
    Single Plot
    .. plot::
       :context: close-figs
       :format: doctest
       :include-source: True
       >>> plot(x**2, (x, -5, 5))
       Plot object containing:
       [0]: cartesian line: x**2 for x over (-5.0, 5.0)
    Multiple plots with single range.
    .. plot::
       :context: close-figs
       :format: doctest
       :include-source: True
       >>> plot(x, x**2, x**3, (x, -5, 5))
       Plot object containing:
       [0]: cartesian line: x for x over (-5.0, 5.0)
       [1]: cartesian line: x**2 for x over (-5.0, 5.0)
       [2]: cartesian line: x**3 for x over (-5.0, 5.0)
    Multiple plots with different ranges.
    .. plot::
       :context: close-figs
       :format: doctest
       :include-source: True
       >>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
       Plot object containing:
       [0]: cartesian line: x**2 for x over (-6.0, 6.0)
       [1]: cartesian line: x for x over (-5.0, 5.0)
    No adaptive sampling.
    .. plot::
       :context: close-figs
       :format: doctest
       :include-source: True
       >>> plot(x**2, adaptive=False, n=400)
       Plot object containing:
       [0]: cartesian line: x**2 for x over (-10.0, 10.0)
    See Also
    ========
    Plot, LineOver1DRangeSeries
    """
    args = _plot_sympify(args)
    plot_expr = _check_arguments(args, 1, 1, **kwargs)
    params = kwargs.get("params", None)
    free = set()
    for p in plot_expr:
        if not isinstance(p[1][0], str):
            free |= {p[1][0]}
        else:
            free |= {Symbol(p[1][0])}
    if params:
        free = free.difference(params.keys())
    x = free.pop() if free else Symbol("x")
    kwargs.setdefault('xlabel', x)
    kwargs.setdefault('ylabel', Function('f')(x))
    labels = kwargs.pop("label", [])
    rendering_kw = kwargs.pop("rendering_kw", None)
    series = _build_line_series(*plot_expr, **kwargs)
    _set_labels(series, labels, rendering_kw)
    plots = plot_factory(*series, **kwargs)
    if show:
        plots.show()
    return plots
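A hedged sketch of the ``show=False`` workflow described in the parameters section, assuming the matplotlib backend is available; the output filename is illustrative.

from sympy import symbols
from sympy.plotting import plot

x = symbols('x')
p = plot(x**2, (x, -5, 5), show=False)  # build the Plot without displaying it
p.save('parabola.png')                  # write it to disk (illustrative path)
# p.show() would render it on screen instead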
 
 | 
	plot 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	80 
 | 
	sympy/ntheory/factor_.py 
 | 
	def pollard_pm1(n, B=10, a=2, retries=0, seed=1234):
    """
    Use Pollard's p-1 method to try to extract a nontrivial factor
    of ``n``. Either a divisor (perhaps composite) or ``None`` is returned.
    The value of ``a`` is the base that is used in the test gcd(a**M - 1, n).
    The default is 2.  If ``retries`` > 0 then if no factor is found after the
    first attempt, a new ``a`` will be generated randomly (using the ``seed``)
    and the process repeated.
    Note: the value of M is lcm(1..B) = reduce(ilcm, range(2, B + 1)).
    A search is made for factors next to even numbers having a power smoothness
    less than ``B``. Choosing a larger B increases the likelihood of finding a
    larger factor but takes longer. Whether a factor of n is found or not
    depends on ``a`` and the power smoothness of the even number just less than
    the factor p (hence the name p - 1).
    Although there is some discussion of what constitutes a good ``a``, some
    descriptions are hard to interpret. At the modular.math site referenced
    below it is stated that if gcd(a**M - 1, n) = N then a**M % q**r is 1
    for every prime power divisor of N. But consider the following:
        >>> from sympy.ntheory.factor_ import smoothness_p, pollard_pm1
        >>> n=257*1009
        >>> smoothness_p(n)
        (-1, [(257, (1, 2, 256)), (1009, (1, 7, 16))])
    So we should (and can) find a root with B=16:
        >>> pollard_pm1(n, B=16, a=3)
        1009
    If we attempt to increase B to 256 we find that it does not work:
        >>> pollard_pm1(n, B=256)
        >>>
    But if the value of ``a`` is changed we find that only multiples of
    257 work, e.g.:
        >>> pollard_pm1(n, B=256, a=257)
        1009
    Checking different ``a`` values shows that all the ones that did not
    work had a gcd value not equal to ``n`` but equal to one of the
    factors:
        >>> from sympy import ilcm, igcd, factorint, Pow
        >>> M = 1
        >>> for i in range(2, 256):
        ...     M = ilcm(M, i)
        ...
        >>> set([igcd(pow(a, M, n) - 1, n) for a in range(2, 256) if
        ...      igcd(pow(a, M, n) - 1, n) != n])
        {1009}
    But does aM % d for every divisor of n give 1?
        >>> aM = pow(255, M, n)
        >>> [(d, aM%Pow(*d.args)) for d in factorint(n, visual=True).args]
        [(257**1, 1), (1009**1, 1)]
    No, only one of them. So perhaps the principle is that a root will
    be found for a given value of B provided that:
    1) the power smoothness of the p - 1 value next to the root
       does not exceed B
    2) a**M % p != 1 for any of the divisors of n.
    By trying more than one ``a`` it is possible that one of them
    will yield a factor.
    Examples
    ========
    With the default smoothness bound, this number cannot be cracked:
        >>> from sympy.ntheory import pollard_pm1
        >>> pollard_pm1(21477639576571)
    Increasing the smoothness bound helps:
        >>> pollard_pm1(21477639576571, B=2000)
        4410317
    Looking at the smoothness of the factors of this number we find:
        >>> from sympy.ntheory.factor_ import smoothness_p, factorint
        >>> print(smoothness_p(21477639576571, visual=1))
        p**i=4410317**1 has p-1 B=1787, B-pow=1787
        p**i=4869863**1 has p-1 B=2434931, B-pow=2434931
    The B and B-pow are the same for the p - 1 factorizations of the divisors
    because those factorizations had a very large prime factor:
        >>> factorint(4410317 - 1)
        {2: 2, 617: 1, 1787: 1}
        >>> factorint(4869863-1)
        {2: 1, 2434931: 1}
    Note that until B reaches the B-pow value of 1787, the number is not cracked:
        >>> pollard_pm1(21477639576571, B=1786)
        >>> pollard_pm1(21477639576571, B=1787)
        4410317
    The B value has to do with the factors of the number next to the divisor,
    not the divisors themselves. A worst case scenario is that the number next
    to the factor p has a large prime divisor or is a perfect power. If these
    conditions apply then the power-smoothness will be about p/2 or p. More
    realistically, there will be a large prime factor next to p, requiring
    a B value on the order of p/2. Although primes may have been searched for
    up to this level, the p/2 is a factor of p - 1, something that we do not
    know. The modular.math reference below states that 15% of numbers in the
    range of 10**15 to 10**15 + 10**4 are 10**6 power smooth, so a B of 10**6
    will fail 85% of the time in that range. From 10**8 to 10**8 + 10**3 the
    percentages are nearly reversed...but in that range the simple trial
    division is quite fast.
    References
    ==========
    .. [1] Richard Crandall & Carl Pomerance (2005), "Prime Numbers:
           A Computational Perspective", Springer, 2nd edition, 236-238
    .. [2] https://web.archive.org/web/20150716201437/http://modular.math.washington.edu/edu/2007/spring/ent/ent-html/node81.html
    .. [3] https://www.cs.toronto.edu/~yuvalf/Factorization.pdf
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_pollard_pm1.txt 
 | 
	def pollard_pm1(n, B=10, a=2, retries=0, seed=1234):
    """
    Use Pollard's p-1 method to try to extract a nontrivial factor
    of ``n``. Either a divisor (perhaps composite) or ``None`` is returned.
    The value of ``a`` is the base that is used in the test gcd(a**M - 1, n).
    The default is 2.  If ``retries`` > 0 then if no factor is found after the
    first attempt, a new ``a`` will be generated randomly (using the ``seed``)
    and the process repeated.
    Note: the value of M is lcm(1..B) = reduce(ilcm, range(2, B + 1)).
    A search is made for factors next to even numbers having a power smoothness
    less than ``B``. Choosing a larger B increases the likelihood of finding a
    larger factor but takes longer. Whether a factor of n is found or not
    depends on ``a`` and the power smoothness of the even number just less than
    the factor p (hence the name p - 1).
    Although there is some discussion of what constitutes a good ``a``, some
    descriptions are hard to interpret. At the modular.math site referenced
    below it is stated that if gcd(a**M - 1, n) = N then a**M % q**r is 1
    for every prime power divisor of N. But consider the following:
        >>> from sympy.ntheory.factor_ import smoothness_p, pollard_pm1
        >>> n=257*1009
        >>> smoothness_p(n)
        (-1, [(257, (1, 2, 256)), (1009, (1, 7, 16))])
    So we should (and can) find a root with B=16:
        >>> pollard_pm1(n, B=16, a=3)
        1009
    If we attempt to increase B to 256 we find that it does not work:
        >>> pollard_pm1(n, B=256)
        >>>
    But if the value of ``a`` is changed we find that only multiples of
    257 work, e.g.:
        >>> pollard_pm1(n, B=256, a=257)
        1009
    Checking different ``a`` values shows that all the ones that did not
    work had a gcd value not equal to ``n`` but equal to one of the
    factors:
        >>> from sympy import ilcm, igcd, factorint, Pow
        >>> M = 1
        >>> for i in range(2, 256):
        ...     M = ilcm(M, i)
        ...
        >>> set([igcd(pow(a, M, n) - 1, n) for a in range(2, 256) if
        ...      igcd(pow(a, M, n) - 1, n) != n])
        {1009}
    But does aM % d for every divisor of n give 1?
        >>> aM = pow(255, M, n)
        >>> [(d, aM%Pow(*d.args)) for d in factorint(n, visual=True).args]
        [(257**1, 1), (1009**1, 1)]
    No, only one of them. So perhaps the principle is that a root will
    be found for a given value of B provided that:
    1) the power smoothness of the p - 1 value next to the root
       does not exceed B
    2) a**M % p != 1 for any of the divisors of n.
    By trying more than one ``a`` it is possible that one of them
    will yield a factor.
    Examples
    ========
    With the default smoothness bound, this number cannot be cracked:
        >>> from sympy.ntheory import pollard_pm1
        >>> pollard_pm1(21477639576571)
    Increasing the smoothness bound helps:
        >>> pollard_pm1(21477639576571, B=2000)
        4410317
    Looking at the smoothness of the factors of this number we find:
        >>> from sympy.ntheory.factor_ import smoothness_p, factorint
        >>> print(smoothness_p(21477639576571, visual=1))
        p**i=4410317**1 has p-1 B=1787, B-pow=1787
        p**i=4869863**1 has p-1 B=2434931, B-pow=2434931
    The B and B-pow are the same for the p - 1 factorizations of the divisors
    because those factorizations had a very large prime factor:
        >>> factorint(4410317 - 1)
        {2: 2, 617: 1, 1787: 1}
        >>> factorint(4869863-1)
        {2: 1, 2434931: 1}
    Note that until B reaches the B-pow value of 1787, the number is not cracked:
        >>> pollard_pm1(21477639576571, B=1786)
        >>> pollard_pm1(21477639576571, B=1787)
        4410317
    The B value has to do with the factors of the number next to the divisor,
    not the divisors themselves. A worst case scenario is that the number next
    to the factor p has a large prime divisor or is a perfect power. If these
    conditions apply then the power-smoothness will be about p/2 or p. More
    realistically, there will be a large prime factor next to p, requiring
    a B value on the order of p/2. Although primes may have been searched for
    up to this level, the p/2 is a factor of p - 1, something that we do not
    know. The modular.math reference below states that 15% of numbers in the
    range of 10**15 to 10**15 + 10**4 are 10**6 power smooth, so a B of 10**6
    will fail 85% of the time in that range. From 10**8 to 10**8 + 10**3 the
    percentages are nearly reversed...but in that range the simple trial
    division is quite fast.
    References
    ==========
    .. [1] Richard Crandall & Carl Pomerance (2005), "Prime Numbers:
           A Computational Perspective", Springer, 2nd edition, 236-238
    .. [2] https://web.archive.org/web/20150716201437/http://modular.math.washington.edu/edu/2007/spring/ent/ent-html/node81.html
    .. [3] https://www.cs.toronto.edu/~yuvalf/Factorization.pdf
    """
    n = int(n)
    if n < 4 or B < 3:
        raise ValueError('pollard_pm1 should receive n > 3 and B > 2')
    randint = _randint(seed + B)
    # computing a**lcm(1,2,3,..B) % n for B > 2
    # it looks weird, but it's right: primes run [2, B]
    # and the answer's not right until the loop is done.
    for i in range(retries + 1):
        aM = a
        for p in sieve.primerange(2, B + 1):
            e = int(math.log(B, p))
            aM = pow(aM, pow(p, e), n)
        g = gcd(aM - 1, n)
        if 1 < g < n:
            return int(g)
        # get a new a:
        # since the exponent, lcm(1..B), is even, if we allow 'a' to be 'n-1'
        # then (n - 1)**even % n will be 1 which will give a g of 0 and 1 will
        # give a zero, too, so we set the range as [2, n-2]. Some references
        # say 'a' should be coprime to n, but either will detect factors.
        a = randint(2, n - 2)
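The note above that M = lcm(1..B) = reduce(ilcm, range(2, B + 1)) can be checked directly; this sketch reproduces the gcd test for the B=16, a=3 example from the docstring.

from functools import reduce
from sympy import ilcm, igcd

n = 257*1009
B, a = 16, 3
M = reduce(ilcm, range(2, B + 1))   # lcm(1..B), the exponent used internally
print(igcd(pow(a, M, n) - 1, n))    # 1009, matching pollard_pm1(n, B=16, a=3)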
 
 | 
	pollard_pm1 
 | 
	Repo-Level 
 | 
					
	sympy 
 | 
	81 
 | 
	sympy/simplify/powsimp.py 
 | 
	def powsimp(expr, deep=False, combine='all', force=False, measure=count_ops):
    """
    Reduce expression by combining powers with similar bases and exponents.
    Explanation
    ===========
    If ``deep`` is ``True`` then powsimp() will also simplify arguments of
    functions. By default ``deep`` is set to ``False``.
    If ``force`` is ``True`` then bases will be combined without checking for
    assumptions, e.g. sqrt(x)*sqrt(y) -> sqrt(x*y) which is not true
    if x and y are both negative.
    You can make powsimp() only combine bases or only combine exponents by
    changing combine='base' or combine='exp'.  By default, combine='all',
    which does both.  combine='base' will only combine::
         a   a          a                          2x      x
        x * y  =>  (x*y)   as well as things like 2   =>  4
    and combine='exp' will only combine
    ::
         a   b      (a + b)
        x * x  =>  x
    combine='exp' will strictly only combine exponents in the way that used
    to be automatic.  Also use deep=True if you need the old behavior.
    When combine='all', 'exp' is evaluated first.  Consider the first
    example below for when there could be an ambiguity relating to this.
    This is done so things like the second example can be completely
    combined.  If you want 'base' combined first, do something like
    powsimp(powsimp(expr, combine='base'), combine='exp').
    Examples
    ========
    >>> from sympy import powsimp, exp, log, symbols
    >>> from sympy.abc import x, y, z, n
    >>> powsimp(x**y*x**z*y**z, combine='all')
    x**(y + z)*y**z
    >>> powsimp(x**y*x**z*y**z, combine='exp')
    x**(y + z)*y**z
    >>> powsimp(x**y*x**z*y**z, combine='base', force=True)
    x**y*(x*y)**z
    >>> powsimp(x**z*x**y*n**z*n**y, combine='all', force=True)
    (n*x)**(y + z)
    >>> powsimp(x**z*x**y*n**z*n**y, combine='exp')
    n**(y + z)*x**(y + z)
    >>> powsimp(x**z*x**y*n**z*n**y, combine='base', force=True)
    (n*x)**y*(n*x)**z
    >>> x, y = symbols('x y', positive=True)
    >>> powsimp(log(exp(x)*exp(y)))
    log(exp(x)*exp(y))
    >>> powsimp(log(exp(x)*exp(y)), deep=True)
    x + y
    Radicals with Mul bases will be combined if combine='exp'
    >>> from sympy import sqrt
    >>> x, y = symbols('x y')
    Two radicals are automatically joined through Mul:
    >>> a=sqrt(x*sqrt(y))
    >>> a*a**3 == a**4
    True
    But if an integer power of that radical has been
    autoexpanded then Mul does not join the resulting factors:
    >>> a**4 # auto expands to a Mul, no longer a Pow
    x**2*y
    >>> _*a # so Mul doesn't combine them
    x**2*y*sqrt(x*sqrt(y))
    >>> powsimp(_) # but powsimp will
    (x*sqrt(y))**(5/2)
    >>> powsimp(x*y*a) # but won't when doing so would violate assumptions
    x*y*sqrt(x*sqrt(y))
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_powsimp.txt 
 | 
	def powsimp(expr, deep=False, combine='all', force=False, measure=count_ops):
    """
    Reduce expression by combining powers with similar bases and exponents.
    Explanation
    ===========
    If ``deep`` is ``True`` then powsimp() will also simplify arguments of
    functions. By default ``deep`` is set to ``False``.
    If ``force`` is ``True`` then bases will be combined without checking for
    assumptions, e.g. sqrt(x)*sqrt(y) -> sqrt(x*y) which is not true
    if x and y are both negative.
    You can make powsimp() only combine bases or only combine exponents by
    changing combine='base' or combine='exp'.  By default, combine='all',
    which does both.  combine='base' will only combine::
         a   a          a                          2x      x
        x * y  =>  (x*y)   as well as things like 2   =>  4
    and combine='exp' will only combine
    ::
         a   b      (a + b)
        x * x  =>  x
    combine='exp' will strictly only combine exponents in the way that used
    to be automatic.  Also use deep=True if you need the old behavior.
    When combine='all', 'exp' is evaluated first.  Consider the first
    example below for when there could be an ambiguity relating to this.
    This is done so things like the second example can be completely
    combined.  If you want 'base' combined first, do something like
    powsimp(powsimp(expr, combine='base'), combine='exp').
    Examples
    ========
    >>> from sympy import powsimp, exp, log, symbols
    >>> from sympy.abc import x, y, z, n
    >>> powsimp(x**y*x**z*y**z, combine='all')
    x**(y + z)*y**z
    >>> powsimp(x**y*x**z*y**z, combine='exp')
    x**(y + z)*y**z
    >>> powsimp(x**y*x**z*y**z, combine='base', force=True)
    x**y*(x*y)**z
    >>> powsimp(x**z*x**y*n**z*n**y, combine='all', force=True)
    (n*x)**(y + z)
    >>> powsimp(x**z*x**y*n**z*n**y, combine='exp')
    n**(y + z)*x**(y + z)
    >>> powsimp(x**z*x**y*n**z*n**y, combine='base', force=True)
    (n*x)**y*(n*x)**z
    >>> x, y = symbols('x y', positive=True)
    >>> powsimp(log(exp(x)*exp(y)))
    log(exp(x)*exp(y))
    >>> powsimp(log(exp(x)*exp(y)), deep=True)
    x + y
    Radicals with Mul bases will be combined if combine='exp'
    >>> from sympy import sqrt
    >>> x, y = symbols('x y')
    Two radicals are automatically joined through Mul:
    >>> a=sqrt(x*sqrt(y))
    >>> a*a**3 == a**4
    True
    But if an integer power of that radical has been
    autoexpanded then Mul does not join the resulting factors:
    >>> a**4 # auto expands to a Mul, no longer a Pow
    x**2*y
    >>> _*a # so Mul doesn't combine them
    x**2*y*sqrt(x*sqrt(y))
    >>> powsimp(_) # but powsimp will
    (x*sqrt(y))**(5/2)
    >>> powsimp(x*y*a) # but won't when doing so would violate assumptions
    x*y*sqrt(x*sqrt(y))
    """
    def recurse(arg, **kwargs):
        _deep = kwargs.get('deep', deep)
        _combine = kwargs.get('combine', combine)
        _force = kwargs.get('force', force)
        _measure = kwargs.get('measure', measure)
        return powsimp(arg, _deep, _combine, _force, _measure)
    expr = sympify(expr)
    if (not isinstance(expr, Basic) or isinstance(expr, MatrixSymbol) or (
            expr.is_Atom or expr in (exp_polar(0), exp_polar(1)))):
        return expr
    if deep or expr.is_Add or expr.is_Mul and _y not in expr.args:
        expr = expr.func(*[recurse(w) for w in expr.args])
    if expr.is_Pow:
        return recurse(expr*_y, deep=False)/_y
    if not expr.is_Mul:
        return expr
    # handle the Mul
    if combine in ('exp', 'all'):
        # Collect base/exp data, while maintaining order in the
        # non-commutative parts of the product
        c_powers = defaultdict(list)
        nc_part = []
        newexpr = []
        coeff = S.One
        for term in expr.args:
            if term.is_Rational:
                coeff *= term
                continue
            if term.is_Pow:
                term = _denest_pow(term)
            if term.is_commutative:
                b, e = term.as_base_exp()
                if deep:
                    b, e = [recurse(i) for i in [b, e]]
                if b.is_Pow or isinstance(b, exp):
                    # don't let smthg like sqrt(x**a) split into x**a, 1/2
                    # or else it will be joined as x**(a/2) later
                    b, e = b**e, S.One
                c_powers[b].append(e)
            else:
                # This is the logic that combines exponents for equal,
                # but non-commutative bases: A**x*A**y == A**(x+y).
                if nc_part:
                    b1, e1 = nc_part[-1].as_base_exp()
                    b2, e2 = term.as_base_exp()
                    if (b1 == b2 and
                            e1.is_commutative and e2.is_commutative):
                        nc_part[-1] = Pow(b1, Add(e1, e2))
                        continue
                nc_part.append(term)
        # add up exponents of common bases
        for b, e in ordered(iter(c_powers.items())):
            # allow 2**x/4 -> 2**(x - 2); don't do this when b and e are
            # Numbers since autoevaluation will undo it, e.g.
            # 2**(1/3)/4 -> 2**(1/3 - 2) -> 2**(1/3)/4
            if (b and b.is_Rational and not all(ei.is_Number for ei in e) and \
                    coeff is not S.One and
                    b not in (S.One, S.NegativeOne)):
                m = multiplicity(abs(b), abs(coeff))
                if m:
                    e.append(m)
                    coeff /= b**m
            c_powers[b] = Add(*e)
        if coeff is not S.One:
            if coeff in c_powers:
                c_powers[coeff] += S.One
            else:
                c_powers[coeff] = S.One
        # convert to plain dictionary
        c_powers = dict(c_powers)
        # check for base and inverted base pairs
        be = list(c_powers.items())
        skip = set()  # skip if we already saw them
        for b, e in be:
            if b in skip:
                continue
            bpos = b.is_positive or b.is_polar
            if bpos:
                binv = 1/b
                if b != binv and binv in c_powers:
                    if b.as_numer_denom()[0] is S.One:
                        c_powers.pop(b)
                        c_powers[binv] -= e
                    else:
                        skip.add(binv)
                        e = c_powers.pop(binv)
                        c_powers[b] -= e
        # check for base and negated base pairs
        be = list(c_powers.items())
        _n = S.NegativeOne
        for b, e in be:
            if (b.is_Symbol or b.is_Add) and -b in c_powers and b in c_powers:
                if (b.is_positive is not None or e.is_integer):
                    if e.is_integer or b.is_negative:
                        c_powers[-b] += c_powers.pop(b)
                    else:  # (-b).is_positive so use its e
                        e = c_powers.pop(-b)
                        c_powers[b] += e
                    if _n in c_powers:
                        c_powers[_n] += e
                    else:
                        c_powers[_n] = e
        # filter c_powers and convert to a list
        c_powers = [(b, e) for b, e in c_powers.items() if e]
        # ==============================================================
        # check for Mul bases of Rational powers that can be combined with
        # separated bases, e.g. x*sqrt(x*y)*sqrt(x*sqrt(x*y)) ->
        # (x*sqrt(x*y))**(3/2)
        # ---------------- helper functions
        def ratq(x):
            '''Return Rational part of x's exponent as it appears in the bkey.
            '''
            return bkey(x)[0][1]
        def bkey(b, e=None):
            '''Return (b**s, c.q), c.p where e -> c*s. If e is not given then
            it will be taken by using as_base_exp() on the input b.
            e.g.
                x**3/2 -> (x, 2), 3
                x**y -> (x**y, 1), 1
                x**(2*y/3) -> (x**y, 3), 2
                exp(x/2) -> (exp(a), 2), 1
            '''
            if e is not None:  # coming from c_powers or from below
                if e.is_Integer:
                    return (b, S.One), e
                elif e.is_Rational:
                    return (b, Integer(e.q)), Integer(e.p)
                else:
                    c, m = e.as_coeff_Mul(rational=True)
                    if c is not S.One:
                        if m.is_integer:
                            return (b, Integer(c.q)), m*Integer(c.p)
                        return (b**m, Integer(c.q)), Integer(c.p)
                    else:
                        return (b**e, S.One), S.One
            else:
                return bkey(*b.as_base_exp())
        def update(b):
            '''Decide what to do with base, b. If its exponent is now an
            integer multiple of the Rational denominator, then remove it
            and put the factors of its base in the common_b dictionary or
            update the existing bases if necessary. If it has been zeroed
            out, simply remove the base.
            '''
            newe, r = divmod(common_b[b], b[1])
            if not r:
                common_b.pop(b)
                if newe:
                    for m in Mul.make_args(b[0]**newe):
                        b, e = bkey(m)
                        if b not in common_b:
                            common_b[b] = 0
                        common_b[b] += e
                        if b[1] != 1:
                            bases.append(b)
        # ---------------- end of helper functions
        # assemble a dictionary of the factors having a Rational power
        common_b = {}
        done = []
        bases = []
        for b, e in c_powers:
            b, e = bkey(b, e)
            if b in common_b:
                common_b[b] = common_b[b] + e
            else:
                common_b[b] = e
            if b[1] != 1 and b[0].is_Mul:
                bases.append(b)
        bases.sort(key=default_sort_key)  # this makes tie-breaking canonical
        bases.sort(key=measure, reverse=True)  # handle longest first
        for base in bases:
            if base not in common_b:  # it may have been removed already
                continue
            b, exponent = base
            last = False  # True when no factor of base is a radical
            qlcm = 1  # the lcm of the radical denominators
            while True:
                bstart = b
                qstart = qlcm
                bb = []  # list of factors
                ee = []  # (factor's expo. and it's current value in common_b)
                for bi in Mul.make_args(b):
                    bib, bie = bkey(bi)
                    if bib not in common_b or common_b[bib] < bie:
                        ee = bb = []  # failed
                        break
                    ee.append([bie, common_b[bib]])
                    bb.append(bib)
                if ee:
                    # find the number of integral extractions possible
                    # e.g. [(1, 2), (2, 2)] -> min(2/1, 2/2) -> 1
                    min1 = ee[0][1]//ee[0][0]
                    for i in range(1, len(ee)):
                        rat = ee[i][1]//ee[i][0]
                        if rat < 1:
                            break
                        min1 = min(min1, rat)
                    else:
                        # update base factor counts
                        # e.g. if ee = [(2, 5), (3, 6)] then min1 = 2
                        # and the new base counts will be 5-2*2 and 6-2*3
                        for i in range(len(bb)):
                            common_b[bb[i]] -= min1*ee[i][0]
                            update(bb[i])
                        # update the count of the base
                        # e.g. x**2*y*sqrt(x*sqrt(y)) the count of x*sqrt(y)
                        # will increase by 4 to give bkey (x*sqrt(y), 2, 5)
                        common_b[base] += min1*qstart*exponent
                if (last  # no more radicals in base
                    or len(common_b) == 1  # nothing left to join with
                    or all(k[1] == 1 for k in common_b)  # no rad's in common_b
                        ):
                    break
                # see what we can exponentiate base by to remove any radicals
                # so we know what to search for
                # e.g. if base were x**(1/2)*y**(1/3) then we should
                # exponentiate by 6 and look for powers of x and y in the ratio
                # of 2 to 3
                qlcm = lcm([ratq(bi) for bi in Mul.make_args(bstart)])
                if qlcm == 1:
                    break  # we are done
                b = bstart**qlcm
                qlcm *= qstart
                if all(ratq(bi) == 1 for bi in Mul.make_args(b)):
                    last = True  # we are going to be done after this next pass
            # this base no longer can find anything to join with and
            # since it was longer than any other we are done with it
            b, q = base
            done.append((b, common_b.pop(base)*Rational(1, q)))
        # update c_powers and get ready to continue with powsimp
        c_powers = done
        # there may be terms still in common_b that were bases that were
        # identified as needing processing, so remove those, too
        for (b, q), e in common_b.items():
            if (b.is_Pow or isinstance(b, exp)) and \
                    q is not S.One and not b.exp.is_Rational:
                b, be = b.as_base_exp()
                b = b**(be/q)
            else:
                b = root(b, q)
            c_powers.append((b, e))
        check = len(c_powers)
        c_powers = dict(c_powers)
        assert len(c_powers) == check  # there should have been no duplicates
        # ==============================================================
        # rebuild the expression
        newexpr = expr.func(*(newexpr + [Pow(b, e) for b, e in c_powers.items()]))
        if combine == 'exp':
            return expr.func(newexpr, expr.func(*nc_part))
        else:
            return recurse(expr.func(*nc_part), combine='base') * \
                recurse(newexpr, combine='base')
    elif combine == 'base':
        # Build c_powers and nc_part.  These must both be lists not
        # dicts because exp's are not combined.
        c_powers = []
        nc_part = []
        for term in expr.args:
            if term.is_commutative:
                c_powers.append(list(term.as_base_exp()))
            else:
                nc_part.append(term)
        # Pull out numerical coefficients from exponent if assumptions allow
        # e.g., 2**(2*x) => 4**x
        for i in range(len(c_powers)):
            b, e = c_powers[i]
            if not (all(x.is_nonnegative for x in b.as_numer_denom()) or e.is_integer or force or b.is_polar):
                continue
            exp_c, exp_t = e.as_coeff_Mul(rational=True)
            if exp_c is not S.One and exp_t is not S.One:
                c_powers[i] = [Pow(b, exp_c), exp_t]
        # Combine bases whenever they have the same exponent and
        # assumptions allow
        # first gather the potential bases under the common exponent
        c_exp = defaultdict(list)
        for b, e in c_powers:
            if deep:
                e = recurse(e)
            if e.is_Add and (b.is_positive or e.is_integer):
                e = factor_terms(e)
                if _coeff_isneg(e):
                    e = -e
                    b = 1/b
            c_exp[e].append(b)
        del c_powers
        # Merge back in the results of the above to form a new product
        c_powers = defaultdict(list)
        for e in c_exp:
            bases = c_exp[e]
            # calculate the new base for e
            if len(bases) == 1:
                new_base = bases[0]
            elif e.is_integer or force:
                new_base = expr.func(*bases)
            else:
                # see which ones can be joined
                unk = []
                nonneg = []
                neg = []
                for bi in bases:
                    if bi.is_negative:
                        neg.append(bi)
                    elif bi.is_nonnegative:
                        nonneg.append(bi)
                    elif bi.is_polar:
                        nonneg.append(
                            bi)  # polar can be treated like non-negative
                    else:
                        unk.append(bi)
                if len(unk) == 1 and not neg or len(neg) == 1 and not unk:
                    # a single neg or a single unk can join the rest
                    nonneg.extend(unk + neg)
                    unk = neg = []
                elif neg:
                    # their negative signs cancel in groups of 2*q if we know
                    # that e = p/q else we have to treat them as unknown
                    israt = False
                    if e.is_Rational:
                        israt = True
                    else:
                        p, d = e.as_numer_denom()
                        if p.is_integer and d.is_integer:
                            israt = True
                    if israt:
                        neg = [-w for w in neg]
                        unk.extend([S.NegativeOne]*len(neg))
                    else:
                        unk.extend(neg)
                        neg = []
                    del israt
                # these shouldn't be joined
                for b in unk:
                    c_powers[b].append(e)
                # here is a new joined base
                new_base = expr.func(*(nonneg + neg))
                # if there are positive parts they will just get separated
                # again unless some change is made
                def _terms(e):
                    # return the number of terms of this expression
                    # when multiplied out -- assuming no joining of terms
                    if e.is_Add:
                        return sum(_terms(ai) for ai in e.args)
                    if e.is_Mul:
                        return prod([_terms(mi) for mi in e.args])
                    return 1
                xnew_base = expand_mul(new_base, deep=False)
                if len(Add.make_args(xnew_base)) < _terms(new_base):
                    new_base = factor_terms(xnew_base)
            c_powers[new_base].append(e)
        # break out the powers from c_powers now
        c_part = [Pow(b, ei) for b, e in c_powers.items() for ei in e]
        # we're done
        return expr.func(*(c_part + nc_part))
    else:
        raise ValueError("combine must be one of ('all', 'exp', 'base').")
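A small sketch in the docstring's own doctest style of the two passes described above: with positive bases the assumptions make ``force`` unnecessary, so ``combine='base'`` joins equal exponents directly, and ``combine='all'`` merges exponents first and then joins the resulting bases (outputs shown are the expected forms).
>>> from sympy import powsimp, symbols
>>> x, n = symbols('x n', positive=True)
>>> y, z = symbols('y z')
>>> powsimp(x**y*n**y, combine='base')
(n*x)**y
>>> powsimp(x**y*x**z*n**y*n**z)
(n*x)**(y + z)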
 
 | 
	powsimp 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	82 
 | 
	sympy/series/formal.py 
 | 
	def rational_algorithm(f, x, k, order=4, full=False):
    """
    Rational algorithm for computing
    formula of coefficients of Formal Power Series
    of a function.
    Explanation
    ===========
    Applicable when f(x) or some derivative of f(x)
    is a rational function in x.
    :func:`rational_algorithm` uses :func:`~.apart` function for partial fraction
    decomposition. :func:`~.apart` by default uses 'undetermined coefficients
    method'. By setting ``full=True``, 'Bronstein's algorithm' can be used
    instead.
    Looks for a derivative of the function up to 4th order (by default).
    This can be overridden using the ``order`` option.

    Parameters
    ==========
    x : Symbol
    order : int, optional
        Order of the derivative of ``f``, Default is 4.
    full : bool
    Returns
    =======
    formula : Expr
    ind : Expr
        Independent terms.
    order : int
    full : bool
    Examples
    ========
    >>> from sympy import log, atan
    >>> from sympy.series.formal import rational_algorithm as ra
    >>> from sympy.abc import x, k
    >>> ra(1 / (1 - x), x, k)
    (1, 0, 0)
    >>> ra(log(1 + x), x, k)
    (-1/((-1)**k*k), 0, 1)
    >>> ra(atan(x), x, k, full=True)
    ((-I/(2*(-I)**k) + I/(2*I**k))/k, 0, 1)
    Notes
    =====
    By setting ``full=True``, range of admissible functions to be solved using
    ``rational_algorithm`` can be increased. This option should be used
    carefully as it can significantly slow down the computation as ``doit`` is
    performed on the :class:`~.RootSum` object returned by the :func:`~.apart`
    function. Use ``full=False`` whenever possible.
    See Also
    ========
    sympy.polys.partfrac.apart
    References
    ==========
    .. [1] Formal Power Series - Dominik Gruntz, Wolfram Koepf
    .. [2] Power Series in Computer Algebra - Wolfram Koepf
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_rational_algorithm.txt 
 | 
	def rational_algorithm(f, x, k, order=4, full=False):
    """
    Rational algorithm for computing
    formula of coefficients of Formal Power Series
    of a function.
    Explanation
    ===========
    Applicable when f(x) or some derivative of f(x)
    is a rational function in x.
    :func:`rational_algorithm` uses :func:`~.apart` function for partial fraction
    decomposition. :func:`~.apart` by default uses 'undetermined coefficients
    method'. By setting ``full=True``, 'Bronstein's algorithm' can be used
    instead.
    Looks for a derivative of the function up to 4th order (by default).
    This can be overridden using the ``order`` option.
    Parameters
    ==========
    x : Symbol
    order : int, optional
        Order of the derivative of ``f``, Default is 4.
    full : bool
    Returns
    =======
    formula : Expr
    ind : Expr
        Independent terms.
    order : int
    full : bool
    Examples
    ========
    >>> from sympy import log, atan
    >>> from sympy.series.formal import rational_algorithm as ra
    >>> from sympy.abc import x, k
    >>> ra(1 / (1 - x), x, k)
    (1, 0, 0)
    >>> ra(log(1 + x), x, k)
    (-1/((-1)**k*k), 0, 1)
    >>> ra(atan(x), x, k, full=True)
    ((-I/(2*(-I)**k) + I/(2*I**k))/k, 0, 1)
    Notes
    =====
    By setting ``full=True``, range of admissible functions to be solved using
    ``rational_algorithm`` can be increased. This option should be used
    carefully as it can significantly slow down the computation as ``doit`` is
    performed on the :class:`~.RootSum` object returned by the :func:`~.apart`
    function. Use ``full=False`` whenever possible.
    See Also
    ========
    sympy.polys.partfrac.apart
    References
    ==========
    .. [1] Formal Power Series - Dominik Gruntz, Wolfram Koepf
    .. [2] Power Series in Computer Algebra - Wolfram Koepf
    """
    from sympy.polys import RootSum, apart
    from sympy.integrals import integrate
    diff = f
    ds = []  # list of diff
    for i in range(order + 1):
        if i:
            diff = diff.diff(x)
        if diff.is_rational_function(x):
            coeff, sep = S.Zero, S.Zero
            terms = apart(diff, x, full=full)
            if terms.has(RootSum):
                terms = terms.doit()
            for t in Add.make_args(terms):
                num, den = t.as_numer_denom()
                if not den.has(x):
                    sep += t
                else:
                    if isinstance(den, Mul):
                        # m*(n*x - a)**j -> (n*x - a)**j
                        ind = den.as_independent(x)
                        den = ind[1]
                        num /= ind[0]
                    # (n*x - a)**j -> (x - b)
                    den, j = den.as_base_exp()
                    a, xterm = den.as_coeff_add(x)
                    # term -> m/x**n
                    if not a:
                        sep += t
                        continue
                    xc = xterm[0].coeff(x)
                    a /= -xc
                    num /= xc**j
                    ak = ((-1)**j * num *
                          binomial(j + k - 1, k).rewrite(factorial) /
                          a**(j + k))
                    coeff += ak
            # Hacky, better way?
            if coeff.is_zero:
                return None
            if (coeff.has(x) or coeff.has(zoo) or coeff.has(oo) or
                    coeff.has(nan)):
                return None
            for j in range(i):
                coeff = (coeff / (k + j + 1))
                sep = integrate(sep, x)
                sep += (ds.pop() - sep).limit(x, 0)  # constant of integration
            return (coeff.subs(k, k - i), sep, i)
        else:
            ds.append(diff)
    return None
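As a small check of what the returned ``formula`` means (a sketch reusing the docstring's own ``log(1 + x)`` example): substituting successive integers for ``k`` should reproduce the Taylor coefficients (-1)**(k + 1)/k of log(1 + x).
>>> from sympy import log
>>> from sympy.series.formal import rational_algorithm
>>> from sympy.abc import x, k
>>> formula, ind, order = rational_algorithm(log(1 + x), x, k)
>>> [formula.subs(k, n) for n in range(1, 5)]
[1, -1/2, 1/3, -1/4]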
 
 | 
	rational_algorithm 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	85 
 | 
	sympy/physics/optics/utils.py 
 | 
	def refraction_angle(incident, medium1, medium2, normal=None, plane=None):
    """
    This function calculates the transmitted vector after refraction at a planar
    surface. ``medium1`` and ``medium2`` can be ``Medium`` or any sympifiable object.
    If ``incident`` is a number, it is treated as the angle of incidence (in radians),
    in which case the angle of refraction is returned.
    If ``incident`` is an object of `Ray3D`, `normal` also has to be an instance
    of `Ray3D` in order to get the output as a `Ray3D`. Please note that if
    plane of separation is not provided and normal is an instance of `Ray3D`,
    ``normal`` will be assumed to be intersecting incident ray at the plane of
    separation. This will not be the case when `normal` is a `Matrix` or
    any other sequence.
    If ``incident`` is an instance of `Ray3D` and `plane` has not been provided
    and ``normal`` is not `Ray3D`, output will be a `Matrix`.
    Parameters
    ==========
    incident : Matrix, Ray3D, sequence or a number
        Incident vector or angle of incidence
    medium1 : sympy.physics.optics.medium.Medium or sympifiable
        Medium 1 or its refractive index
    medium2 : sympy.physics.optics.medium.Medium or sympifiable
        Medium 2 or its refractive index
    normal : Matrix, Ray3D, or sequence
        Normal vector
    plane : Plane
        Plane of separation of the two media.
    Returns
    =======
    Returns an angle of refraction or a refracted ray depending on inputs.
    Examples
    ========
    >>> from sympy.physics.optics import refraction_angle
    >>> from sympy.geometry import Point3D, Ray3D, Plane
    >>> from sympy.matrices import Matrix
    >>> from sympy import symbols, pi
    >>> n = Matrix([0, 0, 1])
    >>> P = Plane(Point3D(0, 0, 0), normal_vector=[0, 0, 1])
    >>> r1 = Ray3D(Point3D(-1, -1, 1), Point3D(0, 0, 0))
    >>> refraction_angle(r1, 1, 1, n)
    Matrix([
    [ 1],
    [ 1],
    [-1]])
    >>> refraction_angle(r1, 1, 1, plane=P)
    Ray3D(Point3D(0, 0, 0), Point3D(1, 1, -1))
    With different index of refraction of the two media
    >>> n1, n2 = symbols('n1, n2')
    >>> refraction_angle(r1, n1, n2, n)
    Matrix([
    [                                n1/n2],
    [                                n1/n2],
    [-sqrt(3)*sqrt(-2*n1**2/(3*n2**2) + 1)]])
    >>> refraction_angle(r1, n1, n2, plane=P)
    Ray3D(Point3D(0, 0, 0), Point3D(n1/n2, n1/n2, -sqrt(3)*sqrt(-2*n1**2/(3*n2**2) + 1)))
    >>> round(refraction_angle(pi/6, 1.2, 1.5), 5)
    0.41152
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_refraction_angle.txt 
 | 
	def refraction_angle(incident, medium1, medium2, normal=None, plane=None):
    """
    This function calculates the transmitted vector after refraction at a planar
    surface. ``medium1`` and ``medium2`` can be ``Medium`` or any sympifiable object.
    If ``incident`` is a number, it is treated as the angle of incidence (in radians),
    in which case the angle of refraction is returned.
    If ``incident`` is an object of `Ray3D`, `normal` also has to be an instance
    of `Ray3D` in order to get the output as a `Ray3D`. Please note that if
    plane of separation is not provided and normal is an instance of `Ray3D`,
    ``normal`` will be assumed to be intersecting incident ray at the plane of
    separation. This will not be the case when `normal` is a `Matrix` or
    any other sequence.
    If ``incident`` is an instance of `Ray3D` and `plane` has not been provided
    and ``normal`` is not `Ray3D`, output will be a `Matrix`.
    Parameters
    ==========
    incident : Matrix, Ray3D, sequence or a number
        Incident vector or angle of incidence
    medium1 : sympy.physics.optics.medium.Medium or sympifiable
        Medium 1 or its refractive index
    medium2 : sympy.physics.optics.medium.Medium or sympifiable
        Medium 2 or its refractive index
    normal : Matrix, Ray3D, or sequence
        Normal vector
    plane : Plane
        Plane of separation of the two media.
    Returns
    =======
    Returns an angle of refraction or a refracted ray depending on inputs.
    Examples
    ========
    >>> from sympy.physics.optics import refraction_angle
    >>> from sympy.geometry import Point3D, Ray3D, Plane
    >>> from sympy.matrices import Matrix
    >>> from sympy import symbols, pi
    >>> n = Matrix([0, 0, 1])
    >>> P = Plane(Point3D(0, 0, 0), normal_vector=[0, 0, 1])
    >>> r1 = Ray3D(Point3D(-1, -1, 1), Point3D(0, 0, 0))
    >>> refraction_angle(r1, 1, 1, n)
    Matrix([
    [ 1],
    [ 1],
    [-1]])
    >>> refraction_angle(r1, 1, 1, plane=P)
    Ray3D(Point3D(0, 0, 0), Point3D(1, 1, -1))
    With different index of refraction of the two media
    >>> n1, n2 = symbols('n1, n2')
    >>> refraction_angle(r1, n1, n2, n)
    Matrix([
    [                                n1/n2],
    [                                n1/n2],
    [-sqrt(3)*sqrt(-2*n1**2/(3*n2**2) + 1)]])
    >>> refraction_angle(r1, n1, n2, plane=P)
    Ray3D(Point3D(0, 0, 0), Point3D(n1/n2, n1/n2, -sqrt(3)*sqrt(-2*n1**2/(3*n2**2) + 1)))
    >>> round(refraction_angle(pi/6, 1.2, 1.5), 5)
    0.41152
    """
    n1 = refractive_index_of_medium(medium1)
    n2 = refractive_index_of_medium(medium2)
    # check if an incidence angle was supplied instead of a ray
    try:
        angle_of_incidence = float(incident)
    except TypeError:
        angle_of_incidence = None
    try:
        critical_angle_ = critical_angle(medium1, medium2)
    except (ValueError, TypeError):
        critical_angle_ = None
    if angle_of_incidence is not None:
        if normal is not None or plane is not None:
            raise ValueError('Normal/plane not allowed if incident is an angle')
        if not 0.0 <= angle_of_incidence < pi*0.5:
            raise ValueError('Angle of incidence not in range [0:pi/2)')
        if critical_angle_ and angle_of_incidence > critical_angle_:
            raise ValueError('Ray undergoes total internal reflection')
        return asin(n1*sin(angle_of_incidence)/n2)
    # Treat the incident as ray below
    # A flag to check whether to return Ray3D or not
    return_ray = False
    if plane is not None and normal is not None:
        raise ValueError("Either plane or normal is acceptable.")
    if not isinstance(incident, Matrix):
        if is_sequence(incident):
            _incident = Matrix(incident)
        elif isinstance(incident, Ray3D):
            _incident = Matrix(incident.direction_ratio)
        else:
            raise TypeError(
                "incident should be a Matrix, Ray3D, or sequence")
    else:
        _incident = incident
    # If plane is provided, get direction ratios of the normal
    # to the plane from the plane else go with `normal` param.
    if plane is not None:
        if not isinstance(plane, Plane):
            raise TypeError("plane should be an instance of geometry.plane.Plane")
        # If we have the plane, we can get the intersection
        # point of incident ray and the plane and thus return
        # an instance of Ray3D.
        if isinstance(incident, Ray3D):
            return_ray = True
            intersection_pt = plane.intersection(incident)[0]
        _normal = Matrix(plane.normal_vector)
    else:
        if not isinstance(normal, Matrix):
            if is_sequence(normal):
                _normal = Matrix(normal)
            elif isinstance(normal, Ray3D):
                _normal = Matrix(normal.direction_ratio)
                if isinstance(incident, Ray3D):
                    intersection_pt = intersection(incident, normal)
                    if len(intersection_pt) == 0:
                        raise ValueError(
                            "Normal isn't concurrent with the incident ray.")
                    else:
                        return_ray = True
                        intersection_pt = intersection_pt[0]
            else:
                raise TypeError(
                    "Normal should be a Matrix, Ray3D, or sequence")
        else:
            _normal = normal
    eta = n1/n2  # Relative index of refraction
    # Calculating magnitude of the vectors
    mag_incident = sqrt(sum(i**2 for i in _incident))
    mag_normal = sqrt(sum(i**2 for i in _normal))
    # Converting vectors to unit vectors by dividing
    # them with their magnitudes
    _incident /= mag_incident
    _normal /= mag_normal
    c1 = -_incident.dot(_normal)  # cos(angle_of_incidence)
    cs2 = 1 - eta**2*(1 - c1**2)  # cos(angle_of_refraction)**2
    if cs2.is_negative:  # This is the case of total internal reflection(TIR).
        return S.Zero
    drs = eta*_incident + (eta*c1 - sqrt(cs2))*_normal
    # Multiplying unit vector by its magnitude
    drs = drs*mag_incident
    if not return_ray:
        return drs
    else:
        return Ray3D(intersection_pt, direction_ratio=drs)
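One branch not exercised by the docstring examples is total internal reflection for a vector input: when ``cs2`` above comes out negative the function returns ``S.Zero``. A minimal sketch (grazing incidence from the denser side, with n1/n2 = 3/2; the expected output is shown):
>>> from sympy import Matrix, Rational
>>> from sympy.physics.optics import refraction_angle
>>> n = Matrix([0, 0, 1])
>>> refraction_angle(Matrix([4, 3, -1]), Rational(3, 2), 1, n)
0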
 
 | 
	refraction_angle 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	87 
 | 
	sympy/polys/matrices/sdm.py 
 | 
	def sdm_irref(A):
    """RREF and pivots of a sparse matrix *A*.
    Compute the reduced row echelon form (RREF) of the matrix *A* and return a
    list of the pivot columns. This routine does not work in place and leaves
    the original matrix *A* unmodified.
    The domain of the matrix must be a field.
    Examples
    ========
    This routine works with a dict of dicts sparse representation of a matrix:
    >>> from sympy import QQ
    >>> from sympy.polys.matrices.sdm import sdm_irref
    >>> A = {0: {0: QQ(1), 1: QQ(2)}, 1: {0: QQ(3), 1: QQ(4)}}
    >>> Arref, pivots, _ = sdm_irref(A)
    >>> Arref
    {0: {0: 1}, 1: {1: 1}}
    >>> pivots
    [0, 1]
    The analogous calculation with :py:class:`~.MutableDenseMatrix` would be
    >>> from sympy import Matrix
    >>> M = Matrix([[1, 2], [3, 4]])
    >>> Mrref, pivots = M.rref()
    >>> Mrref
    Matrix([
    [1, 0],
    [0, 1]])
    >>> pivots
    (0, 1)
    Notes
    =====
    The cost of this algorithm is determined purely by the nonzero elements of
    the matrix. No part of the cost of any step in this algorithm depends on
    the number of rows or columns in the matrix. No step depends even on the
    number of nonzero rows apart from the primary loop over those rows. The
    implementation is much faster than ddm_rref for sparse matrices. In fact,
    at the time of writing, it is also (slightly) faster than the dense
    implementation even if the input is a fully dense matrix, so it seems to be
    faster in all cases.
    The elements of the matrix should support exact division with ``/``. For
    example elements of any domain that is a field (e.g. ``QQ``) should be
    fine. No attempt is made to handle inexact arithmetic.
    See Also
    ========
    sympy.polys.matrices.domainmatrix.DomainMatrix.rref
        The higher-level function that would normally be used to call this
        routine.
    sympy.polys.matrices.dense.ddm_irref
        The dense equivalent of this routine.
    sdm_rref_den
        Fraction-free version of this routine.
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_sdm_irref.txt 
 | 
	def sdm_irref(A):
    """RREF and pivots of a sparse matrix *A*.
    Compute the reduced row echelon form (RREF) of the matrix *A* and return a
    list of the pivot columns. This routine does not work in place and leaves
    the original matrix *A* unmodified.
    The domain of the matrix must be a field.
    Examples
    ========
    This routine works with a dict of dicts sparse representation of a matrix:
    >>> from sympy import QQ
    >>> from sympy.polys.matrices.sdm import sdm_irref
    >>> A = {0: {0: QQ(1), 1: QQ(2)}, 1: {0: QQ(3), 1: QQ(4)}}
    >>> Arref, pivots, _ = sdm_irref(A)
    >>> Arref
    {0: {0: 1}, 1: {1: 1}}
    >>> pivots
    [0, 1]
    The analogous calculation with :py:class:`~.MutableDenseMatrix` would be
    >>> from sympy import Matrix
    >>> M = Matrix([[1, 2], [3, 4]])
    >>> Mrref, pivots = M.rref()
    >>> Mrref
    Matrix([
    [1, 0],
    [0, 1]])
    >>> pivots
    (0, 1)
    Notes
    =====
    The cost of this algorithm is determined purely by the nonzero elements of
    the matrix. No part of the cost of any step in this algorithm depends on
    the number of rows or columns in the matrix. No step depends even on the
    number of nonzero rows apart from the primary loop over those rows. The
    implementation is much faster than ddm_rref for sparse matrices. In fact,
    at the time of writing, it is also (slightly) faster than the dense
    implementation even if the input is a fully dense matrix, so it seems to be
    faster in all cases.
    The elements of the matrix should support exact division with ``/``. For
    example elements of any domain that is a field (e.g. ``QQ``) should be
    fine. No attempt is made to handle inexact arithmetic.
    See Also
    ========
    sympy.polys.matrices.domainmatrix.DomainMatrix.rref
        The higher-level function that would normally be used to call this
        routine.
    sympy.polys.matrices.dense.ddm_irref
        The dense equivalent of this routine.
    sdm_rref_den
        Fraction-free version of this routine.
    """
    #
    # Any zeros in the matrix are not stored at all so an element is zero if
    # its row dict has no index at that key. A row is entirely zero if its
    # row index is not in the outer dict. Since rref reorders the rows and
    # removes zero rows we can completely discard the row indices. The first
    # step then copies the row dicts into a list sorted by the index of the
    # first nonzero column in each row.
    #
    # The algorithm then processes each row Ai one at a time. Previously seen
    # rows are used to cancel their pivot columns from Ai. Then a pivot from
    # Ai is chosen and is cancelled from all previously seen rows. At this
    # point Ai joins the previously seen rows. Once all rows are seen all
    # elimination has occurred and the rows are sorted by pivot column index.
    #
    # The previously seen rows are stored in two separate groups. The reduced
    # group consists of all rows that have been reduced to a single nonzero
    # element (the pivot). There is no need to attempt any further reduction
    # with these. Rows that still have other nonzeros need to be considered
    # when Ai is cancelled from the previously seen rows.
    #
    # A dict nonzerocolumns is used to map from a column index to a set of
    # previously seen rows that still have a nonzero element in that column.
    # This means that we can cancel the pivot from Ai into the previously seen
    # rows without needing to loop over each row that might have a zero in
    # that column.
    #
    # Row dicts sorted by index of first nonzero column
    # (Maybe sorting is not needed/useful.)
    Arows = sorted((Ai.copy() for Ai in A.values()), key=min)
    # Each processed row has an associated pivot column.
    # pivot_row_map maps from the pivot column index to the row dict.
    # This means that we can represent a set of rows purely as a set of their
    # pivot indices.
    pivot_row_map = {}
    # Set of pivot indices for rows that are fully reduced to a single nonzero.
    reduced_pivots = set()
    # Set of pivot indices for rows not fully reduced
    nonreduced_pivots = set()
    # Map from column index to a set of pivot indices representing the rows
    # that have a nonzero at that column.
    nonzero_columns = defaultdict(set)
    while Arows:
        # Select pivot element and row
        Ai = Arows.pop()
        # Nonzero columns from fully reduced pivot rows can be removed
        Ai = {j: Aij for j, Aij in Ai.items() if j not in reduced_pivots}
        # Others require full row cancellation
        for j in nonreduced_pivots & set(Ai):
            Aj = pivot_row_map[j]
            Aij = Ai[j]
            Ainz = set(Ai)
            Ajnz = set(Aj)
            for k in Ajnz - Ainz:
                Ai[k] = - Aij * Aj[k]
            Ai.pop(j)
            Ainz.remove(j)
            for k in Ajnz & Ainz:
                Aik = Ai[k] - Aij * Aj[k]
                if Aik:
                    Ai[k] = Aik
                else:
                    Ai.pop(k)
        # We have now cancelled previously seen pivots from Ai.
        # If it is zero then discard it.
        if not Ai:
            continue
        # Choose a pivot from Ai:
        j = min(Ai)
        Aij = Ai[j]
        pivot_row_map[j] = Ai
        Ainz = set(Ai)
        # Normalise the pivot row to make the pivot 1.
        #
        # This approach is slow for some domains. Cross cancellation might be
        # better for e.g. QQ(x) with division delayed to the final steps.
        Aijinv = Aij**-1
        for l in Ai:
            Ai[l] *= Aijinv
        # Use Aij to cancel column j from all previously seen rows
        for k in nonzero_columns.pop(j, ()):
            Ak = pivot_row_map[k]
            Akj = Ak[j]
            Aknz = set(Ak)
            for l in Ainz - Aknz:
                Ak[l] = - Akj * Ai[l]
                nonzero_columns[l].add(k)
            Ak.pop(j)
            Aknz.remove(j)
            for l in Ainz & Aknz:
                Akl = Ak[l] - Akj * Ai[l]
                if Akl:
                    Ak[l] = Akl
                else:
                    # Drop nonzero elements
                    Ak.pop(l)
                    if l != j:
                        nonzero_columns[l].remove(k)
            if len(Ak) == 1:
                reduced_pivots.add(k)
                nonreduced_pivots.remove(k)
        if len(Ai) == 1:
            reduced_pivots.add(j)
        else:
            nonreduced_pivots.add(j)
            for l in Ai:
                if l != j:
                    nonzero_columns[l].add(j)
    # All done!
    pivots = sorted(reduced_pivots | nonreduced_pivots)
    pivot2row = {p: n for n, p in enumerate(pivots)}
    nonzero_columns = {c: {pivot2row[p] for p in s} for c, s in nonzero_columns.items()}
    rows = [pivot_row_map[i] for i in pivots]
    rref = dict(enumerate(rows))
    return rref, pivots, nonzero_columns
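A small rank-deficient sketch complementing the docstring example: with the second row a multiple of the first, only one pivot survives and the returned dict keeps the single reduced row (expected output shown).
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import sdm_irref
>>> A = {0: {0: QQ(1), 1: QQ(2), 2: QQ(3)},
...      1: {0: QQ(2), 1: QQ(4), 2: QQ(6)}}
>>> rref, pivots, _ = sdm_irref(A)
>>> rref
{0: {0: 1, 1: 2, 2: 3}}
>>> pivots
[0]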
 
 | 
	sdm_irref 
 | 
	Self-Contained 
 | 
					
	sympy 
 | 
	90 
 | 
	sympy/solvers/polysys.py 
 | 
	def solve_generic(polys, opt, strict=False):
    """
    Solve a generic system of polynomial equations.
    Returns all possible solutions over C[x_1, x_2, ..., x_m] of a
    set F = { f_1, f_2, ..., f_n } of polynomial equations, using
    Groebner basis approach. For now only zero-dimensional systems
    are supported, which means F can have at most a finite number
    of solutions. If the basis contains only the ground, None is
    returned.
    The algorithm relies on the fact that if G is a basis
    of F with respect to an elimination order (here lexicographic
    order is used), then G and F generate the same ideal and hence have the
    same set of solutions. By the elimination property, if G is a
    reduced, zero-dimensional Groebner basis, then it contains a
    univariate polynomial (in its last variable). This can be
    solved by computing its roots. Substituting each computed root
    for the last (eliminated) variable in the other elements of G
    generates a new polynomial system. Applying this procedure
    recursively, a finite number of solutions can be found.
    The ability to find all solutions by this procedure depends
    on the root-finding algorithms. If no solutions were found, it
    means only that roots() failed, even though the system is solvable. To
    overcome this difficulty, use numerical algorithms instead.
    Parameters
    ==========
    polys: a list/tuple/set
        Listing all the polynomial equations that need to be solved
    opt: an Options object
        For specifying keyword arguments and generators
    strict: a boolean
        If strict is True, NotImplementedError will be raised if the solution
        is known to be incomplete
    Returns
    =======
    List[Tuple]
        a list of tuples with elements being solutions for the
        symbols in the order they were passed as gens
    None
        None is returned when the computed basis contains only the ground.
    References
    ==========
    .. [Buchberger01] B. Buchberger, Groebner Bases: A Short
    Introduction for Systems Theorists, In: R. Moreno-Diaz,
    B. Buchberger, J.L. Freire, Proceedings of EUROCAST'01,
    February, 2001
    .. [Cox97] D. Cox, J. Little, D. O'Shea, Ideals, Varieties
    and Algorithms, Springer, Second Edition, 1997, pp. 112
    Raises
    ========
    NotImplementedError
        If the system is not zero-dimensional (does not have a finite
        number of solutions)
    UnsolvableFactorError
        If ``strict`` is True and not all solution components are
        expressible in radicals
    Examples
    ========
    >>> from sympy import Poly, Options
    >>> from sympy.solvers.polysys import solve_generic
    >>> from sympy.abc import x, y
    >>> NewOption = Options((x, y), {'domain': 'ZZ'})
    >>> a = Poly(x - y + 5, x, y, domain='ZZ')
    >>> b = Poly(x + y - 3, x, y, domain='ZZ')
    >>> solve_generic([a, b], NewOption)
    [(-1, 4)]
    >>> a = Poly(x - 2*y + 5, x, y, domain='ZZ')
    >>> b = Poly(2*x - y - 3, x, y, domain='ZZ')
    >>> solve_generic([a, b], NewOption)
    [(11/3, 13/3)]
    >>> a = Poly(x**2 + y, x, y, domain='ZZ')
    >>> b = Poly(x + y*4, x, y, domain='ZZ')
    >>> solve_generic([a, b], NewOption)
    [(0, 0), (1/4, -1/16)]
    >>> a = Poly(x**5 - x + y**3, x, y, domain='ZZ')
    >>> b = Poly(y**2 - 1, x, y, domain='ZZ')
    >>> solve_generic([a, b], NewOption, strict=True)
    Traceback (most recent call last):
    ...
    UnsolvableFactorError
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_solve_generic.txt 
 | 
	def solve_generic(polys, opt, strict=False):
    """
    Solve a generic system of polynomial equations.
    Returns all possible solutions over C[x_1, x_2, ..., x_m] of a
    set F = { f_1, f_2, ..., f_n } of polynomial equations, using
    Groebner basis approach. For now only zero-dimensional systems
    are supported, which means F can have at most a finite number
    of solutions. If the basis contains only the ground, None is
    returned.
    The algorithm relies on the fact that if G is a basis
    of F with respect to an elimination order (here lexicographic
    order is used), then G and F generate the same ideal and hence have the
    same set of solutions. By the elimination property, if G is a
    reduced, zero-dimensional Groebner basis, then it contains a
    univariate polynomial (in its last variable). This can be
    solved by computing its roots. Substituting each computed root
    for the last (eliminated) variable in the other elements of G
    generates a new polynomial system. Applying this procedure
    recursively, a finite number of solutions can be found.
    The ability to find all solutions by this procedure depends
    on the root-finding algorithms. If no solutions were found, it
    means only that roots() failed, even though the system is solvable. To
    overcome this difficulty, use numerical algorithms instead.
    Parameters
    ==========
    polys: a list/tuple/set
        Listing all the polynomial equations that need to be solved
    opt: an Options object
        For specifying keyword arguments and generators
    strict: a boolean
        If strict is True, NotImplementedError will be raised if the solution
        is known to be incomplete
    Returns
    =======
    List[Tuple]
        a list of tuples with elements being solutions for the
        symbols in the order they were passed as gens
    None
        None is returned when the computed basis contains only the ground.
    References
    ==========
    .. [Buchberger01] B. Buchberger, Groebner Bases: A Short
    Introduction for Systems Theorists, In: R. Moreno-Diaz,
    B. Buchberger, J.L. Freire, Proceedings of EUROCAST'01,
    February, 2001
    .. [Cox97] D. Cox, J. Little, D. O'Shea, Ideals, Varieties
    and Algorithms, Springer, Second Edition, 1997, pp. 112
    Raises
    ========
    NotImplementedError
        If the system is not zero-dimensional (does not have a finite
        number of solutions)
    UnsolvableFactorError
        If ``strict`` is True and not all solution components are
        expressible in radicals
    Examples
    ========
    >>> from sympy import Poly, Options
    >>> from sympy.solvers.polysys import solve_generic
    >>> from sympy.abc import x, y
    >>> NewOption = Options((x, y), {'domain': 'ZZ'})
    >>> a = Poly(x - y + 5, x, y, domain='ZZ')
    >>> b = Poly(x + y - 3, x, y, domain='ZZ')
    >>> solve_generic([a, b], NewOption)
    [(-1, 4)]
    >>> a = Poly(x - 2*y + 5, x, y, domain='ZZ')
    >>> b = Poly(2*x - y - 3, x, y, domain='ZZ')
    >>> solve_generic([a, b], NewOption)
    [(11/3, 13/3)]
    >>> a = Poly(x**2 + y, x, y, domain='ZZ')
    >>> b = Poly(x + y*4, x, y, domain='ZZ')
    >>> solve_generic([a, b], NewOption)
    [(0, 0), (1/4, -1/16)]
    >>> a = Poly(x**5 - x + y**3, x, y, domain='ZZ')
    >>> b = Poly(y**2 - 1, x, y, domain='ZZ')
    >>> solve_generic([a, b], NewOption, strict=True)
    Traceback (most recent call last):
    ...
    UnsolvableFactorError
    """
    def _is_univariate(f):
        """Returns True if 'f' is univariate in its last variable. """
        for monom in f.monoms():
            if any(monom[:-1]):
                return False
        return True
    def _subs_root(f, gen, zero):
        """Replace generator with a root so that the result is nice. """
        p = f.as_expr({gen: zero})
        if f.degree(gen) >= 2:
            p = p.expand(deep=False)
        return p
    def _solve_reduced_system(system, gens, entry=False):
        """Recursively solves reduced polynomial systems. """
        if len(system) == len(gens) == 1:
            # the below line will produce UnsolvableFactorError if
            # strict=True and the solution from `roots` is incomplete
            zeros = list(roots(system[0], gens[-1], strict=strict).keys())
            return [(zero,) for zero in zeros]
        basis = groebner(system, gens, polys=True)
        if len(basis) == 1 and basis[0].is_ground:
            if not entry:
                return []
            else:
                return None
        univariate = list(filter(_is_univariate, basis))
        if len(basis) < len(gens):
            raise NotImplementedError(filldedent('''
                only zero-dimensional systems supported
                (finite number of solutions)
                '''))
        if len(univariate) == 1:
            f = univariate.pop()
        else:
            raise NotImplementedError(filldedent('''
                only zero-dimensional systems supported
                (finite number of solutions)
                '''))
        gens = f.gens
        gen = gens[-1]
        # the below line will produce UnsolvableFactorError if
        # strict=True and the solution from `roots` is incomplete
        zeros = list(roots(f.ltrim(gen), strict=strict).keys())
        if not zeros:
            return []
        if len(basis) == 1:
            return [(zero,) for zero in zeros]
        solutions = []
        for zero in zeros:
            new_system = []
            new_gens = gens[:-1]
            for b in basis[:-1]:
                eq = _subs_root(b, gen, zero)
                if eq is not S.Zero:
                    new_system.append(eq)
            for solution in _solve_reduced_system(new_system, new_gens):
                solutions.append(solution + (zero,))
        if solutions and len(solutions[0]) != len(gens):
            raise NotImplementedError(filldedent('''
                only zero-dimensional systems supported
                (finite number of solutions)
                '''))
        return solutions
    try:
        result = _solve_reduced_system(polys, opt.gens, entry=True)
    except CoercionFailed:
        raise NotImplementedError
    if result is not None:
        return sorted(result, key=default_sort_key)
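A further worked sketch in the same style as the docstring examples, for a zero-dimensional system with two solutions (a line intersected with a circle); the expected output is shown.
>>> from sympy import Poly, Options
>>> from sympy.solvers.polysys import solve_generic
>>> from sympy.abc import x, y
>>> opt = Options((x, y), {'domain': 'ZZ'})
>>> a = Poly(x**2 + y**2 - 5, x, y, domain='ZZ')
>>> b = Poly(x - y - 1, x, y, domain='ZZ')
>>> solve_generic([a, b], opt)
[(-1, -2), (2, 1)]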
 
 | 
	solve_generic 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	93 
 | 
	sympy/physics/secondquant.py 
 | 
	def substitute_dummies(expr, new_indices=False, pretty_indices={}):
    """
    Collect terms by substitution of dummy variables.
    Explanation
    ===========
    This routine allows simplification of Add expressions containing terms
    which differ only due to dummy variables.
    The idea is to substitute all dummy variables consistently depending on
    the structure of the term.  For each term, we obtain a sequence of all
    dummy variables, where the order is determined by the index range, what
    factors the index belongs to and its position in each factor.  See
    _get_ordered_dummies() for more information about the sorting of dummies.
    The index sequence is then substituted consistently in each term.
    Examples
    ========
    >>> from sympy import symbols, Function, Dummy
    >>> from sympy.physics.secondquant import substitute_dummies
    >>> a,b,c,d = symbols('a b c d', above_fermi=True, cls=Dummy)
    >>> i,j = symbols('i j', below_fermi=True, cls=Dummy)
    >>> f = Function('f')
    >>> expr = f(a,b) + f(c,d); expr
    f(_a, _b) + f(_c, _d)
    Since a, b, c and d are equivalent summation indices, the expression can be
    simplified to a single term (for which the dummy indices are still summed over)
    >>> substitute_dummies(expr)
    2*f(_a, _b)
    Controlling output:
    By default the dummy symbols that are already present in the expression
    will be reused in a different permutation.  However, if new_indices=True,
    new dummies will be generated and inserted.  The keyword 'pretty_indices'
    can be used to control this generation of new symbols.
    By default the new dummies will be generated in the form i_1, i_2, a_1,
    etc.  If you supply a dictionary with key:value pairs in the form:
        { index_group: string_of_letters }
    The letters will be used as labels for the new dummy symbols.  The
    index_groups must be one of 'above', 'below' or 'general'.
    >>> expr = f(a,b,i,j)
    >>> my_dummies = { 'above':'st', 'below':'uv' }
    >>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies)
    f(_s, _t, _u, _v)
    If we run out of letters, or if there is no keyword for some index_group
    the default dummy generator will be used as a fallback:
    >>> p,q = symbols('p q', cls=Dummy)  # general indices
    >>> expr = f(p,q)
    >>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies)
    f(_p_0, _p_1)
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_substitute_dummies.txt 
 | 
	def substitute_dummies(expr, new_indices=False, pretty_indices={}):
    """
    Collect terms by substitution of dummy variables.
    Explanation
    ===========
    This routine allows simplification of Add expressions containing terms
    which differ only due to dummy variables.
    The idea is to substitute all dummy variables consistently depending on
    the structure of the term.  For each term, we obtain a sequence of all
    dummy variables, where the order is determined by the index range, what
    factors the index belongs to and its position in each factor.  See
    _get_ordered_dummies() for more information about the sorting of dummies.
    The index sequence is then substituted consistently in each term.
    Examples
    ========
    >>> from sympy import symbols, Function, Dummy
    >>> from sympy.physics.secondquant import substitute_dummies
    >>> a,b,c,d = symbols('a b c d', above_fermi=True, cls=Dummy)
    >>> i,j = symbols('i j', below_fermi=True, cls=Dummy)
    >>> f = Function('f')
    >>> expr = f(a,b) + f(c,d); expr
    f(_a, _b) + f(_c, _d)
    Since a, b, c and d are equivalent summation indices, the expression can be
    simplified to a single term (for which the dummy indices are still summed over)
    >>> substitute_dummies(expr)
    2*f(_a, _b)
    Controlling output:
    By default the dummy symbols that are already present in the expression
    will be reused in a different permutation.  However, if new_indices=True,
    new dummies will be generated and inserted.  The keyword 'pretty_indices'
    can be used to control this generation of new symbols.
    By default the new dummies will be generated in the form i_1, i_2, a_1,
    etc.  If you supply a dictionary with key:value pairs in the form
        { index_group: string_of_letters }
    the letters will be used as labels for the new dummy symbols.  The
    index_groups must be one of 'above', 'below' or 'general'.
    >>> expr = f(a,b,i,j)
    >>> my_dummies = { 'above':'st', 'below':'uv' }
    >>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies)
    f(_s, _t, _u, _v)
    If we run out of letters, or if there is no keyword for some index_group
    the default dummy generator will be used as a fallback:
    >>> p,q = symbols('p q', cls=Dummy)  # general indices
    >>> expr = f(p,q)
    >>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies)
    f(_p_0, _p_1)
    """
    # setup the replacing dummies
    if new_indices:
        letters_above = pretty_indices.get('above', "")
        letters_below = pretty_indices.get('below', "")
        letters_general = pretty_indices.get('general', "")
        len_above = len(letters_above)
        len_below = len(letters_below)
        len_general = len(letters_general)
        def _i(number):
            try:
                return letters_below[number]
            except IndexError:
                return 'i_' + str(number - len_below)
        def _a(number):
            try:
                return letters_above[number]
            except IndexError:
                return 'a_' + str(number - len_above)
        def _p(number):
            try:
                return letters_general[number]
            except IndexError:
                return 'p_' + str(number - len_general)
    aboves = []
    belows = []
    generals = []
    dummies = expr.atoms(Dummy)
    if not new_indices:
        dummies = sorted(dummies, key=default_sort_key)
    # generate lists with the dummies we will insert
    a = i = p = 0
    for d in dummies:
        assum = d.assumptions0
        if assum.get("above_fermi"):
            if new_indices:
                sym = _a(a)
                a += 1
            l1 = aboves
        elif assum.get("below_fermi"):
            if new_indices:
                sym = _i(i)
                i += 1
            l1 = belows
        else:
            if new_indices:
                sym = _p(p)
                p += 1
            l1 = generals
        if new_indices:
            l1.append(Dummy(sym, **assum))
        else:
            l1.append(d)
    expr = expr.expand()
    terms = Add.make_args(expr)
    new_terms = []
    for term in terms:
        i = iter(belows)
        a = iter(aboves)
        p = iter(generals)
        ordered = _get_ordered_dummies(term)
        subsdict = {}
        for d in ordered:
            if d.assumptions0.get('below_fermi'):
                subsdict[d] = next(i)
            elif d.assumptions0.get('above_fermi'):
                subsdict[d] = next(a)
            else:
                subsdict[d] = next(p)
        subslist = []
        final_subs = []
        for k, v in subsdict.items():
            if k == v:
                continue
            if v in subsdict:
                # We check if the sequence of substitutions end quickly.  In
                # that case, we can avoid temporary symbols if we ensure the
                # correct substitution order.
                if subsdict[v] in subsdict:
                    # (x, y) -> (y, x),  we need a temporary variable
                    x = Dummy('x')
                    subslist.append((k, x))
                    final_subs.append((x, v))
                else:
                    # (x, y) -> (y, a),  x->y must be done last
                    # but before temporary variables are resolved
                    final_subs.insert(0, (k, v))
            else:
                subslist.append((k, v))
        subslist.extend(final_subs)
        new_terms.append(term.subs(subslist))
    return Add(*new_terms)
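
The (x, y) -> (y, x) branch above needs a temporary symbol because sequential
substitution would otherwise collapse both dummies onto one. A minimal sketch
(not the SymPy implementation itself) of that swap pattern:

from sympy import symbols, Function

x, y, tmp = symbols('x y tmp')
f = Function('f')
expr = f(x, y)

# Naive sequential substitution collapses the two symbols instead of swapping them.
print(expr.subs([(x, y), (y, x)]))               # f(x, x)

# Routing one symbol through a temporary placeholder gives the intended swap.
print(expr.subs([(x, tmp), (y, x), (tmp, y)]))   # f(y, x)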
 
 | 
	substitute_dummies 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	94 
 | 
	sympy/core/sympify.py 
 | 
	def sympify(a, locals=None, convert_xor=True, strict=False, rational=False,
        evaluate=None):
    """
    Converts an arbitrary expression to a type that can be used inside SymPy.
    Explanation
    ===========
    It will convert Python ints into instances of :class:`~.Integer`, floats
    into instances of :class:`~.Float`, etc. It is also able to coerce
    symbolic expressions which inherit from :class:`~.Basic`. This can be
    useful in cooperation with SAGE.
    .. warning::
        Note that this function uses ``eval``, and thus shouldn't be used on
        unsanitized input.
    If the argument is already a type that SymPy understands, it will do
    nothing but return that value. This can be used at the beginning of a
    function to ensure you are working with the correct type.
    Examples
    ========
    >>> from sympy import sympify
    >>> sympify(2).is_integer
    True
    >>> sympify(2).is_real
    True
    >>> sympify(2.0).is_real
    True
    >>> sympify("2.0").is_real
    True
    >>> sympify("2e-45").is_real
    True
    If the expression could not be converted, a SympifyError is raised.
    >>> sympify("x***2")
    Traceback (most recent call last):
    ...
    SympifyError: SympifyError: "could not parse 'x***2'"
    When attempting to parse non-Python syntax using ``sympify``, it raises a
    ``SympifyError``:
    >>> sympify("2x+1")
    Traceback (most recent call last):
    ...
    SympifyError: Sympify of expression 'could not parse '2x+1'' failed
    To parse non-Python syntax, use ``parse_expr`` from ``sympy.parsing.sympy_parser``.
    >>> from sympy.parsing.sympy_parser import parse_expr
    >>> parse_expr("2x+1", transformations="all")
    2*x + 1
    For more details about ``transformations``: see :func:`~sympy.parsing.sympy_parser.parse_expr`
    Locals
    ------
    The sympification happens with access to everything that is loaded
    by ``from sympy import *``; anything used in a string that is not
    defined by that import will be converted to a symbol. In the following,
    the ``bitcount`` function is treated as a symbol and the ``O`` is
    interpreted as the :class:`~.Order` object (used with series) and it raises
    an error when used improperly:
    >>> s = 'bitcount(42)'
    >>> sympify(s)
    bitcount(42)
    >>> sympify("O(x)")
    O(x)
    >>> sympify("O + 1")
    Traceback (most recent call last):
    ...
    TypeError: unbound method...
    In order to have ``bitcount`` be recognized it can be imported into a
    namespace dictionary and passed as locals:
    >>> ns = {}
    >>> exec('from sympy.core.evalf import bitcount', ns)
    >>> sympify(s, locals=ns)
    6
    In order to have the ``O`` interpreted as a Symbol, identify it as such
    in the namespace dictionary. This can be done in a variety of ways; all
    three of the following are possibilities:
    >>> from sympy import Symbol
    >>> ns["O"] = Symbol("O")  # method 1
    >>> exec('from sympy.abc import O', ns)  # method 2
    >>> ns.update(dict(O=Symbol("O")))  # method 3
    >>> sympify("O + 1", locals=ns)
    O + 1
    If you want *all* single-letter and Greek-letter variables to be symbols
    then you can use the clashing-symbols dictionaries that have been defined
    there as private variables: ``_clash1`` (single-letter variables),
    ``_clash2`` (the multi-letter Greek names) or ``_clash`` (both single and
    multi-letter names that are defined in ``abc``).
    >>> from sympy.abc import _clash1
    >>> set(_clash1)  # if this fails, see issue #23903
    {'E', 'I', 'N', 'O', 'Q', 'S'}
    >>> sympify('I & Q', _clash1)
    I & Q
    Strict
    ------
    If the option ``strict`` is set to ``True``, only the types for which an
    explicit conversion has been defined are converted. In the other
    cases, a SympifyError is raised.
    >>> print(sympify(None))
    None
    >>> sympify(None, strict=True)
    Traceback (most recent call last):
    ...
    SympifyError: SympifyError: None
    .. deprecated:: 1.6
       ``sympify(obj)`` automatically falls back to ``str(obj)`` when all
       other conversion methods fail, but this is deprecated. ``strict=True``
       will disable this deprecated behavior. See
       :ref:`deprecated-sympify-string-fallback`.
    Evaluation
    ----------
    If the option ``evaluate`` is set to ``False``, then arithmetic and
    operators will be converted into their SymPy equivalents and the
    ``evaluate=False`` option will be added. Nested ``Add`` or ``Mul`` will
    be denested first. This is done via an AST transformation that replaces
    operators with their SymPy equivalents, so if an operand redefines any
    of those operations, the redefined operators will not be used. If
    argument a is not a string, the mathematical expression is evaluated
    before being passed to sympify, so adding ``evaluate=False`` will still
    return the evaluated result of expression.
    >>> sympify('2**2 / 3 + 5')
    19/3
    >>> sympify('2**2 / 3 + 5', evaluate=False)
    2**2/3 + 5
    >>> sympify('4/2+7', evaluate=True)
    9
    >>> sympify('4/2+7', evaluate=False)
    4/2 + 7
    >>> sympify(4/2+7, evaluate=False)
    9.00000000000000
    Extending
    ---------
    To extend ``sympify`` to convert custom objects (not derived from ``Basic``),
    just define a ``_sympy_`` method to your class. You can do that even to
    classes that you do not own by subclassing or adding the method at runtime.
    >>> from sympy import Matrix
    >>> class MyList1(object):
    ...     def __iter__(self):
    ...         yield 1
    ...         yield 2
    ...         return
    ...     def __getitem__(self, i): return list(self)[i]
    ...     def _sympy_(self): return Matrix(self)
    >>> sympify(MyList1())
    Matrix([
    [1],
    [2]])
    If you do not have control over the class definition you could also use the
    ``converter`` global dictionary. The key is the class and the value is a
    function that takes a single argument and returns the desired SymPy
    object, e.g. ``converter[MyList] = lambda x: Matrix(x)``.
    >>> class MyList2(object):   # XXX Do not do this if you control the class!
    ...     def __iter__(self):  #     Use _sympy_!
    ...         yield 1
    ...         yield 2
    ...         return
    ...     def __getitem__(self, i): return list(self)[i]
    >>> from sympy.core.sympify import converter
    >>> converter[MyList2] = lambda x: Matrix(x)
    >>> sympify(MyList2())
    Matrix([
    [1],
    [2]])
    Notes
    =====
    The keywords ``rational`` and ``convert_xor`` are only used
    when the input is a string.
    convert_xor
    -----------
    >>> sympify('x^y',convert_xor=True)
    x**y
    >>> sympify('x^y',convert_xor=False)
    x ^ y
    rational
    --------
    >>> sympify('0.1',rational=False)
    0.1
    >>> sympify('0.1',rational=True)
    1/10
    Sometimes autosimplification during sympification results in expressions
    that are very different in structure than what was entered. Until such
    autosimplification is no longer done, the ``kernS`` function might be of
    some use. In the example below you can see how an expression reduces to
    $-1$ by autosimplification, but does not do so when ``kernS`` is used.
    >>> from sympy.core.sympify import kernS
    >>> from sympy.abc import x
    >>> -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1
    -1
    >>> s = '-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1'
    >>> sympify(s)
    -1
    >>> kernS(s)
    -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1
    Parameters
    ==========
    a :
        - any object defined in SymPy
        - standard numeric Python types: ``int``, ``long``, ``float``, ``Decimal``
        - strings (like ``"0.09"``, ``"2e-19"`` or ``'sin(x)'``)
        - booleans, including ``None`` (will leave ``None`` unchanged)
        - dicts, lists, sets or tuples containing any of the above
    convert_xor : bool, optional
        If ``True``, treats ``^`` as exponentiation.
        If ``False``, treats ``^`` as the XOR operator.
        Used only when the input is a string.
    locals : dict, optional
        A dictionary of local names to use as the parsing namespace for
        strings (see the Locals section above).
    strict : bool, optional
        If the option strict is set to ``True``, only the types for which
        an explicit conversion has been defined are converted. In the
        other cases, a SympifyError is raised.
    rational : bool, optional
        If ``True``, converts floats into :class:`~.Rational`.
        If ``False``, floats are left unchanged.
        Used only when the input is a string.
    evaluate : bool, optional
        If ``False``, arithmetic and operators will be converted into
        their SymPy equivalents without evaluation. If ``True``, the
        expression will be evaluated and the result will be returned.
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_sympify.txt 
 | 
	def sympify(a, locals=None, convert_xor=True, strict=False, rational=False,
        evaluate=None):
    """
    Converts an arbitrary expression to a type that can be used inside SymPy.
    Explanation
    ===========
    It will convert Python ints into instances of :class:`~.Integer`, floats
    into instances of :class:`~.Float`, etc. It is also able to coerce
    symbolic expressions which inherit from :class:`~.Basic`. This can be
    useful in cooperation with SAGE.
    .. warning::
        Note that this function uses ``eval``, and thus shouldn't be used on
        unsanitized input.
    If the argument is already a type that SymPy understands, it will do
    nothing but return that value. This can be used at the beginning of a
    function to ensure you are working with the correct type.
    Examples
    ========
    >>> from sympy import sympify
    >>> sympify(2).is_integer
    True
    >>> sympify(2).is_real
    True
    >>> sympify(2.0).is_real
    True
    >>> sympify("2.0").is_real
    True
    >>> sympify("2e-45").is_real
    True
    If the expression could not be converted, a SympifyError is raised.
    >>> sympify("x***2")
    Traceback (most recent call last):
    ...
    SympifyError: SympifyError: "could not parse 'x***2'"
    When attempting to parse non-Python syntax using ``sympify``, it raises a
    ``SympifyError``:
    >>> sympify("2x+1")
    Traceback (most recent call last):
    ...
    SympifyError: Sympify of expression 'could not parse '2x+1'' failed
    To parse non-Python syntax, use ``parse_expr`` from ``sympy.parsing.sympy_parser``.
    >>> from sympy.parsing.sympy_parser import parse_expr
    >>> parse_expr("2x+1", transformations="all")
    2*x + 1
    For more details about ``transformations``: see :func:`~sympy.parsing.sympy_parser.parse_expr`
    Locals
    ------
    The sympification happens with access to everything that is loaded
    by ``from sympy import *``; anything used in a string that is not
    defined by that import will be converted to a symbol. In the following,
    the ``bitcount`` function is treated as a symbol and the ``O`` is
    interpreted as the :class:`~.Order` object (used with series) and it raises
    an error when used improperly:
    >>> s = 'bitcount(42)'
    >>> sympify(s)
    bitcount(42)
    >>> sympify("O(x)")
    O(x)
    >>> sympify("O + 1")
    Traceback (most recent call last):
    ...
    TypeError: unbound method...
    In order to have ``bitcount`` be recognized it can be imported into a
    namespace dictionary and passed as locals:
    >>> ns = {}
    >>> exec('from sympy.core.evalf import bitcount', ns)
    >>> sympify(s, locals=ns)
    6
    In order to have the ``O`` interpreted as a Symbol, identify it as such
    in the namespace dictionary. This can be done in a variety of ways; all
    three of the following are possibilities:
    >>> from sympy import Symbol
    >>> ns["O"] = Symbol("O")  # method 1
    >>> exec('from sympy.abc import O', ns)  # method 2
    >>> ns.update(dict(O=Symbol("O")))  # method 3
    >>> sympify("O + 1", locals=ns)
    O + 1
    If you want *all* single-letter and Greek-letter variables to be symbols
    then you can use the clashing-symbols dictionaries that have been defined
    there as private variables: ``_clash1`` (single-letter variables),
    ``_clash2`` (the multi-letter Greek names) or ``_clash`` (both single and
    multi-letter names that are defined in ``abc``).
    >>> from sympy.abc import _clash1
    >>> set(_clash1)  # if this fails, see issue #23903
    {'E', 'I', 'N', 'O', 'Q', 'S'}
    >>> sympify('I & Q', _clash1)
    I & Q
    Strict
    ------
    If the option ``strict`` is set to ``True``, only the types for which an
    explicit conversion has been defined are converted. In the other
    cases, a SympifyError is raised.
    >>> print(sympify(None))
    None
    >>> sympify(None, strict=True)
    Traceback (most recent call last):
    ...
    SympifyError: SympifyError: None
    .. deprecated:: 1.6
       ``sympify(obj)`` automatically falls back to ``str(obj)`` when all
       other conversion methods fail, but this is deprecated. ``strict=True``
       will disable this deprecated behavior. See
       :ref:`deprecated-sympify-string-fallback`.
    Evaluation
    ----------
    If the option ``evaluate`` is set to ``False``, then arithmetic and
    operators will be converted into their SymPy equivalents and the
    ``evaluate=False`` option will be added. Nested ``Add`` or ``Mul`` will
    be denested first. This is done via an AST transformation that replaces
    operators with their SymPy equivalents, so if an operand redefines any
    of those operations, the redefined operators will not be used. If
    argument a is not a string, the mathematical expression is evaluated
    before being passed to sympify, so adding ``evaluate=False`` will still
    return the evaluated result of expression.
    >>> sympify('2**2 / 3 + 5')
    19/3
    >>> sympify('2**2 / 3 + 5', evaluate=False)
    2**2/3 + 5
    >>> sympify('4/2+7', evaluate=True)
    9
    >>> sympify('4/2+7', evaluate=False)
    4/2 + 7
    >>> sympify(4/2+7, evaluate=False)
    9.00000000000000
    Extending
    ---------
    To extend ``sympify`` to convert custom objects (not derived from ``Basic``),
    just define a ``_sympy_`` method to your class. You can do that even to
    classes that you do not own by subclassing or adding the method at runtime.
    >>> from sympy import Matrix
    >>> class MyList1(object):
    ...     def __iter__(self):
    ...         yield 1
    ...         yield 2
    ...         return
    ...     def __getitem__(self, i): return list(self)[i]
    ...     def _sympy_(self): return Matrix(self)
    >>> sympify(MyList1())
    Matrix([
    [1],
    [2]])
    If you do not have control over the class definition you could also use the
    ``converter`` global dictionary. The key is the class and the value is a
    function that takes a single argument and returns the desired SymPy
    object, e.g. ``converter[MyList] = lambda x: Matrix(x)``.
    >>> class MyList2(object):   # XXX Do not do this if you control the class!
    ...     def __iter__(self):  #     Use _sympy_!
    ...         yield 1
    ...         yield 2
    ...         return
    ...     def __getitem__(self, i): return list(self)[i]
    >>> from sympy.core.sympify import converter
    >>> converter[MyList2] = lambda x: Matrix(x)
    >>> sympify(MyList2())
    Matrix([
    [1],
    [2]])
    Notes
    =====
    The keywords ``rational`` and ``convert_xor`` are only used
    when the input is a string.
    convert_xor
    -----------
    >>> sympify('x^y',convert_xor=True)
    x**y
    >>> sympify('x^y',convert_xor=False)
    x ^ y
    rational
    --------
    >>> sympify('0.1',rational=False)
    0.1
    >>> sympify('0.1',rational=True)
    1/10
    Sometimes autosimplification during sympification results in expressions
    that are very different in structure than what was entered. Until such
    autosimplification is no longer done, the ``kernS`` function might be of
    some use. In the example below you can see how an expression reduces to
    $-1$ by autosimplification, but does not do so when ``kernS`` is used.
    >>> from sympy.core.sympify import kernS
    >>> from sympy.abc import x
    >>> -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1
    -1
    >>> s = '-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1'
    >>> sympify(s)
    -1
    >>> kernS(s)
    -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1
    Parameters
    ==========
    a :
        - any object defined in SymPy
        - standard numeric Python types: ``int``, ``long``, ``float``, ``Decimal``
        - strings (like ``"0.09"``, ``"2e-19"`` or ``'sin(x)'``)
        - booleans, including ``None`` (will leave ``None`` unchanged)
        - dicts, lists, sets or tuples containing any of the above
    convert_xor : bool, optional
        If ``True``, treats ``^`` as exponentiation.
        If ``False``, treats ``^`` as the XOR operator.
        Used only when the input is a string.
    locals : dict, optional
        A dictionary of local names to use as the parsing namespace for
        strings (see the Locals section above).
    strict : bool, optional
        If the option strict is set to ``True``, only the types for which
        an explicit conversion has been defined are converted. In the
        other cases, a SympifyError is raised.
    rational : bool, optional
        If ``True``, converts floats into :class:`~.Rational`.
        If ``False``, floats are left unchanged.
        Used only when the input is a string.
    evaluate : bool, optional
        If ``False``, arithmetic and operators will be converted into
        their SymPy equivalents without evaluation. If ``True``, the
        expression will be evaluated and the result will be returned.
    """
    # XXX: If a is a Basic subclass rather than instance (e.g. sin rather than
    # sin(x)) then a.__sympy__ will be the property. Only on the instance will
    # a.__sympy__ give the *value* of the property (True). Since sympify(sin)
    # was used for a long time we allow it to pass. However if strict=True as
    # is the case in internal calls to _sympify then we only allow
    # is_sympy=True.
    #
    # https://github.com/sympy/sympy/issues/20124
    is_sympy = getattr(a, '__sympy__', None)
    if is_sympy is True:
        return a
    elif is_sympy is not None:
        if not strict:
            return a
        else:
            raise SympifyError(a)
    if isinstance(a, CantSympify):
        raise SympifyError(a)
    cls = getattr(a, "__class__", None)
    #Check if there exists a converter for any of the types in the mro
    for superclass in getmro(cls):
        #First check for user defined converters
        conv = _external_converter.get(superclass)
        if conv is None:
            #if none exists, check for SymPy defined converters
            conv = _sympy_converter.get(superclass)
        if conv is not None:
            return conv(a)
    if cls is type(None):
        if strict:
            raise SympifyError(a)
        else:
            return a
    if evaluate is None:
        evaluate = global_parameters.evaluate
    # Support for basic numpy datatypes
    if _is_numpy_instance(a):
        import numpy as np
        if np.isscalar(a):
            return _convert_numpy_types(a, locals=locals,
                convert_xor=convert_xor, strict=strict, rational=rational,
                evaluate=evaluate)
    _sympy_ = getattr(a, "_sympy_", None)
    if _sympy_ is not None:
        return a._sympy_()
    if not strict:
        # Put numpy array conversion _before_ float/int, see
        # <https://github.com/sympy/sympy/issues/13924>.
        flat = getattr(a, "flat", None)
        if flat is not None:
            shape = getattr(a, "shape", None)
            if shape is not None:
                from sympy.tensor.array import Array
                return Array(a.flat, a.shape)  # works with e.g. NumPy arrays
    if not isinstance(a, str):
        if _is_numpy_instance(a):
            import numpy as np
            assert not isinstance(a, np.number)
            if isinstance(a, np.ndarray):
                # Scalar arrays (those with zero dimensions) have sympify
                # called on the scalar element.
                if a.ndim == 0:
                    try:
                        return sympify(a.item(),
                                       locals=locals,
                                       convert_xor=convert_xor,
                                       strict=strict,
                                       rational=rational,
                                       evaluate=evaluate)
                    except SympifyError:
                        pass
        elif hasattr(a, '__float__'):
            # float and int can coerce size-one numpy arrays to their lone
            # element.  See issue https://github.com/numpy/numpy/issues/10404.
            return sympify(float(a))
        elif hasattr(a, '__int__'):
            return sympify(int(a))
    if strict:
        raise SympifyError(a)
    if iterable(a):
        try:
            return type(a)([sympify(x, locals=locals, convert_xor=convert_xor,
                rational=rational, evaluate=evaluate) for x in a])
        except TypeError:
            # Not all iterables are rebuildable with their type.
            pass
    if not isinstance(a, str):
        raise SympifyError('cannot sympify object of type %r' % type(a))
    from sympy.parsing.sympy_parser import (parse_expr, TokenError,
                                            standard_transformations)
    from sympy.parsing.sympy_parser import convert_xor as t_convert_xor
    from sympy.parsing.sympy_parser import rationalize as t_rationalize
    transformations = standard_transformations
    if rational:
        transformations += (t_rationalize,)
    if convert_xor:
        transformations += (t_convert_xor,)
    try:
        a = a.replace('\n', '')
        expr = parse_expr(a, local_dict=locals, transformations=transformations, evaluate=evaluate)
    except (TokenError, SyntaxError) as exc:
        raise SympifyError('could not parse %r' % a, exc)
    return expr
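
As a usage sketch of the converter lookup near the top of the function (the
Celsius class below is made up for illustration), registering an entry in the
public ``converter`` dictionary lets ``sympify`` handle an otherwise unknown
type before any of the later fallbacks are tried:

from sympy import Integer, sympify
from sympy.core.sympify import converter

class Celsius:
    def __init__(self, degrees):
        self.degrees = degrees

# The converter is looked up along the MRO of the argument's class.
converter[Celsius] = lambda c: Integer(c.degrees)

print(sympify(Celsius(27)))   # 27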
 
 | 
	sympify 
 | 
	File-Level 
 | 
					
	sympy 
 | 
	96 
 | 
	sympy/solvers/solvers.py 
 | 
	def unrad(eq, *syms, **flags):
    """
    Remove radicals with symbolic arguments and return (eq, cov),
    None, or raise an error.
    Explanation
    ===========
    None is returned if there are no radicals to remove.
    NotImplementedError is raised if there are radicals and they cannot be
    removed or if the relationship between the original symbols and the
    change of variable needed to rewrite the system as a polynomial cannot
    be solved.
    Otherwise the tuple, ``(eq, cov)``, is returned where:
    *eq*, ``cov``
        *eq* is an equation without radicals (in the symbol(s) of
        interest) whose solutions are a superset of the solutions to the
        original expression. *eq* might be rewritten in terms of a new
        variable; the relationship to the original variables is given by
        ``cov`` which is a list containing ``v`` and ``v**p - b`` where
        ``p`` is the power needed to clear the radical and ``b`` is the
        radical now expressed as a polynomial in the symbols of interest.
        For example, for sqrt(2 - x) the tuple would be
        ``(c, c**2 - 2 + x)``. The solutions of *eq* will contain
        solutions to the original equation (if there are any).
    *syms*
        An iterable of symbols which, if provided, will limit the focus of
        radical removal: only radicals with one or more of the symbols of
        interest will be cleared. All free symbols are used if *syms* is not
        set.
    *flags* are used internally for communication during recursive calls.
    Two options are also recognized:
        ``take``, when defined, is interpreted as a single-argument function
        that returns True if a given Pow should be handled.
    Radicals can be removed from an expression if:
        *   All bases of the radicals are the same; a change of variables is
            done in this case.
        *   All radicals appear in one term of the expression.
        *   There are only four terms with sqrt() factors, or there are fewer
            than four terms having sqrt() factors.
        *   There are only two terms with radicals.
    Examples
    ========
    >>> from sympy.solvers.solvers import unrad
    >>> from sympy.abc import x
    >>> from sympy import sqrt, Rational, root
    >>> unrad(sqrt(x)*x**Rational(1, 3) + 2)
    (x**5 - 64, [])
    >>> unrad(sqrt(x) + root(x + 1, 3))
    (-x**3 + x**2 + 2*x + 1, [])
    >>> eq = sqrt(x) + root(x, 3) - 2
    >>> unrad(eq)
    (_p**3 + _p**2 - 2, [_p, _p**6 - x])
    """
 
 | 
	/usr/src/app/target_test_cases/failed_tests_unrad.txt 
 | 
	def unrad(eq, *syms, **flags):
    """
    Remove radicals with symbolic arguments and return (eq, cov),
    None, or raise an error.
    Explanation
    ===========
    None is returned if there are no radicals to remove.
    NotImplementedError is raised if there are radicals and they cannot be
    removed or if the relationship between the original symbols and the
    change of variable needed to rewrite the system as a polynomial cannot
    be solved.
    Otherwise the tuple, ``(eq, cov)``, is returned where:
    *eq*, ``cov``
        *eq* is an equation without radicals (in the symbol(s) of
        interest) whose solutions are a superset of the solutions to the
        original expression. *eq* might be rewritten in terms of a new
        variable; the relationship to the original variables is given by
        ``cov`` which is a list containing ``v`` and ``v**p - b`` where
        ``p`` is the power needed to clear the radical and ``b`` is the
        radical now expressed as a polynomial in the symbols of interest.
        For example, for sqrt(2 - x) the tuple would be
        ``(c, c**2 - 2 + x)``. The solutions of *eq* will contain
        solutions to the original equation (if there are any).
    *syms*
        An iterable of symbols which, if provided, will limit the focus of
        radical removal: only radicals with one or more of the symbols of
        interest will be cleared. All free symbols are used if *syms* is not
        set.
    *flags* are used internally for communication during recursive calls.
    Two options are also recognized:
        ``take``, when defined, is interpreted as a single-argument function
        that returns True if a given Pow should be handled.
    Radicals can be removed from an expression if:
        *   All bases of the radicals are the same; a change of variables is
            done in this case.
        *   All radicals appear in one term of the expression.
        *   There are only four terms with sqrt() factors, or there are fewer
            than four terms having sqrt() factors.
        *   There are only two terms with radicals.
    Examples
    ========
    >>> from sympy.solvers.solvers import unrad
    >>> from sympy.abc import x
    >>> from sympy import sqrt, Rational, root
    >>> unrad(sqrt(x)*x**Rational(1, 3) + 2)
    (x**5 - 64, [])
    >>> unrad(sqrt(x) + root(x + 1, 3))
    (-x**3 + x**2 + 2*x + 1, [])
    >>> eq = sqrt(x) + root(x, 3) - 2
    >>> unrad(eq)
    (_p**3 + _p**2 - 2, [_p, _p**6 - x])
    """
    uflags = {"check": False, "simplify": False}
    def _cov(p, e):
        if cov:
            # XXX - uncovered
            oldp, olde = cov
            if Poly(e, p).degree(p) in (1, 2):
                cov[:] = [p, olde.subs(oldp, _vsolve(e, p, **uflags)[0])]
            else:
                raise NotImplementedError
        else:
            cov[:] = [p, e]
    def _canonical(eq, cov):
        if cov:
            # change symbol to vanilla so no solutions are eliminated
            p, e = cov
            rep = {p: Dummy(p.name)}
            eq = eq.xreplace(rep)
            cov = [p.xreplace(rep), e.xreplace(rep)]
        # remove constants and powers of factors since these don't change
        # the location of the root; XXX should factor or factor_terms be used?
        eq = factor_terms(_mexpand(eq.as_numer_denom()[0], recursive=True), clear=True)
        if eq.is_Mul:
            args = []
            for f in eq.args:
                if f.is_number:
                    continue
                if f.is_Pow:
                    args.append(f.base)
                else:
                    args.append(f)
            eq = Mul(*args)  # leave as Mul for more efficient solving
        # make the sign canonical
        margs = list(Mul.make_args(eq))
        changed = False
        for i, m in enumerate(margs):
            if m.could_extract_minus_sign():
                margs[i] = -m
                changed = True
        if changed:
            eq = Mul(*margs, evaluate=False)
        return eq, cov
    def _Q(pow):
        # return leading Rational of denominator of Pow's exponent
        c = pow.as_base_exp()[1].as_coeff_Mul()[0]
        if not c.is_Rational:
            return S.One
        return c.q
    # define the _take method that will determine whether a term is of interest
    def _take(d):
        # return True if coefficient of any factor's exponent's den is not 1
        for pow in Mul.make_args(d):
            if not pow.is_Pow:
                continue
            if _Q(pow) == 1:
                continue
            if pow.free_symbols & syms:
                return True
        return False
    _take = flags.setdefault('_take', _take)
    if isinstance(eq, Eq):
        eq = eq.lhs - eq.rhs  # XXX legacy Eq as Eqn support
    elif not isinstance(eq, Expr):
        return
    cov, nwas, rpt = [flags.setdefault(k, v) for k, v in
        sorted({"cov": [], "n": None, "rpt": 0}.items())]
    # preconditioning
    eq = powdenest(factor_terms(eq, radical=True, clear=True))
    eq = eq.as_numer_denom()[0]
    eq = _mexpand(eq, recursive=True)
    if eq.is_number:
        return
    # see if there are radicals in symbols of interest
    syms = set(syms) or eq.free_symbols  # _take uses this
    poly = eq.as_poly()
    gens = [g for g in poly.gens if _take(g)]
    if not gens:
        return
    # recast poly in terms of eigen-gens
    poly = eq.as_poly(*gens)
    # not a polynomial e.g. 1 + sqrt(x)*exp(sqrt(x)) with gen sqrt(x)
    if poly is None:
        return
    # - an exponent has a symbol of interest (don't handle)
    if any(g.exp.has(*syms) for g in gens):
        return
    def _rads_bases_lcm(poly):
        # if all the bases are the same or all the radicals are in one
        # term, `lcm` will be the lcm of the denominators of the
        # exponents of the radicals
        lcm = 1
        rads = set()
        bases = set()
        for g in poly.gens:
            q = _Q(g)
            if q != 1:
                rads.add(g)
                lcm = ilcm(lcm, q)
                bases.add(g.base)
        return rads, bases, lcm
    rads, bases, lcm = _rads_bases_lcm(poly)
    covsym = Dummy('p', nonnegative=True)
    # only keep in syms symbols that actually appear in radicals;
    # and update gens
    newsyms = set()
    for r in rads:
        newsyms.update(syms & r.free_symbols)
    if newsyms != syms:
        syms = newsyms
    # get terms together that have common generators
    drad = dict(zip(rads, range(len(rads))))
    rterms = {(): []}
    args = Add.make_args(poly.as_expr())
    for t in args:
        if _take(t):
            common = set(t.as_poly().gens).intersection(rads)
            key = tuple(sorted([drad[i] for i in common]))
        else:
            key = ()
        rterms.setdefault(key, []).append(t)
    others = Add(*rterms.pop(()))
    rterms = [Add(*rterms[k]) for k in rterms.keys()]
    # the output will depend on the order terms are processed, so
    # make it canonical quickly
    rterms = list(reversed(list(ordered(rterms))))
    ok = False  # we don't have a solution yet
    depth = sqrt_depth(eq)
    if len(rterms) == 1 and not (rterms[0].is_Add and lcm > 2):
        eq = rterms[0]**lcm - ((-others)**lcm)
        ok = True
    else:
        if len(rterms) == 1 and rterms[0].is_Add:
            rterms = list(rterms[0].args)
        if len(bases) == 1:
            b = bases.pop()
            if len(syms) > 1:
                x = b.free_symbols
            else:
                x = syms
            x = list(ordered(x))[0]
            try:
                inv = _vsolve(covsym**lcm - b, x, **uflags)
                if not inv:
                    raise NotImplementedError
                eq = poly.as_expr().subs(b, covsym**lcm).subs(x, inv[0])
                _cov(covsym, covsym**lcm - b)
                return _canonical(eq, cov)
            except NotImplementedError:
                pass
        if len(rterms) == 2:
            if not others:
                eq = rterms[0]**lcm - (-rterms[1])**lcm
                ok = True
            elif not log(lcm, 2).is_Integer:
                # the lcm-is-power-of-two case is handled below
                r0, r1 = rterms
                if flags.get('_reverse', False):
                    r1, r0 = r0, r1
                i0 = _rads0, _bases0, lcm0 = _rads_bases_lcm(r0.as_poly())
                i1 = _rads1, _bases1, lcm1 = _rads_bases_lcm(r1.as_poly())
                for reverse in range(2):
                    if reverse:
                        i0, i1 = i1, i0
                        r0, r1 = r1, r0
                    _rads1, _, lcm1 = i1
                    _rads1 = Mul(*_rads1)
                    t1 = _rads1**lcm1
                    c = covsym**lcm1 - t1
                    for x in syms:
                        try:
                            sol = _vsolve(c, x, **uflags)
                            if not sol:
                                raise NotImplementedError
                            neweq = r0.subs(x, sol[0]) + covsym*r1/_rads1 + \
                                others
                            tmp = unrad(neweq, covsym)
                            if tmp:
                                eq, newcov = tmp
                                if newcov:
                                    newp, newc = newcov
                                    _cov(newp, c.subs(covsym,
                                        _vsolve(newc, covsym, **uflags)[0]))
                                else:
                                    _cov(covsym, c)
                            else:
                                eq = neweq
                                _cov(covsym, c)
                            ok = True
                            break
                        except NotImplementedError:
                            if reverse:
                                raise NotImplementedError(
                                    'no successful change of variable found')
                            else:
                                pass
                    if ok:
                        break
        elif len(rterms) == 3:
            # two cube roots and another with order less than 5
            # (so an analytical solution can be found) or a base
            # that matches one of the cube root bases
            info = [_rads_bases_lcm(i.as_poly()) for i in rterms]
            RAD = 0
            BASES = 1
            LCM = 2
            if info[0][LCM] != 3:
                info.append(info.pop(0))
                rterms.append(rterms.pop(0))
            elif info[1][LCM] != 3:
                info.append(info.pop(1))
                rterms.append(rterms.pop(1))
            if info[0][LCM] == info[1][LCM] == 3:
                if info[1][BASES] != info[2][BASES]:
                    info[0], info[1] = info[1], info[0]
                    rterms[0], rterms[1] = rterms[1], rterms[0]
                if info[1][BASES] == info[2][BASES]:
                    eq = rterms[0]**3 + (rterms[1] + rterms[2] + others)**3
                    ok = True
                elif info[2][LCM] < 5:
                    # a*root(A, 3) + b*root(B, 3) + others = c
                    a, b, c, d, A, B = [Dummy(i) for i in 'abcdAB']
                    # zz represents the unraded expression into which the
                    # specifics for this case are substituted
                    zz = (c - d)*(A**3*a**9 + 3*A**2*B*a**6*b**3 -
                        3*A**2*a**6*c**3 + 9*A**2*a**6*c**2*d - 9*A**2*a**6*c*d**2 +
                        3*A**2*a**6*d**3 + 3*A*B**2*a**3*b**6 + 21*A*B*a**3*b**3*c**3 -
                        63*A*B*a**3*b**3*c**2*d + 63*A*B*a**3*b**3*c*d**2 -
                        21*A*B*a**3*b**3*d**3 + 3*A*a**3*c**6 - 18*A*a**3*c**5*d +
                        45*A*a**3*c**4*d**2 - 60*A*a**3*c**3*d**3 + 45*A*a**3*c**2*d**4 -
                        18*A*a**3*c*d**5 + 3*A*a**3*d**6 + B**3*b**9 - 3*B**2*b**6*c**3 +
                        9*B**2*b**6*c**2*d - 9*B**2*b**6*c*d**2 + 3*B**2*b**6*d**3 +
                        3*B*b**3*c**6 - 18*B*b**3*c**5*d + 45*B*b**3*c**4*d**2 -
                        60*B*b**3*c**3*d**3 + 45*B*b**3*c**2*d**4 - 18*B*b**3*c*d**5 +
                        3*B*b**3*d**6 - c**9 + 9*c**8*d - 36*c**7*d**2 + 84*c**6*d**3 -
                        126*c**5*d**4 + 126*c**4*d**5 - 84*c**3*d**6 + 36*c**2*d**7 -
                        9*c*d**8 + d**9)
                    def _t(i):
                        b = Mul(*info[i][RAD])
                        return cancel(rterms[i]/b), Mul(*info[i][BASES])
                    aa, AA = _t(0)
                    bb, BB = _t(1)
                    cc = -rterms[2]
                    dd = others
                    eq = zz.xreplace(dict(zip(
                        (a, A, b, B, c, d),
                        (aa, AA, bb, BB, cc, dd))))
                    ok = True
        # handle power-of-2 cases
        if not ok:
            if log(lcm, 2).is_Integer and (not others and
                    len(rterms) == 4 or len(rterms) < 4):
                def _norm2(a, b):
                    return a**2 + b**2 + 2*a*b
                if len(rterms) == 4:
                    # (r0+r1)**2 - (r2+r3)**2
                    r0, r1, r2, r3 = rterms
                    eq = _norm2(r0, r1) - _norm2(r2, r3)
                    ok = True
                elif len(rterms) == 3:
                    # (r1+r2)**2 - (r0+others)**2
                    r0, r1, r2 = rterms
                    eq = _norm2(r1, r2) - _norm2(r0, others)
                    ok = True
                elif len(rterms) == 2:
                    # r0**2 - (r1+others)**2
                    r0, r1 = rterms
                    eq = r0**2 - _norm2(r1, others)
                    ok = True
    new_depth = sqrt_depth(eq) if ok else depth
    rpt += 1  # XXX how many repeats with others unchanging is enough?
    if not ok or (
                nwas is not None and len(rterms) == nwas and
                new_depth is not None and new_depth == depth and
                rpt > 3):
        raise NotImplementedError('Cannot remove all radicals')
    flags.update({"cov": cov, "n": len(rterms), "rpt": rpt})
    neq = unrad(eq, *syms, **flags)
    if neq:
        eq, cov = neq
    eq, cov = _canonical(eq, cov)
    return eq, cov
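
Because the solutions of the returned polynomial are only a superset of those
of the original expression, callers are expected to filter candidate roots
against the original radical equation. A short sketch of that filtering with
the stock ``checksol`` helper, using the first docstring example:

from sympy import sqrt, Rational, solve, checksol
from sympy.abc import x
from sympy.solvers.solvers import unrad

orig = sqrt(x)*x**Rational(1, 3) + 2
peq, cov = unrad(orig)            # (x**5 - 64, []) as shown in the docstring

# Every root of x**5 - 64 is only a candidate; sqrt(x)*x**(1/3) is nonnegative
# for real x >= 0, so none of them can make the original expression zero.
good = [c for c in solve(peq, x) if checksol(orig, x, c)]
print(good)                       # expected: []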
 
 | 
	unrad 
 | 
	File-Level 
 | 
					