id (list) | project (string) | origin_file (list) | test_list (list) | prob_info (list) | type (list) | node (list) | language (string) | toolfunc_count (int64) | func_count (int64) | pytest_info (dict)
---|---|---|---|---|---|---|---|---|---|---
[
"cloudnetpy.cloudnetpy.utils.cumsumr",
"cloudnetpy.cloudnetpy.categorize.atmos_utils.calc_adiabatic_lwc"
] |
cloudnetpy
|
[
"cloudnetpy/utils.py",
"cloudnetpy/categorize/atmos_utils.py"
] |
[
"tests/unit/test_atmos_utils.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 1151,
"func_start_lineno": 532,
"func_end_lineno": 549,
"func_code": "def cumsumr(array: np.ndarray, axis: int = 0) -> np.ndarray:\n \"\"\"Finds cumulative sum that resets on 0.\n\n Args:\n array: Input array.\n axis: Axis where the sum is calculated. Default is 0.\n\n Returns:\n Cumulative sum, restarted at 0.\n\n Examples:\n >>> x = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 1])\n >>> cumsumr(x)\n [0, 0, 1, 2, 0, 0, 0, 1, 2, 3]\n\n \"\"\"\n cums = array.cumsum(axis=axis)\n return cums - np.maximum.accumulate(cums * (array == 0), axis=axis)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 357,
"func_start_lineno": 302,
"func_end_lineno": 318,
"func_code": "def calc_adiabatic_lwc(lwc_dz: np.ndarray, height: np.ndarray) -> np.ndarray:\n \"\"\"Calculates adiabatic liquid water content (kg m-3).\n\n Args:\n lwc_dz: Liquid water content change rate (kg m-3 m-1) calculated at the\n base of each cloud and filled to that cloud.\n height: Height vector (m).\n\n Returns:\n Liquid water content (kg m-3).\n\n \"\"\"\n is_cloud = lwc_dz != 0\n cloud_indices = utils.cumsumr(is_cloud, axis=1)\n dz = utils.path_lengths_from_ground(height) * np.ones_like(lwc_dz)\n dz[cloud_indices < 1] = 0\n return utils.cumsumr(dz, axis=1) * lwc_dz"
}
] |
[
"function_empty"
] |
[
"cloudnetpy.utils.cumsumr",
"cloudnetpy.categorize.atmos_utils.calc_adiabatic_lwc"
] |
Python
| 2 | 2 |
{
"total_num": 5,
"base_passed_num": 4
}
|
[
"cloudnetpy.cloudnetpy.utils.binvec",
"cloudnetpy.cloudnetpy.utils.rebin_1d",
"cloudnetpy.cloudnetpy.utils.rebin_2d",
"cloudnetpy.cloudnetpy.cloudnetarray.CloudnetArray::rebin_data"
] |
cloudnetpy
|
[
"cloudnetpy/utils.py",
"cloudnetpy/utils.py",
"cloudnetpy/utils.py",
"cloudnetpy/cloudnetarray.py"
] |
[
"tests/unit/test_categorize.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 1151,
"func_start_lineno": 124,
"func_end_lineno": 140,
"func_code": "def binvec(x: np.ndarray | list) -> np.ndarray:\n \"\"\"Converts 1-D center points to bins with even spacing.\n\n Args:\n x: 1-D array of N real values.\n\n Returns:\n ndarray: N + 1 edge values.\n\n Examples:\n >>> binvec([1, 2, 3])\n [0.5, 1.5, 2.5, 3.5]\n\n \"\"\"\n edge1 = x[0] - (x[1] - x[0]) / 2\n edge2 = x[-1] + (x[-1] - x[-2]) / 2\n return np.linspace(edge1, edge2, len(x) + 1)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 1151,
"func_start_lineno": 195,
"func_end_lineno": 231,
"func_code": "def rebin_1d(\n x_in: np.ndarray,\n array: np.ndarray | ma.MaskedArray,\n x_new: np.ndarray,\n statistic: str = \"mean\",\n *,\n mask_zeros: bool = True,\n) -> ma.MaskedArray:\n \"\"\"Rebins 1D array.\n\n Args:\n x_in: 1-D array with shape (n,).\n array: 1-D input data with shape (m,).\n x_new: 1-D target vector (center points) with shape (N,).\n statistic: Statistic to be calculated. Possible statistics are 'mean', 'std'.\n Default is 'mean'.\n mask_zeros: Whether to mask 0 values in the returned array. Default is True.\n\n Returns:\n Re-binned data with shape (N,).\n\n \"\"\"\n edges = binvec(x_new)\n result = np.zeros(len(x_new))\n array_screened = ma.masked_invalid(array, copy=True) # data may contain nan-values\n mask = ~array_screened.mask\n if ma.any(array_screened[mask]):\n result, _, _ = stats.binned_statistic(\n x_in[mask],\n array_screened[mask],\n statistic=statistic,\n bins=edges,\n )\n result[~np.isfinite(result)] = 0\n if mask_zeros:\n return ma.masked_equal(result, 0)\n return ma.array(result)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 1151,
"func_start_lineno": 143,
"func_end_lineno": 192,
"func_code": "def rebin_2d(\n x_in: np.ndarray,\n array: ma.MaskedArray,\n x_new: np.ndarray,\n statistic: Literal[\"mean\", \"std\"] = \"mean\",\n n_min: int = 1,\n *,\n mask_zeros: bool = True,\n) -> tuple[ma.MaskedArray, list]:\n \"\"\"Rebins 2-D data in one dimension.\n\n Args:\n x_in: 1-D array with shape (n,).\n array: 2-D input data with shape (n, m).\n x_new: 1-D target vector (center points) with shape (N,).\n statistic: Statistic to be calculated. Possible statistics are 'mean', 'std'.\n Default is 'mean'.\n n_min: Minimum number of points to have good statistics in a bin. Default is 1.\n mask_zeros: Whether to mask 0 values in the returned array. Default is True.\n\n Returns:\n tuple: Rebinned data with shape (N, m) and indices of bins without enough data.\n \"\"\"\n edges = binvec(x_new)\n result = np.zeros((len(x_new), array.shape[1]))\n array_screened = ma.masked_invalid(array, copy=True) # data may contain nan-values\n for ind, values in enumerate(array_screened.T):\n mask = ~values.mask\n if ma.any(values[mask]):\n result[:, ind], _, _ = stats.binned_statistic(\n x_in[mask],\n values[mask],\n statistic=statistic,\n bins=edges,\n )\n result[~np.isfinite(result)] = 0\n if mask_zeros is True:\n masked_result = ma.masked_equal(result, 0)\n else:\n masked_result = ma.array(result)\n\n # Fill bins with not enough profiles\n x_hist, _ = np.histogram(x_in, bins=edges)\n empty_mask = x_hist < n_min\n masked_result[empty_mask, :] = ma.masked\n empty_indices = list(np.nonzero(empty_mask)[0])\n if len(empty_indices) > 0:\n logging.debug(\"No data in %s bins\", len(empty_indices))\n\n return masked_result, empty_indices"
},
{
"class_start_lineno": 14,
"class_end_lineno": 211,
"func_start_lineno": 61,
"func_end_lineno": 84,
"func_code": " def rebin_data(\n self, time: np.ndarray, time_new: np.ndarray, *, mask_zeros: bool = True\n ) -> list:\n \"\"\"Rebins `data` in time.\n\n Args:\n time: 1D time array.\n time_new: 1D new time array.\n mask_zeros: Whether to mask 0 values in the returned array. Default is True.\n\n Returns:\n Time indices without data.\n\n \"\"\"\n if self.data.ndim == 1:\n self.data = utils.rebin_1d(time, self.data, time_new, mask_zeros=mask_zeros)\n bad_indices = list(np.where(self.data == ma.masked)[0])\n else:\n if not isinstance(self.data, ma.MaskedArray):\n self.data = ma.masked_array(self.data)\n self.data, bad_indices = utils.rebin_2d(\n time, self.data, time_new, mask_zeros=mask_zeros\n )\n return bad_indices"
}
] |
[
"function_empty"
] |
[
"cloudnetpy.utils.binvec",
"cloudnetpy.utils.rebin_1d",
"cloudnetpy.utils.rebin_2d",
"cloudnetpy.cloudnetarray.CloudnetArray.rebin_data"
] |
Python
| 4 | 4 |
{
"total_num": 4,
"base_passed_num": 0
}
|
[
"cloudnetpy.cloudnetpy.utils.binvec",
"cloudnetpy.cloudnetpy.utils.rebin_2d",
"cloudnetpy.cloudnetpy.cloudnetarray.CloudnetArray::rebin_data"
] |
cloudnetpy
|
[
"cloudnetpy/utils.py",
"cloudnetpy/utils.py",
"cloudnetpy/cloudnetarray.py"
] |
[
"tests/unit/test_cloudnetarray.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 1151,
"func_start_lineno": 124,
"func_end_lineno": 140,
"func_code": "def binvec(x: np.ndarray | list) -> np.ndarray:\n \"\"\"Converts 1-D center points to bins with even spacing.\n\n Args:\n x: 1-D array of N real values.\n\n Returns:\n ndarray: N + 1 edge values.\n\n Examples:\n >>> binvec([1, 2, 3])\n [0.5, 1.5, 2.5, 3.5]\n\n \"\"\"\n edge1 = x[0] - (x[1] - x[0]) / 2\n edge2 = x[-1] + (x[-1] - x[-2]) / 2\n return np.linspace(edge1, edge2, len(x) + 1)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 1151,
"func_start_lineno": 143,
"func_end_lineno": 192,
"func_code": "def rebin_2d(\n x_in: np.ndarray,\n array: ma.MaskedArray,\n x_new: np.ndarray,\n statistic: Literal[\"mean\", \"std\"] = \"mean\",\n n_min: int = 1,\n *,\n mask_zeros: bool = True,\n) -> tuple[ma.MaskedArray, list]:\n \"\"\"Rebins 2-D data in one dimension.\n\n Args:\n x_in: 1-D array with shape (n,).\n array: 2-D input data with shape (n, m).\n x_new: 1-D target vector (center points) with shape (N,).\n statistic: Statistic to be calculated. Possible statistics are 'mean', 'std'.\n Default is 'mean'.\n n_min: Minimum number of points to have good statistics in a bin. Default is 1.\n mask_zeros: Whether to mask 0 values in the returned array. Default is True.\n\n Returns:\n tuple: Rebinned data with shape (N, m) and indices of bins without enough data.\n \"\"\"\n edges = binvec(x_new)\n result = np.zeros((len(x_new), array.shape[1]))\n array_screened = ma.masked_invalid(array, copy=True) # data may contain nan-values\n for ind, values in enumerate(array_screened.T):\n mask = ~values.mask\n if ma.any(values[mask]):\n result[:, ind], _, _ = stats.binned_statistic(\n x_in[mask],\n values[mask],\n statistic=statistic,\n bins=edges,\n )\n result[~np.isfinite(result)] = 0\n if mask_zeros is True:\n masked_result = ma.masked_equal(result, 0)\n else:\n masked_result = ma.array(result)\n\n # Fill bins with not enough profiles\n x_hist, _ = np.histogram(x_in, bins=edges)\n empty_mask = x_hist < n_min\n masked_result[empty_mask, :] = ma.masked\n empty_indices = list(np.nonzero(empty_mask)[0])\n if len(empty_indices) > 0:\n logging.debug(\"No data in %s bins\", len(empty_indices))\n\n return masked_result, empty_indices"
},
{
"class_start_lineno": 14,
"class_end_lineno": 211,
"func_start_lineno": 61,
"func_end_lineno": 84,
"func_code": " def rebin_data(\n self, time: np.ndarray, time_new: np.ndarray, *, mask_zeros: bool = True\n ) -> list:\n \"\"\"Rebins `data` in time.\n\n Args:\n time: 1D time array.\n time_new: 1D new time array.\n mask_zeros: Whether to mask 0 values in the returned array. Default is True.\n\n Returns:\n Time indices without data.\n\n \"\"\"\n if self.data.ndim == 1:\n self.data = utils.rebin_1d(time, self.data, time_new, mask_zeros=mask_zeros)\n bad_indices = list(np.where(self.data == ma.masked)[0])\n else:\n if not isinstance(self.data, ma.MaskedArray):\n self.data = ma.masked_array(self.data)\n self.data, bad_indices = utils.rebin_2d(\n time, self.data, time_new, mask_zeros=mask_zeros\n )\n return bad_indices"
}
] |
[
"function_empty"
] |
[
"cloudnetpy.utils.binvec",
"cloudnetpy.utils.rebin_2d",
"cloudnetpy.cloudnetarray.CloudnetArray.rebin_data"
] |
Python
| 3 | 3 |
{
"total_num": 17,
"base_passed_num": 15
}
|
[
"cloudnetpy.cloudnetpy.concat_lib._Concat::_write_initial_data",
"cloudnetpy.cloudnetpy.concat_lib._Concat::concat_data"
] |
cloudnetpy
|
[
"cloudnetpy/concat_lib.py",
"cloudnetpy/concat_lib.py"
] |
[
"tests/unit/test_concat_lib.py",
"tests/unit/test_copernicus.py",
"tests/unit/test_galileo.py",
"tests/unit/test_mira.py"
] |
[
{
"class_start_lineno": 122,
"class_end_lineno": 253,
"func_start_lineno": 173,
"func_end_lineno": 202,
"func_code": " def _write_initial_data(self, variables: list | None, ignore: list | None) -> None:\n for key in self.first_file.variables:\n if (\n variables is not None\n and key not in variables\n and key not in self.common_variables\n and key != self.concat_dimension\n ):\n continue\n if ignore and key in ignore:\n continue\n\n auto_scale = False\n self.first_file[key].set_auto_scale(auto_scale)\n array = self.first_file[key][:]\n dimensions = self.first_file[key].dimensions\n fill_value = getattr(self.first_file[key], \"_FillValue\", None)\n var = self.concatenated_file.createVariable(\n key,\n array.dtype,\n dimensions,\n zlib=True,\n complevel=3,\n shuffle=False,\n fill_value=fill_value,\n )\n auto_scale = False\n var.set_auto_scale(auto_scale)\n var[:] = array\n _copy_attributes(self.first_file[key], var)"
},
{
"class_start_lineno": 122,
"class_end_lineno": 253,
"func_start_lineno": 151,
"func_end_lineno": 171,
"func_code": " def concat_data(\n self,\n variables: list | None,\n ignore: list | None,\n allow_vary: list | None,\n ) -> list:\n \"\"\"Concatenates data arrays.\"\"\"\n self._write_initial_data(variables, ignore)\n output = [self.first_filename]\n if len(self.filenames) > 1:\n for filename in self.filenames[1:]:\n try:\n self._append_data(filename, allow_vary)\n except RuntimeError as e:\n if \"NetCDF: HDF error\" in str(e):\n msg = f\"Caught a NetCDF HDF error. Skipping file '{filename}'.\"\n logging.exception(msg)\n continue\n raise\n output.append(filename)\n return output"
}
] |
[
"Development"
] |
[
"cloudnetpy.concat_lib._Concat._write_initial_data",
"cloudnetpy.concat_lib._Concat.concat_data"
] |
Python
| 0 | 2 |
{
"total_num": 71,
"base_passed_num": 32
}
|
[
"cloudnetpy.cloudnetpy.datasource.DataSource::getvar",
"cloudnetpy.cloudnetpy.datasource.DataSource::_init_time"
] |
cloudnetpy
|
[
"cloudnetpy/datasource.py",
"cloudnetpy/datasource.py"
] |
[
"tests/unit/test_datasource.py"
] |
[
{
"class_start_lineno": 16,
"class_end_lineno": 236,
"func_start_lineno": 60,
"func_end_lineno": 80,
"func_code": " def getvar(self, *args) -> np.ndarray:\n \"\"\"Returns data array from the source file variables.\n\n Returns just the data (and no attributes) from the original\n variables dictionary, fetched from the input netCDF file.\n\n Args:\n *args: possible names of the variable. The first match is returned.\n\n Returns:\n ndarray: The actual data.\n\n Raises:\n RuntimeError: The variable is not found.\n\n \"\"\"\n for arg in args:\n if arg in self.dataset.variables:\n return self.dataset.variables[arg][:]\n msg = f\"Missing variable {args[0]} in the input file.\"\n raise RuntimeError(msg)"
},
{
"class_start_lineno": 16,
"class_end_lineno": 236,
"func_start_lineno": 152,
"func_end_lineno": 160,
"func_code": " def _init_time(self) -> np.ndarray:\n time = self.getvar(\"time\")\n if len(time) == 0:\n msg = \"Empty time vector\"\n raise ValidTimeStampError(msg)\n if max(time) > 25:\n logging.debug(\"Assuming time as seconds, converting to fraction hour\")\n time = utils.seconds2hours(time)\n return time"
}
] |
[
"function_empty",
"Development"
] |
[
"cloudnetpy.datasource.DataSource.getvar",
"cloudnetpy.datasource.DataSource._init_time"
] |
Python
| 1 | 2 |
{
"total_num": 9,
"base_passed_num": 6
}
|
[
"cloudnetpy.cloudnetpy.instruments.disdrometer.parsivel._read_fmi",
"cloudnetpy.cloudnetpy.instruments.disdrometer.parsivel.parsivel2nc"
] |
cloudnetpy
|
[
"cloudnetpy/instruments/disdrometer/parsivel.py",
"cloudnetpy/instruments/disdrometer/parsivel.py"
] |
[
"tests/unit/test_disdrometer.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 713,
"func_start_lineno": 618,
"func_end_lineno": 657,
"func_code": "def _read_fmi(content: str):\n r\"\"\"Read format used by Finnish Meteorological Institute and University of\n Helsinki.\n\n Format consists of sequence of the following:\n - \"[YYYY-MM-DD HH:MM:SS\\n\"\n - output of \"CS/PA\" command without non-printable characters at the end\n - \"]\\n\"\n \"\"\"\n output: dict[str, list] = {\"_datetime\": []}\n for m in re.finditer(\n r\"\\[(?P<year>\\d+)-(?P<month>\\d+)-(?P<day>\\d+) \"\n r\"(?P<hour>\\d+):(?P<minute>\\d+):(?P<second>\\d+)\"\n r\"(?P<output>[^\\]]*)\\]\",\n content,\n ):\n try:\n record = _read_typ_op4a(m[\"output\"].splitlines())\n except ValueError:\n continue\n\n for key, value in record.items():\n if key not in output:\n output[key] = [None] * len(output[\"_datetime\"])\n output[key].append(value)\n for key in output:\n if key not in record and key != \"_datetime\":\n output[key].append(None)\n\n output[\"_datetime\"].append(\n datetime.datetime(\n int(m[\"year\"]),\n int(m[\"month\"]),\n int(m[\"day\"]),\n int(m[\"hour\"]),\n int(m[\"minute\"]),\n int(m[\"second\"]),\n )\n )\n return output"
},
{
"class_start_lineno": 1,
"class_end_lineno": 713,
"func_start_lineno": 23,
"func_end_lineno": 77,
"func_code": "def parsivel2nc(\n disdrometer_file: str | PathLike | Iterable[str | PathLike],\n output_file: str,\n site_meta: dict,\n uuid: str | None = None,\n date: str | datetime.date | None = None,\n telegram: Sequence[int | None] | None = None,\n timestamps: Sequence[datetime.datetime] | None = None,\n) -> str:\n \"\"\"Converts OTT Parsivel-2 disdrometer data into Cloudnet Level 1b netCDF\n file.\n\n Args:\n disdrometer_file: Filename of disdrometer file or list of filenames.\n output_file: Output filename.\n site_meta: Dictionary containing information about the site. Required key\n is `name`.\n uuid: Set specific UUID for the file.\n date: Expected date of the measurements as YYYY-MM-DD.\n telegram: List of measured value numbers as specified in section 11.2 of\n the instrument's operating instructions. Unknown values are indicated\n with None. Telegram is required if the input file doesn't contain a\n header.\n timestamps: Specify list of timestamps if they are missing in the input file.\n\n Returns:\n UUID of the generated file.\n\n Raises:\n DisdrometerDataError: Timestamps do not match the expected date, or unable\n to read the disdrometer file.\n\n Examples:\n >>> from cloudnetpy.instruments import parsivel2nc\n >>> site_meta = {'name': 'Lindenberg', 'altitude': 104, 'latitude': 52.2,\n 'longitude': 14.1}\n >>> uuid = parsivel2nc('parsivel.log', 'parsivel.nc', site_meta)\n\n \"\"\"\n if isinstance(date, str):\n date = datetime.date.fromisoformat(date)\n if isinstance(disdrometer_file, str | PathLike):\n disdrometer_file = [disdrometer_file]\n disdrometer = Parsivel(disdrometer_file, site_meta, telegram, date, timestamps)\n disdrometer.sort_timestamps()\n disdrometer.remove_duplicate_timestamps()\n disdrometer.mask_invalid_values()\n if len(disdrometer.data[\"time\"].data) < 2:\n msg = \"Too few data points\"\n raise DisdrometerDataError(msg)\n disdrometer.convert_units()\n disdrometer.add_meta()\n attributes = output.add_time_attribute(ATTRIBUTES, disdrometer.date)\n output.update_attributes(disdrometer.data, attributes)\n return output.save_level1b(disdrometer, output_file, uuid)"
}
] |
[
"function_empty"
] |
[
"cloudnetpy.instruments.disdrometer.parsivel._read_fmi",
"cloudnetpy.instruments.disdrometer.parsivel.parsivel2nc"
] |
Python
| 2 | 2 |
{
"total_num": 54,
"base_passed_num": 0
}
|
[
"cloudnetpy.cloudnetpy.utils.l2norm_weighted",
"cloudnetpy.cloudnetpy.products.drizzle_error._calc_error"
] |
cloudnetpy
|
[
"cloudnetpy/utils.py",
"cloudnetpy/products/drizzle_error.py"
] |
[
"tests/unit/test_drizzle.py",
"tests/unit/test_drizzle_error.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 1151,
"func_start_lineno": 504,
"func_end_lineno": 529,
"func_code": "def l2norm_weighted(\n values: tuple,\n overall_scale: float,\n term_weights: tuple,\n) -> ma.MaskedArray:\n \"\"\"Calculates scaled and weighted Euclidean distance.\n\n Calculated distance is of form: scale * sqrt((a1*a)**2 + (b1*b)**2 + ...)\n where a, b, ... are terms to be summed and a1, a2, ... are optional weights\n for the terms.\n\n Args:\n values: Tuple containing the values.\n overall_scale: Scale factor for the calculated Euclidean distance.\n term_weights: Weights for the terms. Must be single float or a list of numbers\n (one per term).\n\n Returns:\n Scaled and weighted Euclidean distance.\n\n TODO: Use masked arrays instead of tuples.\n\n \"\"\"\n generic_values = ma.array(values, dtype=object)\n weighted_values = ma.multiply(generic_values, term_weights)\n return overall_scale * l2norm(*weighted_values)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 188,
"func_start_lineno": 140,
"func_end_lineno": 153,
"func_code": "def _calc_error(\n scale: float,\n weights: tuple,\n error_input: tuple,\n *,\n add_mu: bool = False,\n add_mu_small: bool = False,\n) -> ma.MaskedArray:\n error = utils.l2norm_weighted(error_input, scale, weights)\n if add_mu is True:\n error = utils.l2norm(error, MU_ERROR)\n if add_mu_small is True:\n error = utils.l2norm(error, MU_ERROR_SMALL)\n return error"
}
] |
[
"function_empty",
"Development"
] |
[
"cloudnetpy.utils.l2norm_weighted",
"cloudnetpy.products.drizzle_error._calc_error"
] |
Python
| 1 | 2 |
{
"total_num": 103,
"base_passed_num": 70
}
|
[
"cloudnetpy.cloudnetpy.categorize.droplet.interpolate_lwp",
"cloudnetpy.cloudnetpy.categorize.droplet.find_liquid"
] |
cloudnetpy
|
[
"cloudnetpy/categorize/droplet.py",
"cloudnetpy/categorize/droplet.py"
] |
[
"tests/unit/test_droplet.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 245,
"func_start_lineno": 225,
"func_end_lineno": 238,
"func_code": "def interpolate_lwp(obs: ClassData) -> np.ndarray:\n \"\"\"Linear interpolation of liquid water path to fill masked values.\n\n Args:\n obs: The :class:`ClassData` instance.\n\n Returns:\n Liquid water path where the masked values are filled by interpolation.\n\n \"\"\"\n if obs.lwp.all() is ma.masked:\n return np.zeros(obs.time.shape)\n ind = ma.where(obs.lwp)\n return np.interp(obs.time, obs.time[ind], obs.lwp[ind])"
},
{
"class_start_lineno": 1,
"class_end_lineno": 245,
"func_start_lineno": 52,
"func_end_lineno": 121,
"func_code": "def find_liquid(\n obs: ClassData,\n peak_amp: float = 1e-6,\n max_width: float = 300,\n min_points: int = 3,\n min_top_der: float = 1e-7,\n min_lwp: float = 0,\n min_alt: float = 100,\n) -> np.ndarray:\n \"\"\"Estimate liquid layers from SNR-screened attenuated backscatter.\n\n Args:\n obs: The :class:`ClassData` instance.\n peak_amp: Minimum value of peak. Default is 1e-6.\n max_width: Maximum width of peak. Default is 300 (m).\n min_points: Minimum number of valid points in peak. Default is 3.\n min_top_der: Minimum derivative above peak, defined as\n (beta_peak-beta_top) / (alt_top-alt_peak). Default is 1e-7.\n min_lwp: Minimum value from linearly interpolated lwp (kg m-2)\n measured by the mwr. Default is 0.\n min_alt: Minimum altitude of the peak from the ground. Default is 100 (m).\n\n Returns:\n 2-D boolean array denoting liquid layers.\n\n References:\n The method is based on Tuononen, M. et.al, 2019,\n https://acp.copernicus.org/articles/19/1985/2019/.\n\n \"\"\"\n\n def _is_proper_peak() -> bool:\n conditions = (\n npoints >= min_points,\n peak_width < max_width,\n top_der > min_top_der,\n is_positive_lwp,\n peak_alt > min_alt,\n )\n return all(conditions)\n\n lwp_int = interpolate_lwp(obs)\n beta = ma.copy(obs.beta)\n height = obs.height\n\n is_liquid = np.zeros(beta.shape, dtype=bool)\n base_below_peak = utils.n_elements(height, 200)\n top_above_peak = utils.n_elements(height, 150)\n difference = ma.array(np.diff(beta, axis=1))\n beta_diff = difference.filled(0)\n beta = beta.filled(0)\n peak_indices = _find_strong_peaks(beta, peak_amp)\n\n for n, peak in zip(*peak_indices, strict=True):\n lprof = beta[n, :]\n dprof = beta_diff[n, :]\n try:\n base = ind_base(dprof, peak, base_below_peak, 4)\n top = ind_top(dprof, peak, height.shape[0], top_above_peak, 4)\n except IndexError:\n continue\n npoints = np.count_nonzero(lprof[base : top + 1])\n peak_width = height[top] - height[base]\n peak_alt = height[peak] - height[0]\n top_der = (lprof[peak] - lprof[top]) / (height[top] - height[peak])\n is_positive_lwp = lwp_int[n] >= min_lwp\n if _is_proper_peak():\n is_liquid[n, base : top + 1] = True\n\n return is_liquid"
}
] |
[
"function_empty",
"Development"
] |
[
"cloudnetpy.categorize.droplet.interpolate_lwp",
"cloudnetpy.categorize.droplet.find_liquid"
] |
Python
| 1 | 2 |
{
"total_num": 18,
"base_passed_num": 15
}
|
[
"cloudnetpy.cloudnetpy.categorize.atmos_utils.fill_clouds_with_lwc_dz",
"cloudnetpy.cloudnetpy.products.lwc.Lwc::_init_lwc_adiabatic",
"cloudnetpy.cloudnetpy.categorize.atmos_utils.calc_saturation_vapor_pressure",
"cloudnetpy.cloudnetpy.categorize.atmos_utils.calc_mixing_ratio",
"cloudnetpy.cloudnetpy.categorize.atmos_utils.calc_lwc_change_rate"
] |
cloudnetpy
|
[
"cloudnetpy/categorize/atmos_utils.py",
"cloudnetpy/products/lwc.py",
"cloudnetpy/products/lwc.py",
"cloudnetpy/categorize/atmos_utils.py",
"cloudnetpy/categorize/atmos_utils.py",
"cloudnetpy/categorize/atmos_utils.py"
] |
[
"tests/unit/test_lwc.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 357,
"func_start_lineno": 154,
"func_end_lineno": 172,
"func_code": "def fill_clouds_with_lwc_dz(\n temperature: np.ndarray, pressure: np.ndarray, is_liquid: np.ndarray\n) -> np.ndarray:\n \"\"\"Fills liquid clouds with lwc change rate at the cloud bases.\n\n Args:\n temperature: 2D temperature array (K).\n pressure: 2D pressure array (Pa).\n is_liquid: Boolean array indicating presence of liquid clouds.\n\n Returns:\n Liquid water content change rate (kg m-3 m-1), so that for each cloud the base\n value is filled for the whole cloud.\n\n \"\"\"\n lwc_dz = get_lwc_change_rate_at_bases(temperature, pressure, is_liquid)\n lwc_dz_filled = ma.zeros(lwc_dz.shape)\n lwc_dz_filled[is_liquid] = utils.ffill(lwc_dz[is_liquid])\n return lwc_dz_filled"
},
{
"class_start_lineno": 120,
"class_end_lineno": 167,
"func_start_lineno": 146,
"func_end_lineno": 152,
"func_code": " def _init_lwc_adiabatic(self) -> np.ndarray:\n \"\"\"Returns theoretical adiabatic lwc in liquid clouds (kg/m3).\"\"\"\n lwc_dz = atmos_utils.fill_clouds_with_lwc_dz(\n *self.lwc_source.atmosphere,\n self.is_liquid,\n )\n return atmos_utils.calc_adiabatic_lwc(lwc_dz, self.height)"
},
{
"class_start_lineno": 120,
"class_end_lineno": 167,
"func_start_lineno": 134,
"func_end_lineno": 140,
"func_code": " def __init__(self, lwc_source: LwcSource):\n self.lwc_source = lwc_source\n self.height = lwc_source.getvar(\"height\")\n self.is_liquid = self._get_liquid()\n self.lwc_adiabatic = self._init_lwc_adiabatic()\n self.lwc = self._adiabatic_lwc_to_lwc()\n self._mask_rain()"
},
{
"class_start_lineno": 1,
"class_end_lineno": 357,
"func_start_lineno": 245,
"func_end_lineno": 266,
"func_code": "def calc_saturation_vapor_pressure(temperature: np.ndarray) -> np.ndarray:\n \"\"\"Goff-Gratch formula for saturation vapor pressure over water adopted by WMO.\n\n Args:\n temperature: Temperature (K).\n\n Returns:\n Saturation vapor pressure (Pa).\n\n \"\"\"\n ratio = con.T0 / temperature\n inv_ratio = ratio**-1\n return (\n 10\n ** (\n 10.79574 * (1 - ratio)\n - 5.028 * np.log10(inv_ratio)\n + 1.50475e-4 * (1 - (10 ** (-8.2969 * (inv_ratio - 1))))\n + 0.42873e-3 * (10 ** (4.76955 * (1 - ratio)) - 1)\n + 0.78614\n )\n ) * con.HPA_TO_PA"
},
{
"class_start_lineno": 1,
"class_end_lineno": 357,
"func_start_lineno": 269,
"func_end_lineno": 280,
"func_code": "def calc_mixing_ratio(vapor_pressure: np.ndarray, pressure: np.ndarray) -> np.ndarray:\n \"\"\"Calculates mixing ratio from partial vapor pressure and pressure.\n\n Args:\n vapor_pressure: Partial pressure of water vapor (Pa).\n pressure: Atmospheric pressure (Pa).\n\n Returns:\n Mixing ratio (kg kg-1).\n\n \"\"\"\n return con.MW_RATIO * vapor_pressure / (pressure - vapor_pressure)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 357,
"func_start_lineno": 201,
"func_end_lineno": 242,
"func_code": "def calc_lwc_change_rate(temperature: np.ndarray, pressure: np.ndarray) -> np.ndarray:\n \"\"\"Returns rate of change of condensable water (LWC).\n\n Calculates the theoretical adiabatic rate of increase of LWC\n with height, given the cloud base temperature and pressure.\n\n Args:\n temperature: Temperature of cloud base (K).\n pressure: Pressure of cloud base (Pa).\n\n Returns:\n dlwc/dz (kg m-3 m-1)\n\n References:\n Brenguier, 1991, https://doi.org/10.1175/1520-0469(1991)048<0264:POTCPA>2.0.CO;2\n\n \"\"\"\n svp = calc_saturation_vapor_pressure(temperature)\n svp_mixing_ratio = calc_mixing_ratio(svp, pressure)\n air_density = calc_air_density(pressure, temperature, svp_mixing_ratio)\n\n e = 0.622\n Cp = 1004 # J kg-1 K-1\n Lv = 2.45e6 # J kg-1 = Pa m3 kg-1\n qs = svp_mixing_ratio # kg kg-1\n pa = air_density # kg m-3\n es = svp # Pa\n P = pressure # Pa\n T = temperature # K\n\n # See Appendix B in Brenguier (1991) for the derivation of the following equation\n dqs_dp = (\n -(1 - (Cp * T) / (e * Lv))\n * (((Cp * T) / (e * Lv)) + ((Lv * qs * pa) / (P - es))) ** -1\n * (e * es)\n * (P - es) ** -2\n )\n\n # Using hydrostatic equation to convert dqs_dp to dqs_dz\n dqs_dz = dqs_dp * air_density * -scipy.constants.g\n\n return dqs_dz * air_density"
}
] |
[
"function_empty",
"Development"
] |
[
"cloudnetpy.categorize.atmos_utils.fill_clouds_with_lwc_dz",
"cloudnetpy.products.lwc.Lwc._init_lwc_adiabatic",
"cloudnetpy.products.lwc.Lwc.__init__",
"cloudnetpy.categorize.atmos_utils.calc_saturation_vapor_pressure",
"cloudnetpy.categorize.atmos_utils.calc_mixing_ratio",
"cloudnetpy.categorize.atmos_utils.calc_lwc_change_rate"
] |
Python
| 3 | 5 |
{
"total_num": 37,
"base_passed_num": 0
}
|
[
"cloudnetpy.cloudnetpy.utils.rebin_1d",
"cloudnetpy.cloudnetpy.cloudnetarray.CloudnetArray::rebin_data",
"cloudnetpy.cloudnetpy.utils.binvec"
] |
cloudnetpy
|
[
"cloudnetpy/utils.py",
"cloudnetpy/cloudnetarray.py",
"cloudnetpy/categorize/mwr.py",
"cloudnetpy/utils.py"
] |
[
"tests/unit/test_mwr.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 1151,
"func_start_lineno": 195,
"func_end_lineno": 231,
"func_code": "def rebin_1d(\n x_in: np.ndarray,\n array: np.ndarray | ma.MaskedArray,\n x_new: np.ndarray,\n statistic: str = \"mean\",\n *,\n mask_zeros: bool = True,\n) -> ma.MaskedArray:\n \"\"\"Rebins 1D array.\n\n Args:\n x_in: 1-D array with shape (n,).\n array: 1-D input data with shape (m,).\n x_new: 1-D target vector (center points) with shape (N,).\n statistic: Statistic to be calculated. Possible statistics are 'mean', 'std'.\n Default is 'mean'.\n mask_zeros: Whether to mask 0 values in the returned array. Default is True.\n\n Returns:\n Re-binned data with shape (N,).\n\n \"\"\"\n edges = binvec(x_new)\n result = np.zeros(len(x_new))\n array_screened = ma.masked_invalid(array, copy=True) # data may contain nan-values\n mask = ~array_screened.mask\n if ma.any(array_screened[mask]):\n result, _, _ = stats.binned_statistic(\n x_in[mask],\n array_screened[mask],\n statistic=statistic,\n bins=edges,\n )\n result[~np.isfinite(result)] = 0\n if mask_zeros:\n return ma.masked_equal(result, 0)\n return ma.array(result)"
},
{
"class_start_lineno": 14,
"class_end_lineno": 211,
"func_start_lineno": 61,
"func_end_lineno": 84,
"func_code": " def rebin_data(\n self, time: np.ndarray, time_new: np.ndarray, *, mask_zeros: bool = True\n ) -> list:\n \"\"\"Rebins `data` in time.\n\n Args:\n time: 1D time array.\n time_new: 1D new time array.\n mask_zeros: Whether to mask 0 values in the returned array. Default is True.\n\n Returns:\n Time indices without data.\n\n \"\"\"\n if self.data.ndim == 1:\n self.data = utils.rebin_1d(time, self.data, time_new, mask_zeros=mask_zeros)\n bad_indices = list(np.where(self.data == ma.masked)[0])\n else:\n if not isinstance(self.data, ma.MaskedArray):\n self.data = ma.masked_array(self.data)\n self.data, bad_indices = utils.rebin_2d(\n time, self.data, time_new, mask_zeros=mask_zeros\n )\n return bad_indices"
},
{
"class_start_lineno": 11,
"class_end_lineno": 50,
"func_start_lineno": 24,
"func_end_lineno": 32,
"func_code": " def rebin_to_grid(self, time_grid: np.ndarray) -> None:\n \"\"\"Approximates lwp and its error in a grid using mean.\n\n Args:\n time_grid: 1D target time grid.\n\n \"\"\"\n for array in self.data.values():\n array.rebin_data(self.time, time_grid)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 1151,
"func_start_lineno": 124,
"func_end_lineno": 140,
"func_code": "def binvec(x: np.ndarray | list) -> np.ndarray:\n \"\"\"Converts 1-D center points to bins with even spacing.\n\n Args:\n x: 1-D array of N real values.\n\n Returns:\n ndarray: N + 1 edge values.\n\n Examples:\n >>> binvec([1, 2, 3])\n [0.5, 1.5, 2.5, 3.5]\n\n \"\"\"\n edge1 = x[0] - (x[1] - x[0]) / 2\n edge2 = x[-1] + (x[-1] - x[-2]) / 2\n return np.linspace(edge1, edge2, len(x) + 1)"
}
] |
[
"function_empty"
] |
[
"cloudnetpy.utils.rebin_1d",
"cloudnetpy.cloudnetarray.CloudnetArray.rebin_data",
"cloudnetpy.categorize.mwr.Mwr.rebin_to_grid",
"cloudnetpy.utils.binvec"
] |
Python
| 3 | 3 |
{
"total_num": 4,
"base_passed_num": 3
}
|
[
"cloudnetpy.cloudnetpy.utils.append_data",
"cloudnetpy.cloudnetpy.instruments.radiometrics.RadiometricsCombined::__init__"
] |
cloudnetpy
|
[
"cloudnetpy/utils.py",
"cloudnetpy/instruments/radiometrics.py"
] |
[
"tests/unit/test_radiometrics.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 1151,
"func_start_lineno": 911,
"func_end_lineno": 925,
"func_code": "def append_data(data_in: dict, key: str, array: np.ndarray) -> dict:\n \"\"\"Appends data to a dictionary field (creates the field if not yet present).\n\n Args:\n data_in: Dictionary where data will be appended.\n key: Key of the field.\n array: Numpy array to be appended to data_in[key].\n\n \"\"\"\n data = data_in.copy()\n if key not in data:\n data[key] = array\n else:\n data[key] = ma.concatenate((data[key], array))\n return data"
},
{
"class_start_lineno": 227,
"class_end_lineno": 289,
"func_start_lineno": 233,
"func_end_lineno": 246,
"func_code": " def __init__(self, objs: list[Radiometrics], site_meta: dict):\n self.site_meta = site_meta\n self.data = {}\n self.date = None\n for obj in objs:\n if obj.ranges != objs[0].ranges:\n msg = \"Inconsistent range between files\"\n raise InconsistentDataError(msg)\n for key in obj.data:\n self.data = utils.append_data(self.data, key, obj.data[key])\n ranges = [float(x) for x in objs[0].ranges]\n self.data[\"range\"] = np.array(ranges) * 1000 # m => km\n self.data[\"height\"] = self.data[\"range\"] + self.site_meta[\"altitude\"]\n self.instrument = instruments.RADIOMETRICS"
}
] |
[
"function_empty",
"Development"
] |
[
"cloudnetpy.utils.append_data",
"cloudnetpy.instruments.radiometrics.RadiometricsCombined.__init__"
] |
Python
| 1 | 2 |
{
"total_num": 17,
"base_passed_num": 0
}
|
[
"cloudnetpy.cloudnetpy.utils.binvec",
"cloudnetpy.cloudnetpy.utils.rebin_2d",
"cloudnetpy.cloudnetpy.utils.rebin_1d"
] |
cloudnetpy
|
[
"cloudnetpy/utils.py",
"cloudnetpy/utils.py",
"cloudnetpy/utils.py"
] |
[
"tests/unit/test_utils.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 1151,
"func_start_lineno": 124,
"func_end_lineno": 140,
"func_code": "def binvec(x: np.ndarray | list) -> np.ndarray:\n \"\"\"Converts 1-D center points to bins with even spacing.\n\n Args:\n x: 1-D array of N real values.\n\n Returns:\n ndarray: N + 1 edge values.\n\n Examples:\n >>> binvec([1, 2, 3])\n [0.5, 1.5, 2.5, 3.5]\n\n \"\"\"\n edge1 = x[0] - (x[1] - x[0]) / 2\n edge2 = x[-1] + (x[-1] - x[-2]) / 2\n return np.linspace(edge1, edge2, len(x) + 1)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 1151,
"func_start_lineno": 143,
"func_end_lineno": 192,
"func_code": "def rebin_2d(\n x_in: np.ndarray,\n array: ma.MaskedArray,\n x_new: np.ndarray,\n statistic: Literal[\"mean\", \"std\"] = \"mean\",\n n_min: int = 1,\n *,\n mask_zeros: bool = True,\n) -> tuple[ma.MaskedArray, list]:\n \"\"\"Rebins 2-D data in one dimension.\n\n Args:\n x_in: 1-D array with shape (n,).\n array: 2-D input data with shape (n, m).\n x_new: 1-D target vector (center points) with shape (N,).\n statistic: Statistic to be calculated. Possible statistics are 'mean', 'std'.\n Default is 'mean'.\n n_min: Minimum number of points to have good statistics in a bin. Default is 1.\n mask_zeros: Whether to mask 0 values in the returned array. Default is True.\n\n Returns:\n tuple: Rebinned data with shape (N, m) and indices of bins without enough data.\n \"\"\"\n edges = binvec(x_new)\n result = np.zeros((len(x_new), array.shape[1]))\n array_screened = ma.masked_invalid(array, copy=True) # data may contain nan-values\n for ind, values in enumerate(array_screened.T):\n mask = ~values.mask\n if ma.any(values[mask]):\n result[:, ind], _, _ = stats.binned_statistic(\n x_in[mask],\n values[mask],\n statistic=statistic,\n bins=edges,\n )\n result[~np.isfinite(result)] = 0\n if mask_zeros is True:\n masked_result = ma.masked_equal(result, 0)\n else:\n masked_result = ma.array(result)\n\n # Fill bins with not enough profiles\n x_hist, _ = np.histogram(x_in, bins=edges)\n empty_mask = x_hist < n_min\n masked_result[empty_mask, :] = ma.masked\n empty_indices = list(np.nonzero(empty_mask)[0])\n if len(empty_indices) > 0:\n logging.debug(\"No data in %s bins\", len(empty_indices))\n\n return masked_result, empty_indices"
},
{
"class_start_lineno": 1,
"class_end_lineno": 1151,
"func_start_lineno": 195,
"func_end_lineno": 231,
"func_code": "def rebin_1d(\n x_in: np.ndarray,\n array: np.ndarray | ma.MaskedArray,\n x_new: np.ndarray,\n statistic: str = \"mean\",\n *,\n mask_zeros: bool = True,\n) -> ma.MaskedArray:\n \"\"\"Rebins 1D array.\n\n Args:\n x_in: 1-D array with shape (n,).\n array: 1-D input data with shape (m,).\n x_new: 1-D target vector (center points) with shape (N,).\n statistic: Statistic to be calculated. Possible statistics are 'mean', 'std'.\n Default is 'mean'.\n mask_zeros: Whether to mask 0 values in the returned array. Default is True.\n\n Returns:\n Re-binned data with shape (N,).\n\n \"\"\"\n edges = binvec(x_new)\n result = np.zeros(len(x_new))\n array_screened = ma.masked_invalid(array, copy=True) # data may contain nan-values\n mask = ~array_screened.mask\n if ma.any(array_screened[mask]):\n result, _, _ = stats.binned_statistic(\n x_in[mask],\n array_screened[mask],\n statistic=statistic,\n bins=edges,\n )\n result[~np.isfinite(result)] = 0\n if mask_zeros:\n return ma.masked_equal(result, 0)\n return ma.array(result)"
}
] |
[
"function_empty"
] |
[
"cloudnetpy.utils.binvec",
"cloudnetpy.utils.rebin_2d",
"cloudnetpy.utils.rebin_1d"
] |
Python
| 3 | 3 |
{
"total_num": 160,
"base_passed_num": 151
}
|
[
"d3rlpy.d3rlpy.models.encoders.DefaultEncoderFactory::create",
"d3rlpy.d3rlpy.models.builders.create_discrete_q_function"
] |
d3rlpy
|
[
"d3rlpy/models/encoders.py",
"d3rlpy/models/builders.py"
] |
[
"tests_copy/envs/test_wrappers.py",
"tests_copy/models/test_builders.py"
] |
[
{
"class_start_lineno": 209,
"class_end_lineno": 265,
"func_start_lineno": 224,
"func_end_lineno": 238,
"func_code": " def create(self, observation_shape: Shape) -> Encoder:\n factory: Union[PixelEncoderFactory, VectorEncoderFactory]\n if len(observation_shape) == 3:\n factory = PixelEncoderFactory(\n activation=self.activation,\n use_batch_norm=self.use_batch_norm,\n dropout_rate=self.dropout_rate,\n )\n else:\n factory = VectorEncoderFactory(\n activation=self.activation,\n use_batch_norm=self.use_batch_norm,\n dropout_rate=self.dropout_rate,\n )\n return factory.create(observation_shape)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 403,
"func_start_lineno": 47,
"func_end_lineno": 82,
"func_code": "def create_discrete_q_function(\n observation_shape: Shape,\n action_size: int,\n encoder_factory: EncoderFactory,\n q_func_factory: QFunctionFactory,\n device: str,\n enable_ddp: bool,\n n_ensembles: int = 1,\n) -> tuple[nn.ModuleList, DiscreteEnsembleQFunctionForwarder]:\n if q_func_factory.share_encoder:\n encoder = encoder_factory.create(observation_shape)\n hidden_size = compute_output_size([observation_shape], encoder)\n # normalize gradient scale by ensemble size\n for p in cast(nn.Module, encoder).parameters():\n p.register_hook(lambda grad: grad / n_ensembles)\n\n q_funcs = []\n forwarders = []\n for _ in range(n_ensembles):\n if not q_func_factory.share_encoder:\n encoder = encoder_factory.create(observation_shape)\n hidden_size = compute_output_size([observation_shape], encoder)\n q_func, forwarder = q_func_factory.create_discrete(\n encoder, hidden_size, action_size\n )\n q_func.to(device)\n if enable_ddp:\n q_func = wrap_model_by_ddp(q_func)\n forwarder.set_q_func(q_func)\n q_funcs.append(q_func)\n forwarders.append(forwarder)\n q_func_modules = nn.ModuleList(q_funcs)\n ensemble_forwarder = DiscreteEnsembleQFunctionForwarder(\n forwarders, action_size\n )\n return q_func_modules, ensemble_forwarder"
}
] |
[
"Development"
] |
[
"d3rlpy.models.encoders.DefaultEncoderFactory.create",
"d3rlpy.models.builders.create_discrete_q_function"
] |
Python
| 0 | 2 |
{
"total_num": 42,
"base_passed_num": 15
}
|
[
"d3rlpy.d3rlpy.dataset.transition_pickers.BasicTransitionPicker::__call__",
"d3rlpy.d3rlpy.metrics.evaluators.make_batches",
"d3rlpy.d3rlpy.metrics.evaluators.TDErrorEvaluator::__call__",
"d3rlpy.d3rlpy.models.encoders.DefaultEncoderFactory::create",
"d3rlpy.d3rlpy.models.builders.create_discrete_q_function"
] |
d3rlpy
|
[
"d3rlpy/dataset/transition_pickers.py",
"d3rlpy/metrics/evaluators.py",
"d3rlpy/metrics/evaluators.py",
"d3rlpy/models/encoders.py",
"d3rlpy/models/builders.py"
] |
[
"tests_copy/metrics/test_evaluators.py"
] |
[
{
"class_start_lineno": 43,
"class_end_lineno": 72,
"func_start_lineno": 49,
"func_end_lineno": 72,
"func_code": " def __call__(self, episode: EpisodeBase, index: int) -> Transition:\n _validate_index(episode, index)\n\n observation = retrieve_observation(episode.observations, index)\n is_terminal = episode.terminated and index == episode.size() - 1\n if is_terminal:\n next_observation = create_zero_observation(observation)\n next_action = np.zeros_like(episode.actions[index])\n else:\n next_observation = retrieve_observation(\n episode.observations, index + 1\n )\n next_action = episode.actions[index + 1]\n\n return Transition(\n observation=observation,\n action=episode.actions[index],\n reward=episode.rewards[index],\n next_observation=next_observation,\n next_action=next_action,\n terminal=float(is_terminal),\n interval=1,\n rewards_to_go=episode.rewards[index:],\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 548,
"func_start_lineno": 52,
"func_end_lineno": 68,
"func_code": "def make_batches(\n episode: EpisodeBase,\n window_size: int,\n transition_picker: TransitionPickerProtocol,\n) -> Iterator[TransitionMiniBatch]:\n n_batches = len(episode) // window_size\n if len(episode) % window_size != 0:\n n_batches += 1\n for i in range(n_batches):\n head_index = i * window_size\n last_index = min(head_index + window_size, episode.transition_count)\n transitions = [\n transition_picker(episode, index)\n for index in range(head_index, last_index)\n ]\n batch = TransitionMiniBatch.from_transitions(transitions)\n yield batch"
},
{
"class_start_lineno": 71,
"class_end_lineno": 121,
"func_start_lineno": 93,
"func_end_lineno": 121,
"func_code": " def __call__(\n self,\n algo: QLearningAlgoProtocol,\n dataset: ReplayBufferBase,\n ) -> float:\n total_errors = []\n episodes = self._episodes if self._episodes else dataset.episodes\n for episode in episodes:\n for batch in make_batches(\n episode, WINDOW_SIZE, dataset.transition_picker\n ):\n # estimate values for current observations\n values = algo.predict_value(batch.observations, batch.actions)\n\n # estimate values for next observations\n next_actions = algo.predict(batch.next_observations)\n next_values = algo.predict_value(\n batch.next_observations, next_actions\n )\n\n # calculate td errors\n mask = (1.0 - batch.terminals).reshape(-1)\n rewards = np.asarray(batch.rewards).reshape(-1)\n if algo.reward_scaler:\n rewards = algo.reward_scaler.transform_numpy(rewards)\n y = rewards + algo.gamma * next_values * mask\n total_errors += ((values - y) ** 2).tolist()\n\n return float(np.mean(total_errors))"
},
{
"class_start_lineno": 209,
"class_end_lineno": 265,
"func_start_lineno": 224,
"func_end_lineno": 238,
"func_code": " def create(self, observation_shape: Shape) -> Encoder:\n factory: Union[PixelEncoderFactory, VectorEncoderFactory]\n if len(observation_shape) == 3:\n factory = PixelEncoderFactory(\n activation=self.activation,\n use_batch_norm=self.use_batch_norm,\n dropout_rate=self.dropout_rate,\n )\n else:\n factory = VectorEncoderFactory(\n activation=self.activation,\n use_batch_norm=self.use_batch_norm,\n dropout_rate=self.dropout_rate,\n )\n return factory.create(observation_shape)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 403,
"func_start_lineno": 47,
"func_end_lineno": 82,
"func_code": "def create_discrete_q_function(\n observation_shape: Shape,\n action_size: int,\n encoder_factory: EncoderFactory,\n q_func_factory: QFunctionFactory,\n device: str,\n enable_ddp: bool,\n n_ensembles: int = 1,\n) -> tuple[nn.ModuleList, DiscreteEnsembleQFunctionForwarder]:\n if q_func_factory.share_encoder:\n encoder = encoder_factory.create(observation_shape)\n hidden_size = compute_output_size([observation_shape], encoder)\n # normalize gradient scale by ensemble size\n for p in cast(nn.Module, encoder).parameters():\n p.register_hook(lambda grad: grad / n_ensembles)\n\n q_funcs = []\n forwarders = []\n for _ in range(n_ensembles):\n if not q_func_factory.share_encoder:\n encoder = encoder_factory.create(observation_shape)\n hidden_size = compute_output_size([observation_shape], encoder)\n q_func, forwarder = q_func_factory.create_discrete(\n encoder, hidden_size, action_size\n )\n q_func.to(device)\n if enable_ddp:\n q_func = wrap_model_by_ddp(q_func)\n forwarder.set_q_func(q_func)\n q_funcs.append(q_func)\n forwarders.append(forwarder)\n q_func_modules = nn.ModuleList(q_funcs)\n ensemble_forwarder = DiscreteEnsembleQFunctionForwarder(\n forwarders, action_size\n )\n return q_func_modules, ensemble_forwarder"
}
] |
[
"Development"
] |
[
"d3rlpy.dataset.transition_pickers.BasicTransitionPicker.__call__",
"d3rlpy.metrics.evaluators.make_batches",
"d3rlpy.metrics.evaluators.TDErrorEvaluator.__call__",
"d3rlpy.models.encoders.DefaultEncoderFactory.create",
"d3rlpy.models.builders.create_discrete_q_function"
] |
Python
| 0 | 5 |
{
"total_num": 19,
"base_passed_num": 0
}
|
[
"d3rlpy.d3rlpy.models.torch.q_functions.ensemble_q_function._gather_quantiles_by_indices",
"d3rlpy.d3rlpy.models.torch.q_functions.ensemble_q_function._reduce_quantile_ensemble",
"d3rlpy.d3rlpy.models.torch.q_functions.mean_q_function.DiscreteMeanQFunctionForwarder::compute_error",
"d3rlpy.d3rlpy.models.torch.q_functions.iqn_q_function.DiscreteIQNQFunctionForwarder::compute_error",
"d3rlpy.d3rlpy.models.torch.q_functions.ensemble_q_function.compute_ensemble_q_function_error"
] |
d3rlpy
|
[
"d3rlpy/models/torch/q_functions/ensemble_q_function.py",
"d3rlpy/models/torch/q_functions/ensemble_q_function.py",
"d3rlpy/models/torch/q_functions/mean_q_function.py",
"d3rlpy/models/torch/q_functions/iqn_q_function.py",
"d3rlpy/models/torch/q_functions/ensemble_q_function.py",
"d3rlpy/models/torch/q_functions/ensemble_q_function.py"
] |
[
"tests_copy/models/torch/q_functions/test_ensemble_q_function.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 367,
"func_start_lineno": 35,
"func_end_lineno": 52,
"func_code": "def _gather_quantiles_by_indices(\n y: torch.Tensor, indices: torch.Tensor\n) -> torch.Tensor:\n # TODO: implement this in general case\n if y.dim() == 3:\n # (N, batch, n_quantiles) -> (batch, n_quantiles)\n return y.transpose(0, 1)[torch.arange(y.shape[1]), indices]\n elif y.dim() == 4:\n # (N, batch, action, n_quantiles) -> (batch, action, N, n_quantiles)\n transposed_y = y.transpose(0, 1).transpose(1, 2)\n # (batch, action, N, n_quantiles) -> (batch * action, N, n_quantiles)\n flat_y = transposed_y.reshape(-1, y.shape[0], y.shape[3])\n head_indices = torch.arange(y.shape[1] * y.shape[2])\n # (batch * action, N, n_quantiles) -> (batch * action, n_quantiles)\n gathered_y = flat_y[head_indices, indices.view(-1)]\n # (batch * action, n_quantiles) -> (batch, action, n_quantiles)\n return gathered_y.view(y.shape[1], y.shape[2], -1)\n raise ValueError"
},
{
"class_start_lineno": 1,
"class_end_lineno": 367,
"func_start_lineno": 55,
"func_end_lineno": 74,
"func_code": "def _reduce_quantile_ensemble(\n y: torch.Tensor, reduction: str = \"min\", dim: int = 0, lam: float = 0.75\n) -> torch.Tensor:\n # reduction beased on expectation\n mean = y.mean(dim=-1)\n if reduction == \"min\":\n indices = mean.min(dim=dim).indices\n return _gather_quantiles_by_indices(y, indices)\n elif reduction == \"max\":\n indices = mean.max(dim=dim).indices\n return _gather_quantiles_by_indices(y, indices)\n elif reduction == \"none\":\n return y\n elif reduction == \"mix\":\n min_indices = mean.min(dim=dim).indices\n max_indices = mean.max(dim=dim).indices\n min_values = _gather_quantiles_by_indices(y, min_indices)\n max_values = _gather_quantiles_by_indices(y, max_indices)\n return lam * min_values + (1.0 - lam) * max_values\n raise ValueError"
},
{
"class_start_lineno": 47,
"class_end_lineno": 86,
"func_start_lineno": 58,
"func_end_lineno": 74,
"func_code": " def compute_error(\n self,\n observations: TorchObservation,\n actions: torch.Tensor,\n rewards: torch.Tensor,\n target: torch.Tensor,\n terminals: torch.Tensor,\n gamma: Union[float, torch.Tensor] = 0.99,\n reduction: str = \"mean\",\n ) -> torch.Tensor:\n one_hot = F.one_hot(actions.view(-1), num_classes=self._action_size)\n value = (self._q_func(observations).q_value * one_hot.float()).sum(\n dim=1, keepdim=True\n )\n y = rewards + gamma * target * (1 - terminals)\n loss = compute_huber_loss(value, y)\n return compute_reduce(loss, reduction)"
},
{
"class_start_lineno": 122,
"class_end_lineno": 174,
"func_start_lineno": 133,
"func_end_lineno": 162,
"func_code": " def compute_error(\n self,\n observations: TorchObservation,\n actions: torch.Tensor,\n rewards: torch.Tensor,\n target: torch.Tensor,\n terminals: torch.Tensor,\n gamma: Union[float, torch.Tensor] = 0.99,\n reduction: str = \"mean\",\n ) -> torch.Tensor:\n batch_size = get_batch_size(observations)\n assert target.shape == (batch_size, self._n_quantiles)\n\n # extraect quantiles corresponding to act_t\n output = self._q_func(observations)\n taus = output.taus\n all_quantiles = output.quantiles\n assert taus is not None and all_quantiles is not None\n quantiles = pick_quantile_value_by_action(all_quantiles, actions)\n\n loss = compute_quantile_loss(\n quantiles=quantiles,\n rewards=rewards,\n target=target,\n terminals=terminals,\n taus=taus,\n gamma=gamma,\n )\n\n return compute_reduce(loss, reduction)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 367,
"func_start_lineno": 77,
"func_end_lineno": 109,
"func_code": "def compute_ensemble_q_function_error(\n forwarders: Union[\n Sequence[DiscreteQFunctionForwarder],\n Sequence[ContinuousQFunctionForwarder],\n ],\n observations: TorchObservation,\n actions: torch.Tensor,\n rewards: torch.Tensor,\n target: torch.Tensor,\n terminals: torch.Tensor,\n gamma: Union[float, torch.Tensor] = 0.99,\n masks: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n assert target.ndim == 2\n td_sum = torch.tensor(\n 0.0,\n dtype=torch.float32,\n device=get_device(observations),\n )\n for forwarder in forwarders:\n loss = forwarder.compute_error(\n observations=observations,\n actions=actions,\n rewards=rewards,\n target=target,\n terminals=terminals,\n gamma=gamma,\n reduction=\"none\",\n )\n if masks is not None:\n loss = loss * masks\n td_sum += loss.mean()\n return td_sum"
},
{
"class_start_lineno": 150,
"class_end_lineno": 218,
"func_start_lineno": 179,
"func_end_lineno": 198,
"func_code": " def compute_error(\n self,\n observations: TorchObservation,\n actions: torch.Tensor,\n rewards: torch.Tensor,\n target: torch.Tensor,\n terminals: torch.Tensor,\n gamma: Union[float, torch.Tensor] = 0.99,\n masks: Optional[torch.Tensor] = None,\n ) -> torch.Tensor:\n return compute_ensemble_q_function_error(\n forwarders=self._forwarders,\n observations=observations,\n actions=actions,\n rewards=rewards,\n target=target,\n terminals=terminals,\n gamma=gamma,\n masks=masks,\n )"
}
] |
[
"Development"
] |
[
"d3rlpy.models.torch.q_functions.ensemble_q_function._gather_quantiles_by_indices",
"d3rlpy.models.torch.q_functions.ensemble_q_function._reduce_quantile_ensemble",
"d3rlpy.models.torch.q_functions.mean_q_function.DiscreteMeanQFunctionForwarder.compute_error",
"d3rlpy.models.torch.q_functions.iqn_q_function.DiscreteIQNQFunctionForwarder.compute_error",
"d3rlpy.models.torch.q_functions.ensemble_q_function.compute_ensemble_q_function_error",
"d3rlpy.models.torch.q_functions.ensemble_q_function.DiscreteEnsembleQFunctionForwarder.compute_error"
] |
Python
| 0 | 5 |
{
"total_num": 30,
"base_passed_num": 4
}
|
[
"d3rlpy.d3rlpy.models.torch.q_functions.iqn_q_function.DiscreteIQNQFunction::forward",
"d3rlpy.d3rlpy.models.torch.q_functions.iqn_q_function.DiscreteIQNQFunctionForwarder::compute_error"
] |
d3rlpy
|
[
"d3rlpy/models/torch/q_functions/iqn_q_function.py",
"d3rlpy/models/torch/q_functions/base.py",
"d3rlpy/models/torch/q_functions/iqn_q_function.py"
] |
[
"tests_copy/models/torch/q_functions/test_iqn_q_function.py"
] |
[
{
"class_start_lineno": 65,
"class_end_lineno": 119,
"func_start_lineno": 92,
"func_end_lineno": 115,
"func_code": " def forward(self, x: TorchObservation) -> QFunctionOutput:\n h = self._encoder(x)\n\n if self.training:\n n_quantiles = self._n_quantiles\n else:\n n_quantiles = self._n_greedy_quantiles\n taus = _make_taus(\n batch_size=get_batch_size(x),\n n_quantiles=n_quantiles,\n training=self.training,\n device=torch.device(get_device(x)),\n )\n\n # (batch, quantile, feature)\n prod = compute_iqn_feature(h, taus, self._embed, self._embed_size)\n # (batch, quantile, action) -> (batch, action, quantile)\n quantiles = self._fc(prod).transpose(1, 2)\n\n return QFunctionOutput(\n q_value=quantiles.mean(dim=2),\n quantiles=quantiles,\n taus=taus,\n )"
},
{
"class_start_lineno": null,
"class_end_lineno": null,
"func_start_lineno": null,
"func_end_lineno": null,
"func_code": "未找到 DiscreteIQNQFunction::__call__"
},
{
"class_start_lineno": 122,
"class_end_lineno": 174,
"func_start_lineno": 133,
"func_end_lineno": 162,
"func_code": " def compute_error(\n self,\n observations: TorchObservation,\n actions: torch.Tensor,\n rewards: torch.Tensor,\n target: torch.Tensor,\n terminals: torch.Tensor,\n gamma: Union[float, torch.Tensor] = 0.99,\n reduction: str = \"mean\",\n ) -> torch.Tensor:\n batch_size = get_batch_size(observations)\n assert target.shape == (batch_size, self._n_quantiles)\n\n # extraect quantiles corresponding to act_t\n output = self._q_func(observations)\n taus = output.taus\n all_quantiles = output.quantiles\n assert taus is not None and all_quantiles is not None\n quantiles = pick_quantile_value_by_action(all_quantiles, actions)\n\n loss = compute_quantile_loss(\n quantiles=quantiles,\n rewards=rewards,\n target=target,\n terminals=terminals,\n taus=taus,\n gamma=gamma,\n )\n\n return compute_reduce(loss, reduction)"
}
] |
[
"Development"
] |
[
"d3rlpy.models.torch.q_functions.iqn_q_function.DiscreteIQNQFunction.forward",
"d3rlpy.models.torch.q_functions.base.DiscreteIQNQFunction.__call__",
"d3rlpy.models.torch.q_functions.iqn_q_function.DiscreteIQNQFunctionForwarder.compute_error"
] |
Python
| 0 | 2 |
{
"total_num": 8,
"base_passed_num": 4
}
|
[
"datachain.src.datachain.lib.convert.python_to_sql.python_to_sql",
"datachain.src.datachain.lib.signal_schema.SignalSchema::get_column_type",
"datachain.src.datachain.lib.dc.DataChain::mutate",
"datachain.src.datachain.lib.signal_schema.SignalSchema::mutate"
] |
datachain
|
[
"datachain/lib/convert/python_to_sql.py",
"datachain/func/func.py",
"datachain/lib/signal_schema.py",
"datachain/func/func.py",
"datachain/lib/dc.py",
"datachain/lib/signal_schema.py"
] |
[
"tests/unit/test_func.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 117,
"func_start_lineno": 37,
"func_end_lineno": 82,
"func_code": "def python_to_sql(typ): # noqa: PLR0911\n if inspect.isclass(typ):\n if issubclass(typ, SQLType):\n return typ\n if issubclass(typ, Enum):\n return str\n\n res = PYTHON_TO_SQL.get(typ)\n if res:\n return res\n\n orig = get_origin(typ)\n\n if orig in (Literal, LiteralEx):\n return String\n\n args = get_args(typ)\n if inspect.isclass(orig) and (issubclass(list, orig) or issubclass(tuple, orig)):\n if args is None:\n raise TypeError(f\"Cannot resolve type '{typ}' for flattening features\")\n\n args0 = args[0]\n if ModelStore.is_pydantic(args0):\n return Array(JSON())\n\n list_type = list_of_args_to_type(args)\n return Array(list_type)\n\n if orig is Annotated:\n # Ignoring annotations\n return python_to_sql(args[0])\n\n if inspect.isclass(orig) and issubclass(dict, orig):\n return JSON\n\n if orig == Union:\n if len(args) == 2 and (type(None) in args):\n return python_to_sql(args[0])\n\n if _is_union_str_literal(orig, args):\n return String\n\n if _is_json_inside_union(orig, args):\n return JSON\n\n raise TypeError(f\"Cannot recognize type {typ}\")"
},
{
"class_start_lineno": 29,
"class_end_lineno": 422,
"func_start_lineno": 375,
"func_end_lineno": 422,
"func_code": " def get_column(\n self,\n signals_schema: Optional[\"SignalSchema\"] = None,\n label: Optional[str] = None,\n table: Optional[\"TableClause\"] = None,\n ) -> Column:\n col_type = self.get_result_type(signals_schema)\n sql_type = python_to_sql(col_type)\n\n def get_col(col: ColT, string_as_literal=False) -> ColT:\n # string_as_literal is used only for conditionals like `case()` where\n # literals are nested inside ColT as we have tuples of condition - values\n # and if user wants to set some case value as column, explicit `C(\"col\")`\n # syntax must be used to distinguish from literals\n if isinstance(col, tuple):\n return tuple(get_col(x, string_as_literal=True) for x in col)\n if isinstance(col, Func):\n return col.get_column(signals_schema, table=table)\n if isinstance(col, str) and not string_as_literal:\n column = Column(col, sql_type)\n column.table = table\n return column\n return col\n\n cols = [get_col(col) for col in self._db_cols]\n kwargs = {k: get_col(v, string_as_literal=True) for k, v in self.kwargs.items()}\n func_col = self.inner(*cols, *self.args, **kwargs)\n\n if self.is_window:\n if not self.window:\n raise DataChainParamsError(\n f\"Window function {self} requires over() clause with a window spec\",\n )\n func_col = func_col.over(\n partition_by=self.window.partition_by,\n order_by=(\n desc(self.window.order_by)\n if self.window.desc\n else self.window.order_by\n ),\n )\n\n func_col.type = sql_type() if inspect.isclass(sql_type) else sql_type\n\n if col_name := self.get_col_name(label):\n func_col = func_col.label(col_name)\n\n return func_col"
},
{
"class_start_lineno": 135,
"class_end_lineno": 751,
"func_start_lineno": 464,
"func_end_lineno": 479,
"func_code": " def get_column_type(self, col_name: str, with_subtree: bool = False) -> DataType:\n \"\"\"\n Returns column type by column name.\n\n If `with_subtree` is True, then it will return the type of the column\n even if it has a subtree (e.g. model with nested fields), otherwise it will\n return the type of the column (standard type field, not the model).\n\n If column is not found, raises `SignalResolvingError`.\n \"\"\"\n for path, _type, has_subtree, _ in self.get_flat_tree():\n if (with_subtree or not has_subtree) and DEFAULT_DELIMITER.join(\n path\n ) == col_name:\n return _type\n raise SignalResolvingError([col_name], \"is not found\")"
},
{
"class_start_lineno": 1,
"class_end_lineno": 449,
"func_start_lineno": 425,
"func_end_lineno": 438,
"func_code": "def get_db_col_type(signals_schema: \"SignalSchema\", col: ColT) -> \"DataType\":\n if isinstance(col, tuple):\n # we can only get tuple from case statement where the first tuple item\n # is condition, and second one is value which type is important\n col = col[1]\n if isinstance(col, Func):\n return col.get_result_type(signals_schema)\n\n if isinstance(col, ColumnElement) and not hasattr(col, \"name\"):\n return sql_to_python(col)\n\n return signals_schema.get_column_type(\n col.name if isinstance(col, ColumnElement) else col # type: ignore[arg-type]\n )"
},
{
"class_start_lineno": 174,
"class_end_lineno": 2625,
"func_start_lineno": 1136,
"func_end_lineno": 1215,
"func_code": " def mutate(self, **kwargs) -> \"Self\":\n \"\"\"Create new signals based on existing signals.\n\n This method cannot modify existing columns. If you need to modify an\n existing column, use a different name for the new column and then use\n `select()` to choose which columns to keep.\n\n This method is vectorized and more efficient compared to map(), and it does not\n extract or download any data from the internal database. However, it can only\n utilize predefined built-in functions and their combinations.\n\n The supported functions:\n Numerical: +, -, *, /, rand(), avg(), count(), func(),\n greatest(), least(), max(), min(), sum()\n String: length(), split(), replace(), regexp_replace()\n Filename: name(), parent(), file_stem(), file_ext()\n Array: length(), sip_hash_64(), euclidean_distance(),\n cosine_distance()\n Window: row_number(), rank(), dense_rank(), first()\n\n Example:\n ```py\n dc.mutate(\n area=Column(\"image.height\") * Column(\"image.width\"),\n extension=file_ext(Column(\"file.name\")),\n dist=cosine_distance(embedding_text, embedding_image)\n )\n ```\n\n Window function example:\n ```py\n window = func.window(partition_by=\"file.parent\", order_by=\"file.size\")\n dc.mutate(\n row_number=func.row_number().over(window),\n )\n ```\n\n This method can be also used to rename signals. If the Column(\"name\") provided\n as value for the new signal - the old column will be dropped. Otherwise a new\n column is created.\n\n Example:\n ```py\n dc.mutate(\n newkey=Column(\"oldkey\")\n )\n ```\n \"\"\"\n primitives = (bool, str, int, float)\n\n for col_name, expr in kwargs.items():\n if not isinstance(expr, (*primitives, Column, Func)) and isinstance(\n expr.type, NullType\n ):\n raise DataChainColumnError(\n col_name, f\"Cannot infer type with expression {expr}\"\n )\n\n mutated = {}\n schema = self.signals_schema\n for name, value in kwargs.items():\n if isinstance(value, Column):\n # renaming existing column\n for signal in schema.db_signals(name=value.name, as_columns=True):\n mutated[signal.name.replace(value.name, name, 1)] = signal # type: ignore[union-attr]\n elif isinstance(value, Func):\n # adding new signal\n mutated[name] = value.get_column(schema)\n elif isinstance(value, primitives):\n # adding simple python constant primitives like str, int, float, bool\n val = literal(value)\n val.type = python_to_sql(type(value))()\n mutated[name] = val # type: ignore[assignment]\n else:\n # adding new signal\n mutated[name] = value\n\n return self._evolve(\n query=self._query.mutate(**mutated), signal_schema=schema.mutate(kwargs)\n )"
},
{
"class_start_lineno": 135,
"class_end_lineno": 751,
"func_start_lineno": 557,
"func_end_lineno": 585,
"func_code": " def mutate(self, args_map: dict) -> \"SignalSchema\":\n new_values = self.values.copy()\n\n for name, value in args_map.items():\n if isinstance(value, Column) and value.name in self.values:\n # renaming existing signal\n del new_values[value.name]\n new_values[name] = self.values[value.name]\n continue\n if isinstance(value, Column):\n # adding new signal from existing signal field\n try:\n new_values[name] = self.get_column_type(\n value.name, with_subtree=True\n )\n continue\n except SignalResolvingError:\n pass\n if isinstance(value, Func):\n # adding new signal with function\n new_values[name] = value.get_result_type(self)\n continue\n if isinstance(value, ColumnElement):\n # adding new signal\n new_values[name] = sql_to_python(value)\n continue\n new_values[name] = value\n\n return SignalSchema(new_values)"
}
] |
[
"function_empty",
"Development"
] |
[
"datachain.lib.convert.python_to_sql.python_to_sql",
"datachain.func.func.Func.get_column",
"datachain.lib.signal_schema.SignalSchema.get_column_type",
"datachain.func.func.get_db_col_type",
"datachain.lib.dc.DataChain.mutate",
"datachain.lib.signal_schema.SignalSchema.mutate"
] |
Python
| 1 | 4 |
{
"total_num": 94,
"base_passed_num": 39
}
|
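The record above centers on how `mutate()` resolves Python types to SQL column types via `python_to_sql`. Below is a minimal, dependency-free sketch of the `Optional`/list handling shown in that function; the SQL type names are illustrative strings, not datachain's real `SQLType` classes.

```python
from typing import Optional, Union, get_args, get_origin

# Illustrative scalar mapping; the real PYTHON_TO_SQL maps to SQLType classes.
SCALARS = {int: "Int64", float: "Float", str: "String", bool: "Boolean"}

def sketch_python_to_sql(typ):
    if typ in SCALARS:
        return SCALARS[typ]
    origin, args = get_origin(typ), get_args(typ)
    if origin is Union and len(args) == 2 and type(None) in args:
        # Optional[X] is Union[X, None]: resolve against the non-None member
        return sketch_python_to_sql(args[0])
    if origin in (list, tuple) and args:
        return f"Array({sketch_python_to_sql(args[0])})"
    raise TypeError(f"Cannot recognize type {typ}")

print(sketch_python_to_sql(Optional[int]))  # Int64
print(sketch_python_to_sql(list[str]))      # Array(String)
```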
[
"datachain.src.datachain.query.session.Session::_cleanup_temp_datasets",
"datachain.src.datachain.query.session.Session::__exit__"
] |
datachain
|
[
"datachain/query/session.py",
"datachain/query/session.py"
] |
[
"tests/unit/test_listing.py",
"tests/unit/test_session.py"
] |
[
{
"class_start_lineno": 19,
"class_end_lineno": 195,
"func_start_lineno": 103,
"func_end_lineno": 110,
"func_code": " def _cleanup_temp_datasets(self) -> None:\n prefix = self.get_temp_prefix()\n try:\n for dataset in list(self.catalog.metastore.list_datasets_by_prefix(prefix)):\n self.catalog.remove_dataset(dataset.name, force=True)\n # suppress error when metastore has been reset during testing\n except TableMissingError:\n pass"
},
{
"class_start_lineno": 19,
"class_end_lineno": 195,
"func_start_lineno": 80,
"func_end_lineno": 90,
"func_code": " def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type:\n self._cleanup_created_versions()\n\n self._cleanup_temp_datasets()\n if self.is_new_catalog:\n self.catalog.metastore.close_on_exit()\n self.catalog.warehouse.close_on_exit()\n\n if Session.SESSION_CONTEXTS:\n Session.SESSION_CONTEXTS.pop()"
}
] |
[
"Development"
] |
[
"datachain.query.session.Session._cleanup_temp_datasets",
"datachain.query.session.Session.__exit__"
] |
Python
| 0 | 2 |
{
"total_num": 24,
"base_passed_num": 7
}
|
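A minimal, self-contained sketch of the cleanup-on-exit pattern from `Session.__exit__` above: temporary datasets sharing the session prefix are removed when the context closes. The in-memory catalog and dataset names are invented for illustration.

```python
class FakeCatalog:
    """Stand-in for the real catalog; only the two calls used below exist here."""
    def __init__(self):
        self.datasets = ["session_tmp_a", "session_tmp_b", "persistent_ds"]
    def list_datasets_by_prefix(self, prefix):
        return [d for d in self.datasets if d.startswith(prefix)]
    def remove_dataset(self, name, force=False):
        self.datasets.remove(name)

class SketchSession:
    temp_prefix = "session_tmp_"
    def __init__(self, catalog):
        self.catalog = catalog
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Temp datasets are removed whether or not an exception occurred.
        for name in list(self.catalog.list_datasets_by_prefix(self.temp_prefix)):
            self.catalog.remove_dataset(name, force=True)

catalog = FakeCatalog()
with SketchSession(catalog):
    pass
print(catalog.datasets)  # ['persistent_ds']
```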
[
"datachain.src.datachain.lib.signal_schema.SignalSchema::_get_flat_tree",
"datachain.src.datachain.lib.convert.python_to_sql.python_to_sql",
"datachain.src.datachain.lib.signal_schema.SignalSchema::db_signals",
"datachain.src.datachain.query.session.Session::_cleanup_temp_datasets",
"datachain.src.datachain.query.session.Session::__exit__"
] |
datachain
|
[
"datachain/lib/signal_schema.py",
"datachain/lib/signal_schema.py",
"datachain/lib/convert/python_to_sql.py",
"datachain/lib/signal_schema.py",
"datachain/query/session.py",
"datachain/query/session.py"
] |
[
"tests/unit/lib/test_arrow.py"
] |
[
{
"class_start_lineno": 135,
"class_end_lineno": 751,
"func_start_lineno": 630,
"func_end_lineno": 639,
"func_code": " def _get_flat_tree(\n self, tree: dict, prefix: list[str], depth: int\n ) -> Iterator[tuple[list[str], DataType, bool, int]]:\n for name, (type_, substree) in tree.items():\n suffix = name.split(\".\")\n new_prefix = prefix + suffix\n has_subtree = substree is not None\n yield new_prefix, type_, has_subtree, depth\n if substree is not None:\n yield from self._get_flat_tree(substree, new_prefix, depth + 1)"
},
{
"class_start_lineno": 135,
"class_end_lineno": 751,
"func_start_lineno": 627,
"func_end_lineno": 628,
"func_code": " def get_flat_tree(self) -> Iterator[tuple[list[str], DataType, bool, int]]:\n yield from self._get_flat_tree(self.tree, [], 0)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 117,
"func_start_lineno": 37,
"func_end_lineno": 82,
"func_code": "def python_to_sql(typ): # noqa: PLR0911\n if inspect.isclass(typ):\n if issubclass(typ, SQLType):\n return typ\n if issubclass(typ, Enum):\n return str\n\n res = PYTHON_TO_SQL.get(typ)\n if res:\n return res\n\n orig = get_origin(typ)\n\n if orig in (Literal, LiteralEx):\n return String\n\n args = get_args(typ)\n if inspect.isclass(orig) and (issubclass(list, orig) or issubclass(tuple, orig)):\n if args is None:\n raise TypeError(f\"Cannot resolve type '{typ}' for flattening features\")\n\n args0 = args[0]\n if ModelStore.is_pydantic(args0):\n return Array(JSON())\n\n list_type = list_of_args_to_type(args)\n return Array(list_type)\n\n if orig is Annotated:\n # Ignoring annotations\n return python_to_sql(args[0])\n\n if inspect.isclass(orig) and issubclass(dict, orig):\n return JSON\n\n if orig == Union:\n if len(args) == 2 and (type(None) in args):\n return python_to_sql(args[0])\n\n if _is_union_str_literal(orig, args):\n return String\n\n if _is_json_inside_union(orig, args):\n return JSON\n\n raise TypeError(f\"Cannot recognize type {typ}\")"
},
{
"class_start_lineno": 135,
"class_end_lineno": 751,
"func_start_lineno": 481,
"func_end_lineno": 503,
"func_code": " def db_signals(\n self, name: Optional[str] = None, as_columns=False\n ) -> Union[list[str], list[Column]]:\n \"\"\"\n Returns DB columns as strings or Column objects with proper types\n Optionally, it can filter results by specific object, returning only his signals\n \"\"\"\n signals = [\n DEFAULT_DELIMITER.join(path)\n if not as_columns\n else Column(DEFAULT_DELIMITER.join(path), python_to_sql(_type))\n for path, _type, has_subtree, _ in self.get_flat_tree()\n if not has_subtree\n ]\n\n if name:\n signals = [\n s\n for s in signals\n if str(s) == name or str(s).startswith(f\"{name}{DEFAULT_DELIMITER}\")\n ]\n\n return signals # type: ignore[return-value]"
},
{
"class_start_lineno": 19,
"class_end_lineno": 195,
"func_start_lineno": 103,
"func_end_lineno": 110,
"func_code": " def _cleanup_temp_datasets(self) -> None:\n prefix = self.get_temp_prefix()\n try:\n for dataset in list(self.catalog.metastore.list_datasets_by_prefix(prefix)):\n self.catalog.remove_dataset(dataset.name, force=True)\n # suppress error when metastore has been reset during testing\n except TableMissingError:\n pass"
},
{
"class_start_lineno": 19,
"class_end_lineno": 195,
"func_start_lineno": 80,
"func_end_lineno": 90,
"func_code": " def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type:\n self._cleanup_created_versions()\n\n self._cleanup_temp_datasets()\n if self.is_new_catalog:\n self.catalog.metastore.close_on_exit()\n self.catalog.warehouse.close_on_exit()\n\n if Session.SESSION_CONTEXTS:\n Session.SESSION_CONTEXTS.pop()"
}
] |
[
"function_empty",
"Development"
] |
[
"datachain.lib.signal_schema.SignalSchema._get_flat_tree",
"datachain.lib.signal_schema.SignalSchema.get_flat_tree",
"datachain.lib.convert.python_to_sql.python_to_sql",
"datachain.lib.signal_schema.SignalSchema.db_signals",
"datachain.query.session.Session._cleanup_temp_datasets",
"datachain.query.session.Session.__exit__"
] |
Python
| 1 | 5 |
{
"total_num": 32,
"base_passed_num": 31
}
|
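The heart of this record is the recursive flat-tree walk that `db_signals` builds on. A standalone sketch, assuming a `"__"` column delimiter (datachain's `DEFAULT_DELIMITER` is not shown in the record):

```python
def flat_tree(tree, prefix=()):
    # Each node maps a name to (type, subtree-or-None); leaves have no subtree.
    for name, (type_, subtree) in tree.items():
        path = prefix + tuple(name.split("."))
        yield path, type_, subtree is not None
        if subtree is not None:
            yield from flat_tree(subtree, path)

tree = {"file": (object, {"path": (str, None), "size": (int, None)}),
        "score": (float, None)}
db_columns = ["__".join(p) for p, _t, has_subtree in flat_tree(tree) if not has_subtree]
print(db_columns)  # ['file__path', 'file__size', 'score']
```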
[
"datachain.src.datachain.lib.image.convert_image",
"datachain.src.datachain.lib.image.convert_images"
] |
datachain
|
[
"datachain/lib/image.py",
"datachain/lib/image.py"
] |
[
"tests/unit/lib/test_clip.py",
"tests/unit/lib/test_image.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 81,
"func_start_lineno": 7,
"func_end_lineno": 46,
"func_code": "def convert_image(\n img: Image.Image,\n mode: str = \"RGB\",\n size: Optional[tuple[int, int]] = None,\n transform: Optional[Callable] = None,\n encoder: Optional[Callable] = None,\n device: Optional[Union[str, torch.device]] = None,\n) -> Union[Image.Image, torch.Tensor]:\n \"\"\"\n Resize, transform, and otherwise convert an image.\n\n Args:\n img (Image): PIL.Image object.\n mode (str): PIL.Image mode.\n size (tuple[int, int]): Size in (width, height) pixels for resizing.\n transform (Callable): Torchvision transform or huggingface processor to apply.\n encoder (Callable): Encode image using model.\n device (str or torch.device): Device to use.\n \"\"\"\n if mode:\n img = img.convert(mode)\n if size:\n img = img.resize(size)\n if transform:\n img = transform(img)\n\n try:\n from transformers.image_processing_utils import BaseImageProcessor\n\n if isinstance(transform, BaseImageProcessor):\n img = torch.as_tensor(img.pixel_values[0]).clone().detach() # type: ignore[assignment,attr-defined]\n except ImportError:\n pass\n if device:\n img = img.to(device) # type: ignore[attr-defined]\n if encoder:\n img = img.unsqueeze(0) # type: ignore[attr-defined]\n if encoder:\n img = encoder(img)\n return img"
},
{
"class_start_lineno": 1,
"class_end_lineno": 81,
"func_start_lineno": 49,
"func_end_lineno": 81,
"func_code": "def convert_images(\n images: Union[Image.Image, list[Image.Image]],\n mode: str = \"RGB\",\n size: Optional[tuple[int, int]] = None,\n transform: Optional[Callable] = None,\n encoder: Optional[Callable] = None,\n device: Optional[Union[str, torch.device]] = None,\n) -> Union[list[Image.Image], torch.Tensor]:\n \"\"\"\n Resize, transform, and otherwise convert one or more images.\n\n Args:\n images (Image, list[Image]): PIL.Image object or list of objects.\n mode (str): PIL.Image mode.\n size (tuple[int, int]): Size in (width, height) pixels for resizing.\n transform (Callable): Torchvision transform or huggingface processor to apply.\n encoder (Callable): Encode image using model.\n device (str or torch.device): Device to use.\n \"\"\"\n if isinstance(images, Image.Image):\n images = [images]\n\n converted = [\n convert_image(img, mode, size, transform, device=device) for img in images\n ]\n\n if isinstance(converted[0], torch.Tensor):\n converted = torch.stack(converted) # type: ignore[assignment,arg-type]\n\n if encoder:\n converted = encoder(converted)\n\n return converted # type: ignore[return-value]"
}
] |
[
"function_empty"
] |
[
"datachain.lib.image.convert_image",
"datachain.lib.image.convert_images"
] |
Python
| 2 | 2 |
{
"total_num": 41,
"base_passed_num": 13
}
|
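A short usage sketch for `convert_images` based on the signatures above. It assumes datachain, Pillow, and torch are installed (the module imports torch); the images are created in memory so there are no file dependencies.

```python
from PIL import Image
from datachain.lib.image import convert_images

imgs = [Image.new("RGBA", (128, 96)), Image.new("L", (64, 64))]
# Convert both to RGB and resize to 32x32; with no transform/encoder given,
# plain PIL images are returned.
converted = convert_images(imgs, mode="RGB", size=(32, 32))
print([im.size for im in converted])  # [(32, 32), (32, 32)]
```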
[
"datachain.src.datachain.lib.file.File::get_destination_path",
"datachain.src.datachain.lib.file.File::export",
"datachain.src.datachain.lib.file.File::ensure_cached",
"datachain.src.datachain.lib.file.File::_symlink_to"
] |
datachain
|
[
"datachain/lib/file.py",
"datachain/lib/file.py",
"datachain/lib/file.py",
"datachain/lib/file.py"
] |
[
"tests/unit/lib/test_file.py"
] |
[
{
"class_start_lineno": 125,
"class_end_lineno": 468,
"func_start_lineno": 396,
"func_end_lineno": 414,
"func_code": " def get_destination_path(self, output: str, placement: ExportPlacement) -> str:\n \"\"\"\n Returns full destination path of a file for exporting to some output\n based on export placement\n \"\"\"\n if placement == \"filename\":\n path = unquote(self.name)\n elif placement == \"etag\":\n path = f\"{self.etag}{self.get_file_suffix()}\"\n elif placement == \"fullpath\":\n path = unquote(self.get_full_name())\n source = urlparse(self.source)\n if source.scheme and source.scheme != \"file\":\n path = posixpath.join(source.netloc, path)\n elif placement == \"checksum\":\n raise NotImplementedError(\"Checksum placement not implemented yet\")\n else:\n raise ValueError(f\"Unsupported file export placement: {placement}\")\n return posixpath.join(output, path) # type: ignore[union-attr]"
},
{
"class_start_lineno": 125,
"class_end_lineno": 468,
"func_start_lineno": 297,
"func_end_lineno": 319,
"func_code": " def export(\n self,\n output: str,\n placement: ExportPlacement = \"fullpath\",\n use_cache: bool = True,\n link_type: Literal[\"copy\", \"symlink\"] = \"copy\",\n ) -> None:\n \"\"\"Export file to new location.\"\"\"\n if use_cache:\n self._caching_enabled = use_cache\n dst = self.get_destination_path(output, placement)\n dst_dir = os.path.dirname(dst)\n client: Client = self._catalog.get_client(dst_dir)\n client.fs.makedirs(dst_dir, exist_ok=True)\n\n if link_type == \"symlink\":\n try:\n return self._symlink_to(dst)\n except OSError as exc:\n if exc.errno not in (errno.ENOTSUP, errno.EXDEV, errno.ENOSYS):\n raise\n\n self.save(dst)"
},
{
"class_start_lineno": 125,
"class_end_lineno": 468,
"func_start_lineno": 331,
"func_end_lineno": 337,
"func_code": " def ensure_cached(self) -> None:\n if self._catalog is None:\n raise RuntimeError(\n \"cannot download file to cache because catalog is not setup\"\n )\n client = self._catalog.get_client(self.source)\n client.download(self, callback=self._download_cb)"
},
{
"class_start_lineno": 125,
"class_end_lineno": 468,
"func_start_lineno": 282,
"func_end_lineno": 295,
"func_code": " def _symlink_to(self, destination: str):\n if self.location:\n raise OSError(errno.ENOTSUP, \"Symlinking virtual file is not supported\")\n\n if self._caching_enabled:\n self.ensure_cached()\n source = self.get_local_path()\n assert source, \"File was not cached\"\n elif self.source.startswith(\"file://\"):\n source = self.get_path()\n else:\n raise OSError(errno.EXDEV, \"can't link across filesystems\")\n\n return os.symlink(source, destination)"
}
] |
[
"function_empty",
"Development"
] |
[
"datachain.lib.file.File.get_destination_path",
"datachain.lib.file.File.export",
"datachain.lib.file.File.ensure_cached",
"datachain.lib.file.File._symlink_to"
] |
Python
| 1 | 4 |
{
"total_num": 33,
"base_passed_num": 14
}
|
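A dependency-free sketch of the placement logic in `File.get_destination_path`; the `File` fields are stubbed with a plain dict and the bucket/file names are invented.

```python
import posixpath
from urllib.parse import unquote, urlparse

def destination_path(file, output, placement):
    if placement == "filename":
        path = unquote(file["name"])
    elif placement == "etag":
        path = file["etag"] + file["suffix"]
    elif placement == "fullpath":
        path = unquote(file["full_name"])
        source = urlparse(file["source"])
        if source.scheme and source.scheme != "file":
            # Prefix remote files with their bucket/netloc, as in the real method.
            path = posixpath.join(source.netloc, path)
    else:
        raise ValueError(f"Unsupported file export placement: {placement}")
    return posixpath.join(output, path)

f = {"name": "cat.jpg", "etag": "abc123", "suffix": ".jpg",
     "full_name": "images/cat.jpg", "source": "s3://my-bucket"}
print(destination_path(f, "out", "fullpath"))  # out/my-bucket/images/cat.jpg
```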
[
"datachain.src.datachain.query.session.Session::_cleanup_temp_datasets",
"datachain.src.datachain.query.session.Session::__exit__",
"datachain.src.datachain.lib.signal_schema.SignalSchema::_get_flat_tree",
"datachain.src.datachain.lib.convert.python_to_sql.python_to_sql",
"datachain.src.datachain.lib.signal_schema.SignalSchema::db_signals"
] |
datachain
|
[
"datachain/query/session.py",
"datachain/query/session.py",
"datachain/lib/signal_schema.py",
"datachain/lib/signal_schema.py",
"datachain/lib/convert/python_to_sql.py",
"datachain/lib/signal_schema.py"
] |
[
"tests/unit/lib/test_signal_schema.py"
] |
[
{
"class_start_lineno": 19,
"class_end_lineno": 195,
"func_start_lineno": 103,
"func_end_lineno": 110,
"func_code": " def _cleanup_temp_datasets(self) -> None:\n prefix = self.get_temp_prefix()\n try:\n for dataset in list(self.catalog.metastore.list_datasets_by_prefix(prefix)):\n self.catalog.remove_dataset(dataset.name, force=True)\n # suppress error when metastore has been reset during testing\n except TableMissingError:\n pass"
},
{
"class_start_lineno": 19,
"class_end_lineno": 195,
"func_start_lineno": 80,
"func_end_lineno": 90,
"func_code": " def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type:\n self._cleanup_created_versions()\n\n self._cleanup_temp_datasets()\n if self.is_new_catalog:\n self.catalog.metastore.close_on_exit()\n self.catalog.warehouse.close_on_exit()\n\n if Session.SESSION_CONTEXTS:\n Session.SESSION_CONTEXTS.pop()"
},
{
"class_start_lineno": 135,
"class_end_lineno": 751,
"func_start_lineno": 630,
"func_end_lineno": 639,
"func_code": " def _get_flat_tree(\n self, tree: dict, prefix: list[str], depth: int\n ) -> Iterator[tuple[list[str], DataType, bool, int]]:\n for name, (type_, substree) in tree.items():\n suffix = name.split(\".\")\n new_prefix = prefix + suffix\n has_subtree = substree is not None\n yield new_prefix, type_, has_subtree, depth\n if substree is not None:\n yield from self._get_flat_tree(substree, new_prefix, depth + 1)"
},
{
"class_start_lineno": 135,
"class_end_lineno": 751,
"func_start_lineno": 627,
"func_end_lineno": 628,
"func_code": " def get_flat_tree(self) -> Iterator[tuple[list[str], DataType, bool, int]]:\n yield from self._get_flat_tree(self.tree, [], 0)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 117,
"func_start_lineno": 37,
"func_end_lineno": 82,
"func_code": "def python_to_sql(typ): # noqa: PLR0911\n if inspect.isclass(typ):\n if issubclass(typ, SQLType):\n return typ\n if issubclass(typ, Enum):\n return str\n\n res = PYTHON_TO_SQL.get(typ)\n if res:\n return res\n\n orig = get_origin(typ)\n\n if orig in (Literal, LiteralEx):\n return String\n\n args = get_args(typ)\n if inspect.isclass(orig) and (issubclass(list, orig) or issubclass(tuple, orig)):\n if args is None:\n raise TypeError(f\"Cannot resolve type '{typ}' for flattening features\")\n\n args0 = args[0]\n if ModelStore.is_pydantic(args0):\n return Array(JSON())\n\n list_type = list_of_args_to_type(args)\n return Array(list_type)\n\n if orig is Annotated:\n # Ignoring annotations\n return python_to_sql(args[0])\n\n if inspect.isclass(orig) and issubclass(dict, orig):\n return JSON\n\n if orig == Union:\n if len(args) == 2 and (type(None) in args):\n return python_to_sql(args[0])\n\n if _is_union_str_literal(orig, args):\n return String\n\n if _is_json_inside_union(orig, args):\n return JSON\n\n raise TypeError(f\"Cannot recognize type {typ}\")"
},
{
"class_start_lineno": 135,
"class_end_lineno": 751,
"func_start_lineno": 481,
"func_end_lineno": 503,
"func_code": " def db_signals(\n self, name: Optional[str] = None, as_columns=False\n ) -> Union[list[str], list[Column]]:\n \"\"\"\n Returns DB columns as strings or Column objects with proper types\n Optionally, it can filter results by specific object, returning only his signals\n \"\"\"\n signals = [\n DEFAULT_DELIMITER.join(path)\n if not as_columns\n else Column(DEFAULT_DELIMITER.join(path), python_to_sql(_type))\n for path, _type, has_subtree, _ in self.get_flat_tree()\n if not has_subtree\n ]\n\n if name:\n signals = [\n s\n for s in signals\n if str(s) == name or str(s).startswith(f\"{name}{DEFAULT_DELIMITER}\")\n ]\n\n return signals # type: ignore[return-value]"
}
] |
[
"function_empty",
"Development"
] |
[
"datachain.query.session.Session._cleanup_temp_datasets",
"datachain.query.session.Session.__exit__",
"datachain.lib.signal_schema.SignalSchema._get_flat_tree",
"datachain.lib.signal_schema.SignalSchema.get_flat_tree",
"datachain.lib.convert.python_to_sql.python_to_sql",
"datachain.lib.signal_schema.SignalSchema.db_signals"
] |
Python
| 1 | 5 |
{
"total_num": 58,
"base_passed_num": 36
}
|
[
"datachain.src.datachain.lib.webdataset.Builder::add",
"datachain.src.datachain.lib.webdataset.get_tar_groups"
] |
datachain
|
[
"datachain/lib/webdataset.py",
"datachain/lib/webdataset.py"
] |
[
"tests/unit/lib/test_webdataset.py"
] |
[
{
"class_start_lineno": 104,
"class_end_lineno": 194,
"func_start_lineno": 134,
"func_end_lineno": 171,
"func_code": " def add(self, file: tarfile.TarInfo):\n fstream = File(path=file.name)\n ext = fstream.get_file_ext()\n stem = fstream.get_file_stem()\n\n if self.state.stem is not None and self.state.stem != stem:\n raise StopIteration\n\n if self.state.stem is None:\n self.state.stem = stem\n\n if ext in self._core_extensions:\n if self.state.core_file is not None:\n raise CoreFileDuplicationError(\n self._tar_stream, file.name, self.state.core_file.name\n )\n self.state.core_file = file\n elif ext in self.state.data:\n raise WDSError(\n self._tar_stream,\n f\"file with extension '.{ext}' already exists in the archive\",\n )\n else:\n type_ = self._get_type(ext)\n if type_ is None:\n raise UnknownFileExtensionError(self._tar_stream, fstream.name, ext)\n\n if issubclass(type_, WDSReadableSubclass):\n reader = type_._reader\n else:\n reader = self.DEFAULT_TYPES_READERS.get(type_, None)\n\n if reader is None:\n raise WDSError(\n self._tar_stream,\n f\"unable to find a reader for type {type_}, extension .{ext}\",\n )\n self.state.data[ext] = reader(self, file)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 220,
"func_start_lineno": 197,
"func_end_lineno": 209,
"func_code": "def get_tar_groups(stream, tar, core_extensions, spec, encoding=\"utf-8\"):\n builder = Builder(stream, core_extensions, spec, tar, encoding)\n\n for item in sorted(tar.getmembers(), key=lambda m: Path(m.name).stem):\n if not item.isfile():\n continue\n try:\n builder.add(item)\n except StopIteration:\n yield builder.produce()\n builder.add(item)\n if builder.state.stem is not None:\n yield builder.produce()"
}
] |
[
"Development"
] |
[
"datachain.lib.webdataset.Builder.add",
"datachain.lib.webdataset.get_tar_groups"
] |
Python
| 0 | 2 |
{
"total_num": 7,
"base_passed_num": 0
}
|
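The grouping behaviour of `get_tar_groups` above, where a new sample starts whenever the file stem changes, can be mimicked with `itertools.groupby`. A dependency-free sketch over invented member names:

```python
from itertools import groupby
from pathlib import Path

names = ["0001.jpg", "0001.json", "0002.jpg", "0002.json", "0002.txt"]
# Sort by stem, then group: each group corresponds to one WDS sample.
for stem, members in groupby(sorted(names, key=lambda n: Path(n).stem),
                             key=lambda n: Path(n).stem):
    print(stem, list(members))
# 0001 ['0001.jpg', '0001.json']
# 0002 ['0002.jpg', '0002.json', '0002.txt']
```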
[
"datachain.src.datachain.func.conditional.case",
"datachain.src.datachain.func.conditional.ifelse"
] |
datachain
|
[
"datachain/func/conditional.py",
"datachain/func/conditional.py"
] |
[
"tests/unit/sql/test_conditional.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 270,
"func_start_lineno": 93,
"func_end_lineno": 158,
"func_code": "def case(\n *args: tuple[Union[ColumnElement, Func, bool], CaseT], else_: Optional[CaseT] = None\n) -> Func:\n \"\"\"\n Returns the case function that produces case expression which has a list of\n conditions and corresponding results. Results can be python primitives like string,\n numbers or booleans but can also be other nested functions (including case function)\n or columns.\n Result type is inferred from condition results.\n\n Args:\n args tuple((ColumnElement | Func | bool),(str | int | float | complex | bool, Func, ColumnElement)):\n Tuple of condition and values pair.\n else_ (str | int | float | complex | bool, Func): optional else value in case\n expression. If omitted, and no case conditions are satisfied, the result\n will be None (NULL in DB).\n\n Returns:\n Func: A Func object that represents the case function.\n\n Example:\n ```py\n dc.mutate(\n res=func.case((C(\"num\") > 0, \"P\"), (C(\"num\") < 0, \"N\"), else_=\"Z\"),\n )\n ```\n \"\"\" # noqa: E501\n supported_types = [int, float, complex, str, bool]\n\n def _get_type(val):\n from enum import Enum\n\n if isinstance(val, Func):\n # nested functions\n return val.result_type\n if isinstance(val, Column):\n # at this point we cannot know what is the type of a column\n return None\n if isinstance(val, Enum):\n return type(val.value)\n return type(val)\n\n if not args:\n raise DataChainParamsError(\"Missing statements\")\n\n type_ = _get_type(else_) if else_ is not None else None\n\n for arg in args:\n arg_type = _get_type(arg[1])\n if arg_type is None:\n # we couldn't figure out the type of case value\n continue\n if type_ and arg_type != type_:\n raise DataChainParamsError(\n f\"Statement values must be of the same type, got {type_} and {arg_type}\"\n )\n type_ = arg_type\n\n if type_ is not None and type_ not in supported_types:\n raise DataChainParamsError(\n f\"Only python literals ({supported_types}) are supported for values\"\n )\n\n kwargs = {\"else_\": else_}\n\n return Func(\"case\", inner=sql_case, cols=args, kwargs=kwargs, result_type=type_)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 270,
"func_start_lineno": 161,
"func_end_lineno": 187,
"func_code": "def ifelse(\n condition: Union[ColumnElement, Func], if_val: CaseT, else_val: CaseT\n) -> Func:\n \"\"\"\n Returns the ifelse function that produces if expression which has a condition\n and values for true and false outcome. Results can be one of python primitives\n like string, numbers or booleans, but can also be nested functions or columns.\n Result type is inferred from the values.\n\n Args:\n condition (ColumnElement, Func): Condition which is evaluated.\n if_val (str | int | float | complex | bool, Func, ColumnElement): Value for true\n condition outcome.\n else_val (str | int | float | complex | bool, Func, ColumnElement): Value for\n false condition outcome.\n\n Returns:\n Func: A Func object that represents the ifelse function.\n\n Example:\n ```py\n dc.mutate(\n res=func.ifelse(isnone(\"col\"), \"EMPTY\", \"NOT_EMPTY\")\n )\n ```\n \"\"\"\n return case((condition, if_val), else_=else_val)"
}
] |
[
"function_empty",
"Development"
] |
[
"datachain.func.conditional.case",
"datachain.func.conditional.ifelse"
] |
Python
| 1 | 2 |
{
"total_num": 34,
"base_passed_num": 2
}
|
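A usage sketch assembled from the `case`/`ifelse` docstrings above. The conditional imports follow the module path in this record; the top-level `DataChain`/`C` imports and the `from_values` constructor are assumptions about the installed datachain version.

```python
from datachain import C, DataChain
from datachain.func.conditional import case, ifelse

dc = DataChain.from_values(num=[-2, 0, 3])
dc = dc.mutate(
    # Branch values must share one literal type, per the validation in case().
    sign=case((C("num") > 0, "P"), (C("num") < 0, "N"), else_="Z"),
    is_pos=ifelse(C("num") > 0, "yes", "no"),
)
dc.show()  # sign expected as N, Z, P
```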
[
"haystack.haystack.components.builders.prompt_builder.PromptBuilder::_validate_variables",
"haystack.haystack.components.builders.prompt_builder.PromptBuilder::run",
"haystack.haystack.core.type_utils._strict_types_are_compatible",
"haystack.haystack.core.type_utils._types_are_compatible"
] |
haystack
|
[
"haystack/components/builders/prompt_builder.py",
"haystack/components/builders/prompt_builder.py",
"haystack/core/type_utils.py",
"haystack/core/type_utils.py"
] |
[
"test/components/builders/test_prompt_builder.py"
] |
[
{
"class_start_lineno": 17,
"class_end_lineno": 266,
"func_start_lineno": 247,
"func_end_lineno": 266,
"func_code": " def _validate_variables(self, provided_variables: Set[str]):\n \"\"\"\n Checks if all the required template variables are provided.\n\n :param provided_variables:\n A set of provided template variables.\n :raises ValueError:\n If any of the required template variables is not provided.\n \"\"\"\n if self.required_variables == \"*\":\n required_variables = sorted(self.variables)\n else:\n required_variables = self.required_variables\n missing_variables = [var for var in required_variables if var not in provided_variables]\n if missing_variables:\n missing_vars_str = \", \".join(missing_variables)\n raise ValueError(\n f\"Missing required input variables in PromptBuilder: {missing_vars_str}. \"\n f\"Required variables: {required_variables}. Provided variables: {provided_variables}.\"\n )"
},
{
"class_start_lineno": 17,
"class_end_lineno": 266,
"func_start_lineno": 213,
"func_end_lineno": 245,
"func_code": " def run(self, template: Optional[str] = None, template_variables: Optional[Dict[str, Any]] = None, **kwargs):\n \"\"\"\n Renders the prompt template with the provided variables.\n\n It applies the template variables to render the final prompt. You can provide variables via pipeline kwargs.\n In order to overwrite the default template, you can set the `template` parameter.\n In order to overwrite pipeline kwargs, you can set the `template_variables` parameter.\n\n :param template:\n An optional string template to overwrite PromptBuilder's default template. If None, the default template\n provided at initialization is used.\n :param template_variables:\n An optional dictionary of template variables to overwrite the pipeline variables.\n :param kwargs:\n Pipeline variables used for rendering the prompt.\n\n :returns: A dictionary with the following keys:\n - `prompt`: The updated prompt text after rendering the prompt template.\n\n :raises ValueError:\n If any of the required template variables is not provided.\n \"\"\"\n kwargs = kwargs or {}\n template_variables = template_variables or {}\n template_variables_combined = {**kwargs, **template_variables}\n self._validate_variables(set(template_variables_combined.keys()))\n\n compiled_template = self.template\n if template is not None:\n compiled_template = self._env.from_string(template)\n\n result = compiled_template.render(template_variables_combined)\n return {\"prompt\": result}"
},
{
"class_start_lineno": 1,
"class_end_lineno": 105,
"func_start_lineno": 29,
"func_end_lineno": 76,
"func_code": "def _strict_types_are_compatible(sender, receiver): # pylint: disable=too-many-return-statements\n \"\"\"\n Checks whether the sender type is equal to or a subtype of the receiver type under strict validation.\n\n Note: this method has no pretense to perform proper type matching. It especially does not deal with aliasing of\n typing classes such as `List` or `Dict` to their runtime counterparts `list` and `dict`. It also does not deal well\n with \"bare\" types, so `List` is treated differently from `List[Any]`, even though they should be the same.\n Consider simplifying the typing of your components if you observe unexpected errors during component connection.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :return: True if the sender type is strictly compatible with the receiver type, False otherwise.\n \"\"\"\n if sender == receiver or receiver is Any:\n return True\n\n if sender is Any:\n return False\n\n try:\n if issubclass(sender, receiver):\n return True\n except TypeError: # typing classes can't be used with issubclass, so we deal with them below\n pass\n\n sender_origin = get_origin(sender)\n receiver_origin = get_origin(receiver)\n\n if sender_origin is not Union and receiver_origin is Union:\n return any(_strict_types_are_compatible(sender, union_arg) for union_arg in get_args(receiver))\n\n # Both must have origins and they must be equal\n if not (sender_origin and receiver_origin and sender_origin == receiver_origin):\n return False\n\n # Compare generic type arguments\n sender_args = get_args(sender)\n receiver_args = get_args(receiver)\n\n # Handle bare types\n if not sender_args and sender_origin:\n sender_args = (Any,)\n if not receiver_args and receiver_origin:\n receiver_args = (Any,) * (len(sender_args) if sender_args else 1)\n if len(sender_args) > len(receiver_args):\n return False\n\n return all(_strict_types_are_compatible(*args) for args in zip(sender_args, receiver_args))"
},
{
"class_start_lineno": 1,
"class_end_lineno": 105,
"func_start_lineno": 14,
"func_end_lineno": 26,
"func_code": "def _types_are_compatible(sender, receiver, type_validation: bool = True) -> bool:\n \"\"\"\n Determines if two types are compatible based on the specified validation mode.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :param type_validation: Whether to perform strict type validation.\n :return: True if the types are compatible, False otherwise.\n \"\"\"\n if type_validation:\n return _strict_types_are_compatible(sender, receiver)\n else:\n return True"
}
] |
[
"function_empty"
] |
[
"haystack.components.builders.prompt_builder.PromptBuilder._validate_variables",
"haystack.components.builders.prompt_builder.PromptBuilder.run",
"haystack.core.type_utils._strict_types_are_compatible",
"haystack.core.type_utils._types_are_compatible"
] |
Python
| 4 | 4 |
{
"total_num": 29,
"base_passed_num": 7
}
|
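A usage sketch for `PromptBuilder.run` matching the code above (requires the haystack-ai package). Missing required variables raise a `ValueError`, per `_validate_variables`.

```python
from haystack.components.builders.prompt_builder import PromptBuilder

builder = PromptBuilder(
    template="Answer in {{ language }}: {{ question }}",
    required_variables=["question"],
)
result = builder.run(question="What is Haystack?", language="English")
print(result["prompt"])  # Answer in English: What is Haystack?
```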
[
"haystack.haystack.core.type_utils._strict_types_are_compatible",
"haystack.haystack.core.type_utils._types_are_compatible",
"haystack.haystack.document_stores.in_memory.document_store.InMemoryDocumentStore::to_dict",
"haystack.haystack.components.retrievers.in_memory.bm25_retriever.InMemoryBM25Retriever::to_dict",
"haystack.haystack.core.serialization.component_to_dict"
] |
haystack
|
[
"haystack/core/type_utils.py",
"haystack/core/type_utils.py",
"haystack/document_stores/in_memory/document_store.py",
"haystack/components/retrievers/in_memory/bm25_retriever.py",
"haystack/core/serialization.py"
] |
[
"test/components/classifiers/test_zero_shot_document_classifier.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 105,
"func_start_lineno": 29,
"func_end_lineno": 76,
"func_code": "def _strict_types_are_compatible(sender, receiver): # pylint: disable=too-many-return-statements\n \"\"\"\n Checks whether the sender type is equal to or a subtype of the receiver type under strict validation.\n\n Note: this method has no pretense to perform proper type matching. It especially does not deal with aliasing of\n typing classes such as `List` or `Dict` to their runtime counterparts `list` and `dict`. It also does not deal well\n with \"bare\" types, so `List` is treated differently from `List[Any]`, even though they should be the same.\n Consider simplifying the typing of your components if you observe unexpected errors during component connection.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :return: True if the sender type is strictly compatible with the receiver type, False otherwise.\n \"\"\"\n if sender == receiver or receiver is Any:\n return True\n\n if sender is Any:\n return False\n\n try:\n if issubclass(sender, receiver):\n return True\n except TypeError: # typing classes can't be used with issubclass, so we deal with them below\n pass\n\n sender_origin = get_origin(sender)\n receiver_origin = get_origin(receiver)\n\n if sender_origin is not Union and receiver_origin is Union:\n return any(_strict_types_are_compatible(sender, union_arg) for union_arg in get_args(receiver))\n\n # Both must have origins and they must be equal\n if not (sender_origin and receiver_origin and sender_origin == receiver_origin):\n return False\n\n # Compare generic type arguments\n sender_args = get_args(sender)\n receiver_args = get_args(receiver)\n\n # Handle bare types\n if not sender_args and sender_origin:\n sender_args = (Any,)\n if not receiver_args and receiver_origin:\n receiver_args = (Any,) * (len(sender_args) if sender_args else 1)\n if len(sender_args) > len(receiver_args):\n return False\n\n return all(_strict_types_are_compatible(*args) for args in zip(sender_args, receiver_args))"
},
{
"class_start_lineno": 1,
"class_end_lineno": 105,
"func_start_lineno": 14,
"func_end_lineno": 26,
"func_code": "def _types_are_compatible(sender, receiver, type_validation: bool = True) -> bool:\n \"\"\"\n Determines if two types are compatible based on the specified validation mode.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :param type_validation: Whether to perform strict type validation.\n :return: True if the types are compatible, False otherwise.\n \"\"\"\n if type_validation:\n return _strict_types_are_compatible(sender, receiver)\n else:\n return True"
},
{
"class_start_lineno": 58,
"class_end_lineno": 738,
"func_start_lineno": 344,
"func_end_lineno": 358,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(\n self,\n bm25_tokenization_regex=self.bm25_tokenization_regex,\n bm25_algorithm=self.bm25_algorithm,\n bm25_parameters=self.bm25_parameters,\n embedding_similarity_function=self.embedding_similarity_function,\n index=self.index,\n )"
},
{
"class_start_lineno": 13,
"class_end_lineno": 203,
"func_start_lineno": 88,
"func_end_lineno": 103,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n docstore = self.document_store.to_dict()\n return default_to_dict(\n self,\n document_store=docstore,\n filters=self.filters,\n top_k=self.top_k,\n scale_score=self.scale_score,\n filter_policy=self.filter_policy.value,\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 36,
"func_end_lineno": 82,
"func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.core.type_utils._strict_types_are_compatible",
"haystack.core.type_utils._types_are_compatible",
"haystack.document_stores.in_memory.document_store.InMemoryDocumentStore.to_dict",
"haystack.components.retrievers.in_memory.bm25_retriever.InMemoryBM25Retriever.to_dict",
"haystack.core.serialization.component_to_dict"
] |
Python
| 4 | 5 |
{
"total_num": 10,
"base_passed_num": 9
}
|
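The strict compatibility rules above can be exercised directly; note the functions are private (underscore-prefixed), so importing them is for illustration only and assumes haystack-ai is installed.

```python
from typing import Any, List, Optional
from haystack.core.type_utils import _types_are_compatible

print(_types_are_compatible(List[int], List[Any]))            # True: receiver arg is Any
print(_types_are_compatible(List[int], Optional[List[int]]))  # True: receiver Union accepts the sender
print(_types_are_compatible(List[Any], List[int]))            # False: Any sender is not narrowed
```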
[
"haystack.haystack.core.serialization.default_to_dict",
"haystack.haystack.core.serialization.component_to_dict"
] |
haystack
|
[
"haystack/core/serialization.py",
"haystack/components/connectors/openapi_service.py",
"haystack/core/serialization.py"
] |
[
"test/components/connectors/test_openapi_service.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 172,
"func_end_lineno": 210,
"func_code": "def default_to_dict(obj: Any, **init_parameters) -> Dict[str, Any]:\n \"\"\"\n Utility function to serialize an object to a dictionary.\n\n This is mostly necessary for components but can be used by any object.\n `init_parameters` are parameters passed to the object class `__init__`.\n They must be defined explicitly as they'll be used when creating a new\n instance of `obj` with `from_dict`. Omitting them might cause deserialisation\n errors or unexpected behaviours later, when calling `from_dict`.\n\n An example usage:\n\n ```python\n class MyClass:\n def __init__(self, my_param: int = 10):\n self.my_param = my_param\n\n def to_dict(self):\n return default_to_dict(self, my_param=self.my_param)\n\n\n obj = MyClass(my_param=1000)\n data = obj.to_dict()\n assert data == {\n \"type\": \"MyClass\",\n \"init_parameters\": {\n \"my_param\": 1000,\n },\n }\n ```\n\n :param obj:\n The object to be serialized.\n :param init_parameters:\n The parameters used to create a new instance of the class.\n :returns:\n A dictionary representation of the instance.\n \"\"\"\n return {\"type\": generate_qualified_class_name(type(obj)), \"init_parameters\": init_parameters}"
},
{
"class_start_lineno": 149,
"class_end_lineno": 399,
"func_start_lineno": 265,
"func_end_lineno": 272,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(self, ssl_verify=self.ssl_verify)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 36,
"func_end_lineno": 82,
"func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.core.serialization.default_to_dict",
"haystack.components.connectors.openapi_service.OpenAPIServiceConnector.to_dict",
"haystack.core.serialization.component_to_dict"
] |
Python
| 1 | 2 |
{
"total_num": 12,
"base_passed_num": 10
}
|
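A condensed, standalone sketch of `component_to_dict`'s fallback branch above: when a component defines no `to_dict()`, its `__init__` parameters are read back from same-named attributes (the real function raises if an attribute without a default is missing).

```python
import inspect

def init_params_from_attrs(obj):
    params = {}
    for name, param in inspect.signature(obj.__init__).parameters.items():
        if name in ("args", "kwargs"):  # skip the default constructor's catch-alls
            continue
        params[name] = getattr(obj, name, param.default)
    return params

class Greeter:
    def __init__(self, greeting: str = "hello", repeat: int = 1):
        self.greeting = greeting
        self.repeat = repeat

print(init_params_from_attrs(Greeter(repeat=3)))  # {'greeting': 'hello', 'repeat': 3}
```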
[
"haystack.haystack.core.serialization.default_to_dict",
"haystack.haystack.components.converters.json.JSONConverter::to_dict",
"haystack.haystack.components.converters.utils.normalize_metadata",
"haystack.haystack.components.converters.json.JSONConverter::run"
] |
haystack
|
[
"haystack/core/serialization.py",
"haystack/components/converters/json.py",
"haystack/components/converters/utils.py",
"haystack/components/converters/json.py"
] |
[
"test/components/converters/test_json.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 172,
"func_end_lineno": 210,
"func_code": "def default_to_dict(obj: Any, **init_parameters) -> Dict[str, Any]:\n \"\"\"\n Utility function to serialize an object to a dictionary.\n\n This is mostly necessary for components but can be used by any object.\n `init_parameters` are parameters passed to the object class `__init__`.\n They must be defined explicitly as they'll be used when creating a new\n instance of `obj` with `from_dict`. Omitting them might cause deserialisation\n errors or unexpected behaviours later, when calling `from_dict`.\n\n An example usage:\n\n ```python\n class MyClass:\n def __init__(self, my_param: int = 10):\n self.my_param = my_param\n\n def to_dict(self):\n return default_to_dict(self, my_param=self.my_param)\n\n\n obj = MyClass(my_param=1000)\n data = obj.to_dict()\n assert data == {\n \"type\": \"MyClass\",\n \"init_parameters\": {\n \"my_param\": 1000,\n },\n }\n ```\n\n :param obj:\n The object to be serialized.\n :param init_parameters:\n The parameters used to create a new instance of the class.\n :returns:\n A dictionary representation of the instance.\n \"\"\"\n return {\"type\": generate_qualified_class_name(type(obj)), \"init_parameters\": init_parameters}"
},
{
"class_start_lineno": 22,
"class_end_lineno": 291,
"func_start_lineno": 152,
"func_end_lineno": 165,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(\n self,\n jq_schema=self._jq_schema,\n content_key=self._content_key,\n extra_meta_fields=self._meta_fields,\n store_full_path=self._store_full_path,\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 51,
"func_start_lineno": 30,
"func_end_lineno": 51,
"func_code": "def normalize_metadata(\n meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]], sources_count: int\n) -> List[Dict[str, Any]]:\n \"\"\"\n Normalize the metadata input for a converter.\n\n Given all the possible value of the meta input for a converter (None, dictionary or list of dicts),\n makes sure to return a list of dictionaries of the correct length for the converter to use.\n\n :param meta: the meta input of the converter, as-is\n :param sources_count: the number of sources the converter received\n :returns: a list of dictionaries of the make length as the sources list\n \"\"\"\n if meta is None:\n return [{}] * sources_count\n if isinstance(meta, dict):\n return [meta] * sources_count\n if isinstance(meta, list):\n if sources_count != len(meta):\n raise ValueError(\"The length of the metadata list must match the number of sources.\")\n return meta\n raise ValueError(\"meta must be either None, a dictionary or a list of dictionaries.\")"
},
{
"class_start_lineno": 22,
"class_end_lineno": 291,
"func_start_lineno": 250,
"func_end_lineno": 291,
"func_code": " def run(\n self,\n sources: List[Union[str, Path, ByteStream]],\n meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,\n ):\n \"\"\"\n Converts a list of JSON files to documents.\n\n :param sources:\n A list of file paths or ByteStream objects.\n :param meta:\n Optional metadata to attach to the documents.\n This value can be either a list of dictionaries or a single dictionary.\n If it's a single dictionary, its content is added to the metadata of all produced documents.\n If it's a list, the length of the list must match the number of sources.\n If `sources` contain ByteStream objects, their `meta` will be added to the output documents.\n\n :returns:\n A dictionary with the following keys:\n - `documents`: A list of created documents.\n \"\"\"\n documents = []\n meta_list = normalize_metadata(meta=meta, sources_count=len(sources))\n\n for source, metadata in zip(sources, meta_list):\n try:\n bytestream = get_bytestream_from_source(source)\n except Exception as exc:\n logger.warning(\"Could not read {source}. Skipping it. Error: {error}\", source=source, error=exc)\n continue\n\n data = self._get_content_and_meta(bytestream)\n\n for text, extra_meta in data:\n merged_metadata = {**bytestream.meta, **metadata, **extra_meta}\n\n if not self._store_full_path and (file_path := bytestream.meta.get(\"file_path\")):\n merged_metadata[\"file_path\"] = os.path.basename(file_path)\n document = Document(content=text, meta=merged_metadata)\n documents.append(document)\n\n return {\"documents\": documents}"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.core.serialization.default_to_dict",
"haystack.components.converters.json.JSONConverter.to_dict",
"haystack.components.converters.utils.normalize_metadata",
"haystack.components.converters.json.JSONConverter.run"
] |
Python
| 3 | 4 |
{
"total_num": 19,
"base_passed_num": 5
}
|
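A behaviour sketch for `normalize_metadata`, matching the three branches in the code above (requires haystack-ai):

```python
from haystack.components.converters.utils import normalize_metadata

print(normalize_metadata(None, sources_count=2))            # [{}, {}]
print(normalize_metadata({"lang": "en"}, sources_count=2))  # [{'lang': 'en'}, {'lang': 'en'}]
# A list is passed through, but its length must match sources_count,
# otherwise a ValueError is raised.
print(normalize_metadata([{"a": 1}, {"b": 2}], sources_count=2))
```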
[
"haystack.haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions::_parse_openapi_spec",
"haystack.haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions::run",
"haystack.haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions::_parse_property_attributes",
"haystack.haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions::_parse_endpoint_spec",
"haystack.haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions::_openapi_to_functions"
] |
haystack
|
[
"haystack/components/converters/openapi_functions.py",
"haystack/components/converters/openapi_functions.py",
"haystack/components/converters/openapi_functions.py",
"haystack/components/converters/openapi_functions.py",
"haystack/components/converters/openapi_functions.py"
] |
[
"test/components/converters/test_openapi_functions.py"
] |
[
{
"class_start_lineno": 23,
"class_end_lineno": 257,
"func_start_lineno": 232,
"func_end_lineno": 257,
"func_code": " def _parse_openapi_spec(self, content: str) -> Dict[str, Any]:\n \"\"\"\n Parses OpenAPI specification content, supporting both JSON and YAML formats.\n\n :param content: The content of the OpenAPI specification.\n :return: The parsed OpenAPI specification.\n \"\"\"\n open_api_spec_content = None\n try:\n open_api_spec_content = json.loads(content)\n return jsonref.replace_refs(open_api_spec_content)\n except json.JSONDecodeError as json_error:\n # heuristic to confirm that the content is likely malformed JSON\n if content.strip().startswith((\"{\", \"[\")):\n raise json_error\n\n try:\n open_api_spec_content = yaml.safe_load(content)\n except yaml.YAMLError:\n error_message = (\n \"Failed to parse the OpenAPI specification. The content does not appear to be valid JSON or YAML.\\n\\n\"\n )\n raise RuntimeError(error_message, content)\n\n # Replace references in the object with their resolved values, if any\n return jsonref.replace_refs(open_api_spec_content)"
},
{
"class_start_lineno": 23,
"class_end_lineno": 257,
"func_start_lineno": 56,
"func_end_lineno": 115,
"func_code": " def run(self, sources: List[Union[str, Path, ByteStream]]) -> Dict[str, Any]:\n \"\"\"\n Converts OpenAPI definitions in OpenAI function calling format.\n\n :param sources:\n File paths or ByteStream objects of OpenAPI definitions (in JSON or YAML format).\n\n :returns:\n A dictionary with the following keys:\n - functions: Function definitions in JSON object format\n - openapi_specs: OpenAPI specs in JSON/YAML object format with resolved references\n\n :raises RuntimeError:\n If the OpenAPI definitions cannot be downloaded or processed.\n :raises ValueError:\n If the source type is not recognized or no functions are found in the OpenAPI definitions.\n \"\"\"\n all_extracted_fc_definitions: List[Dict[str, Any]] = []\n all_openapi_specs = []\n for source in sources:\n openapi_spec_content = None\n if isinstance(source, (str, Path)):\n if os.path.exists(source):\n try:\n with open(source, \"r\") as f:\n openapi_spec_content = f.read()\n except IOError as e:\n logger.warning(\n \"IO error reading OpenAPI specification file: {source}. Error: {e}\", source=source, e=e\n )\n else:\n logger.warning(f\"OpenAPI specification file not found: {source}\")\n elif isinstance(source, ByteStream):\n openapi_spec_content = source.data.decode(\"utf-8\")\n if not openapi_spec_content:\n logger.warning(\n \"Invalid OpenAPI specification content provided: {openapi_spec_content}\",\n openapi_spec_content=openapi_spec_content,\n )\n else:\n logger.warning(\n \"Invalid source type {source}. Only str, Path, and ByteStream are supported.\", source=type(source)\n )\n continue\n\n if openapi_spec_content:\n try:\n service_openapi_spec = self._parse_openapi_spec(openapi_spec_content)\n functions: List[Dict[str, Any]] = self._openapi_to_functions(service_openapi_spec)\n all_extracted_fc_definitions.extend(functions)\n all_openapi_specs.append(service_openapi_spec)\n except Exception as e:\n logger.error(\n \"Error processing OpenAPI specification from source {source}: {error}\", source=source, error=e\n )\n\n if not all_extracted_fc_definitions:\n logger.warning(\"No OpenAI function definitions extracted from the provided OpenAPI specification sources.\")\n\n return {\"functions\": all_extracted_fc_definitions, \"openapi_specs\": all_openapi_specs}"
},
{
"class_start_lineno": 23,
"class_end_lineno": 257,
"func_start_lineno": 193,
"func_end_lineno": 230,
"func_code": " def _parse_property_attributes(\n self, property_schema: Dict[str, Any], include_attributes: Optional[List[str]] = None\n ) -> Dict[str, Any]:\n \"\"\"\n Parses the attributes of a property schema.\n\n Recursively parses the attributes of a property schema, including nested objects and arrays,\n and includes specified attributes like description, pattern, etc.\n\n :param property_schema: The schema of the property to parse.\n :param include_attributes: The list of attributes to include in the parsed schema.\n :return: The parsed schema of the property including the specified attributes.\n \"\"\"\n include_attributes = include_attributes or [\"description\", \"pattern\", \"enum\"]\n\n schema_type = property_schema.get(\"type\")\n\n parsed_schema = {\"type\": schema_type} if schema_type else {}\n for attr in include_attributes:\n if attr in property_schema:\n parsed_schema[attr] = property_schema[attr]\n\n if schema_type == \"object\":\n properties = property_schema.get(\"properties\", {})\n parsed_properties = {\n prop_name: self._parse_property_attributes(prop, include_attributes)\n for prop_name, prop in properties.items()\n }\n parsed_schema[\"properties\"] = parsed_properties\n\n if \"required\" in property_schema:\n parsed_schema[\"required\"] = property_schema[\"required\"]\n\n elif schema_type == \"array\":\n items = property_schema.get(\"items\", {})\n parsed_schema[\"items\"] = self._parse_property_attributes(items, include_attributes)\n\n return parsed_schema"
},
{
"class_start_lineno": 23,
"class_end_lineno": 257,
"func_start_lineno": 153,
"func_end_lineno": 191,
"func_code": " def _parse_endpoint_spec(self, resolved_spec: Dict[str, Any]) -> Optional[Dict[str, Any]]:\n if not isinstance(resolved_spec, dict):\n logger.warning(\"Invalid OpenAPI spec format provided. Could not extract function.\")\n return {}\n\n function_name = resolved_spec.get(\"operationId\")\n description = resolved_spec.get(\"description\") or resolved_spec.get(\"summary\", \"\")\n\n schema: Dict[str, Any] = {\"type\": \"object\", \"properties\": {}}\n\n # requestBody section\n req_body_schema = (\n resolved_spec.get(\"requestBody\", {}).get(\"content\", {}).get(\"application/json\", {}).get(\"schema\", {})\n )\n if \"properties\" in req_body_schema:\n for prop_name, prop_schema in req_body_schema[\"properties\"].items():\n schema[\"properties\"][prop_name] = self._parse_property_attributes(prop_schema)\n\n if \"required\" in req_body_schema:\n schema.setdefault(\"required\", []).extend(req_body_schema[\"required\"])\n\n # parameters section\n for param in resolved_spec.get(\"parameters\", []):\n if \"schema\" in param:\n schema_dict = self._parse_property_attributes(param[\"schema\"])\n # these attributes are not in param[schema] level but on param level\n useful_attributes = [\"description\", \"pattern\", \"enum\"]\n schema_dict.update({key: param[key] for key in useful_attributes if param.get(key)})\n schema[\"properties\"][param[\"name\"]] = schema_dict\n if param.get(\"required\", False):\n schema.setdefault(\"required\", []).append(param[\"name\"])\n\n if function_name and description and schema[\"properties\"]:\n return {\"name\": function_name, \"description\": description, \"parameters\": schema}\n else:\n logger.warning(\n \"Invalid OpenAPI spec format provided. Could not extract function from {spec}\", spec=resolved_spec\n )\n return {}"
},
{
"class_start_lineno": 23,
"class_end_lineno": 257,
"func_start_lineno": 117,
"func_end_lineno": 151,
"func_code": " def _openapi_to_functions(self, service_openapi_spec: Dict[str, Any]) -> List[Dict[str, Any]]:\n \"\"\"\n OpenAPI to OpenAI function conversion.\n\n Extracts functions from the OpenAPI specification of the service and converts them into a format\n suitable for OpenAI function calling.\n\n :param service_openapi_spec: The OpenAPI specification from which functions are to be extracted.\n :type service_openapi_spec: Dict[str, Any]\n :return: A list of dictionaries, each representing a function. Each dictionary includes the function's\n name, description, and a schema of its parameters.\n :rtype: List[Dict[str, Any]]\n \"\"\"\n\n # Doesn't enforce rigid spec validation because that would require a lot of dependencies\n # We check the version and require minimal fields to be present, so we can extract functions\n spec_version = service_openapi_spec.get(\"openapi\")\n if not spec_version:\n raise ValueError(f\"Invalid OpenAPI spec provided. Could not extract version from {service_openapi_spec}\")\n service_openapi_spec_version = int(spec_version.split(\".\")[0])\n\n # Compare the versions\n if service_openapi_spec_version < OpenAPIServiceToFunctions.MIN_REQUIRED_OPENAPI_SPEC_VERSION:\n raise ValueError(\n f\"Invalid OpenAPI spec version {service_openapi_spec_version}. Must be \"\n f\"at least {OpenAPIServiceToFunctions.MIN_REQUIRED_OPENAPI_SPEC_VERSION}.\"\n )\n\n functions: List[Dict[str, Any]] = []\n for paths in service_openapi_spec[\"paths\"].values():\n for path_spec in paths.values():\n function_dict = self._parse_endpoint_spec(path_spec)\n if function_dict:\n functions.append(function_dict)\n return functions"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions._parse_openapi_spec",
"haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions.run",
"haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions._parse_property_attributes",
"haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions._parse_endpoint_spec",
"haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions._openapi_to_functions"
] |
Python
| 3 | 5 |
{
"total_num": 8,
"base_passed_num": 0
}
|
[
"haystack.haystack.utils.callable_serialization.serialize_callable",
"haystack.haystack.components.converters.output_adapter.OutputAdapter::to_dict",
"haystack.haystack.utils.type_serialization.thread_safe_import",
"haystack.haystack.utils.callable_serialization.deserialize_callable"
] |
haystack
|
[
"haystack/utils/callable_serialization.py",
"haystack/components/converters/output_adapter.py",
"haystack/utils/type_serialization.py",
"haystack/utils/callable_serialization.py"
] |
[
"test/components/converters/test_output_adapter.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 80,
"func_start_lineno": 12,
"func_end_lineno": 42,
"func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path"
},
{
"class_start_lineno": 25,
"class_end_lineno": 184,
"func_start_lineno": 139,
"func_end_lineno": 153,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n se_filters = {name: serialize_callable(filter_func) for name, filter_func in self.custom_filters.items()}\n return default_to_dict(\n self,\n template=self.template,\n output_type=serialize_type(self.output_type),\n custom_filters=se_filters,\n unsafe=self._unsafe,\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 170,
"func_start_lineno": 159,
"func_end_lineno": 170,
"func_code": "def thread_safe_import(module_name: str) -> ModuleType:\n \"\"\"\n Import a module in a thread-safe manner.\n\n Importing modules in a multi-threaded environment can lead to race conditions.\n This function ensures that the module is imported in a thread-safe manner without having impact\n on the performance of the import for single-threaded environments.\n\n :param module_name: the module to import\n \"\"\"\n with _import_lock:\n return importlib.import_module(module_name)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 80,
"func_start_lineno": 45,
"func_end_lineno": 80,
"func_code": "def deserialize_callable(callable_handle: str) -> Callable:\n \"\"\"\n Deserializes a callable given its full import path as a string.\n\n :param callable_handle: The full path of the callable_handle\n :return: The callable\n :raises DeserializationError: If the callable cannot be found\n \"\"\"\n parts = callable_handle.split(\".\")\n\n for i in range(len(parts), 0, -1):\n module_name = \".\".join(parts[:i])\n try:\n mod: Any = thread_safe_import(module_name)\n except Exception:\n # keep reducing i until we find a valid module import\n continue\n\n attr_value = mod\n for part in parts[i:]:\n try:\n attr_value = getattr(attr_value, part)\n except AttributeError as e:\n raise DeserializationError(f\"Could not find attribute '{part}' in {attr_value.__name__}\") from e\n\n # when the attribute is a classmethod, we need the underlying function\n if isinstance(attr_value, (classmethod, staticmethod)):\n attr_value = attr_value.__func__\n\n if not callable(attr_value):\n raise DeserializationError(f\"The final attribute is not callable: {attr_value}\")\n\n return attr_value\n\n # Fallback if we never find anything\n raise DeserializationError(f\"Could not import '{callable_handle}' as a module or callable.\")"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.utils.callable_serialization.serialize_callable",
"haystack.components.converters.output_adapter.OutputAdapter.to_dict",
"haystack.utils.type_serialization.thread_safe_import",
"haystack.utils.callable_serialization.deserialize_callable"
] |
Python
| 3 | 4 |
{
"total_num": 14,
"base_passed_num": 9
}
|
[
"haystack.haystack.utils.callable_serialization.serialize_callable",
"haystack.haystack.components.embedders.azure_document_embedder.AzureOpenAIDocumentEmbedder::to_dict",
"haystack.haystack.utils.type_serialization.thread_safe_import",
"haystack.haystack.utils.callable_serialization.deserialize_callable"
] |
haystack
|
[
"haystack/utils/callable_serialization.py",
"haystack/components/embedders/azure_document_embedder.py",
"haystack/utils/type_serialization.py",
"haystack/utils/callable_serialization.py"
] |
[
"test/components/embedders/test_azure_document_embedder.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 80,
"func_start_lineno": 12,
"func_end_lineno": 42,
"func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path"
},
{
"class_start_lineno": 20,
"class_end_lineno": 281,
"func_start_lineno": 154,
"func_end_lineno": 183,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n azure_ad_token_provider_name = None\n if self.azure_ad_token_provider:\n azure_ad_token_provider_name = serialize_callable(self.azure_ad_token_provider)\n return default_to_dict(\n self,\n azure_endpoint=self.azure_endpoint,\n azure_deployment=self.azure_deployment,\n dimensions=self.dimensions,\n organization=self.organization,\n api_version=self.api_version,\n prefix=self.prefix,\n suffix=self.suffix,\n batch_size=self.batch_size,\n progress_bar=self.progress_bar,\n meta_fields_to_embed=self.meta_fields_to_embed,\n embedding_separator=self.embedding_separator,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n timeout=self.timeout,\n max_retries=self.max_retries,\n default_headers=self.default_headers,\n azure_ad_token_provider=azure_ad_token_provider_name,\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 170,
"func_start_lineno": 159,
"func_end_lineno": 170,
"func_code": "def thread_safe_import(module_name: str) -> ModuleType:\n \"\"\"\n Import a module in a thread-safe manner.\n\n Importing modules in a multi-threaded environment can lead to race conditions.\n This function ensures that the module is imported in a thread-safe manner without having impact\n on the performance of the import for single-threaded environments.\n\n :param module_name: the module to import\n \"\"\"\n with _import_lock:\n return importlib.import_module(module_name)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 80,
"func_start_lineno": 45,
"func_end_lineno": 80,
"func_code": "def deserialize_callable(callable_handle: str) -> Callable:\n \"\"\"\n Deserializes a callable given its full import path as a string.\n\n :param callable_handle: The full path of the callable_handle\n :return: The callable\n :raises DeserializationError: If the callable cannot be found\n \"\"\"\n parts = callable_handle.split(\".\")\n\n for i in range(len(parts), 0, -1):\n module_name = \".\".join(parts[:i])\n try:\n mod: Any = thread_safe_import(module_name)\n except Exception:\n # keep reducing i until we find a valid module import\n continue\n\n attr_value = mod\n for part in parts[i:]:\n try:\n attr_value = getattr(attr_value, part)\n except AttributeError as e:\n raise DeserializationError(f\"Could not find attribute '{part}' in {attr_value.__name__}\") from e\n\n # when the attribute is a classmethod, we need the underlying function\n if isinstance(attr_value, (classmethod, staticmethod)):\n attr_value = attr_value.__func__\n\n if not callable(attr_value):\n raise DeserializationError(f\"The final attribute is not callable: {attr_value}\")\n\n return attr_value\n\n # Fallback if we never find anything\n raise DeserializationError(f\"Could not import '{callable_handle}' as a module or callable.\")"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.utils.callable_serialization.serialize_callable",
"haystack.components.embedders.azure_document_embedder.AzureOpenAIDocumentEmbedder.to_dict",
"haystack.utils.type_serialization.thread_safe_import",
"haystack.utils.callable_serialization.deserialize_callable"
] |
Python
| 3 | 4 |
{
"total_num": 6,
"base_passed_num": 3
}
|
[
"haystack.haystack.utils.callable_serialization.serialize_callable",
"haystack.haystack.components.embedders.azure_text_embedder.AzureOpenAITextEmbedder::to_dict",
"haystack.haystack.utils.type_serialization.thread_safe_import",
"haystack.haystack.utils.callable_serialization.deserialize_callable"
] |
haystack
|
[
"haystack/utils/callable_serialization.py",
"haystack/components/embedders/azure_text_embedder.py",
"haystack/utils/type_serialization.py",
"haystack/utils/callable_serialization.py"
] |
[
"test/components/embedders/test_azure_text_embedder.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 80,
"func_start_lineno": 12,
"func_end_lineno": 42,
"func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path"
},
{
"class_start_lineno": 15,
"class_end_lineno": 216,
"func_start_lineno": 136,
"func_end_lineno": 161,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n azure_ad_token_provider_name = None\n if self.azure_ad_token_provider:\n azure_ad_token_provider_name = serialize_callable(self.azure_ad_token_provider)\n return default_to_dict(\n self,\n azure_endpoint=self.azure_endpoint,\n azure_deployment=self.azure_deployment,\n dimensions=self.dimensions,\n organization=self.organization,\n api_version=self.api_version,\n prefix=self.prefix,\n suffix=self.suffix,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n timeout=self.timeout,\n max_retries=self.max_retries,\n default_headers=self.default_headers,\n azure_ad_token_provider=azure_ad_token_provider_name,\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 170,
"func_start_lineno": 159,
"func_end_lineno": 170,
"func_code": "def thread_safe_import(module_name: str) -> ModuleType:\n \"\"\"\n Import a module in a thread-safe manner.\n\n Importing modules in a multi-threaded environment can lead to race conditions.\n This function ensures that the module is imported in a thread-safe manner without having impact\n on the performance of the import for single-threaded environments.\n\n :param module_name: the module to import\n \"\"\"\n with _import_lock:\n return importlib.import_module(module_name)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 80,
"func_start_lineno": 45,
"func_end_lineno": 80,
"func_code": "def deserialize_callable(callable_handle: str) -> Callable:\n \"\"\"\n Deserializes a callable given its full import path as a string.\n\n :param callable_handle: The full path of the callable_handle\n :return: The callable\n :raises DeserializationError: If the callable cannot be found\n \"\"\"\n parts = callable_handle.split(\".\")\n\n for i in range(len(parts), 0, -1):\n module_name = \".\".join(parts[:i])\n try:\n mod: Any = thread_safe_import(module_name)\n except Exception:\n # keep reducing i until we find a valid module import\n continue\n\n attr_value = mod\n for part in parts[i:]:\n try:\n attr_value = getattr(attr_value, part)\n except AttributeError as e:\n raise DeserializationError(f\"Could not find attribute '{part}' in {attr_value.__name__}\") from e\n\n # when the attribute is a classmethod, we need the underlying function\n if isinstance(attr_value, (classmethod, staticmethod)):\n attr_value = attr_value.__func__\n\n if not callable(attr_value):\n raise DeserializationError(f\"The final attribute is not callable: {attr_value}\")\n\n return attr_value\n\n # Fallback if we never find anything\n raise DeserializationError(f\"Could not import '{callable_handle}' as a module or callable.\")"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.utils.callable_serialization.serialize_callable",
"haystack.components.embedders.azure_text_embedder.AzureOpenAITextEmbedder.to_dict",
"haystack.utils.type_serialization.thread_safe_import",
"haystack.utils.callable_serialization.deserialize_callable"
] |
Python
| 3 | 4 |
{
"total_num": 5,
"base_passed_num": 2
}
|
[
"haystack.haystack.components.embedders.hugging_face_api_document_embedder.HuggingFaceAPIDocumentEmbedder::_embed_batch",
"haystack.haystack.components.embedders.hugging_face_api_document_embedder.HuggingFaceAPIDocumentEmbedder::run"
] |
haystack
|
[
"haystack/components/embedders/hugging_face_api_document_embedder.py",
"haystack/components/embedders/hugging_face_api_document_embedder.py"
] |
[
"test/components/embedders/test_hugging_face_api_document_embedder.py"
] |
[
{
"class_start_lineno": 24,
"class_end_lineno": 298,
"func_start_lineno": 236,
"func_end_lineno": 271,
"func_code": " def _embed_batch(self, texts_to_embed: List[str], batch_size: int) -> List[List[float]]:\n \"\"\"\n Embed a list of texts in batches.\n \"\"\"\n truncate = self.truncate\n normalize = self.normalize\n\n if self.api_type == HFEmbeddingAPIType.SERVERLESS_INFERENCE_API:\n if truncate is not None:\n msg = \"`truncate` parameter is not supported for Serverless Inference API. It will be ignored.\"\n warnings.warn(msg)\n truncate = None\n if normalize is not None:\n msg = \"`normalize` parameter is not supported for Serverless Inference API. It will be ignored.\"\n warnings.warn(msg)\n normalize = None\n\n all_embeddings = []\n for i in tqdm(\n range(0, len(texts_to_embed), batch_size), disable=not self.progress_bar, desc=\"Calculating embeddings\"\n ):\n batch = texts_to_embed[i : i + batch_size]\n\n np_embeddings = self._client.feature_extraction(\n # this method does not officially support list of strings, but works as expected\n text=batch, # type: ignore[arg-type]\n truncate=truncate,\n normalize=normalize,\n )\n\n if np_embeddings.ndim != 2 or np_embeddings.shape[0] != len(batch):\n raise ValueError(f\"Expected embedding shape ({batch_size}, embedding_dim), got {np_embeddings.shape}\")\n\n all_embeddings.extend(np_embeddings.tolist())\n\n return all_embeddings"
},
{
"class_start_lineno": 24,
"class_end_lineno": 298,
"func_start_lineno": 274,
"func_end_lineno": 298,
"func_code": " def run(self, documents: List[Document]):\n \"\"\"\n Embeds a list of documents.\n\n :param documents:\n Documents to embed.\n\n :returns:\n A dictionary with the following keys:\n - `documents`: A list of documents with embeddings.\n \"\"\"\n if not isinstance(documents, list) or documents and not isinstance(documents[0], Document):\n raise TypeError(\n \"HuggingFaceAPIDocumentEmbedder expects a list of Documents as input.\"\n \" In case you want to embed a string, please use the HuggingFaceAPITextEmbedder.\"\n )\n\n texts_to_embed = self._prepare_texts_to_embed(documents=documents)\n\n embeddings = self._embed_batch(texts_to_embed=texts_to_embed, batch_size=self.batch_size)\n\n for doc, emb in zip(documents, embeddings):\n doc.embedding = emb\n\n return {\"documents\": documents}"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.components.embedders.hugging_face_api_document_embedder.HuggingFaceAPIDocumentEmbedder._embed_batch",
"haystack.components.embedders.hugging_face_api_document_embedder.HuggingFaceAPIDocumentEmbedder.run"
] |
Python
| 1 | 2 |
{
"total_num": 17,
"base_passed_num": 12
}
|
[
"haystack.haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder::_prepare_texts_to_embed",
"haystack.haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder::_embed_batch",
"haystack.haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder::run"
] |
haystack
|
[
"haystack/components/embedders/openai_document_embedder.py",
"haystack/components/embedders/openai_document_embedder.py",
"haystack/components/embedders/openai_document_embedder.py"
] |
[
"test/components/embedders/test_openai_document_embedder.py"
] |
[
{
"class_start_lineno": 19,
"class_end_lineno": 245,
"func_start_lineno": 164,
"func_end_lineno": 181,
"func_code": " def _prepare_texts_to_embed(self, documents: List[Document]) -> Dict[str, str]:\n \"\"\"\n Prepare the texts to embed by concatenating the Document text with the metadata fields to embed.\n \"\"\"\n texts_to_embed = {}\n for doc in documents:\n meta_values_to_embed = [\n str(doc.meta[key]) for key in self.meta_fields_to_embed if key in doc.meta and doc.meta[key] is not None\n ]\n\n text_to_embed = (\n self.prefix + self.embedding_separator.join(meta_values_to_embed + [doc.content or \"\"]) + self.suffix\n )\n\n # copied from OpenAI embedding_utils (https://github.com/openai/openai-python/blob/main/openai/embeddings_utils.py)\n # replace newlines, which can negatively affect performance.\n texts_to_embed[doc.id] = text_to_embed.replace(\"\\n\", \" \")\n return texts_to_embed"
},
{
"class_start_lineno": 19,
"class_end_lineno": 245,
"func_start_lineno": 183,
"func_end_lineno": 217,
"func_code": " def _embed_batch(self, texts_to_embed: Dict[str, str], batch_size: int) -> Tuple[List[List[float]], Dict[str, Any]]:\n \"\"\"\n Embed a list of texts in batches.\n \"\"\"\n\n all_embeddings = []\n meta: Dict[str, Any] = {}\n for batch in tqdm(\n batched(texts_to_embed.items(), batch_size), disable=not self.progress_bar, desc=\"Calculating embeddings\"\n ):\n args: Dict[str, Any] = {\"model\": self.model, \"input\": [b[1] for b in batch]}\n\n if self.dimensions is not None:\n args[\"dimensions\"] = self.dimensions\n\n try:\n response = self.client.embeddings.create(**args)\n except APIError as exc:\n ids = \", \".join(b[0] for b in batch)\n msg = \"Failed embedding of documents {ids} caused by {exc}\"\n logger.exception(msg, ids=ids, exc=exc)\n continue\n\n embeddings = [el.embedding for el in response.data]\n all_embeddings.extend(embeddings)\n\n if \"model\" not in meta:\n meta[\"model\"] = response.model\n if \"usage\" not in meta:\n meta[\"usage\"] = dict(response.usage)\n else:\n meta[\"usage\"][\"prompt_tokens\"] += response.usage.prompt_tokens\n meta[\"usage\"][\"total_tokens\"] += response.usage.total_tokens\n\n return all_embeddings, meta"
},
{
"class_start_lineno": 19,
"class_end_lineno": 245,
"func_start_lineno": 220,
"func_end_lineno": 245,
"func_code": " def run(self, documents: List[Document]):\n \"\"\"\n Embeds a list of documents.\n\n :param documents:\n A list of documents to embed.\n\n :returns:\n A dictionary with the following keys:\n - `documents`: A list of documents with embeddings.\n - `meta`: Information about the usage of the model.\n \"\"\"\n if not isinstance(documents, list) or documents and not isinstance(documents[0], Document):\n raise TypeError(\n \"OpenAIDocumentEmbedder expects a list of Documents as input.\"\n \"In case you want to embed a string, please use the OpenAITextEmbedder.\"\n )\n\n texts_to_embed = self._prepare_texts_to_embed(documents=documents)\n\n embeddings, meta = self._embed_batch(texts_to_embed=texts_to_embed, batch_size=self.batch_size)\n\n for doc, emb in zip(documents, embeddings):\n doc.embedding = emb\n\n return {\"documents\": documents, \"meta\": meta}"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder._prepare_texts_to_embed",
"haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder._embed_batch",
"haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder.run"
] |
Python
| 2 | 3 |
{
"total_num": 11,
"base_passed_num": 4
}
|
[
"haystack.haystack.utils.device.ComponentDevice::to_dict",
"haystack.haystack.components.embedders.sentence_transformers_document_embedder.SentenceTransformersDocumentEmbedder::to_dict"
] |
haystack
|
[
"haystack/utils/device.py",
"haystack/components/embedders/sentence_transformers_document_embedder.py"
] |
[
"test/components/embedders/test_sentence_transformers_document_embedder.py"
] |
[
{
"class_start_lineno": 240,
"class_end_lineno": 480,
"func_start_lineno": 450,
"func_end_lineno": 463,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Convert the component device representation to a JSON-serializable dictionary.\n\n :returns:\n The dictionary representation.\n \"\"\"\n if self._single_device is not None:\n return {\"type\": \"single\", \"device\": str(self._single_device)}\n elif self._multiple_devices is not None:\n return {\"type\": \"multiple\", \"device_map\": self._multiple_devices.to_dict()}\n else:\n # Unreachable\n assert False"
},
{
"class_start_lineno": 16,
"class_end_lineno": 256,
"func_start_lineno": 145,
"func_end_lineno": 175,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialization_dict = default_to_dict(\n self,\n model=self.model,\n device=self.device.to_dict(),\n token=self.token.to_dict() if self.token else None,\n prefix=self.prefix,\n suffix=self.suffix,\n batch_size=self.batch_size,\n progress_bar=self.progress_bar,\n normalize_embeddings=self.normalize_embeddings,\n meta_fields_to_embed=self.meta_fields_to_embed,\n embedding_separator=self.embedding_separator,\n trust_remote_code=self.trust_remote_code,\n truncate_dim=self.truncate_dim,\n model_kwargs=self.model_kwargs,\n tokenizer_kwargs=self.tokenizer_kwargs,\n config_kwargs=self.config_kwargs,\n precision=self.precision,\n encode_kwargs=self.encode_kwargs,\n backend=self.backend,\n )\n if serialization_dict[\"init_parameters\"].get(\"model_kwargs\") is not None:\n serialize_hf_model_kwargs(serialization_dict[\"init_parameters\"][\"model_kwargs\"])\n return serialization_dict"
}
] |
[
"function_empty"
] |
[
"haystack.utils.device.ComponentDevice.to_dict",
"haystack.components.embedders.sentence_transformers_document_embedder.SentenceTransformersDocumentEmbedder.to_dict"
] |
Python
| 2 | 2 |
{
"total_num": 18,
"base_passed_num": 15
}
|
[
"haystack.haystack.utils.device.ComponentDevice::to_dict",
"haystack.haystack.components.embedders.sentence_transformers_text_embedder.SentenceTransformersTextEmbedder::to_dict",
"haystack.haystack.utils.device.ComponentDevice::to_torch_str",
"haystack.haystack.components.embedders.sentence_transformers_text_embedder.SentenceTransformersTextEmbedder::warm_up"
] |
haystack
|
[
"haystack/utils/device.py",
"haystack/components/embedders/sentence_transformers_text_embedder.py",
"haystack/utils/device.py",
"haystack/components/embedders/sentence_transformers_text_embedder.py"
] |
[
"test/components/embedders/test_sentence_transformers_text_embedder.py"
] |
[
{
"class_start_lineno": 240,
"class_end_lineno": 480,
"func_start_lineno": 450,
"func_end_lineno": 463,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Convert the component device representation to a JSON-serializable dictionary.\n\n :returns:\n The dictionary representation.\n \"\"\"\n if self._single_device is not None:\n return {\"type\": \"single\", \"device\": str(self._single_device)}\n elif self._multiple_devices is not None:\n return {\"type\": \"multiple\", \"device_map\": self._multiple_devices.to_dict()}\n else:\n # Unreachable\n assert False"
},
{
"class_start_lineno": 16,
"class_end_lineno": 229,
"func_start_lineno": 133,
"func_end_lineno": 161,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialization_dict = default_to_dict(\n self,\n model=self.model,\n device=self.device.to_dict(),\n token=self.token.to_dict() if self.token else None,\n prefix=self.prefix,\n suffix=self.suffix,\n batch_size=self.batch_size,\n progress_bar=self.progress_bar,\n normalize_embeddings=self.normalize_embeddings,\n trust_remote_code=self.trust_remote_code,\n truncate_dim=self.truncate_dim,\n model_kwargs=self.model_kwargs,\n tokenizer_kwargs=self.tokenizer_kwargs,\n config_kwargs=self.config_kwargs,\n precision=self.precision,\n encode_kwargs=self.encode_kwargs,\n backend=self.backend,\n )\n if serialization_dict[\"init_parameters\"].get(\"model_kwargs\") is not None:\n serialize_hf_model_kwargs(serialization_dict[\"init_parameters\"][\"model_kwargs\"])\n return serialization_dict"
},
{
"class_start_lineno": 240,
"class_end_lineno": 480,
"func_start_lineno": 321,
"func_end_lineno": 336,
"func_code": " def to_torch_str(self) -> str:\n \"\"\"\n Convert the component device representation to PyTorch string format.\n\n Device maps are not supported.\n\n :returns:\n The PyTorch device string representation.\n \"\"\"\n self._validate()\n\n if self._single_device is None:\n raise ValueError(\"Only single devices can be converted to PyTorch format\")\n\n assert self._single_device is not None\n return str(self._single_device)"
},
{
"class_start_lineno": 16,
"class_end_lineno": 229,
"func_start_lineno": 181,
"func_end_lineno": 198,
"func_code": " def warm_up(self):\n \"\"\"\n Initializes the component.\n \"\"\"\n if self.embedding_backend is None:\n self.embedding_backend = _SentenceTransformersEmbeddingBackendFactory.get_embedding_backend(\n model=self.model,\n device=self.device.to_torch_str(),\n auth_token=self.token,\n trust_remote_code=self.trust_remote_code,\n truncate_dim=self.truncate_dim,\n model_kwargs=self.model_kwargs,\n tokenizer_kwargs=self.tokenizer_kwargs,\n config_kwargs=self.config_kwargs,\n backend=self.backend,\n )\n if self.tokenizer_kwargs and self.tokenizer_kwargs.get(\"model_max_length\"):\n self.embedding_backend.model.max_seq_length = self.tokenizer_kwargs[\"model_max_length\"]"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.utils.device.ComponentDevice.to_dict",
"haystack.components.embedders.sentence_transformers_text_embedder.SentenceTransformersTextEmbedder.to_dict",
"haystack.utils.device.ComponentDevice.to_torch_str",
"haystack.components.embedders.sentence_transformers_text_embedder.SentenceTransformersTextEmbedder.warm_up"
] |
Python
| 3 | 4 |
{
"total_num": 19,
"base_passed_num": 8
}
|
[
"haystack.haystack.utils.type_serialization.serialize_type",
"haystack.haystack.components.evaluators.llm_evaluator.LLMEvaluator::to_dict",
"haystack.haystack.core.serialization.component_to_dict",
"haystack.haystack.utils.type_serialization.deserialize_type",
"haystack.haystack.core.serialization.component_from_dict"
] |
haystack
|
[
"haystack/utils/type_serialization.py",
"haystack/components/evaluators/llm_evaluator.py",
"haystack/core/serialization.py",
"haystack/utils/type_serialization.py",
"haystack/core/serialization.py"
] |
[
"test/components/evaluators/test_llm_evaluator.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 170,
"func_start_lineno": 19,
"func_end_lineno": 52,
"func_code": "def serialize_type(target: Any) -> str:\n \"\"\"\n Serializes a type or an instance to its string representation, including the module name.\n\n This function handles types, instances of types, and special typing objects.\n It assumes that non-typing objects will have a '__name__' attribute.\n\n :param target:\n The object to serialize, can be an instance or a type.\n :return:\n The string representation of the type.\n \"\"\"\n name = getattr(target, \"__name__\", str(target))\n\n # Remove the 'typing.' prefix when using python <3.9\n if name.startswith(\"typing.\"):\n name = name[7:]\n # Remove the arguments from the name when using python <3.9\n if \"[\" in name:\n name = name.split(\"[\")[0]\n\n # Get module name\n module = inspect.getmodule(target)\n module_name = \"\"\n # We omit the module name for builtins to not clutter the output\n if module and hasattr(module, \"__name__\") and module.__name__ != \"builtins\":\n module_name = f\"{module.__name__}\"\n\n args = get_args(target)\n if args:\n args_str = \", \".join([serialize_type(a) for a in args if a is not type(None)])\n return f\"{module_name}.{name}[{args_str}]\" if module_name else f\"{name}[{args_str}]\"\n\n return f\"{module_name}.{name}\" if module_name else f\"{name}\""
},
{
"class_start_lineno": 18,
"class_end_lineno": 387,
"func_start_lineno": 278,
"func_end_lineno": 297,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n # Since we cannot currently serialize tuples, convert the inputs to a list.\n inputs = [[name, serialize_type(type_)] for name, type_ in self.inputs]\n return default_to_dict(\n self,\n instructions=self.instructions,\n inputs=inputs,\n outputs=self.outputs,\n examples=self.examples,\n api=self.api,\n api_key=self.api_key and self.api_key.to_dict(),\n api_params=self.api_params,\n progress_bar=self.progress_bar,\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 36,
"func_end_lineno": 82,
"func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"
},
{
"class_start_lineno": 1,
"class_end_lineno": 170,
"func_start_lineno": 78,
"func_end_lineno": 156,
"func_code": "def deserialize_type(type_str: str) -> Any: # pylint: disable=too-many-return-statements\n \"\"\"\n Deserializes a type given its full import path as a string, including nested generic types.\n\n This function will dynamically import the module if it's not already imported\n and then retrieve the type object from it. It also handles nested generic types like\n `typing.List[typing.Dict[int, str]]`.\n\n :param type_str:\n The string representation of the type's full import path.\n :returns:\n The deserialized type object.\n :raises DeserializationError:\n If the type cannot be deserialized due to missing module or type.\n \"\"\"\n\n type_mapping = {\n list: typing.List,\n dict: typing.Dict,\n set: typing.Set,\n tuple: typing.Tuple,\n frozenset: typing.FrozenSet,\n }\n\n # Handle generics\n if \"[\" in type_str and type_str.endswith(\"]\"):\n main_type_str, generics_str = type_str.split(\"[\", 1)\n generics_str = generics_str[:-1]\n\n main_type = deserialize_type(main_type_str)\n generic_args = [deserialize_type(arg) for arg in _parse_generic_args(generics_str)]\n\n # Reconstruct\n try:\n if sys.version_info >= (3, 9) or repr(main_type).startswith(\"typing.\"):\n return main_type[tuple(generic_args) if len(generic_args) > 1 else generic_args[0]]\n else:\n return type_mapping[main_type][tuple(generic_args) if len(generic_args) > 1 else generic_args[0]]\n except (TypeError, AttributeError) as e:\n raise DeserializationError(f\"Could not apply arguments {generic_args} to type {main_type}\") from e\n\n # Handle non-generic types\n # First, check if there's a module prefix\n if \".\" in type_str:\n parts = type_str.split(\".\")\n module_name = \".\".join(parts[:-1])\n type_name = parts[-1]\n\n module = sys.modules.get(module_name)\n if module is None:\n try:\n module = thread_safe_import(module_name)\n except ImportError as e:\n raise DeserializationError(f\"Could not import the module: {module_name}\") from e\n\n # Get the class from the module\n if hasattr(module, type_name):\n return getattr(module, type_name)\n\n raise DeserializationError(f\"Could not locate the type: {type_name} in the module: {module_name}\")\n\n # No module prefix, check builtins and typing\n # First check builtins\n if hasattr(builtins, type_str):\n return getattr(builtins, type_str)\n\n # Then check typing\n if hasattr(typing, type_str):\n return getattr(typing, type_str)\n\n # Special case for NoneType\n if type_str == \"NoneType\":\n return type(None)\n\n # Special case for None\n if type_str == \"None\":\n return None\n\n raise DeserializationError(f\"Could not deserialize type: {type_str}\")"
},
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 134,
"func_end_lineno": 169,
"func_code": "def component_from_dict(\n cls: Type[object], data: Dict[str, Any], name: str, callbacks: Optional[DeserializationCallbacks] = None\n) -> Any:\n \"\"\"\n Creates a component instance from a dictionary.\n\n If a `from_dict` method is present in the component class, that will be used instead of the default method.\n\n :param cls:\n The class to be used for deserialization.\n :param data:\n The serialized data.\n :param name:\n The name of the component.\n :param callbacks:\n Callbacks to invoke during deserialization.\n :returns:\n The deserialized component.\n \"\"\"\n\n def component_pre_init_callback(component_cls, init_params):\n assert callbacks is not None\n assert callbacks.component_pre_init is not None\n callbacks.component_pre_init(name, component_cls, init_params)\n\n def do_from_dict():\n if hasattr(cls, \"from_dict\"):\n return cls.from_dict(data)\n\n return default_from_dict(cls, data)\n\n if callbacks is None or callbacks.component_pre_init is None:\n return do_from_dict()\n\n with _hook_component_init(component_pre_init_callback):\n return do_from_dict()"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.utils.type_serialization.serialize_type",
"haystack.components.evaluators.llm_evaluator.LLMEvaluator.to_dict",
"haystack.core.serialization.component_to_dict",
"haystack.utils.type_serialization.deserialize_type",
"haystack.core.serialization.component_from_dict"
] |
Python
| 2 | 5 |
{
"total_num": 17,
"base_passed_num": 12
}
|
[
"haystack.haystack.utils.device.ComponentDevice::to_dict",
"haystack.haystack.components.evaluators.sas_evaluator.SASEvaluator::to_dict"
] |
haystack
|
[
"haystack/utils/device.py",
"haystack/components/evaluators/sas_evaluator.py"
] |
[
"test/components/evaluators/test_sas_evaluator.py"
] |
[
{
"class_start_lineno": 240,
"class_end_lineno": 480,
"func_start_lineno": 450,
"func_end_lineno": 463,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Convert the component device representation to a JSON-serializable dictionary.\n\n :returns:\n The dictionary representation.\n \"\"\"\n if self._single_device is not None:\n return {\"type\": \"single\", \"device\": str(self._single_device)}\n elif self._multiple_devices is not None:\n return {\"type\": \"multiple\", \"device_map\": self._multiple_devices.to_dict()}\n else:\n # Unreachable\n assert False"
},
{
"class_start_lineno": 20,
"class_end_lineno": 201,
"func_start_lineno": 85,
"func_end_lineno": 98,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n return default_to_dict(\n self,\n model=self._model,\n batch_size=self._batch_size,\n device=self._device.to_dict() if self._device else None,\n token=self._token.to_dict() if self._token else None,\n )"
}
] |
[
"function_empty"
] |
[
"haystack.utils.device.ComponentDevice.to_dict",
"haystack.components.evaluators.sas_evaluator.SASEvaluator.to_dict"
] |
Python
| 2 | 2 |
{
"total_num": 12,
"base_passed_num": 11
}
|
[
"haystack.haystack.components.generators.chat.openai.OpenAIChatGenerator::to_dict",
"haystack.haystack.components.extractors.llm_metadata_extractor.LLMMetadataExtractor::to_dict",
"haystack.haystack.components.builders.prompt_builder.PromptBuilder::_validate_variables",
"haystack.haystack.components.builders.prompt_builder.PromptBuilder::run",
"haystack.haystack.components.extractors.llm_metadata_extractor.LLMMetadataExtractor::_prepare_prompts"
] |
haystack
|
[
"haystack/components/generators/chat/openai.py",
"haystack/components/extractors/llm_metadata_extractor.py",
"haystack/components/builders/prompt_builder.py",
"haystack/components/builders/prompt_builder.py",
"haystack/components/extractors/llm_metadata_extractor.py"
] |
[
"test/components/extractors/test_llm_metadata_extractor.py"
] |
[
{
"class_start_lineno": 32,
"class_end_lineno": 571,
"func_start_lineno": 170,
"func_end_lineno": 190,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n return default_to_dict(\n self,\n model=self.model,\n streaming_callback=callback_name,\n api_base_url=self.api_base_url,\n organization=self.organization,\n generation_kwargs=self.generation_kwargs,\n api_key=self.api_key.to_dict(),\n timeout=self.timeout,\n max_retries=self.max_retries,\n tools=[tool.to_dict() for tool in self.tools] if self.tools else None,\n tools_strict=self.tools_strict,\n )"
},
{
"class_start_lineno": 61,
"class_end_lineno": 442,
"func_start_lineno": 239,
"func_end_lineno": 258,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n\n llm_provider = self.llm_provider.to_dict()\n\n return default_to_dict(\n self,\n prompt=self.prompt,\n generator_api=self.generator_api.value,\n generator_api_params=llm_provider[\"init_parameters\"],\n expected_keys=self.expected_keys,\n page_range=self.expanded_range,\n raise_on_failure=self.raise_on_failure,\n max_workers=self.max_workers,\n )"
},
{
"class_start_lineno": 17,
"class_end_lineno": 266,
"func_start_lineno": 247,
"func_end_lineno": 266,
"func_code": " def _validate_variables(self, provided_variables: Set[str]):\n \"\"\"\n Checks if all the required template variables are provided.\n\n :param provided_variables:\n A set of provided template variables.\n :raises ValueError:\n If any of the required template variables is not provided.\n \"\"\"\n if self.required_variables == \"*\":\n required_variables = sorted(self.variables)\n else:\n required_variables = self.required_variables\n missing_variables = [var for var in required_variables if var not in provided_variables]\n if missing_variables:\n missing_vars_str = \", \".join(missing_variables)\n raise ValueError(\n f\"Missing required input variables in PromptBuilder: {missing_vars_str}. \"\n f\"Required variables: {required_variables}. Provided variables: {provided_variables}.\"\n )"
},
{
"class_start_lineno": 17,
"class_end_lineno": 266,
"func_start_lineno": 213,
"func_end_lineno": 245,
"func_code": " def run(self, template: Optional[str] = None, template_variables: Optional[Dict[str, Any]] = None, **kwargs):\n \"\"\"\n Renders the prompt template with the provided variables.\n\n It applies the template variables to render the final prompt. You can provide variables via pipeline kwargs.\n In order to overwrite the default template, you can set the `template` parameter.\n In order to overwrite pipeline kwargs, you can set the `template_variables` parameter.\n\n :param template:\n An optional string template to overwrite PromptBuilder's default template. If None, the default template\n provided at initialization is used.\n :param template_variables:\n An optional dictionary of template variables to overwrite the pipeline variables.\n :param kwargs:\n Pipeline variables used for rendering the prompt.\n\n :returns: A dictionary with the following keys:\n - `prompt`: The updated prompt text after rendering the prompt template.\n\n :raises ValueError:\n If any of the required template variables is not provided.\n \"\"\"\n kwargs = kwargs or {}\n template_variables = template_variables or {}\n template_variables_combined = {**kwargs, **template_variables}\n self._validate_variables(set(template_variables_combined.keys()))\n\n compiled_template = self.template\n if template is not None:\n compiled_template = self._env.from_string(template)\n\n result = compiled_template.render(template_variables_combined)\n return {\"prompt\": result}"
},
{
"class_start_lineno": 61,
"class_end_lineno": 442,
"func_start_lineno": 332,
"func_end_lineno": 359,
"func_code": " def _prepare_prompts(\n self, documents: List[Document], expanded_range: Optional[List[int]] = None\n ) -> List[Union[ChatMessage, None]]:\n all_prompts: List[Union[ChatMessage, None]] = []\n for document in documents:\n if not document.content:\n logger.warning(\"Document {doc_id} has no content. Skipping metadata extraction.\", doc_id=document.id)\n all_prompts.append(None)\n continue\n\n if expanded_range:\n doc_copy = copy.deepcopy(document)\n pages = self.splitter.run(documents=[doc_copy])\n content = \"\"\n for idx, page in enumerate(pages[\"documents\"]):\n if idx + 1 in expanded_range:\n content += page.content\n doc_copy.content = content\n else:\n doc_copy = document\n\n prompt_with_doc = self.builder.run(template=self.prompt, template_variables={\"document\": doc_copy})\n\n # build a ChatMessage with the prompt\n message = ChatMessage.from_user(prompt_with_doc[\"prompt\"])\n all_prompts.append(message)\n\n return all_prompts"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.components.generators.chat.openai.OpenAIChatGenerator.to_dict",
"haystack.components.extractors.llm_metadata_extractor.LLMMetadataExtractor.to_dict",
"haystack.components.builders.prompt_builder.PromptBuilder._validate_variables",
"haystack.components.builders.prompt_builder.PromptBuilder.run",
"haystack.components.extractors.llm_metadata_extractor.LLMMetadataExtractor._prepare_prompts"
] |
Python
| 4 | 5 |
{
"total_num": 13,
"base_passed_num": 9
}
|
[
"haystack.haystack.components.extractors.named_entity_extractor.NamedEntityExtractor::to_dict",
"haystack.haystack.core.serialization.component_to_dict"
] |
haystack
|
[
"haystack/components/extractors/named_entity_extractor.py",
"haystack/core/serialization.py"
] |
[
"test/components/extractors/test_named_entity_extractor.py"
] |
[
{
"class_start_lineno": 78,
"class_end_lineno": 275,
"func_start_lineno": 212,
"func_end_lineno": 232,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialization_dict = default_to_dict(\n self,\n backend=self._backend.type.name,\n model=self._backend.model_name,\n device=self._backend.device.to_dict(),\n pipeline_kwargs=self._backend._pipeline_kwargs,\n token=self.token.to_dict() if self.token else None,\n )\n\n hf_pipeline_kwargs = serialization_dict[\"init_parameters\"][\"pipeline_kwargs\"]\n hf_pipeline_kwargs.pop(\"token\", None)\n\n serialize_hf_model_kwargs(hf_pipeline_kwargs)\n return serialization_dict"
},
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 36,
"func_end_lineno": 82,
"func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.components.extractors.named_entity_extractor.NamedEntityExtractor.to_dict",
"haystack.core.serialization.component_to_dict"
] |
Python
| 1 | 2 |
{
"total_num": 7,
"base_passed_num": 2
}
|
[
"haystack.haystack.utils.callable_serialization.serialize_callable",
"haystack.haystack.components.generators.azure.AzureOpenAIGenerator::to_dict",
"haystack.haystack.core.serialization.component_to_dict"
] |
haystack
|
[
"haystack/utils/callable_serialization.py",
"haystack/components/generators/azure.py",
"haystack/core/serialization.py"
] |
[
"test/components/generators/test_azure.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 80,
"func_start_lineno": 12,
"func_end_lineno": 42,
"func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path"
},
{
"class_start_lineno": 19,
"class_end_lineno": 210,
"func_start_lineno": 162,
"func_end_lineno": 188,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n azure_ad_token_provider_name = None\n if self.azure_ad_token_provider:\n azure_ad_token_provider_name = serialize_callable(self.azure_ad_token_provider)\n return default_to_dict(\n self,\n azure_endpoint=self.azure_endpoint,\n azure_deployment=self.azure_deployment,\n organization=self.organization,\n api_version=self.api_version,\n streaming_callback=callback_name,\n generation_kwargs=self.generation_kwargs,\n system_prompt=self.system_prompt,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n timeout=self.timeout,\n max_retries=self.max_retries,\n default_headers=self.default_headers,\n azure_ad_token_provider=azure_ad_token_provider_name,\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 36,
"func_end_lineno": 82,
"func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.utils.callable_serialization.serialize_callable",
"haystack.components.generators.azure.AzureOpenAIGenerator.to_dict",
"haystack.core.serialization.component_to_dict"
] |
Python
| 2 | 3 |
{
"total_num": 7,
"base_passed_num": 4
}
|
[
"haystack.haystack.utils.callable_serialization.serialize_callable",
"haystack.haystack.components.generators.openai.OpenAIGenerator::to_dict",
"haystack.haystack.utils.type_serialization.thread_safe_import",
"haystack.haystack.utils.callable_serialization.deserialize_callable"
] |
haystack
|
[
"haystack/utils/callable_serialization.py",
"haystack/components/generators/openai.py",
"haystack/utils/type_serialization.py",
"haystack/utils/callable_serialization.py"
] |
[
"test/components/generators/test_openai.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 80,
"func_start_lineno": 12,
"func_end_lineno": 42,
"func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path"
},
{
"class_start_lineno": 20,
"class_end_lineno": 335,
"func_start_lineno": 133,
"func_end_lineno": 150,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n return default_to_dict(\n self,\n model=self.model,\n streaming_callback=callback_name,\n api_base_url=self.api_base_url,\n organization=self.organization,\n generation_kwargs=self.generation_kwargs,\n system_prompt=self.system_prompt,\n api_key=self.api_key.to_dict(),\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 170,
"func_start_lineno": 159,
"func_end_lineno": 170,
"func_code": "def thread_safe_import(module_name: str) -> ModuleType:\n \"\"\"\n Import a module in a thread-safe manner.\n\n Importing modules in a multi-threaded environment can lead to race conditions.\n This function ensures that the module is imported in a thread-safe manner without having impact\n on the performance of the import for single-threaded environments.\n\n :param module_name: the module to import\n \"\"\"\n with _import_lock:\n return importlib.import_module(module_name)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 80,
"func_start_lineno": 45,
"func_end_lineno": 80,
"func_code": "def deserialize_callable(callable_handle: str) -> Callable:\n \"\"\"\n Deserializes a callable given its full import path as a string.\n\n :param callable_handle: The full path of the callable_handle\n :return: The callable\n :raises DeserializationError: If the callable cannot be found\n \"\"\"\n parts = callable_handle.split(\".\")\n\n for i in range(len(parts), 0, -1):\n module_name = \".\".join(parts[:i])\n try:\n mod: Any = thread_safe_import(module_name)\n except Exception:\n # keep reducing i until we find a valid module import\n continue\n\n attr_value = mod\n for part in parts[i:]:\n try:\n attr_value = getattr(attr_value, part)\n except AttributeError as e:\n raise DeserializationError(f\"Could not find attribute '{part}' in {attr_value.__name__}\") from e\n\n # when the attribute is a classmethod, we need the underlying function\n if isinstance(attr_value, (classmethod, staticmethod)):\n attr_value = attr_value.__func__\n\n if not callable(attr_value):\n raise DeserializationError(f\"The final attribute is not callable: {attr_value}\")\n\n return attr_value\n\n # Fallback if we never find anything\n raise DeserializationError(f\"Could not import '{callable_handle}' as a module or callable.\")"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.utils.callable_serialization.serialize_callable",
"haystack.components.generators.openai.OpenAIGenerator.to_dict",
"haystack.utils.type_serialization.thread_safe_import",
"haystack.utils.callable_serialization.deserialize_callable"
] |
Python
| 3 | 4 |
{
"total_num": 12,
"base_passed_num": 9
}
|
[
"haystack.haystack.utils.callable_serialization.serialize_callable",
"haystack.haystack.components.generators.chat.azure.AzureOpenAIChatGenerator::to_dict",
"haystack.haystack.core.serialization.import_class_by_name",
"haystack.haystack.tools.tool.deserialize_tools_inplace"
] |
haystack
|
[
"haystack/utils/callable_serialization.py",
"haystack/components/generators/chat/azure.py",
"haystack/core/serialization.py",
"haystack/tools/tool.py"
] |
[
"test/components/generators/chat/test_azure.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 80,
"func_start_lineno": 12,
"func_end_lineno": 42,
"func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path"
},
{
"class_start_lineno": 20,
"class_end_lineno": 226,
"func_start_lineno": 177,
"func_end_lineno": 204,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n azure_ad_token_provider_name = None\n if self.azure_ad_token_provider:\n azure_ad_token_provider_name = serialize_callable(self.azure_ad_token_provider)\n return default_to_dict(\n self,\n azure_endpoint=self.azure_endpoint,\n azure_deployment=self.azure_deployment,\n organization=self.organization,\n api_version=self.api_version,\n streaming_callback=callback_name,\n generation_kwargs=self.generation_kwargs,\n timeout=self.timeout,\n max_retries=self.max_retries,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n default_headers=self.default_headers,\n tools=[tool.to_dict() for tool in self.tools] if self.tools else None,\n tools_strict=self.tools_strict,\n azure_ad_token_provider=azure_ad_token_provider_name,\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 243,
"func_end_lineno": 264,
"func_code": "def import_class_by_name(fully_qualified_name: str) -> Type[object]:\n \"\"\"\n Utility function to import (load) a class object based on its fully qualified class name.\n\n This function dynamically imports a class based on its string name.\n It splits the name into module path and class name, imports the module,\n and returns the class object.\n\n :param fully_qualified_name: the fully qualified class name as a string\n :returns: the class object.\n :raises ImportError: If the class cannot be imported or found.\n \"\"\"\n try:\n module_path, class_name = fully_qualified_name.rsplit(\".\", 1)\n logger.debug(\n \"Attempting to import class '{cls_name}' from module '{md_path}'\", cls_name=class_name, md_path=module_path\n )\n module = thread_safe_import(module_path)\n return getattr(module, class_name)\n except (ImportError, AttributeError) as error:\n logger.error(\"Failed to import class '{full_name}'\", full_name=fully_qualified_name)\n raise ImportError(f\"Could not import class '{fully_qualified_name}'\") from error"
},
{
"class_start_lineno": 1,
"class_end_lineno": 136,
"func_start_lineno": 106,
"func_end_lineno": 136,
"func_code": "def deserialize_tools_inplace(data: Dict[str, Any], key: str = \"tools\"):\n \"\"\"\n Deserialize Tools in a dictionary inplace.\n\n :param data:\n The dictionary with the serialized data.\n :param key:\n The key in the dictionary where the Tools are stored.\n \"\"\"\n if key in data:\n serialized_tools = data[key]\n\n if serialized_tools is None:\n return\n\n if not isinstance(serialized_tools, list):\n raise TypeError(f\"The value of '{key}' is not a list\")\n\n deserialized_tools = []\n for tool in serialized_tools:\n if not isinstance(tool, dict):\n raise TypeError(f\"Serialized tool '{tool}' is not a dictionary\")\n\n # different classes are allowed: Tool, ComponentTool, etc.\n tool_class = import_class_by_name(tool[\"type\"])\n if not issubclass(tool_class, Tool):\n raise TypeError(f\"Class '{tool_class}' is not a subclass of Tool\")\n\n deserialized_tools.append(tool_class.from_dict(tool))\n\n data[key] = deserialized_tools"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.utils.callable_serialization.serialize_callable",
"haystack.components.generators.chat.azure.AzureOpenAIChatGenerator.to_dict",
"haystack.core.serialization.import_class_by_name",
"haystack.tools.tool.deserialize_tools_inplace"
] |
Python
| 2 | 4 |
{
"total_num": 8,
"base_passed_num": 4
}
|
[
"haystack.haystack.utils.callable_serialization.serialize_callable",
"haystack.haystack.components.generators.chat.openai.OpenAIChatGenerator::to_dict",
"haystack.haystack.core.serialization.import_class_by_name",
"haystack.haystack.tools.tool.deserialize_tools_inplace"
] |
haystack
|
[
"haystack/utils/callable_serialization.py",
"haystack/components/generators/chat/openai.py",
"haystack/core/serialization.py",
"haystack/tools/tool.py"
] |
[
"test/components/generators/chat/test_openai.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 80,
"func_start_lineno": 12,
"func_end_lineno": 42,
"func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path"
},
{
"class_start_lineno": 32,
"class_end_lineno": 571,
"func_start_lineno": 170,
"func_end_lineno": 190,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n return default_to_dict(\n self,\n model=self.model,\n streaming_callback=callback_name,\n api_base_url=self.api_base_url,\n organization=self.organization,\n generation_kwargs=self.generation_kwargs,\n api_key=self.api_key.to_dict(),\n timeout=self.timeout,\n max_retries=self.max_retries,\n tools=[tool.to_dict() for tool in self.tools] if self.tools else None,\n tools_strict=self.tools_strict,\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 243,
"func_end_lineno": 264,
"func_code": "def import_class_by_name(fully_qualified_name: str) -> Type[object]:\n \"\"\"\n Utility function to import (load) a class object based on its fully qualified class name.\n\n This function dynamically imports a class based on its string name.\n It splits the name into module path and class name, imports the module,\n and returns the class object.\n\n :param fully_qualified_name: the fully qualified class name as a string\n :returns: the class object.\n :raises ImportError: If the class cannot be imported or found.\n \"\"\"\n try:\n module_path, class_name = fully_qualified_name.rsplit(\".\", 1)\n logger.debug(\n \"Attempting to import class '{cls_name}' from module '{md_path}'\", cls_name=class_name, md_path=module_path\n )\n module = thread_safe_import(module_path)\n return getattr(module, class_name)\n except (ImportError, AttributeError) as error:\n logger.error(\"Failed to import class '{full_name}'\", full_name=fully_qualified_name)\n raise ImportError(f\"Could not import class '{fully_qualified_name}'\") from error"
},
{
"class_start_lineno": 1,
"class_end_lineno": 136,
"func_start_lineno": 106,
"func_end_lineno": 136,
"func_code": "def deserialize_tools_inplace(data: Dict[str, Any], key: str = \"tools\"):\n \"\"\"\n Deserialize Tools in a dictionary inplace.\n\n :param data:\n The dictionary with the serialized data.\n :param key:\n The key in the dictionary where the Tools are stored.\n \"\"\"\n if key in data:\n serialized_tools = data[key]\n\n if serialized_tools is None:\n return\n\n if not isinstance(serialized_tools, list):\n raise TypeError(f\"The value of '{key}' is not a list\")\n\n deserialized_tools = []\n for tool in serialized_tools:\n if not isinstance(tool, dict):\n raise TypeError(f\"Serialized tool '{tool}' is not a dictionary\")\n\n # different classes are allowed: Tool, ComponentTool, etc.\n tool_class = import_class_by_name(tool[\"type\"])\n if not issubclass(tool_class, Tool):\n raise TypeError(f\"Class '{tool_class}' is not a subclass of Tool\")\n\n deserialized_tools.append(tool_class.from_dict(tool))\n\n data[key] = deserialized_tools"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.utils.callable_serialization.serialize_callable",
"haystack.components.generators.chat.openai.OpenAIChatGenerator.to_dict",
"haystack.core.serialization.import_class_by_name",
"haystack.tools.tool.deserialize_tools_inplace"
] |
Python
| 2 | 4 |
{
"total_num": 19,
"base_passed_num": 8
}
|
[
"haystack.haystack.core.type_utils._strict_types_are_compatible",
"haystack.haystack.core.type_utils._types_are_compatible"
] |
haystack
|
[
"haystack/core/type_utils.py",
"haystack/core/type_utils.py"
] |
[
"test/components/joiners/test_list_joiner.py",
"test/components/validators/test_json_schema.py",
"test/core/pipeline/test_pipeline.py",
"test/tracing/test_logging_tracer.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 105,
"func_start_lineno": 29,
"func_end_lineno": 76,
"func_code": "def _strict_types_are_compatible(sender, receiver): # pylint: disable=too-many-return-statements\n \"\"\"\n Checks whether the sender type is equal to or a subtype of the receiver type under strict validation.\n\n Note: this method has no pretense to perform proper type matching. It especially does not deal with aliasing of\n typing classes such as `List` or `Dict` to their runtime counterparts `list` and `dict`. It also does not deal well\n with \"bare\" types, so `List` is treated differently from `List[Any]`, even though they should be the same.\n Consider simplifying the typing of your components if you observe unexpected errors during component connection.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :return: True if the sender type is strictly compatible with the receiver type, False otherwise.\n \"\"\"\n if sender == receiver or receiver is Any:\n return True\n\n if sender is Any:\n return False\n\n try:\n if issubclass(sender, receiver):\n return True\n except TypeError: # typing classes can't be used with issubclass, so we deal with them below\n pass\n\n sender_origin = get_origin(sender)\n receiver_origin = get_origin(receiver)\n\n if sender_origin is not Union and receiver_origin is Union:\n return any(_strict_types_are_compatible(sender, union_arg) for union_arg in get_args(receiver))\n\n # Both must have origins and they must be equal\n if not (sender_origin and receiver_origin and sender_origin == receiver_origin):\n return False\n\n # Compare generic type arguments\n sender_args = get_args(sender)\n receiver_args = get_args(receiver)\n\n # Handle bare types\n if not sender_args and sender_origin:\n sender_args = (Any,)\n if not receiver_args and receiver_origin:\n receiver_args = (Any,) * (len(sender_args) if sender_args else 1)\n if len(sender_args) > len(receiver_args):\n return False\n\n return all(_strict_types_are_compatible(*args) for args in zip(sender_args, receiver_args))"
},
{
"class_start_lineno": 1,
"class_end_lineno": 105,
"func_start_lineno": 14,
"func_end_lineno": 26,
"func_code": "def _types_are_compatible(sender, receiver, type_validation: bool = True) -> bool:\n \"\"\"\n Determines if two types are compatible based on the specified validation mode.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :param type_validation: Whether to perform strict type validation.\n :return: True if the types are compatible, False otherwise.\n \"\"\"\n if type_validation:\n return _strict_types_are_compatible(sender, receiver)\n else:\n return True"
}
] |
[
"function_empty"
] |
[
"haystack.core.type_utils._strict_types_are_compatible",
"haystack.core.type_utils._types_are_compatible"
] |
Python
| 2 | 2 |
{
"total_num": 36,
"base_passed_num": 29
}
|
[
"haystack.haystack.components.preprocessors.csv_document_splitter.CSVDocumentSplitter::_split_dataframe",
"haystack.haystack.components.preprocessors.csv_document_splitter.CSVDocumentSplitter::_recursive_split"
] |
haystack
|
[
"haystack/components/preprocessors/csv_document_splitter.py",
"haystack/components/preprocessors/csv_document_splitter.py"
] |
[
"test/components/preprocessors/test_csv_document_splitter.py"
] |
[
{
"class_start_lineno": 18,
"class_end_lineno": 244,
"func_start_lineno": 174,
"func_end_lineno": 207,
"func_code": " def _split_dataframe(\n self, df: \"pd.DataFrame\", split_threshold: int, axis: Literal[\"row\", \"column\"]\n ) -> List[\"pd.DataFrame\"]:\n \"\"\"\n Splits a DataFrame into sub-tables based on consecutive empty rows or columns exceeding `split_threshold`.\n\n :param df: DataFrame to split.\n :param split_threshold: Minimum number of consecutive empty rows or columns to trigger a split.\n :param axis: Axis along which to split. Either \"row\" or \"column\".\n :return: List of split DataFrames.\n \"\"\"\n # Find indices of consecutive empty rows or columns\n split_indices = self._find_split_indices(df=df, split_threshold=split_threshold, axis=axis)\n\n # If no split_indices are found, return the original DataFrame\n if len(split_indices) == 0:\n return [df]\n\n # Split the DataFrame at identified indices\n sub_tables = []\n table_start_idx = 0\n df_length = df.shape[0] if axis == \"row\" else df.shape[1]\n for empty_start_idx, empty_end_idx in split_indices + [(df_length, df_length)]:\n # Avoid empty splits\n if empty_start_idx - table_start_idx >= 1:\n if axis == \"row\":\n sub_table = df.iloc[table_start_idx:empty_start_idx]\n else:\n sub_table = df.iloc[:, table_start_idx:empty_start_idx]\n if not sub_table.empty:\n sub_tables.append(sub_table)\n table_start_idx = empty_end_idx + 1\n\n return sub_tables"
},
{
"class_start_lineno": 18,
"class_end_lineno": 244,
"func_start_lineno": 209,
"func_end_lineno": 244,
"func_code": " def _recursive_split(\n self, df: \"pd.DataFrame\", row_split_threshold: int, column_split_threshold: int\n ) -> List[\"pd.DataFrame\"]:\n \"\"\"\n Recursively splits a DataFrame.\n\n Recursively splits a DataFrame first by empty rows, then by empty columns, and repeats the process\n until no more splits are possible. Returns a list of DataFrames, each representing a fully separated sub-table.\n\n :param df: A Pandas DataFrame representing a table (or multiple tables) extracted from a CSV.\n :param row_split_threshold: The minimum number of consecutive empty rows required to trigger a split.\n :param column_split_threshold: The minimum number of consecutive empty columns to trigger a split.\n \"\"\"\n\n # Step 1: Split by rows\n new_sub_tables = self._split_dataframe(df=df, split_threshold=row_split_threshold, axis=\"row\")\n\n # Step 2: Split by columns\n final_tables = []\n for table in new_sub_tables:\n final_tables.extend(self._split_dataframe(df=table, split_threshold=column_split_threshold, axis=\"column\"))\n\n # Step 3: Recursively reapply splitting checked by whether any new empty rows appear after column split\n result = []\n for table in final_tables:\n # Check if there are consecutive rows >= row_split_threshold now present\n if len(self._find_split_indices(df=table, split_threshold=row_split_threshold, axis=\"row\")) > 0:\n result.extend(\n self._recursive_split(\n df=table, row_split_threshold=row_split_threshold, column_split_threshold=column_split_threshold\n )\n )\n else:\n result.append(table)\n\n return result"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.components.preprocessors.csv_document_splitter.CSVDocumentSplitter._split_dataframe",
"haystack.components.preprocessors.csv_document_splitter.CSVDocumentSplitter._recursive_split"
] |
Python
| 1 | 2 |
{
"total_num": 23,
"base_passed_num": 15
}
|
[
"haystack.haystack.components.preprocessors.document_cleaner.DocumentCleaner::_find_and_remove_header_footer",
"haystack.haystack.components.preprocessors.document_cleaner.DocumentCleaner::_find_longest_common_ngram",
"haystack.haystack.components.preprocessors.document_cleaner.DocumentCleaner::_allngram",
"haystack.haystack.components.preprocessors.document_cleaner.DocumentCleaner::run"
] |
haystack
|
[
"haystack/components/preprocessors/document_cleaner.py",
"haystack/components/preprocessors/document_cleaner.py",
"haystack/components/preprocessors/document_cleaner.py",
"haystack/components/preprocessors/document_cleaner.py",
"haystack/components/preprocessors/document_cleaner.py"
] |
[
"test/components/preprocessors/test_document_cleaner.py"
] |
[
{
"class_start_lineno": 18,
"class_end_lineno": 325,
"func_start_lineno": 231,
"func_end_lineno": 267,
"func_code": " def _find_and_remove_header_footer(\n self, text: str, n_chars: int, n_first_pages_to_ignore: int, n_last_pages_to_ignore: int\n ) -> str:\n \"\"\"\n Heuristic to find footers and headers across different pages by searching for the longest common string.\n\n Pages in the text need to be separated by form feed character \"\\f\".\n For headers, we only search in the first n_chars characters (for footer: last n_chars).\n Note: This heuristic uses exact matches and therefore works well for footers like \"Copyright 2019 by XXX\",\n but won't detect \"Page 3 of 4\" or similar.\n\n :param n_chars: The number of first/last characters where the header/footer shall be searched in.\n :param n_first_pages_to_ignore: The number of first pages to ignore\n (e.g. TOCs often don't contain footer/header).\n :param n_last_pages_to_ignore: The number of last pages to ignore.\n :returns: The text without the found headers and footers.\n \"\"\"\n\n pages = text.split(\"\\f\")\n\n # header\n start_of_pages = [p[:n_chars] for p in pages[n_first_pages_to_ignore:-n_last_pages_to_ignore]]\n found_header = self._find_longest_common_ngram(start_of_pages)\n if found_header:\n pages = [page.replace(found_header, \"\") for page in pages]\n\n # footer\n end_of_pages = [p[-n_chars:] for p in pages[n_first_pages_to_ignore:-n_last_pages_to_ignore]]\n found_footer = self._find_longest_common_ngram(end_of_pages)\n if found_footer:\n pages = [page.replace(found_footer, \"\") for page in pages]\n\n logger.debug(\n \"Removed header '{header}' and footer '{footer}' in document\", header=found_header, footer=found_footer\n )\n text = \"\\f\".join(pages)\n return text"
},
{
"class_start_lineno": 18,
"class_end_lineno": 325,
"func_start_lineno": 306,
"func_end_lineno": 325,
"func_code": " def _find_longest_common_ngram(self, sequences: List[str], min_ngram: int = 3, max_ngram: int = 30) -> str:\n \"\"\"\n Find the longest common ngram across a list of text sequences (e.g. start of pages).\n\n Considering all ngram lengths between the minimum and maximum length. Helpful for finding footers, headers etc.\n Empty sequences are ignored.\n\n :param sequences: The list of strings that shall be searched for common n_grams.\n :param max_ngram: The maximum length of ngram to consider.\n :param min_ngram: The minimum length of ngram to consider.\n :returns: The longest ngram that all sequences have in common.\n \"\"\"\n sequences = [s for s in sequences if s] # filter empty sequences\n if not sequences:\n return \"\"\n seqs_ngrams = map(partial(self._allngram, min_ngram=min_ngram, max_ngram=max_ngram), sequences)\n intersection = reduce(set.intersection, seqs_ngrams)\n\n longest = max(intersection, key=len, default=\"\")\n return longest if longest.strip() else \"\""
},
{
"class_start_lineno": 18,
"class_end_lineno": 325,
"func_start_lineno": 290,
"func_end_lineno": 304,
"func_code": " def _allngram(self, seq: str, min_ngram: int, max_ngram: int) -> Set[str]:\n \"\"\"\n Generates all possible ngrams from a given sequence of text.\n\n Considering all ngram lengths between the minimum and maximum length.\n\n :param seq: The sequence to generate ngrams from.\n :param min_ngram: The minimum length of ngram to consider.\n :param max_ngram: The maximum length of ngram to consider.\n :returns: A set of all ngrams from the given sequence.\n \"\"\"\n lengths = range(min_ngram, max_ngram) if max_ngram else range(min_ngram, len(seq))\n ngrams = map(partial(self._ngram, seq), lengths)\n res = set(chain.from_iterable(ngrams))\n return res"
},
{
"class_start_lineno": 18,
"class_end_lineno": 325,
"func_start_lineno": 219,
"func_end_lineno": 229,
"func_code": " def _remove_repeated_substrings(self, text: str) -> str:\n \"\"\"\n Remove any substrings from the text that occur repeatedly on every page. For example headers or footers.\n\n Pages in the text need to be separated by form feed character \"\\f\".\n :param text: Text to clean.\n :returns: The text without the repeated substrings.\n \"\"\"\n return self._find_and_remove_header_footer(\n text, n_chars=300, n_first_pages_to_ignore=1, n_last_pages_to_ignore=1\n )"
},
{
"class_start_lineno": 18,
"class_end_lineno": 325,
"func_start_lineno": 93,
"func_end_lineno": 145,
"func_code": " def run(self, documents: List[Document]):\n \"\"\"\n Cleans up the documents.\n\n :param documents: List of Documents to clean.\n\n :returns: A dictionary with the following key:\n - `documents`: List of cleaned Documents.\n\n :raises TypeError: if documents is not a list of Documents.\n \"\"\"\n if not isinstance(documents, list) or documents and not isinstance(documents[0], Document):\n raise TypeError(\"DocumentCleaner expects a List of Documents as input.\")\n\n cleaned_docs = []\n for doc in documents:\n if doc.content is None:\n logger.warning(\n \"DocumentCleaner only cleans text documents but document.content for document ID\"\n \" %{document_id} is None.\",\n document_id=doc.id,\n )\n cleaned_docs.append(doc)\n continue\n text = doc.content\n\n if self.unicode_normalization:\n text = self._normalize_unicode(text, self.unicode_normalization)\n if self.ascii_only:\n text = self._ascii_only(text)\n if self.remove_extra_whitespaces:\n text = self._remove_extra_whitespaces(text)\n if self.remove_empty_lines:\n text = self._remove_empty_lines(text)\n if self.remove_substrings:\n text = self._remove_substrings(text, self.remove_substrings)\n if self.remove_regex:\n text = self._remove_regex(text, self.remove_regex)\n if self.remove_repeated_substrings:\n text = self._remove_repeated_substrings(text)\n\n clean_doc = Document(\n id=doc.id if self.keep_id else \"\",\n content=text,\n blob=doc.blob,\n meta=deepcopy(doc.meta),\n score=doc.score,\n embedding=doc.embedding,\n sparse_embedding=doc.sparse_embedding,\n )\n cleaned_docs.append(clean_doc)\n\n return {\"documents\": cleaned_docs}"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.components.preprocessors.document_cleaner.DocumentCleaner._find_and_remove_header_footer",
"haystack.components.preprocessors.document_cleaner.DocumentCleaner._find_longest_common_ngram",
"haystack.components.preprocessors.document_cleaner.DocumentCleaner._allngram",
"haystack.components.preprocessors.document_cleaner.DocumentCleaner._remove_repeated_substrings",
"haystack.components.preprocessors.document_cleaner.DocumentCleaner.run"
] |
Python
| 3 | 4 |
{
"total_num": 14,
"base_passed_num": 3
}
|
[
"haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_split_by_character",
"haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_split_by_function",
"haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_split_by_nltk_sentence",
"haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::run"
] |
haystack
|
[
"haystack/components/preprocessors/document_splitter.py",
"haystack/components/preprocessors/document_splitter.py",
"haystack/components/preprocessors/document_splitter.py",
"haystack/components/preprocessors/document_splitter.py",
"haystack/components/preprocessors/document_splitter.py"
] |
[
"test/components/preprocessors/test_document_splitter.py"
] |
[
{
"class_start_lineno": 22,
"class_end_lineno": 490,
"func_start_lineno": 238,
"func_end_lineno": 251,
"func_code": " def _split_by_character(self, doc) -> List[Document]:\n split_at = _CHARACTER_SPLIT_BY_MAPPING[self.split_by]\n units = doc.content.split(split_at)\n # Add the delimiter back to all units except the last one\n for i in range(len(units) - 1):\n units[i] += split_at\n text_splits, splits_pages, splits_start_idxs = self._concatenate_units(\n units, self.split_length, self.split_overlap, self.split_threshold\n )\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n return self._create_docs_from_splits(\n text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata\n )"
},
{
"class_start_lineno": 22,
"class_end_lineno": 490,
"func_start_lineno": 253,
"func_end_lineno": 261,
"func_code": " def _split_by_function(self, doc) -> List[Document]:\n # the check for None is done already in the run method\n splits = self.splitting_function(doc.content) # type: ignore\n docs: List[Document] = []\n for s in splits:\n meta = deepcopy(doc.meta)\n meta[\"source_id\"] = doc.id\n docs.append(Document(content=s, meta=meta))\n return docs"
},
{
"class_start_lineno": 22,
"class_end_lineno": 490,
"func_start_lineno": 213,
"func_end_lineno": 236,
"func_code": " def _split_by_nltk_sentence(self, doc: Document) -> List[Document]:\n split_docs = []\n\n result = self.sentence_splitter.split_sentences(doc.content) # type: ignore # None check is done in run()\n units = [sentence[\"sentence\"] for sentence in result]\n\n if self.respect_sentence_boundary:\n text_splits, splits_pages, splits_start_idxs = self._concatenate_sentences_based_on_word_amount(\n sentences=units, split_length=self.split_length, split_overlap=self.split_overlap\n )\n else:\n text_splits, splits_pages, splits_start_idxs = self._concatenate_units(\n elements=units,\n split_length=self.split_length,\n split_overlap=self.split_overlap,\n split_threshold=self.split_threshold,\n )\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n split_docs += self._create_docs_from_splits(\n text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata\n )\n\n return split_docs"
},
{
"class_start_lineno": 22,
"class_end_lineno": 490,
"func_start_lineno": 204,
"func_end_lineno": 211,
"func_code": " def _split_document(self, doc: Document) -> List[Document]:\n if self.split_by == \"sentence\" or self.respect_sentence_boundary:\n return self._split_by_nltk_sentence(doc)\n\n if self.split_by == \"function\" and self.splitting_function is not None:\n return self._split_by_function(doc)\n\n return self._split_by_character(doc)"
},
{
"class_start_lineno": 22,
"class_end_lineno": 490,
"func_start_lineno": 166,
"func_end_lineno": 202,
"func_code": " def run(self, documents: List[Document]):\n \"\"\"\n Split documents into smaller parts.\n\n Splits documents by the unit expressed in `split_by`, with a length of `split_length`\n and an overlap of `split_overlap`.\n\n :param documents: The documents to split.\n :returns: A dictionary with the following key:\n - `documents`: List of documents with the split texts. Each document includes:\n - A metadata field `source_id` to track the original document.\n - A metadata field `page_number` to track the original page number.\n - All other metadata copied from the original document.\n\n :raises TypeError: if the input is not a list of Documents.\n :raises ValueError: if the content of a document is None.\n \"\"\"\n if self._use_sentence_splitter and self.sentence_splitter is None:\n raise RuntimeError(\n \"The component DocumentSplitter wasn't warmed up. Run 'warm_up()' before calling 'run()'.\"\n )\n\n if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):\n raise TypeError(\"DocumentSplitter expects a List of Documents as input.\")\n\n split_docs: List[Document] = []\n for doc in documents:\n if doc.content is None:\n raise ValueError(\n f\"DocumentSplitter only works with text documents but content for document ID {doc.id} is None.\"\n )\n if doc.content == \"\":\n logger.warning(\"Document ID {doc_id} has an empty content. Skipping this document.\", doc_id=doc.id)\n continue\n\n split_docs += self._split_document(doc)\n return {\"documents\": split_docs}"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.components.preprocessors.document_splitter.DocumentSplitter._split_by_character",
"haystack.components.preprocessors.document_splitter.DocumentSplitter._split_by_function",
"haystack.components.preprocessors.document_splitter.DocumentSplitter._split_by_nltk_sentence",
"haystack.components.preprocessors.document_splitter.DocumentSplitter._split_document",
"haystack.components.preprocessors.document_splitter.DocumentSplitter.run"
] |
Python
| 1 | 4 |
{
"total_num": 53,
"base_passed_num": 17
}
|
[
"haystack.haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter::_chunk_length",
"haystack.haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter::_split_chunk",
"haystack.haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter::_apply_overlap",
"haystack.haystack.components.preprocessors.sentence_tokenizer.SentenceSplitter::split_sentences",
"haystack.haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter::_chunk_text"
] |
haystack
|
[
"haystack/components/preprocessors/recursive_splitter.py",
"haystack/components/preprocessors/recursive_splitter.py",
"haystack/components/preprocessors/recursive_splitter.py",
"haystack/components/preprocessors/recursive_splitter.py",
"haystack/components/preprocessors/sentence_tokenizer.py",
"haystack/components/preprocessors/recursive_splitter.py"
] |
[
"test/components/preprocessors/test_recursive_splitter.py"
] |
[
{
"class_start_lineno": 15,
"class_end_lineno": 421,
"func_start_lineno": 215,
"func_end_lineno": 227,
"func_code": " def _chunk_length(self, text: str) -> int:\n \"\"\"\n Split the text by whitespace and count non-empty elements.\n\n :param: The text to be split.\n :return: The number of words in the text.\n \"\"\"\n\n if self.split_units == \"word\":\n words = [word for word in text.split(\" \") if word]\n return len(words)\n\n return len(text)"
},
{
"class_start_lineno": 15,
"class_end_lineno": 421,
"func_start_lineno": 204,
"func_end_lineno": 213,
"func_code": " def _get_overlap(self, overlapped_chunks: List[str]) -> Tuple[str, str]:\n \"\"\"Get the previous overlapped chunk instead of the original chunk.\"\"\"\n prev_chunk = overlapped_chunks[-1]\n overlap_start = max(0, self._chunk_length(prev_chunk) - self.split_overlap)\n if self.split_units == \"word\":\n word_chunks = prev_chunk.split()\n overlap = \" \".join(word_chunks[overlap_start:])\n else:\n overlap = prev_chunk[overlap_start:]\n return overlap, prev_chunk"
},
{
"class_start_lineno": 15,
"class_end_lineno": 421,
"func_start_lineno": 114,
"func_end_lineno": 133,
"func_code": " def _split_chunk(self, current_chunk: str) -> Tuple[str, str]:\n \"\"\"\n Splits a chunk based on the split_length and split_units attribute.\n\n :param current_chunk: The current chunk to be split.\n :returns:\n A tuple containing the current chunk and the remaining words or characters.\n \"\"\"\n\n if self.split_units == \"word\":\n words = current_chunk.split()\n current_chunk = \" \".join(words[: self.split_length])\n remaining_words = words[self.split_length :]\n return current_chunk, \" \".join(remaining_words)\n\n # split by characters\n text = current_chunk\n current_chunk = text[: self.split_length]\n remaining_chars = text[self.split_length :]\n return current_chunk, remaining_chars"
},
{
"class_start_lineno": 15,
"class_end_lineno": 421,
"func_start_lineno": 135,
"func_end_lineno": 202,
"func_code": " def _apply_overlap(self, chunks: List[str]) -> List[str]:\n \"\"\"\n Applies an overlap between consecutive chunks if the chunk_overlap attribute is greater than zero.\n\n Works for both word- and character-level splitting. It trims the last chunk if it exceeds the split_length and\n adds the trimmed content to the next chunk. If the last chunk is still too long after trimming, it splits it\n and adds the first chunk to the list. This process continues until the last chunk is within the split_length.\n\n :param chunks: A list of text chunks.\n :returns:\n A list of text chunks with the overlap applied.\n \"\"\"\n overlapped_chunks: List[str] = []\n\n for idx, chunk in enumerate(chunks):\n if idx == 0:\n overlapped_chunks.append(chunk)\n continue\n\n # get the overlap between the current and previous chunk\n overlap, prev_chunk = self._get_overlap(overlapped_chunks)\n if overlap == prev_chunk:\n logger.warning(\n \"Overlap is the same as the previous chunk. \"\n \"Consider increasing the `split_length` parameter or decreasing the `split_overlap` parameter.\"\n )\n\n # create a new chunk starting with the overlap\n current_chunk = overlap + \" \" + chunk if self.split_units == \"word\" else overlap + chunk\n\n # if this new chunk exceeds 'split_length', trim it and move the remaining text to the next chunk\n # if this is the last chunk, another new chunk will contain the trimmed text preceded by the overlap\n # of the last chunk\n if self._chunk_length(current_chunk) > self.split_length:\n current_chunk, remaining_text = self._split_chunk(current_chunk)\n if idx < len(chunks) - 1:\n chunks[idx + 1] = remaining_text + (\" \" if self.split_units == \"word\" else \"\") + chunks[idx + 1]\n elif remaining_text:\n # create a new chunk with the trimmed text preceded by the overlap of the last chunk\n overlapped_chunks.append(current_chunk)\n chunk = remaining_text\n overlap, _ = self._get_overlap(overlapped_chunks)\n current_chunk = overlap + \" \" + chunk if self.split_units == \"word\" else overlap + chunk\n\n overlapped_chunks.append(current_chunk)\n\n # it can still be that the new last chunk exceeds the 'split_length'\n # continue splitting until the last chunk is within 'split_length'\n if idx == len(chunks) - 1 and self._chunk_length(current_chunk) > self.split_length:\n last_chunk = overlapped_chunks.pop()\n first_chunk, remaining_chunk = self._split_chunk(last_chunk)\n overlapped_chunks.append(first_chunk)\n\n while remaining_chunk:\n # combine overlap with remaining chunk\n overlap, _ = self._get_overlap(overlapped_chunks)\n current = overlap + (\" \" if self.split_units == \"word\" else \"\") + remaining_chunk\n\n # if it fits within split_length we are done\n if self._chunk_length(current) <= self.split_length:\n overlapped_chunks.append(current)\n break\n\n # otherwise split it again\n first_chunk, remaining_chunk = self._split_chunk(current)\n overlapped_chunks.append(first_chunk)\n\n return overlapped_chunks"
},
{
"class_start_lineno": 116,
"class_end_lineno": 238,
"func_start_lineno": 147,
"func_end_lineno": 159,
"func_code": " def split_sentences(self, text: str) -> List[Dict[str, Any]]:\n \"\"\"\n Splits a text into sentences including references to original char positions for each split.\n\n :param text: The text to split.\n :returns: list of sentences with positions.\n \"\"\"\n sentence_spans = list(self.sentence_tokenizer.span_tokenize(text))\n if self.use_split_rules:\n sentence_spans = SentenceSplitter._apply_split_rules(text, sentence_spans)\n\n sentences = [{\"sentence\": text[start:end], \"start\": start, \"end\": end} for start, end in sentence_spans]\n return sentences"
},
{
"class_start_lineno": 15,
"class_end_lineno": 421,
"func_start_lineno": 229,
"func_end_lineno": 311,
"func_code": " def _chunk_text(self, text: str) -> List[str]:\n \"\"\"\n Recursive chunking algorithm that divides text into smaller chunks based on a list of separator characters.\n\n It starts with a list of separator characters (e.g., [\"\\n\\n\", \"sentence\", \"\\n\", \" \"]) and attempts to divide\n the text using the first separator. If the resulting chunks are still larger than the specified chunk size,\n it moves to the next separator in the list. This process continues recursively, progressively applying each\n specific separator until the chunks meet the desired size criteria.\n\n :param text: The text to be split into chunks.\n :returns:\n A list of text chunks.\n \"\"\"\n if self._chunk_length(text) <= self.split_length:\n return [text]\n\n for curr_separator in self.separators: # type: ignore # the caller already checked that separators is not None\n if curr_separator == \"sentence\":\n # re. ignore: correct SentenceSplitter initialization is checked at the initialization of the component\n sentence_with_spans = self.nltk_tokenizer.split_sentences(text) # type: ignore\n splits = [sentence[\"sentence\"] for sentence in sentence_with_spans]\n else:\n # add escape \"\\\" to the separator and wrapped it in a group so that it's included in the splits as well\n escaped_separator = re.escape(curr_separator)\n escaped_separator = f\"({escaped_separator})\"\n\n # split the text and merge every two consecutive splits, i.e.: the text and the separator after it\n splits = re.split(escaped_separator, text)\n splits = [\n \"\".join([splits[i], splits[i + 1]]) if i < len(splits) - 1 else splits[i]\n for i in range(0, len(splits), 2)\n ]\n\n # remove last split if it's empty\n splits = splits[:-1] if splits[-1] == \"\" else splits\n\n if len(splits) == 1: # go to next separator, if current separator not found in the text\n continue\n\n chunks = []\n current_chunk: List[str] = []\n current_length = 0\n\n # check splits, if any is too long, recursively chunk it, otherwise add to current chunk\n for split in splits:\n split_text = split\n\n # if adding this split exceeds chunk_size, process current_chunk\n if current_length + self._chunk_length(split_text) > self.split_length:\n # process current_chunk\n if current_chunk: # keep the good splits\n chunks.append(\"\".join(current_chunk))\n current_chunk = []\n current_length = 0\n\n # recursively handle splits that are too large\n if self._chunk_length(split_text) > self.split_length:\n if curr_separator == self.separators[-1]:\n # tried last separator, can't split further, do a fixed-split based on word/character\n fall_back_chunks = self._fall_back_to_fixed_chunking(split_text, self.split_units)\n chunks.extend(fall_back_chunks)\n else:\n chunks.extend(self._chunk_text(split_text))\n current_length += self._chunk_length(split_text)\n\n else:\n current_chunk.append(split_text)\n current_length += self._chunk_length(split_text)\n else:\n current_chunk.append(split_text)\n current_length += self._chunk_length(split_text)\n\n if current_chunk:\n chunks.append(\"\".join(current_chunk))\n\n if self.split_overlap > 0:\n chunks = self._apply_overlap(chunks)\n\n if chunks:\n return chunks\n\n # if no separator worked, fall back to word- or character-level chunking\n return self._fall_back_to_fixed_chunking(text, self.split_units)"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._chunk_length",
"haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._get_overlap",
"haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._split_chunk",
"haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._apply_overlap",
"haystack.components.preprocessors.sentence_tokenizer.SentenceSplitter.split_sentences",
"haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._chunk_text"
] |
Python
| 4 | 5 |
{
"total_num": 35,
"base_passed_num": 9
}
|
[
"haystack.haystack.utils.device.ComponentDevice::to_dict",
"haystack.haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker::to_dict",
"haystack.haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker::_greedy_diversity_order",
"haystack.haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker::run"
] |
haystack
|
[
"haystack/utils/device.py",
"haystack/components/rankers/sentence_transformers_diversity.py",
"haystack/components/rankers/sentence_transformers_diversity.py",
"haystack/components/rankers/sentence_transformers_diversity.py"
] |
[
"test/components/rankers/test_sentence_transformers_diversity.py"
] |
[
{
"class_start_lineno": 240,
"class_end_lineno": 480,
"func_start_lineno": 450,
"func_end_lineno": 463,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Convert the component device representation to a JSON-serializable dictionary.\n\n :returns:\n The dictionary representation.\n \"\"\"\n if self._single_device is not None:\n return {\"type\": \"single\", \"device\": str(self._single_device)}\n elif self._multiple_devices is not None:\n return {\"type\": \"multiple\", \"device_map\": self._multiple_devices.to_dict()}\n else:\n # Unreachable\n assert False"
},
{
"class_start_lineno": 76,
"class_end_lineno": 435,
"func_start_lineno": 212,
"func_end_lineno": 241,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialization_dict = default_to_dict(\n self,\n model=self.model_name_or_path,\n top_k=self.top_k,\n device=self.device.to_dict(),\n token=self.token.to_dict() if self.token else None,\n similarity=str(self.similarity),\n query_prefix=self.query_prefix,\n query_suffix=self.query_suffix,\n document_prefix=self.document_prefix,\n document_suffix=self.document_suffix,\n meta_fields_to_embed=self.meta_fields_to_embed,\n embedding_separator=self.embedding_separator,\n strategy=str(self.strategy),\n lambda_threshold=self.lambda_threshold,\n model_kwargs=self.model_kwargs,\n tokenizer_kwargs=self.tokenizer_kwargs,\n config_kwargs=self.config_kwargs,\n backend=self.backend,\n )\n if serialization_dict[\"init_parameters\"].get(\"model_kwargs\") is not None:\n serialize_hf_model_kwargs(serialization_dict[\"init_parameters\"][\"model_kwargs\"])\n return serialization_dict"
},
{
"class_start_lineno": 76,
"class_end_lineno": 435,
"func_start_lineno": 279,
"func_end_lineno": 323,
"func_code": " def _greedy_diversity_order(self, query: str, documents: List[Document]) -> List[Document]:\n \"\"\"\n Orders the given list of documents to maximize diversity.\n\n The algorithm first calculates embeddings for each document and the query. It starts by selecting the document\n that is semantically closest to the query. Then, for each remaining document, it selects the one that, on\n average, is least similar to the already selected documents. This process continues until all documents are\n selected, resulting in a list where each subsequent document contributes the most to the overall diversity of\n the selected set.\n\n :param query: The search query.\n :param documents: The list of Document objects to be ranked.\n\n :return: A list of documents ordered to maximize diversity.\n \"\"\"\n texts_to_embed = self._prepare_texts_to_embed(documents)\n\n doc_embeddings, query_embedding = self._embed_and_normalize(query, texts_to_embed)\n\n n = len(documents)\n selected: List[int] = []\n\n # Compute the similarity vector between the query and documents\n query_doc_sim = query_embedding @ doc_embeddings.T\n\n # Start with the document with the highest similarity to the query\n selected.append(int(torch.argmax(query_doc_sim).item()))\n\n selected_sum = doc_embeddings[selected[0]] / n\n\n while len(selected) < n:\n # Compute mean of dot products of all selected documents and all other documents\n similarities = selected_sum @ doc_embeddings.T\n # Mask documents that are already selected\n similarities[selected] = torch.inf\n # Select the document with the lowest total similarity score\n index_unselected = int(torch.argmin(similarities).item())\n selected.append(index_unselected)\n # It's enough just to add to the selected vectors because dot product is distributive\n # It's divided by n for numerical stability\n selected_sum += doc_embeddings[index_unselected] / n\n\n ranked_docs: List[Document] = [documents[i] for i in selected]\n\n return ranked_docs"
},
{
"class_start_lineno": 76,
"class_end_lineno": 435,
"func_start_lineno": 388,
"func_end_lineno": 435,
"func_code": " def run(\n self,\n query: str,\n documents: List[Document],\n top_k: Optional[int] = None,\n lambda_threshold: Optional[float] = None,\n ) -> Dict[str, List[Document]]:\n \"\"\"\n Rank the documents based on their diversity.\n\n :param query: The search query.\n :param documents: List of Document objects to be ranker.\n :param top_k: Optional. An integer to override the top_k set during initialization.\n :param lambda_threshold: Override the trade-off parameter between relevance and diversity. Only used when\n strategy is \"maximum_margin_relevance\".\n\n :returns: A dictionary with the following key:\n - `documents`: List of Document objects that have been selected based on the diversity ranking.\n\n :raises ValueError: If the top_k value is less than or equal to 0.\n :raises RuntimeError: If the component has not been warmed up.\n \"\"\"\n if self.model is None:\n error_msg = (\n \"The component SentenceTransformersDiversityRanker wasn't warmed up. \"\n \"Run 'warm_up()' before calling 'run()'.\"\n )\n raise RuntimeError(error_msg)\n\n if not documents:\n return {\"documents\": []}\n\n if top_k is None:\n top_k = self.top_k\n elif not 0 < top_k <= len(documents):\n raise ValueError(f\"top_k must be between 1 and {len(documents)}, but got {top_k}\")\n\n if self.strategy == DiversityRankingStrategy.MAXIMUM_MARGIN_RELEVANCE:\n if lambda_threshold is None:\n lambda_threshold = self.lambda_threshold\n self._check_lambda_threshold(lambda_threshold, self.strategy)\n re_ranked_docs = self._maximum_margin_relevance(\n query=query, documents=documents, lambda_threshold=lambda_threshold, top_k=top_k\n )\n else:\n re_ranked_docs = self._greedy_diversity_order(query=query, documents=documents)\n\n return {\"documents\": re_ranked_docs[:top_k]}"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.utils.device.ComponentDevice.to_dict",
"haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker.to_dict",
"haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker._greedy_diversity_order",
"haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker.run"
] |
Python
| 3 | 4 |
{
"total_num": 53,
"base_passed_num": 17
}
|
[
"haystack.haystack.utils.auth.EnvVarSecret::resolve_value",
"haystack.haystack.components.rankers.transformers_similarity.TransformersSimilarityRanker::warm_up"
] |
haystack
|
[
"haystack/utils/auth.py",
"haystack/components/rankers/transformers_similarity.py"
] |
[
"test/components/rankers/test_transformers_similarity.py"
] |
[
{
"class_start_lineno": 171,
"class_end_lineno": 211,
"func_start_lineno": 196,
"func_end_lineno": 206,
"func_code": " def resolve_value(self) -> Optional[Any]:\n \"\"\"Resolve the secret to an atomic value. The semantics of the value is secret-dependent.\"\"\"\n out = None\n for env_var in self._env_vars:\n value = os.getenv(env_var)\n if value is not None:\n out = value\n break\n if out is None and self._strict:\n raise ValueError(f\"None of the following authentication environment variables are set: {self._env_vars}\")\n return out"
},
{
"class_start_lineno": 24,
"class_end_lineno": 309,
"func_start_lineno": 142,
"func_end_lineno": 155,
"func_code": " def warm_up(self):\n \"\"\"\n Initializes the component.\n \"\"\"\n if self.model is None:\n self.model = AutoModelForSequenceClassification.from_pretrained(\n self.model_name_or_path, token=self.token.resolve_value() if self.token else None, **self.model_kwargs\n )\n self.tokenizer = AutoTokenizer.from_pretrained(\n self.model_name_or_path,\n token=self.token.resolve_value() if self.token else None,\n **self.tokenizer_kwargs,\n )\n self.device = ComponentDevice.from_multiple(device_map=DeviceMap.from_hf(self.model.hf_device_map))"
}
] |
[
"Development"
] |
[
"haystack.utils.auth.EnvVarSecret.resolve_value",
"haystack.components.rankers.transformers_similarity.TransformersSimilarityRanker.warm_up"
] |
Python
| 0 | 2 |
{
"total_num": 26,
"base_passed_num": 8
}
|
[
"haystack.haystack.core.serialization.default_to_dict",
"haystack.haystack.utils.hf.serialize_hf_model_kwargs",
"haystack.haystack.components.readers.extractive.ExtractiveReader::to_dict"
] |
haystack
|
[
"haystack/core/serialization.py",
"haystack/utils/hf.py",
"haystack/components/readers/extractive.py"
] |
[
"test/components/readers/test_extractive.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 172,
"func_end_lineno": 210,
"func_code": "def default_to_dict(obj: Any, **init_parameters) -> Dict[str, Any]:\n \"\"\"\n Utility function to serialize an object to a dictionary.\n\n This is mostly necessary for components but can be used by any object.\n `init_parameters` are parameters passed to the object class `__init__`.\n They must be defined explicitly as they'll be used when creating a new\n instance of `obj` with `from_dict`. Omitting them might cause deserialisation\n errors or unexpected behaviours later, when calling `from_dict`.\n\n An example usage:\n\n ```python\n class MyClass:\n def __init__(self, my_param: int = 10):\n self.my_param = my_param\n\n def to_dict(self):\n return default_to_dict(self, my_param=self.my_param)\n\n\n obj = MyClass(my_param=1000)\n data = obj.to_dict()\n assert data == {\n \"type\": \"MyClass\",\n \"init_parameters\": {\n \"my_param\": 1000,\n },\n }\n ```\n\n :param obj:\n The object to be serialized.\n :param init_parameters:\n The parameters used to create a new instance of the class.\n :returns:\n A dictionary representation of the instance.\n \"\"\"\n return {\"type\": generate_qualified_class_name(type(obj)), \"init_parameters\": init_parameters}"
},
{
"class_start_lineno": 1,
"class_end_lineno": 395,
"func_start_lineno": 98,
"func_end_lineno": 112,
"func_code": "def serialize_hf_model_kwargs(kwargs: Dict[str, Any]):\n \"\"\"\n Recursively serialize HuggingFace specific model keyword arguments in-place to make them JSON serializable.\n\n :param kwargs: The keyword arguments to serialize\n \"\"\"\n torch_import.check()\n\n for k, v in kwargs.items():\n # torch.dtype\n if isinstance(v, torch.dtype):\n kwargs[k] = str(v)\n\n if isinstance(v, dict):\n serialize_hf_model_kwargs(v)"
},
{
"class_start_lineno": 26,
"class_end_lineno": 660,
"func_start_lineno": 136,
"func_end_lineno": 160,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialization_dict = default_to_dict(\n self,\n model=self.model_name_or_path,\n device=None,\n token=self.token.to_dict() if self.token else None,\n max_seq_length=self.max_seq_length,\n top_k=self.top_k,\n score_threshold=self.score_threshold,\n stride=self.stride,\n max_batch_size=self.max_batch_size,\n answers_per_seq=self.answers_per_seq,\n no_answer=self.no_answer,\n calibration_factor=self.calibration_factor,\n model_kwargs=self.model_kwargs,\n )\n\n serialize_hf_model_kwargs(serialization_dict[\"init_parameters\"][\"model_kwargs\"])\n return serialization_dict"
}
] |
[
"function_empty"
] |
[
"haystack.core.serialization.default_to_dict",
"haystack.utils.hf.serialize_hf_model_kwargs",
"haystack.components.readers.extractive.ExtractiveReader.to_dict"
] |
Python
| 3 | 3 |
{
"total_num": 34,
"base_passed_num": 21
}
|
[
"haystack.haystack.core.serialization.default_to_dict",
"haystack.haystack.document_stores.in_memory.document_store.InMemoryDocumentStore::to_dict"
] |
haystack
|
[
"haystack/core/serialization.py",
"haystack/document_stores/in_memory/document_store.py",
"haystack/components/retrievers/filter_retriever.py"
] |
[
"test/components/retrievers/test_filter_retriever.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 172,
"func_end_lineno": 210,
"func_code": "def default_to_dict(obj: Any, **init_parameters) -> Dict[str, Any]:\n \"\"\"\n Utility function to serialize an object to a dictionary.\n\n This is mostly necessary for components but can be used by any object.\n `init_parameters` are parameters passed to the object class `__init__`.\n They must be defined explicitly as they'll be used when creating a new\n instance of `obj` with `from_dict`. Omitting them might cause deserialisation\n errors or unexpected behaviours later, when calling `from_dict`.\n\n An example usage:\n\n ```python\n class MyClass:\n def __init__(self, my_param: int = 10):\n self.my_param = my_param\n\n def to_dict(self):\n return default_to_dict(self, my_param=self.my_param)\n\n\n obj = MyClass(my_param=1000)\n data = obj.to_dict()\n assert data == {\n \"type\": \"MyClass\",\n \"init_parameters\": {\n \"my_param\": 1000,\n },\n }\n ```\n\n :param obj:\n The object to be serialized.\n :param init_parameters:\n The parameters used to create a new instance of the class.\n :returns:\n A dictionary representation of the instance.\n \"\"\"\n return {\"type\": generate_qualified_class_name(type(obj)), \"init_parameters\": init_parameters}"
},
{
"class_start_lineno": 58,
"class_end_lineno": 738,
"func_start_lineno": 344,
"func_end_lineno": 358,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(\n self,\n bm25_tokenization_regex=self.bm25_tokenization_regex,\n bm25_algorithm=self.bm25_algorithm,\n bm25_parameters=self.bm25_parameters,\n embedding_similarity_function=self.embedding_similarity_function,\n index=self.index,\n )"
},
{
"class_start_lineno": 15,
"class_end_lineno": 96,
"func_start_lineno": 60,
"func_end_lineno": 68,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n docstore = self.document_store.to_dict()\n return default_to_dict(self, document_store=docstore, filters=self.filters)"
}
] |
[
"function_empty"
] |
[
"haystack.core.serialization.default_to_dict",
"haystack.document_stores.in_memory.document_store.InMemoryDocumentStore.to_dict",
"haystack.components.retrievers.filter_retriever.FilterRetriever.to_dict"
] |
Python
| 2 | 2 |
{
"total_num": 10,
"base_passed_num": 7
}
|
[
"haystack.haystack.core.serialization.default_to_dict",
"haystack.haystack.document_stores.in_memory.document_store.InMemoryDocumentStore::to_dict",
"haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_split_by_character",
"haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::run"
] |
haystack
|
[
"haystack/core/serialization.py",
"haystack/document_stores/in_memory/document_store.py",
"haystack/components/retrievers/sentence_window_retriever.py",
"haystack/components/preprocessors/document_splitter.py",
"haystack/components/preprocessors/document_splitter.py",
"haystack/components/preprocessors/document_splitter.py"
] |
[
"test/components/retrievers/test_sentence_window_retriever.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 172,
"func_end_lineno": 210,
"func_code": "def default_to_dict(obj: Any, **init_parameters) -> Dict[str, Any]:\n \"\"\"\n Utility function to serialize an object to a dictionary.\n\n This is mostly necessary for components but can be used by any object.\n `init_parameters` are parameters passed to the object class `__init__`.\n They must be defined explicitly as they'll be used when creating a new\n instance of `obj` with `from_dict`. Omitting them might cause deserialisation\n errors or unexpected behaviours later, when calling `from_dict`.\n\n An example usage:\n\n ```python\n class MyClass:\n def __init__(self, my_param: int = 10):\n self.my_param = my_param\n\n def to_dict(self):\n return default_to_dict(self, my_param=self.my_param)\n\n\n obj = MyClass(my_param=1000)\n data = obj.to_dict()\n assert data == {\n \"type\": \"MyClass\",\n \"init_parameters\": {\n \"my_param\": 1000,\n },\n }\n ```\n\n :param obj:\n The object to be serialized.\n :param init_parameters:\n The parameters used to create a new instance of the class.\n :returns:\n A dictionary representation of the instance.\n \"\"\"\n return {\"type\": generate_qualified_class_name(type(obj)), \"init_parameters\": init_parameters}"
},
{
"class_start_lineno": 58,
"class_end_lineno": 738,
"func_start_lineno": 344,
"func_end_lineno": 358,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(\n self,\n bm25_tokenization_regex=self.bm25_tokenization_regex,\n bm25_algorithm=self.bm25_algorithm,\n bm25_parameters=self.bm25_parameters,\n embedding_similarity_function=self.embedding_similarity_function,\n index=self.index,\n )"
},
{
"class_start_lineno": 13,
"class_end_lineno": 198,
"func_start_lineno": 122,
"func_end_lineno": 130,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n docstore = self.document_store.to_dict()\n return default_to_dict(self, document_store=docstore, window_size=self.window_size)"
},
{
"class_start_lineno": 22,
"class_end_lineno": 490,
"func_start_lineno": 238,
"func_end_lineno": 251,
"func_code": " def _split_by_character(self, doc) -> List[Document]:\n split_at = _CHARACTER_SPLIT_BY_MAPPING[self.split_by]\n units = doc.content.split(split_at)\n # Add the delimiter back to all units except the last one\n for i in range(len(units) - 1):\n units[i] += split_at\n text_splits, splits_pages, splits_start_idxs = self._concatenate_units(\n units, self.split_length, self.split_overlap, self.split_threshold\n )\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n return self._create_docs_from_splits(\n text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata\n )"
},
{
"class_start_lineno": 22,
"class_end_lineno": 490,
"func_start_lineno": 204,
"func_end_lineno": 211,
"func_code": " def _split_document(self, doc: Document) -> List[Document]:\n if self.split_by == \"sentence\" or self.respect_sentence_boundary:\n return self._split_by_nltk_sentence(doc)\n\n if self.split_by == \"function\" and self.splitting_function is not None:\n return self._split_by_function(doc)\n\n return self._split_by_character(doc)"
},
{
"class_start_lineno": 22,
"class_end_lineno": 490,
"func_start_lineno": 166,
"func_end_lineno": 202,
"func_code": " def run(self, documents: List[Document]):\n \"\"\"\n Split documents into smaller parts.\n\n Splits documents by the unit expressed in `split_by`, with a length of `split_length`\n and an overlap of `split_overlap`.\n\n :param documents: The documents to split.\n :returns: A dictionary with the following key:\n - `documents`: List of documents with the split texts. Each document includes:\n - A metadata field `source_id` to track the original document.\n - A metadata field `page_number` to track the original page number.\n - All other metadata copied from the original document.\n\n :raises TypeError: if the input is not a list of Documents.\n :raises ValueError: if the content of a document is None.\n \"\"\"\n if self._use_sentence_splitter and self.sentence_splitter is None:\n raise RuntimeError(\n \"The component DocumentSplitter wasn't warmed up. Run 'warm_up()' before calling 'run()'.\"\n )\n\n if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):\n raise TypeError(\"DocumentSplitter expects a List of Documents as input.\")\n\n split_docs: List[Document] = []\n for doc in documents:\n if doc.content is None:\n raise ValueError(\n f\"DocumentSplitter only works with text documents but content for document ID {doc.id} is None.\"\n )\n if doc.content == \"\":\n logger.warning(\"Document ID {doc_id} has an empty content. Skipping this document.\", doc_id=doc.id)\n continue\n\n split_docs += self._split_document(doc)\n return {\"documents\": split_docs}"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.core.serialization.default_to_dict",
"haystack.document_stores.in_memory.document_store.InMemoryDocumentStore.to_dict",
"haystack.components.retrievers.sentence_window_retriever.SentenceWindowRetriever.to_dict",
"haystack.components.preprocessors.document_splitter.DocumentSplitter._split_by_character",
"haystack.components.preprocessors.document_splitter.DocumentSplitter._split_document",
"haystack.components.preprocessors.document_splitter.DocumentSplitter.run"
] |
Python
| 3 | 4 |
{
"total_num": 16,
"base_passed_num": 12
}
|
[
"haystack.haystack.utils.type_serialization.serialize_type",
"haystack.haystack.components.routers.conditional_router.ConditionalRouter::to_dict"
] |
haystack
|
[
"haystack/utils/type_serialization.py",
"haystack/components/routers/conditional_router.py"
] |
[
"test/components/routers/test_conditional_router.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 170,
"func_start_lineno": 19,
"func_end_lineno": 52,
"func_code": "def serialize_type(target: Any) -> str:\n \"\"\"\n Serializes a type or an instance to its string representation, including the module name.\n\n This function handles types, instances of types, and special typing objects.\n It assumes that non-typing objects will have a '__name__' attribute.\n\n :param target:\n The object to serialize, can be an instance or a type.\n :return:\n The string representation of the type.\n \"\"\"\n name = getattr(target, \"__name__\", str(target))\n\n # Remove the 'typing.' prefix when using python <3.9\n if name.startswith(\"typing.\"):\n name = name[7:]\n # Remove the arguments from the name when using python <3.9\n if \"[\" in name:\n name = name.split(\"[\")[0]\n\n # Get module name\n module = inspect.getmodule(target)\n module_name = \"\"\n # We omit the module name for builtins to not clutter the output\n if module and hasattr(module, \"__name__\") and module.__name__ != \"builtins\":\n module_name = f\"{module.__name__}\"\n\n args = get_args(target)\n if args:\n args_str = \", \".join([serialize_type(a) for a in args if a is not type(None)])\n return f\"{module_name}.{name}[{args_str}]\" if module_name else f\"{name}[{args_str}]\"\n\n return f\"{module_name}.{name}\" if module_name else f\"{name}\""
},
{
"class_start_lineno": 29,
"class_end_lineno": 433,
"func_start_lineno": 237,
"func_end_lineno": 256,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialized_routes = []\n for route in self.routes:\n # output_type needs to be serialized to a string\n serialized_routes.append({**route, \"output_type\": serialize_type(route[\"output_type\"])})\n se_filters = {name: serialize_callable(filter_func) for name, filter_func in self.custom_filters.items()}\n return default_to_dict(\n self,\n routes=serialized_routes,\n custom_filters=se_filters,\n unsafe=self._unsafe,\n validate_output_type=self._validate_output_type,\n optional_variables=self.optional_variables,\n )"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.utils.type_serialization.serialize_type",
"haystack.components.routers.conditional_router.ConditionalRouter.to_dict"
] |
Python
| 1 | 2 |
{
"total_num": 23,
"base_passed_num": 15
}
|
[
"haystack.haystack.components.samplers.top_p.TopPSampler::_get_documents_and_scores",
"haystack.haystack.components.samplers.top_p.TopPSampler::run"
] |
haystack
|
[
"haystack/components/samplers/top_p.py",
"haystack/components/samplers/top_p.py"
] |
[
"test/components/samplers/test_top_p.py"
] |
[
{
"class_start_lineno": 18,
"class_end_lineno": 177,
"func_start_lineno": 144,
"func_end_lineno": 177,
"func_code": " def _get_documents_and_scores(self, documents: List[Document]) -> Tuple[List[Document], List[float]]:\n \"\"\"\n Checks if documents have scores in their metadata or score field and returns the documents with scores.\n\n :param documents: List of Documents.\n :return: List of scores.\n \"\"\"\n docs_with_scores = []\n scores = []\n docs_missing_scores = []\n for doc in documents:\n score = self._get_doc_score(doc=doc, score_field=self.score_field)\n if score is None:\n docs_missing_scores.append(doc)\n else:\n scores.append(score)\n docs_with_scores.append(doc)\n\n if len(docs_missing_scores) > 0:\n missing_scores_docs_ids = [d.id for d in docs_missing_scores if d.id]\n if self.score_field:\n logger.warning(\n \"Score field '{score_field}' not found in metadata of documents with IDs: {doc_ids}.\"\n \"Make sure that all documents have a score field '{score_field_2}' in their metadata.\",\n score_field=self.score_field,\n doc_ids=\",\".join(missing_scores_docs_ids),\n score_field_2=self.score_field,\n )\n else:\n logger.warning(\n \"Ensure all documents have a valid score value. These documents {doc_ids} are missing scores.\",\n doc_ids=\",\".join(missing_scores_docs_ids),\n )\n return docs_with_scores, scores"
},
{
"class_start_lineno": 18,
"class_end_lineno": 177,
"func_start_lineno": 65,
"func_end_lineno": 122,
"func_code": " def run(self, documents: List[Document], top_p: Optional[float] = None):\n \"\"\"\n Filters documents using top-p sampling based on their scores.\n\n If the specified top_p results in no documents being selected (especially in cases of a low top_p value), the\n method returns the document with the highest score.\n\n :param documents: List of Document objects to be filtered.\n :param top_p: If specified, a float to override the cumulative probability threshold set during initialization.\n\n :returns: A dictionary with the following key:\n - `documents`: List of Document objects that have been selected based on the top-p sampling.\n :raises ValueError: If the top_p value is not within the range [0, 1].\n \"\"\"\n if not documents:\n return {\"documents\": []}\n\n top_p = top_p or self.top_p\n if not 0 <= top_p <= 1:\n raise ValueError(f\"top_p must be between 0 and 1. Got {top_p}.\")\n\n documents_with_scores, scores = self._get_documents_and_scores(documents)\n if len(documents_with_scores) == 0:\n logger.warning(\"No documents with scores found. Returning the original documents.\")\n return {\"documents\": documents}\n\n sorted_docs_with_scores = sorted(zip(documents_with_scores, scores), key=lambda x: x[1], reverse=True)\n sorted_documents, sorted_scores = [list(t) for t in zip(*sorted_docs_with_scores)]\n\n tensor_scores = torch.tensor(sorted_scores, dtype=torch.float32)\n probs = torch.nn.functional.softmax(tensor_scores, dim=-1)\n cumulative_probs = torch.cumsum(probs, dim=-1)\n\n # Check if the cumulative probabilities are close to top_p with a 1e-6 tolerance\n close_to_top_p = torch.isclose(cumulative_probs, torch.tensor(top_p, device=cumulative_probs.device), atol=1e-6)\n\n # Combine the close_to_top_p with original condition using logical OR\n condition = (cumulative_probs <= top_p) | close_to_top_p\n\n # Find the indices with cumulative probabilities that exceed top_p\n top_p_indices = torch.where(torch.BoolTensor(condition))[0]\n\n # Map the selected indices back to their original indices\n selected_docs = [sorted_documents[i.item()] for i in top_p_indices]\n\n if self.min_top_k and len(selected_docs) < self.min_top_k:\n selected_docs = sorted_documents[: self.min_top_k]\n\n # If low p resulted in no documents being selected, then return at least one document\n if len(selected_docs) == 0:\n logger.warning(\n \"Top-p sampling with p={top_p} resulted in no documents being selected. \"\n \"Returning the document with the highest score.\",\n top_p=top_p,\n )\n selected_docs = [sorted_documents[0]]\n\n return {\"documents\": selected_docs}"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.components.samplers.top_p.TopPSampler._get_documents_and_scores",
"haystack.components.samplers.top_p.TopPSampler.run"
] |
Python
| 1 | 2 |
{
"total_num": 11,
"base_passed_num": 3
}
|
[
"haystack.haystack.core.type_utils._strict_types_are_compatible",
"haystack.haystack.core.type_utils._types_are_compatible",
"haystack.haystack.components.tools.tool_invoker.ToolInvoker::to_dict",
"haystack.haystack.components.generators.chat.openai.OpenAIChatGenerator::to_dict",
"haystack.haystack.core.serialization.component_to_dict"
] |
haystack
|
[
"haystack/core/type_utils.py",
"haystack/core/type_utils.py",
"haystack/components/tools/tool_invoker.py",
"haystack/components/generators/chat/openai.py",
"haystack/core/serialization.py"
] |
[
"test/components/tools/test_tool_invoker.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 105,
"func_start_lineno": 29,
"func_end_lineno": 76,
"func_code": "def _strict_types_are_compatible(sender, receiver): # pylint: disable=too-many-return-statements\n \"\"\"\n Checks whether the sender type is equal to or a subtype of the receiver type under strict validation.\n\n Note: this method has no pretense to perform proper type matching. It especially does not deal with aliasing of\n typing classes such as `List` or `Dict` to their runtime counterparts `list` and `dict`. It also does not deal well\n with \"bare\" types, so `List` is treated differently from `List[Any]`, even though they should be the same.\n Consider simplifying the typing of your components if you observe unexpected errors during component connection.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :return: True if the sender type is strictly compatible with the receiver type, False otherwise.\n \"\"\"\n if sender == receiver or receiver is Any:\n return True\n\n if sender is Any:\n return False\n\n try:\n if issubclass(sender, receiver):\n return True\n except TypeError: # typing classes can't be used with issubclass, so we deal with them below\n pass\n\n sender_origin = get_origin(sender)\n receiver_origin = get_origin(receiver)\n\n if sender_origin is not Union and receiver_origin is Union:\n return any(_strict_types_are_compatible(sender, union_arg) for union_arg in get_args(receiver))\n\n # Both must have origins and they must be equal\n if not (sender_origin and receiver_origin and sender_origin == receiver_origin):\n return False\n\n # Compare generic type arguments\n sender_args = get_args(sender)\n receiver_args = get_args(receiver)\n\n # Handle bare types\n if not sender_args and sender_origin:\n sender_args = (Any,)\n if not receiver_args and receiver_origin:\n receiver_args = (Any,) * (len(sender_args) if sender_args else 1)\n if len(sender_args) > len(receiver_args):\n return False\n\n return all(_strict_types_are_compatible(*args) for args in zip(sender_args, receiver_args))"
},
{
"class_start_lineno": 1,
"class_end_lineno": 105,
"func_start_lineno": 14,
"func_end_lineno": 26,
"func_code": "def _types_are_compatible(sender, receiver, type_validation: bool = True) -> bool:\n \"\"\"\n Determines if two types are compatible based on the specified validation mode.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :param type_validation: Whether to perform strict type validation.\n :return: True if the types are compatible, False otherwise.\n \"\"\"\n if type_validation:\n return _strict_types_are_compatible(sender, receiver)\n else:\n return True"
},
{
"class_start_lineno": 38,
"class_end_lineno": 242,
"func_start_lineno": 216,
"func_end_lineno": 229,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialized_tools = [tool.to_dict() for tool in self.tools]\n return default_to_dict(\n self,\n tools=serialized_tools,\n raise_on_failure=self.raise_on_failure,\n convert_result_to_json_string=self.convert_result_to_json_string,\n )"
},
{
"class_start_lineno": 32,
"class_end_lineno": 571,
"func_start_lineno": 170,
"func_end_lineno": 190,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n return default_to_dict(\n self,\n model=self.model,\n streaming_callback=callback_name,\n api_base_url=self.api_base_url,\n organization=self.organization,\n generation_kwargs=self.generation_kwargs,\n api_key=self.api_key.to_dict(),\n timeout=self.timeout,\n max_retries=self.max_retries,\n tools=[tool.to_dict() for tool in self.tools] if self.tools else None,\n tools_strict=self.tools_strict,\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 36,
"func_end_lineno": 82,
"func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.core.type_utils._strict_types_are_compatible",
"haystack.core.type_utils._types_are_compatible",
"haystack.components.tools.tool_invoker.ToolInvoker.to_dict",
"haystack.components.generators.chat.openai.OpenAIChatGenerator.to_dict",
"haystack.core.serialization.component_to_dict"
] |
Python
| 4 | 5 |
{
"total_num": 16,
"base_passed_num": 13
}
|
[
"haystack.haystack.document_stores.in_memory.document_store.InMemoryDocumentStore::write_documents",
"haystack.haystack.components.writers.document_writer.DocumentWriter::run"
] |
haystack
|
[
"haystack/document_stores/in_memory/document_store.py",
"haystack/components/writers/document_writer.py"
] |
[
"test/components/writers/test_document_writer.py"
] |
[
{
"class_start_lineno": 58,
"class_end_lineno": 738,
"func_start_lineno": 432,
"func_end_lineno": 473,
"func_code": " def write_documents(self, documents: List[Document], policy: DuplicatePolicy = DuplicatePolicy.NONE) -> int:\n \"\"\"\n Refer to the DocumentStore.write_documents() protocol documentation.\n\n If `policy` is set to `DuplicatePolicy.NONE` defaults to `DuplicatePolicy.FAIL`.\n \"\"\"\n if (\n not isinstance(documents, Iterable)\n or isinstance(documents, str)\n or any(not isinstance(doc, Document) for doc in documents)\n ):\n raise ValueError(\"Please provide a list of Documents.\")\n\n if policy == DuplicatePolicy.NONE:\n policy = DuplicatePolicy.FAIL\n\n written_documents = len(documents)\n for document in documents:\n if policy != DuplicatePolicy.OVERWRITE and document.id in self.storage.keys():\n if policy == DuplicatePolicy.FAIL:\n raise DuplicateDocumentError(f\"ID '{document.id}' already exists.\")\n if policy == DuplicatePolicy.SKIP:\n logger.warning(\"ID '{document_id}' already exists\", document_id=document.id)\n written_documents -= 1\n continue\n\n # Since the statistics are updated in an incremental manner,\n # we need to explicitly remove the existing document to revert\n # the statistics before updating them with the new document.\n if document.id in self.storage.keys():\n self.delete_documents([document.id])\n\n tokens = []\n if document.content is not None:\n tokens = self._tokenize_bm25(document.content)\n\n self.storage[document.id] = document\n\n self._bm25_attr[document.id] = BM25DocumentStats(Counter(tokens), len(tokens))\n self._freq_vocab_for_idf.update(set(tokens))\n self._avg_doc_len = (len(tokens) + self._avg_doc_len * len(self._bm25_attr)) / (len(self._bm25_attr) + 1)\n return written_documents"
},
{
"class_start_lineno": 15,
"class_end_lineno": 134,
"func_start_lineno": 85,
"func_end_lineno": 103,
"func_code": " def run(self, documents: List[Document], policy: Optional[DuplicatePolicy] = None):\n \"\"\"\n Run the DocumentWriter on the given input data.\n\n :param documents:\n A list of documents to write to the document store.\n :param policy:\n The policy to use when encountering duplicate documents.\n :returns:\n Number of documents written to the document store.\n\n :raises ValueError:\n If the specified document store is not found.\n \"\"\"\n if policy is None:\n policy = self.policy\n\n documents_written = self.document_store.write_documents(documents=documents, policy=policy)\n return {\"documents_written\": documents_written}"
}
] |
[
"function_empty"
] |
[
"haystack.document_stores.in_memory.document_store.InMemoryDocumentStore.write_documents",
"haystack.components.writers.document_writer.DocumentWriter.run"
] |
Python
| 2 | 2 |
{
"total_num": 11,
"base_passed_num": 7
}
|
[
"haystack.haystack.evaluation.eval_run_result.EvaluationRunResult::detailed_report",
"haystack.haystack.evaluation.eval_run_result.EvaluationRunResult::comparative_detailed_report"
] |
haystack
|
[
"haystack/evaluation/eval_run_result.py",
"haystack/evaluation/eval_run_result.py"
] |
[
"test/evaluation/test_eval_run_result.py"
] |
[
{
"class_start_lineno": 16,
"class_end_lineno": 222,
"func_start_lineno": 138,
"func_end_lineno": 162,
"func_code": " def detailed_report(\n self, output_format: Literal[\"json\", \"csv\", \"df\"] = \"json\", csv_file: Optional[str] = None\n ) -> Union[Dict[str, List[Any]], \"DataFrame\", str]:\n \"\"\"\n Generates a report with detailed scores for each metric.\n\n :param output_format: The output format for the report, \"json\", \"csv\", or \"df\", default to \"json\".\n :param csv_file: Filepath to save CSV output if `output_format` is \"csv\", must be provided.\n\n :returns:\n JSON or DataFrame with the detailed scores, in case the output is set to a CSV file, a message confirming\n the successful write or an error message.\n \"\"\"\n\n combined_data = {col: self.inputs[col] for col in self.inputs}\n\n # enforce columns type consistency\n scores_columns = list(self.results.keys())\n for col in scores_columns:\n col_values = self.results[col][\"individual_scores\"]\n if any(isinstance(v, float) for v in col_values):\n col_values = [float(v) for v in col_values]\n combined_data[col] = col_values\n\n return self._handle_output(combined_data, output_format, csv_file)"
},
{
"class_start_lineno": 16,
"class_end_lineno": 222,
"func_start_lineno": 164,
"func_end_lineno": 222,
"func_code": " def comparative_detailed_report(\n self,\n other: \"EvaluationRunResult\",\n keep_columns: Optional[List[str]] = None,\n output_format: Literal[\"json\", \"csv\", \"df\"] = \"json\",\n csv_file: Optional[str] = None,\n ) -> Union[str, \"DataFrame\", None]:\n \"\"\"\n Generates a report with detailed scores for each metric from two evaluation runs for comparison.\n\n :param other: Results of another evaluation run to compare with.\n :param keep_columns: List of common column names to keep from the inputs of the evaluation runs to compare.\n :param output_format: The output format for the report, \"json\", \"csv\", or \"df\", default to \"json\".\n :param csv_file: Filepath to save CSV output if `output_format` is \"csv\", must be provided.\n\n :returns:\n JSON or DataFrame with a comparison of the detailed scores, in case the output is set to a CSV file,\n a message confirming the successful write or an error message.\n \"\"\"\n\n if not isinstance(other, EvaluationRunResult):\n raise ValueError(\"Comparative scores can only be computed between EvaluationRunResults.\")\n\n if not hasattr(other, \"run_name\") or not hasattr(other, \"inputs\") or not hasattr(other, \"results\"):\n raise ValueError(\"The 'other' parameter must have 'run_name', 'inputs', and 'results' attributes.\")\n\n if self.run_name == other.run_name:\n warn(f\"The run names of the two evaluation results are the same ('{self.run_name}')\")\n\n if self.inputs.keys() != other.inputs.keys():\n warn(f\"The input columns differ between the results; using the input columns of '{self.run_name}'.\")\n\n # got both detailed reports\n detailed_a = self.detailed_report(output_format=\"json\")\n detailed_b = other.detailed_report(output_format=\"json\")\n\n # ensure both detailed reports are in dictionaries format\n if not isinstance(detailed_a, dict) or not isinstance(detailed_b, dict):\n raise ValueError(\"Detailed reports must be dictionaries.\")\n\n # determine which columns to ignore\n if keep_columns is None:\n ignore = list(self.inputs.keys())\n else:\n ignore = [col for col in list(self.inputs.keys()) if col not in keep_columns]\n\n # filter out ignored columns from pipe_b_dict\n filtered_detailed_b = {\n f\"{other.run_name}_{key}\": value for key, value in detailed_b.items() if key not in ignore\n }\n\n # rename columns in pipe_a_dict based on ignore list\n renamed_detailed_a = {\n (key if key in ignore else f\"{self.run_name}_{key}\"): value for key, value in detailed_a.items()\n }\n\n # combine both detailed reports\n combined_results = {**renamed_detailed_a, **filtered_detailed_b}\n return self._handle_output(combined_results, output_format, csv_file)"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.evaluation.eval_run_result.EvaluationRunResult.detailed_report",
"haystack.evaluation.eval_run_result.EvaluationRunResult.comparative_detailed_report"
] |
Python
| 1 | 2 |
{
"total_num": 4,
"base_passed_num": 2
}
|
[
"haystack.haystack.core.type_utils._strict_types_are_compatible",
"haystack.haystack.core.type_utils._types_are_compatible",
"haystack.haystack.components.websearch.serper_dev.SerperDevWebSearch::to_dict",
"haystack.haystack.components.tools.tool_invoker.ToolInvoker::to_dict",
"haystack.haystack.core.serialization.component_to_dict"
] |
haystack
|
[
"haystack/core/type_utils.py",
"haystack/core/type_utils.py",
"haystack/components/websearch/serper_dev.py",
"haystack/components/tools/tool_invoker.py",
"haystack/core/serialization.py"
] |
[
"test/tools/test_component_tool.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 105,
"func_start_lineno": 29,
"func_end_lineno": 76,
"func_code": "def _strict_types_are_compatible(sender, receiver): # pylint: disable=too-many-return-statements\n \"\"\"\n Checks whether the sender type is equal to or a subtype of the receiver type under strict validation.\n\n Note: this method has no pretense to perform proper type matching. It especially does not deal with aliasing of\n typing classes such as `List` or `Dict` to their runtime counterparts `list` and `dict`. It also does not deal well\n with \"bare\" types, so `List` is treated differently from `List[Any]`, even though they should be the same.\n Consider simplifying the typing of your components if you observe unexpected errors during component connection.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :return: True if the sender type is strictly compatible with the receiver type, False otherwise.\n \"\"\"\n if sender == receiver or receiver is Any:\n return True\n\n if sender is Any:\n return False\n\n try:\n if issubclass(sender, receiver):\n return True\n except TypeError: # typing classes can't be used with issubclass, so we deal with them below\n pass\n\n sender_origin = get_origin(sender)\n receiver_origin = get_origin(receiver)\n\n if sender_origin is not Union and receiver_origin is Union:\n return any(_strict_types_are_compatible(sender, union_arg) for union_arg in get_args(receiver))\n\n # Both must have origins and they must be equal\n if not (sender_origin and receiver_origin and sender_origin == receiver_origin):\n return False\n\n # Compare generic type arguments\n sender_args = get_args(sender)\n receiver_args = get_args(receiver)\n\n # Handle bare types\n if not sender_args and sender_origin:\n sender_args = (Any,)\n if not receiver_args and receiver_origin:\n receiver_args = (Any,) * (len(sender_args) if sender_args else 1)\n if len(sender_args) > len(receiver_args):\n return False\n\n return all(_strict_types_are_compatible(*args) for args in zip(sender_args, receiver_args))"
},
{
"class_start_lineno": 1,
"class_end_lineno": 105,
"func_start_lineno": 14,
"func_end_lineno": 26,
"func_code": "def _types_are_compatible(sender, receiver, type_validation: bool = True) -> bool:\n \"\"\"\n Determines if two types are compatible based on the specified validation mode.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :param type_validation: Whether to perform strict type validation.\n :return: True if the types are compatible, False otherwise.\n \"\"\"\n if type_validation:\n return _strict_types_are_compatible(sender, receiver)\n else:\n return True"
},
{
"class_start_lineno": 23,
"class_end_lineno": 175,
"func_start_lineno": 67,
"func_end_lineno": 80,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(\n self,\n top_k=self.top_k,\n allowed_domains=self.allowed_domains,\n search_params=self.search_params,\n api_key=self.api_key.to_dict(),\n )"
},
{
"class_start_lineno": 38,
"class_end_lineno": 242,
"func_start_lineno": 216,
"func_end_lineno": 229,
"func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialized_tools = [tool.to_dict() for tool in self.tools]\n return default_to_dict(\n self,\n tools=serialized_tools,\n raise_on_failure=self.raise_on_failure,\n convert_result_to_json_string=self.convert_result_to_json_string,\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 36,
"func_end_lineno": 82,
"func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.core.type_utils._strict_types_are_compatible",
"haystack.core.type_utils._types_are_compatible",
"haystack.components.websearch.serper_dev.SerperDevWebSearch.to_dict",
"haystack.components.tools.tool_invoker.ToolInvoker.to_dict",
"haystack.core.serialization.component_to_dict"
] |
Python
| 4 | 5 |
{
"total_num": 10,
"base_passed_num": 8
}
|
[
"haystack.haystack.tools.from_function.create_tool_from_function",
"haystack.haystack.tools.from_function.tool"
] |
haystack
|
[
"haystack/tools/from_function.py",
"haystack/tools/from_function.py"
] |
[
"test/tools/test_from_function.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 166,
"func_start_lineno": 14,
"func_end_lineno": 112,
"func_code": "def create_tool_from_function(\n function: Callable, name: Optional[str] = None, description: Optional[str] = None\n) -> \"Tool\":\n \"\"\"\n Create a Tool instance from a function.\n\n Allows customizing the Tool name and description.\n For simpler use cases, consider using the `@tool` decorator.\n\n ### Usage example\n\n ```python\n from typing import Annotated, Literal\n from haystack.tools import create_tool_from_function\n\n def get_weather(\n city: Annotated[str, \"the city for which to get the weather\"] = \"Munich\",\n unit: Annotated[Literal[\"Celsius\", \"Fahrenheit\"], \"the unit for the temperature\"] = \"Celsius\"):\n '''A simple function to get the current weather for a location.'''\n return f\"Weather report for {city}: 20 {unit}, sunny\"\n\n tool = create_tool_from_function(get_weather)\n\n print(tool)\n >>> Tool(name='get_weather', description='A simple function to get the current weather for a location.',\n >>> parameters={\n >>> 'type': 'object',\n >>> 'properties': {\n >>> 'city': {'type': 'string', 'description': 'the city for which to get the weather', 'default': 'Munich'},\n >>> 'unit': {\n >>> 'type': 'string',\n >>> 'enum': ['Celsius', 'Fahrenheit'],\n >>> 'description': 'the unit for the temperature',\n >>> 'default': 'Celsius',\n >>> },\n >>> }\n >>> },\n >>> function=<function get_weather at 0x7f7b3a8a9b80>)\n ```\n\n :param function:\n The function to be converted into a Tool.\n The function must include type hints for all parameters.\n The function is expected to have basic python input types (str, int, float, bool, list, dict, tuple).\n Other input types may work but are not guaranteed.\n If a parameter is annotated using `typing.Annotated`, its metadata will be used as parameter description.\n :param name:\n The name of the Tool. If not provided, the name of the function will be used.\n :param description:\n The description of the Tool. 
If not provided, the docstring of the function will be used.\n To intentionally leave the description empty, pass an empty string.\n\n :returns:\n The Tool created from the function.\n\n :raises ValueError:\n If any parameter of the function lacks a type hint.\n :raises SchemaGenerationError:\n If there is an error generating the JSON schema for the Tool.\n \"\"\"\n\n tool_description = description if description is not None else (function.__doc__ or \"\")\n\n signature = inspect.signature(function)\n\n # collect fields (types and defaults) and descriptions from function parameters\n fields: Dict[str, Any] = {}\n descriptions = {}\n\n for param_name, param in signature.parameters.items():\n if param.annotation is param.empty:\n raise ValueError(f\"Function '{function.__name__}': parameter '{param_name}' does not have a type hint.\")\n\n # if the parameter has not a default value, Pydantic requires an Ellipsis (...)\n # to explicitly indicate that the parameter is required\n default = param.default if param.default is not param.empty else ...\n fields[param_name] = (param.annotation, default)\n\n if hasattr(param.annotation, \"__metadata__\"):\n descriptions[param_name] = param.annotation.__metadata__[0]\n\n # create Pydantic model and generate JSON schema\n try:\n model = create_model(function.__name__, **fields)\n schema = model.model_json_schema()\n except Exception as e:\n raise SchemaGenerationError(f\"Failed to create JSON schema for function '{function.__name__}'\") from e\n\n # we don't want to include title keywords in the schema, as they contain redundant information\n # there is no programmatic way to prevent Pydantic from adding them, so we remove them later\n # see https://github.com/pydantic/pydantic/discussions/8504\n _remove_title_from_schema(schema)\n\n # add parameters descriptions to the schema\n for param_name, param_description in descriptions.items():\n if param_name in schema[\"properties\"]:\n schema[\"properties\"][param_name][\"description\"] = param_description\n\n return Tool(name=name or function.__name__, description=tool_description, parameters=schema, function=function)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 166,
"func_start_lineno": 115,
"func_end_lineno": 151,
"func_code": "def tool(function: Callable) -> Tool:\n \"\"\"\n Decorator to convert a function into a Tool.\n\n Tool name, description, and parameters are inferred from the function.\n If you need to customize more the Tool, use `create_tool_from_function` instead.\n\n ### Usage example\n ```python\n from typing import Annotated, Literal\n from haystack.tools import tool\n\n @tool\n def get_weather(\n city: Annotated[str, \"the city for which to get the weather\"] = \"Munich\",\n unit: Annotated[Literal[\"Celsius\", \"Fahrenheit\"], \"the unit for the temperature\"] = \"Celsius\"):\n '''A simple function to get the current weather for a location.'''\n return f\"Weather report for {city}: 20 {unit}, sunny\"\n\n print(get_weather)\n >>> Tool(name='get_weather', description='A simple function to get the current weather for a location.',\n >>> parameters={\n >>> 'type': 'object',\n >>> 'properties': {\n >>> 'city': {'type': 'string', 'description': 'the city for which to get the weather', 'default': 'Munich'},\n >>> 'unit': {\n >>> 'type': 'string',\n >>> 'enum': ['Celsius', 'Fahrenheit'],\n >>> 'description': 'the unit for the temperature',\n >>> 'default': 'Celsius',\n >>> },\n >>> }\n >>> },\n >>> function=<function get_weather at 0x7f7b3a8a9b80>)\n ```\n \"\"\"\n return create_tool_from_function(function)"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.tools.from_function.create_tool_from_function",
"haystack.tools.from_function.tool"
] |
Python
| 1 | 2 |
{
"total_num": 12,
"base_passed_num": 3
}
|
[
"haystack.haystack.core.serialization.import_class_by_name",
"haystack.haystack.tools.tool.deserialize_tools_inplace"
] |
haystack
|
[
"haystack/core/serialization.py",
"haystack/tools/tool.py"
] |
[
"test/tools/test_tool.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 243,
"func_end_lineno": 264,
"func_code": "def import_class_by_name(fully_qualified_name: str) -> Type[object]:\n \"\"\"\n Utility function to import (load) a class object based on its fully qualified class name.\n\n This function dynamically imports a class based on its string name.\n It splits the name into module path and class name, imports the module,\n and returns the class object.\n\n :param fully_qualified_name: the fully qualified class name as a string\n :returns: the class object.\n :raises ImportError: If the class cannot be imported or found.\n \"\"\"\n try:\n module_path, class_name = fully_qualified_name.rsplit(\".\", 1)\n logger.debug(\n \"Attempting to import class '{cls_name}' from module '{md_path}'\", cls_name=class_name, md_path=module_path\n )\n module = thread_safe_import(module_path)\n return getattr(module, class_name)\n except (ImportError, AttributeError) as error:\n logger.error(\"Failed to import class '{full_name}'\", full_name=fully_qualified_name)\n raise ImportError(f\"Could not import class '{fully_qualified_name}'\") from error"
},
{
"class_start_lineno": 1,
"class_end_lineno": 136,
"func_start_lineno": 106,
"func_end_lineno": 136,
"func_code": "def deserialize_tools_inplace(data: Dict[str, Any], key: str = \"tools\"):\n \"\"\"\n Deserialize Tools in a dictionary inplace.\n\n :param data:\n The dictionary with the serialized data.\n :param key:\n The key in the dictionary where the Tools are stored.\n \"\"\"\n if key in data:\n serialized_tools = data[key]\n\n if serialized_tools is None:\n return\n\n if not isinstance(serialized_tools, list):\n raise TypeError(f\"The value of '{key}' is not a list\")\n\n deserialized_tools = []\n for tool in serialized_tools:\n if not isinstance(tool, dict):\n raise TypeError(f\"Serialized tool '{tool}' is not a dictionary\")\n\n # different classes are allowed: Tool, ComponentTool, etc.\n tool_class = import_class_by_name(tool[\"type\"])\n if not issubclass(tool_class, Tool):\n raise TypeError(f\"Class '{tool_class}' is not a subclass of Tool\")\n\n deserialized_tools.append(tool_class.from_dict(tool))\n\n data[key] = deserialized_tools"
}
] |
[
"Development"
] |
[
"haystack.core.serialization.import_class_by_name",
"haystack.tools.tool.deserialize_tools_inplace"
] |
Python
| 0 | 2 |
{
"total_num": 10,
"base_passed_num": 7
}
|
[
"haystack.haystack.dataclasses.document.Document::to_dict",
"haystack.haystack.tracing.utils.coerce_tag_value"
] |
haystack
|
[
"haystack/dataclasses/document.py",
"haystack/tracing/utils.py",
"haystack/tracing/utils.py"
] |
[
"test/tracing/test_utils.py"
] |
[
{
"class_start_lineno": 49,
"class_end_lineno": 186,
"func_start_lineno": 123,
"func_end_lineno": 140,
"func_code": " def to_dict(self, flatten=True) -> Dict[str, Any]:\n \"\"\"\n Converts Document into a dictionary.\n\n `blob` field is converted to a JSON-serializable type.\n\n :param flatten:\n Whether to flatten `meta` field or not. Defaults to `True` to be backward-compatible with Haystack 1.x.\n \"\"\"\n data = asdict(self)\n if (blob := data.get(\"blob\")) is not None:\n data[\"blob\"] = {\"data\": list(blob[\"data\"]), \"mime_type\": blob[\"mime_type\"]}\n\n if flatten:\n meta = data.pop(\"meta\")\n return {**data, **meta}\n\n return data"
},
{
"class_start_lineno": 1,
"class_end_lineno": 52,
"func_start_lineno": 42,
"func_end_lineno": 52,
"func_code": "def _serializable_value(value: Any) -> Any:\n if isinstance(value, list):\n return [_serializable_value(v) for v in value]\n\n if isinstance(value, dict):\n return {k: _serializable_value(v) for k, v in value.items()}\n\n if getattr(value, \"to_dict\", None):\n return _serializable_value(value.to_dict())\n\n return value"
},
{
"class_start_lineno": 1,
"class_end_lineno": 52,
"func_start_lineno": 15,
"func_end_lineno": 39,
"func_code": "def coerce_tag_value(value: Any) -> Union[bool, str, int, float]:\n \"\"\"\n Coerces span tag values to compatible types for the tracing backend.\n\n Most tracing libraries don't support sending complex types to the backend. Hence, we need to convert them to\n compatible types.\n\n :param value: an arbitrary value which should be coerced to a compatible type\n :return: the value coerced to a compatible type\n \"\"\"\n if isinstance(value, PRIMITIVE_TYPES):\n return value\n\n if value is None:\n return \"\"\n\n try:\n # do that with-in try-except because who knows what kind of objects are being passed\n serializable = _serializable_value(value)\n return json.dumps(serializable)\n except Exception as error:\n logger.debug(\"Failed to coerce tag value to string: {error}\", error=error)\n\n # Our last resort is to convert the value to a string\n return str(value)"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.dataclasses.document.Document.to_dict",
"haystack.tracing.utils._serializable_value",
"haystack.tracing.utils.coerce_tag_value"
] |
Python
| 1 | 2 |
{
"total_num": 11,
"base_passed_num": 5
}
|
[
"haystack.haystack.core.serialization.import_class_by_name",
"haystack.haystack.utils.base_serialization.deserialize_class_instance"
] |
haystack
|
[
"haystack/core/serialization.py",
"haystack/utils/base_serialization.py"
] |
[
"test/utils/test_base_serialization.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 243,
"func_end_lineno": 264,
"func_code": "def import_class_by_name(fully_qualified_name: str) -> Type[object]:\n \"\"\"\n Utility function to import (load) a class object based on its fully qualified class name.\n\n This function dynamically imports a class based on its string name.\n It splits the name into module path and class name, imports the module,\n and returns the class object.\n\n :param fully_qualified_name: the fully qualified class name as a string\n :returns: the class object.\n :raises ImportError: If the class cannot be imported or found.\n \"\"\"\n try:\n module_path, class_name = fully_qualified_name.rsplit(\".\", 1)\n logger.debug(\n \"Attempting to import class '{cls_name}' from module '{md_path}'\", cls_name=class_name, md_path=module_path\n )\n module = thread_safe_import(module_path)\n return getattr(module, class_name)\n except (ImportError, AttributeError) as error:\n logger.error(\"Failed to import class '{full_name}'\", full_name=fully_qualified_name)\n raise ImportError(f\"Could not import class '{fully_qualified_name}'\") from error"
},
{
"class_start_lineno": 1,
"class_end_lineno": 54,
"func_start_lineno": 29,
"func_end_lineno": 54,
"func_code": "def deserialize_class_instance(data: Dict[str, Any]) -> Any:\n \"\"\"\n Deserializes an object from a dictionary representation generated by `auto_serialize_class_instance`.\n\n :param data:\n The dictionary to deserialize from.\n :returns:\n The deserialized object.\n :raises DeserializationError:\n If the serialization data is malformed, the class type cannot be imported, or the\n class does not have a `from_dict` method.\n \"\"\"\n if \"type\" not in data:\n raise DeserializationError(\"Missing 'type' in serialization data\")\n if \"data\" not in data:\n raise DeserializationError(\"Missing 'data' in serialization data\")\n\n try:\n obj_class = import_class_by_name(data[\"type\"])\n except ImportError as e:\n raise DeserializationError(f\"Class '{data['type']}' not correctly imported\") from e\n\n if not hasattr(obj_class, \"from_dict\"):\n raise DeserializationError(f\"Class '{data['type']}' does not have a 'from_dict' method\")\n\n return obj_class.from_dict(data[\"data\"])"
}
] |
[
"Development"
] |
[
"haystack.core.serialization.import_class_by_name",
"haystack.utils.base_serialization.deserialize_class_instance"
] |
Python
| 0 | 2 |
{
"total_num": 4,
"base_passed_num": 2
}
|
[
"haystack.haystack.utils.type_serialization.thread_safe_import",
"haystack.haystack.utils.callable_serialization.deserialize_callable"
] |
haystack
|
[
"haystack/utils/type_serialization.py",
"haystack/utils/callable_serialization.py"
] |
[
"test/utils/test_callable_serialization.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 170,
"func_start_lineno": 159,
"func_end_lineno": 170,
"func_code": "def thread_safe_import(module_name: str) -> ModuleType:\n \"\"\"\n Import a module in a thread-safe manner.\n\n Importing modules in a multi-threaded environment can lead to race conditions.\n This function ensures that the module is imported in a thread-safe manner without having impact\n on the performance of the import for single-threaded environments.\n\n :param module_name: the module to import\n \"\"\"\n with _import_lock:\n return importlib.import_module(module_name)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 80,
"func_start_lineno": 45,
"func_end_lineno": 80,
"func_code": "def deserialize_callable(callable_handle: str) -> Callable:\n \"\"\"\n Deserializes a callable given its full import path as a string.\n\n :param callable_handle: The full path of the callable_handle\n :return: The callable\n :raises DeserializationError: If the callable cannot be found\n \"\"\"\n parts = callable_handle.split(\".\")\n\n for i in range(len(parts), 0, -1):\n module_name = \".\".join(parts[:i])\n try:\n mod: Any = thread_safe_import(module_name)\n except Exception:\n # keep reducing i until we find a valid module import\n continue\n\n attr_value = mod\n for part in parts[i:]:\n try:\n attr_value = getattr(attr_value, part)\n except AttributeError as e:\n raise DeserializationError(f\"Could not find attribute '{part}' in {attr_value.__name__}\") from e\n\n # when the attribute is a classmethod, we need the underlying function\n if isinstance(attr_value, (classmethod, staticmethod)):\n attr_value = attr_value.__func__\n\n if not callable(attr_value):\n raise DeserializationError(f\"The final attribute is not callable: {attr_value}\")\n\n return attr_value\n\n # Fallback if we never find anything\n raise DeserializationError(f\"Could not import '{callable_handle}' as a module or callable.\")"
}
] |
[
"function_empty",
"Development"
] |
[
"haystack.utils.type_serialization.thread_safe_import",
"haystack.utils.callable_serialization.deserialize_callable"
] |
Python
| 1 | 2 |
{
"total_num": 11,
"base_passed_num": 5
}
|
[
"haystack.haystack.utils.device.ComponentDevice::to_hf",
"haystack.haystack.utils.device.ComponentDevice::update_hf_kwargs"
] |
haystack
|
[
"haystack/utils/device.py",
"haystack/utils/device.py"
] |
[
"test/utils/test_device.py"
] |
[
{
"class_start_lineno": 240,
"class_end_lineno": 480,
"func_start_lineno": 359,
"func_end_lineno": 379,
"func_code": " def to_hf(self) -> Union[Union[int, str], Dict[str, Union[int, str]]]:\n \"\"\"\n Convert the component device representation to HuggingFace format.\n\n :returns:\n The HuggingFace device representation.\n \"\"\"\n self._validate()\n\n def convert_device(device: Device, *, gpu_id_only: bool = False) -> Union[int, str]:\n if gpu_id_only and device.type == DeviceType.GPU:\n assert device.id is not None\n return device.id\n else:\n return str(device)\n\n if self._single_device is not None:\n return convert_device(self._single_device)\n\n assert self._multiple_devices is not None\n return {key: convert_device(device, gpu_id_only=True) for key, device in self._multiple_devices.mapping.items()}"
},
{
"class_start_lineno": 240,
"class_end_lineno": 480,
"func_start_lineno": 381,
"func_end_lineno": 402,
"func_code": " def update_hf_kwargs(self, hf_kwargs: Dict[str, Any], *, overwrite: bool) -> Dict[str, Any]:\n \"\"\"\n Convert the component device representation to HuggingFace format.\n\n Add them as canonical keyword arguments to the keyword arguments dictionary.\n\n :param hf_kwargs:\n The HuggingFace keyword arguments dictionary.\n :param overwrite:\n Whether to overwrite existing device arguments.\n :returns:\n The HuggingFace keyword arguments dictionary.\n \"\"\"\n self._validate()\n\n if not overwrite and any(x in hf_kwargs for x in (\"device\", \"device_map\")):\n return hf_kwargs\n\n converted = self.to_hf()\n key = \"device_map\" if self.has_multiple_devices else \"device\"\n hf_kwargs[key] = converted\n return hf_kwargs"
}
] |
[
"function_empty"
] |
[
"haystack.utils.device.ComponentDevice.to_hf",
"haystack.utils.device.ComponentDevice.update_hf_kwargs"
] |
Python
| 2 | 2 |
{
"total_num": 7,
"base_passed_num": 4
}
|
[
"haystack.haystack.core.serialization.import_class_by_name",
"haystack.haystack.utils.docstore_deserialization.deserialize_document_store_in_init_params_inplace"
] |
haystack
|
[
"haystack/core/serialization.py",
"haystack/utils/docstore_deserialization.py"
] |
[
"test/utils/test_docstore_deserialization.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 264,
"func_start_lineno": 243,
"func_end_lineno": 264,
"func_code": "def import_class_by_name(fully_qualified_name: str) -> Type[object]:\n \"\"\"\n Utility function to import (load) a class object based on its fully qualified class name.\n\n This function dynamically imports a class based on its string name.\n It splits the name into module path and class name, imports the module,\n and returns the class object.\n\n :param fully_qualified_name: the fully qualified class name as a string\n :returns: the class object.\n :raises ImportError: If the class cannot be imported or found.\n \"\"\"\n try:\n module_path, class_name = fully_qualified_name.rsplit(\".\", 1)\n logger.debug(\n \"Attempting to import class '{cls_name}' from module '{md_path}'\", cls_name=class_name, md_path=module_path\n )\n module = thread_safe_import(module_path)\n return getattr(module, class_name)\n except (ImportError, AttributeError) as error:\n logger.error(\"Failed to import class '{full_name}'\", full_name=fully_qualified_name)\n raise ImportError(f\"Could not import class '{fully_qualified_name}'\") from error"
},
{
"class_start_lineno": 1,
"class_end_lineno": 39,
"func_start_lineno": 11,
"func_end_lineno": 39,
"func_code": "def deserialize_document_store_in_init_params_inplace(data: Dict[str, Any], key: str = \"document_store\"):\n \"\"\"\n Deserializes a generic document store from the init_parameters of a serialized component in place.\n\n :param data:\n The dictionary to deserialize from.\n :param key:\n The key in the `data[\"init_parameters\"]` dictionary where the document store is specified.\n :returns:\n The dictionary, with the document store deserialized.\n\n :raises DeserializationError:\n If the document store is not properly specified in the serialization data or its type cannot be imported.\n \"\"\"\n init_params = data.get(\"init_parameters\", {})\n if key not in init_params:\n raise DeserializationError(f\"Missing '{key}' in serialization data\")\n if \"type\" not in init_params[key]:\n raise DeserializationError(f\"Missing 'type' in {key} serialization data\")\n\n doc_store_data = data[\"init_parameters\"][key]\n try:\n doc_store_class = import_class_by_name(doc_store_data[\"type\"])\n except ImportError as e:\n raise DeserializationError(f\"Class '{doc_store_data['type']}' not correctly imported\") from e\n if hasattr(doc_store_class, \"from_dict\"):\n data[\"init_parameters\"][key] = doc_store_class.from_dict(doc_store_data)\n else:\n data[\"init_parameters\"][key] = default_from_dict(doc_store_class, doc_store_data)"
}
] |
[
"Development"
] |
[
"haystack.core.serialization.import_class_by_name",
"haystack.utils.docstore_deserialization.deserialize_document_store_in_init_params_inplace"
] |
Python
| 0 | 2 |
{
"total_num": 6,
"base_passed_num": 0
}
|
[
"inference.inference.core.utils.image_utils.validate_numpy_image",
"inference.inference.core.utils.image_utils.load_image_with_inferred_type"
] |
inference
|
[
"inference/core/utils/image_utils.py",
"inference/core/utils/image_utils.py"
] |
[
"tests/inference/unit_tests/core/active_learning/test_middlewares.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 599,
"func_start_lineno": 353,
"func_end_lineno": 377,
"func_code": "def validate_numpy_image(data: np.ndarray) -> None:\n \"\"\"\n Validate if the provided data is a valid numpy image.\n\n Args:\n data (np.ndarray): The numpy array representing an image.\n\n Raises:\n InvalidNumpyInput: If the provided data is not a valid numpy image.\n \"\"\"\n if not issubclass(type(data), np.ndarray):\n raise InvalidNumpyInput(\n message=f\"Data provided as input could not be decoded into np.ndarray object.\",\n public_message=f\"Data provided as input could not be decoded into np.ndarray object.\",\n )\n if len(data.shape) != 3 and len(data.shape) != 2:\n raise InvalidNumpyInput(\n message=f\"For image given as np.ndarray expected 2 or 3 dimensions, got {len(data.shape)} dimensions.\",\n public_message=f\"For image given as np.ndarray expected 2 or 3 dimensions.\",\n )\n if data.shape[-1] != 3 and data.shape[-1] != 1:\n raise InvalidNumpyInput(\n message=f\"For image given as np.ndarray expected 1 or 3 channels, got {data.shape[-1]} channels.\",\n public_message=\"For image given as np.ndarray expected 1 or 3 channels.\",\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 599,
"func_start_lineno": 180,
"func_end_lineno": 212,
"func_code": "def load_image_with_inferred_type(\n value: Any,\n cv_imread_flags: int = cv2.IMREAD_COLOR,\n) -> Tuple[np.ndarray, bool]:\n \"\"\"Load an image by inferring its type.\n\n Args:\n value (Any): The image data.\n cv_imread_flags (int): Flags used for OpenCV's imread function.\n\n Returns:\n Tuple[np.ndarray, bool]: Loaded image as a numpy array and a boolean indicating if the image is in BGR format.\n\n Raises:\n NotImplementedError: If the image type could not be inferred.\n \"\"\"\n if isinstance(value, (np.ndarray, np.generic)):\n validate_numpy_image(data=value)\n return value, True\n elif isinstance(value, Image.Image):\n return np.asarray(value.convert(\"RGB\")), False\n elif isinstance(value, str) and (value.startswith(\"http\")):\n return load_image_from_url(value=value, cv_imread_flags=cv_imread_flags), True\n elif (\n isinstance(value, str)\n and ALLOW_LOADING_IMAGES_FROM_LOCAL_FILESYSTEM\n and os.path.isfile(value)\n ):\n return cv2.imread(value, cv_imread_flags), True\n else:\n return attempt_loading_image_from_string(\n value=value, cv_imread_flags=cv_imread_flags\n )"
}
] |
[
"function_empty"
] |
[
"inference.core.utils.image_utils.validate_numpy_image",
"inference.core.utils.image_utils.load_image_with_inferred_type"
] |
Python
| 2 | 2 |
{
"total_num": 7,
"base_passed_num": 4
}
|
[
"inference.inference.core.active_learning.samplers.close_to_threshold.count_detections_close_to_threshold",
"inference.inference.core.active_learning.samplers.close_to_threshold.prediction_is_close_to_threshold"
] |
inference
|
[
"inference/core/active_learning/samplers/close_to_threshold.py",
"inference/core/active_learning/samplers/close_to_threshold.py",
"inference/core/active_learning/samplers/close_to_threshold.py"
] |
[
"tests/inference/unit_tests/core/active_learning/samplers/test_close_to_threshold.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 227,
"func_start_lineno": 200,
"func_end_lineno": 217,
"func_code": "def count_detections_close_to_threshold(\n prediction: Prediction,\n selected_class_names: Optional[Set[str]],\n threshold: float,\n epsilon: float,\n) -> int:\n counter = 0\n for prediction_details in prediction[\"predictions\"]:\n if class_to_be_excluded(\n class_name=prediction_details[\"class\"],\n selected_class_names=selected_class_names,\n ):\n continue\n if is_close_to_threshold(\n value=prediction_details[\"confidence\"], threshold=threshold, epsilon=epsilon\n ):\n counter += 1\n return counter"
},
{
"class_start_lineno": 1,
"class_end_lineno": 227,
"func_start_lineno": 184,
"func_end_lineno": 197,
"func_code": "def detections_are_close_to_threshold(\n prediction: Prediction,\n selected_class_names: Optional[Set[str]],\n threshold: float,\n epsilon: float,\n minimum_objects_close_to_threshold: int,\n) -> bool:\n detections_close_to_threshold = count_detections_close_to_threshold(\n prediction=prediction,\n selected_class_names=selected_class_names,\n threshold=threshold,\n epsilon=epsilon,\n )\n return detections_close_to_threshold >= minimum_objects_close_to_threshold"
},
{
"class_start_lineno": 1,
"class_end_lineno": 227,
"func_start_lineno": 90,
"func_end_lineno": 116,
"func_code": "def prediction_is_close_to_threshold(\n prediction: Prediction,\n prediction_type: PredictionType,\n selected_class_names: Optional[Set[str]],\n threshold: float,\n epsilon: float,\n only_top_classes: bool,\n minimum_objects_close_to_threshold: int,\n) -> bool:\n if CLASSIFICATION_TASK not in prediction_type:\n return detections_are_close_to_threshold(\n prediction=prediction,\n selected_class_names=selected_class_names,\n threshold=threshold,\n epsilon=epsilon,\n minimum_objects_close_to_threshold=minimum_objects_close_to_threshold,\n )\n checker = multi_label_classification_prediction_is_close_to_threshold\n if \"top\" in prediction:\n checker = multi_class_classification_prediction_is_close_to_threshold\n return checker(\n prediction=prediction,\n selected_class_names=selected_class_names,\n threshold=threshold,\n epsilon=epsilon,\n only_top_classes=only_top_classes,\n )"
}
] |
[
"Development"
] |
[
"inference.core.active_learning.samplers.close_to_threshold.count_detections_close_to_threshold",
"inference.core.active_learning.samplers.close_to_threshold.detections_are_close_to_threshold",
"inference.core.active_learning.samplers.close_to_threshold.prediction_is_close_to_threshold"
] |
Python
| 0 | 2 |
{
"total_num": 52,
"base_passed_num": 36
}
|
[
"inference.inference.core.interfaces.camera.video_source.VideoSource::read_frame",
"inference.inference.core.interfaces.camera.video_source.VideoSource::__next__",
"inference.inference.core.interfaces.camera.utils.get_video_frames_generator"
] |
inference
|
[
"inference/core/interfaces/camera/video_source.py",
"inference/core/interfaces/camera/video_source.py",
"inference/core/interfaces/camera/utils.py",
"inference/core/interfaces/camera/utils.py"
] |
[
"tests/inference/unit_tests/core/interfaces/camera/test_utils.py"
] |
[
{
"class_start_lineno": 181,
"class_end_lineno": 738,
"func_start_lineno": 526,
"func_end_lineno": 555,
"func_code": " def read_frame(self, timeout: Optional[float] = None) -> Optional[VideoFrame]:\n \"\"\"\n Method to be used by the consumer to get decoded source frame.\n\n Returns: VideoFrame object with decoded frame and its metadata.\n Throws:\n * EndOfStreamError: when trying to get the frame from closed source.\n \"\"\"\n video_frame: Optional[Union[VideoFrame, str]] = get_from_queue(\n queue=self._frames_buffer,\n on_successful_read=self._video_consumer.notify_frame_consumed,\n timeout=timeout,\n purge=self._buffer_consumption_strategy is BufferConsumptionStrategy.EAGER,\n )\n if video_frame == POISON_PILL:\n raise EndOfStreamError(\n \"Attempted to retrieve frame from stream that already ended.\"\n )\n if video_frame is not None:\n send_video_source_status_update(\n severity=UpdateSeverity.DEBUG,\n event_type=FRAME_CONSUMED_EVENT,\n payload={\n \"frame_timestamp\": video_frame.frame_timestamp,\n \"frame_id\": video_frame.frame_id,\n \"source_id\": video_frame.source_id,\n },\n status_update_handlers=self._status_update_handlers,\n )\n return video_frame"
},
{
"class_start_lineno": 181,
"class_end_lineno": 738,
"func_start_lineno": 720,
"func_end_lineno": 738,
"func_code": " def __next__(self) -> VideoFrame:\n \"\"\"\n Method allowing to use `VideoSource` convenient to read frames\n\n Returns: VideoFrame\n\n Example:\n ```python\n source = VideoSource.init(video_reference=\"./some.mp4\")\n source.start()\n\n for frame in source:\n pass\n ```\n \"\"\"\n try:\n return self.read_frame()\n except EndOfStreamError:\n raise StopIteration()"
},
{
"class_start_lineno": 1,
"class_end_lineno": 516,
"func_start_lineno": 479,
"func_end_lineno": 494,
"func_code": "def limit_frame_rate(\n frames_generator: Iterable[T],\n max_fps: Union[float, int],\n strategy: FPSLimiterStrategy,\n) -> Generator[T, None, None]:\n rate_limiter = RateLimiter(desired_fps=max_fps)\n for frame_data in frames_generator:\n delay = rate_limiter.estimate_next_action_delay()\n if delay <= 0.0:\n rate_limiter.tick()\n yield frame_data\n continue\n if strategy is FPSLimiterStrategy.WAIT:\n time.sleep(delay)\n rate_limiter.tick()\n yield frame_data"
},
{
"class_start_lineno": 1,
"class_end_lineno": 516,
"func_start_lineno": 46,
"func_end_lineno": 97,
"func_code": "def get_video_frames_generator(\n video: Union[VideoSource, str, int],\n max_fps: Optional[Union[float, int]] = None,\n limiter_strategy: Optional[FPSLimiterStrategy] = None,\n) -> Generator[VideoFrame, None, None]:\n \"\"\"\n Util function to create a frames generator from `VideoSource` with possibility to\n limit FPS of consumed frames and dictate what to do if frames are produced to fast.\n\n Args:\n video (Union[VideoSource, str, int]): Either instance of VideoSource or video reference accepted\n by VideoSource.init(...)\n max_fps (Optional[Union[float, int]]): value of maximum FPS rate of generated frames - can be used to limit\n generation frequency\n limiter_strategy (Optional[FPSLimiterStrategy]): strategy used to deal with frames decoding exceeding\n limit of `max_fps`. By default - for files, in the interest of processing all frames -\n generation will be awaited, for streams - frames will be dropped on the floor.\n Returns: generator of `VideoFrame`\n\n Example:\n ```python\n from inference.core.interfaces.camera.utils import get_video_frames_generator\n\n for frame in get_video_frames_generator(\n video=\"./some.mp4\",\n max_fps=50,\n ):\n pass\n ```\n \"\"\"\n is_managed_source = False\n if issubclass(type(video), str) or issubclass(type(video), int):\n video = VideoSource.init(\n video_reference=video,\n )\n video.start()\n is_managed_source = True\n if max_fps is None:\n yield from video\n if is_managed_source:\n video.terminate(purge_frames_buffer=True)\n return None\n limiter_strategy = resolve_limiter_strategy(\n explicitly_defined_strategy=limiter_strategy,\n source_properties=video.describe_source().source_properties,\n )\n yield from limit_frame_rate(\n frames_generator=video, max_fps=max_fps, strategy=limiter_strategy\n )\n if is_managed_source:\n video.terminate(purge_frames_buffer=True)\n return None"
}
] |
[
"function_empty"
] |
[
"inference.core.interfaces.camera.video_source.VideoSource.read_frame",
"inference.core.interfaces.camera.video_source.VideoSource.__next__",
"inference.core.interfaces.camera.utils.limit_frame_rate",
"inference.core.interfaces.camera.utils.get_video_frames_generator"
] |
Python
| 3 | 3 |
{
"total_num": 42,
"base_passed_num": 0
}
|
[
"inference.inference.core.interfaces.camera.video_source.VideoSource::read_frame",
"inference.inference.core.interfaces.camera.video_source.VideoSource::__next__",
"inference.inference.core.interfaces.camera.video_source.send_video_source_status_update",
"inference.inference.core.interfaces.camera.video_source.VideoConsumer::_consume_stream_frame"
] |
inference
|
[
"inference/core/interfaces/camera/video_source.py",
"inference/core/interfaces/camera/video_source.py",
"inference/core/interfaces/camera/video_source.py",
"inference/core/interfaces/camera/video_source.py",
"inference/core/interfaces/camera/video_source.py"
] |
[
"tests/inference/unit_tests/core/interfaces/camera/test_video_source.py"
] |
[
{
"class_start_lineno": 181,
"class_end_lineno": 738,
"func_start_lineno": 526,
"func_end_lineno": 555,
"func_code": " def read_frame(self, timeout: Optional[float] = None) -> Optional[VideoFrame]:\n \"\"\"\n Method to be used by the consumer to get decoded source frame.\n\n Returns: VideoFrame object with decoded frame and its metadata.\n Throws:\n * EndOfStreamError: when trying to get the frame from closed source.\n \"\"\"\n video_frame: Optional[Union[VideoFrame, str]] = get_from_queue(\n queue=self._frames_buffer,\n on_successful_read=self._video_consumer.notify_frame_consumed,\n timeout=timeout,\n purge=self._buffer_consumption_strategy is BufferConsumptionStrategy.EAGER,\n )\n if video_frame == POISON_PILL:\n raise EndOfStreamError(\n \"Attempted to retrieve frame from stream that already ended.\"\n )\n if video_frame is not None:\n send_video_source_status_update(\n severity=UpdateSeverity.DEBUG,\n event_type=FRAME_CONSUMED_EVENT,\n payload={\n \"frame_timestamp\": video_frame.frame_timestamp,\n \"frame_id\": video_frame.frame_id,\n \"source_id\": video_frame.source_id,\n },\n status_update_handlers=self._status_update_handlers,\n )\n return video_frame"
},
{
"class_start_lineno": 181,
"class_end_lineno": 738,
"func_start_lineno": 720,
"func_end_lineno": 738,
"func_code": " def __next__(self) -> VideoFrame:\n \"\"\"\n Method allowing to use `VideoSource` convenient to read frames\n\n Returns: VideoFrame\n\n Example:\n ```python\n source = VideoSource.init(video_reference=\"./some.mp4\")\n source.start()\n\n for frame in source:\n pass\n ```\n \"\"\"\n try:\n return self.read_frame()\n except EndOfStreamError:\n raise StopIteration()"
},
{
"class_start_lineno": 1,
"class_end_lineno": 1209,
"func_start_lineno": 1133,
"func_end_lineno": 1156,
"func_code": "def send_video_source_status_update(\n severity: UpdateSeverity,\n event_type: str,\n status_update_handlers: List[Callable[[StatusUpdate], None]],\n sub_context: Optional[str] = None,\n payload: Optional[dict] = None,\n) -> None:\n if payload is None:\n payload = {}\n context = VIDEO_SOURCE_CONTEXT\n if sub_context is not None:\n context = f\"{context}.{sub_context}\"\n status_update = StatusUpdate(\n timestamp=datetime.now(),\n severity=severity,\n event_type=event_type,\n payload=payload,\n context=context,\n )\n for handler in status_update_handlers:\n try:\n handler(status_update)\n except Exception as error:\n logger.warning(f\"Could not execute handler update. Cause: {error}\")"
},
{
"class_start_lineno": 1,
"class_end_lineno": 1209,
"func_start_lineno": 1112,
"func_end_lineno": 1130,
"func_code": "def send_frame_drop_update(\n frame_timestamp: datetime,\n frame_id: int,\n cause: str,\n status_update_handlers: List[Callable[[StatusUpdate], None]],\n source_id: Optional[int],\n) -> None:\n send_video_source_status_update(\n severity=UpdateSeverity.DEBUG,\n event_type=FRAME_DROPPED_EVENT,\n payload={\n \"frame_timestamp\": frame_timestamp,\n \"frame_id\": frame_id,\n \"cause\": cause,\n \"source_id\": source_id,\n },\n status_update_handlers=status_update_handlers,\n sub_context=VIDEO_CONSUMER_CONTEXT,\n )"
},
{
"class_start_lineno": 741,
"class_end_lineno": 1061,
"func_start_lineno": 919,
"func_end_lineno": 985,
"func_code": " def _consume_stream_frame(\n self,\n video: VideoFrameProducer,\n declared_source_fps: Optional[float],\n measured_source_fps: Optional[float],\n is_source_video_file: Optional[bool],\n frame_timestamp: datetime,\n buffer: Queue,\n frames_buffering_allowed: bool,\n source_id: Optional[int],\n ) -> bool:\n \"\"\"\n Returns: boolean flag with success status\n \"\"\"\n if not frames_buffering_allowed:\n send_frame_drop_update(\n frame_timestamp=frame_timestamp,\n frame_id=self._frame_counter,\n cause=\"Buffering not allowed at the moment\",\n status_update_handlers=self._status_update_handlers,\n source_id=source_id,\n )\n return True\n if self._frame_should_be_adaptively_dropped(\n declared_source_fps=declared_source_fps\n ):\n self._adaptive_frames_dropped_in_row += 1\n send_frame_drop_update(\n frame_timestamp=frame_timestamp,\n frame_id=self._frame_counter,\n cause=\"ADAPTIVE strategy\",\n status_update_handlers=self._status_update_handlers,\n source_id=source_id,\n )\n return True\n self._adaptive_frames_dropped_in_row = 0\n if (\n not buffer.full()\n or self._buffer_filling_strategy is BufferFillingStrategy.WAIT\n ):\n return decode_video_frame_to_buffer(\n frame_timestamp=frame_timestamp,\n frame_id=self._frame_counter,\n video=video,\n buffer=buffer,\n decoding_pace_monitor=self._decoding_pace_monitor,\n source_id=source_id,\n declared_source_fps=declared_source_fps,\n measured_source_fps=measured_source_fps,\n comes_from_video_file=is_source_video_file,\n )\n if self._buffer_filling_strategy in DROP_OLDEST_STRATEGIES:\n return self._process_stream_frame_dropping_oldest(\n frame_timestamp=frame_timestamp,\n video=video,\n buffer=buffer,\n source_id=source_id,\n is_video_file=is_source_video_file,\n )\n send_frame_drop_update(\n frame_timestamp=frame_timestamp,\n frame_id=self._frame_counter,\n cause=\"DROP_LATEST strategy\",\n status_update_handlers=self._status_update_handlers,\n source_id=source_id,\n )\n return True"
}
] |
[
"function_empty",
"Development"
] |
[
"inference.core.interfaces.camera.video_source.VideoSource.read_frame",
"inference.core.interfaces.camera.video_source.VideoSource.__next__",
"inference.core.interfaces.camera.video_source.send_video_source_status_update",
"inference.core.interfaces.camera.video_source.send_frame_drop_update",
"inference.core.interfaces.camera.video_source.VideoConsumer._consume_stream_frame"
] |
Python
| 2 | 4 |
{
"total_num": 45,
"base_passed_num": 0
}
|
[
"inference.inference.core.utils.preprocess.letterbox_image",
"inference.inference.core.interfaces.stream.sinks._handle_frame_rendering"
] |
inference
|
[
"inference/core/utils/preprocess.py",
"inference/core/interfaces/stream/sinks.py"
] |
[
"tests/inference/unit_tests/core/interfaces/stream/test_sinks.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 298,
"func_start_lineno": 190,
"func_end_lineno": 241,
"func_code": "def letterbox_image(\n image: ImageMetaType,\n desired_size: Tuple[int, int],\n color: Tuple[int, int, int] = (0, 0, 0),\n) -> ImageMetaType:\n \"\"\"\n Resize and pad image to fit the desired size, preserving its aspect ratio.\n\n Parameters:\n - image: numpy array representing the image.\n - desired_size: tuple (width, height) representing the target dimensions.\n - color: tuple (B, G, R) representing the color to pad with.\n\n Returns:\n - letterboxed image.\n \"\"\"\n resized_img = resize_image_keeping_aspect_ratio(\n image=image,\n desired_size=desired_size,\n )\n new_height, new_width = (\n resized_img.shape[:2]\n if isinstance(resized_img, np.ndarray)\n else resized_img.shape[-2:]\n )\n top_padding = (desired_size[1] - new_height) // 2\n bottom_padding = desired_size[1] - new_height - top_padding\n left_padding = (desired_size[0] - new_width) // 2\n right_padding = desired_size[0] - new_width - left_padding\n if isinstance(resized_img, np.ndarray):\n return cv2.copyMakeBorder(\n resized_img,\n top_padding,\n bottom_padding,\n left_padding,\n right_padding,\n cv2.BORDER_CONSTANT,\n value=color,\n )\n elif USE_PYTORCH_FOR_PREPROCESSING:\n return torch.nn.functional.pad(\n resized_img,\n (left_padding, right_padding, top_padding, bottom_padding),\n \"constant\",\n color[0],\n )\n else:\n raise ValueError(\n f\"Received an image of unknown type, {type(resized_img)}; \"\n \"This is most likely a bug. Contact Roboflow team through github issues \"\n \"(https://github.com/roboflow/inference/issues) providing full context of the problem\"\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 570,
"func_start_lineno": 155,
"func_end_lineno": 196,
"func_code": "def _handle_frame_rendering(\n frame: Optional[VideoFrame],\n prediction: dict,\n annotators: List[BaseAnnotator],\n display_size: Optional[Tuple[int, int]],\n display_statistics: bool,\n fps_value: Optional[float],\n) -> np.ndarray:\n if frame is None:\n image = np.zeros((256, 256, 3), dtype=np.uint8)\n else:\n try:\n labels = [p[\"class\"] for p in prediction[\"predictions\"]]\n if hasattr(sv.Detections, \"from_inference\"):\n detections = sv.Detections.from_inference(prediction)\n else:\n detections = sv.Detections.from_inference(prediction)\n image = frame.image.copy()\n for annotator in annotators:\n kwargs = {\n \"scene\": image,\n \"detections\": detections,\n }\n if isinstance(annotator, sv.LabelAnnotator):\n kwargs[\"labels\"] = labels\n image = annotator.annotate(**kwargs)\n except (TypeError, KeyError):\n logger.warning(\n f\"Used `render_boxes(...)` sink, but predictions that were provided do not match the expected \"\n f\"format of object detection prediction that could be accepted by \"\n f\"`supervision.Detection.from_inference(...)\"\n )\n image = frame.image.copy()\n if display_size is not None:\n image = letterbox_image(image, desired_size=display_size)\n if display_statistics:\n image = render_statistics(\n image=image,\n frame_timestamp=(frame.frame_timestamp if frame is not None else None),\n fps=fps_value,\n )\n return image"
}
] |
[
"function_empty",
"Development"
] |
[
"inference.core.utils.preprocess.letterbox_image",
"inference.core.interfaces.stream.sinks._handle_frame_rendering"
] |
Python
| 1 | 2 |
{
"total_num": 11,
"base_passed_num": 8
}
|
[
"inference.inference.core.utils.preprocess.resize_image_keeping_aspect_ratio",
"inference.inference.core.utils.preprocess.letterbox_image"
] |
inference
|
[
"inference/core/utils/preprocess.py",
"inference/core/utils/preprocess.py"
] |
[
"tests/inference/unit_tests/core/utils/test_drawing.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 298,
"func_start_lineno": 253,
"func_end_lineno": 298,
"func_code": "def resize_image_keeping_aspect_ratio(\n image: ImageMetaType,\n desired_size: Tuple[int, int],\n) -> ImageMetaType:\n \"\"\"\n Resize reserving its aspect ratio.\n\n Parameters:\n - image: numpy array representing the image.\n - desired_size: tuple (width, height) representing the target dimensions.\n \"\"\"\n if isinstance(image, np.ndarray):\n img_ratio = image.shape[1] / image.shape[0]\n elif USE_PYTORCH_FOR_PREPROCESSING:\n img_ratio = image.shape[-1] / image.shape[-2]\n else:\n raise ValueError(\n f\"Received an image of unknown type, {type(image)}; \"\n \"This is most likely a bug. Contact Roboflow team through github issues \"\n \"(https://github.com/roboflow/inference/issues) providing full context of the problem\"\n )\n desired_ratio = desired_size[0] / desired_size[1]\n\n # Determine the new dimensions\n if img_ratio >= desired_ratio:\n # Resize by width\n new_width = desired_size[0]\n new_height = int(desired_size[0] / img_ratio)\n else:\n # Resize by height\n new_height = desired_size[1]\n new_width = int(desired_size[1] * img_ratio)\n\n # Resize the image to new dimensions\n if isinstance(image, np.ndarray):\n return cv2.resize(image, (new_width, new_height))\n elif USE_PYTORCH_FOR_PREPROCESSING:\n return torch.nn.functional.interpolate(\n image, size=(new_height, new_width), mode=\"bilinear\"\n )\n else:\n raise ValueError(\n f\"Received an image of unknown type, {type(image)}; \"\n \"This is most likely a bug. Contact Roboflow team through github issues \"\n \"(https://github.com/roboflow/inference/issues) providing full context of the problem\"\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 298,
"func_start_lineno": 190,
"func_end_lineno": 241,
"func_code": "def letterbox_image(\n image: ImageMetaType,\n desired_size: Tuple[int, int],\n color: Tuple[int, int, int] = (0, 0, 0),\n) -> ImageMetaType:\n \"\"\"\n Resize and pad image to fit the desired size, preserving its aspect ratio.\n\n Parameters:\n - image: numpy array representing the image.\n - desired_size: tuple (width, height) representing the target dimensions.\n - color: tuple (B, G, R) representing the color to pad with.\n\n Returns:\n - letterboxed image.\n \"\"\"\n resized_img = resize_image_keeping_aspect_ratio(\n image=image,\n desired_size=desired_size,\n )\n new_height, new_width = (\n resized_img.shape[:2]\n if isinstance(resized_img, np.ndarray)\n else resized_img.shape[-2:]\n )\n top_padding = (desired_size[1] - new_height) // 2\n bottom_padding = desired_size[1] - new_height - top_padding\n left_padding = (desired_size[0] - new_width) // 2\n right_padding = desired_size[0] - new_width - left_padding\n if isinstance(resized_img, np.ndarray):\n return cv2.copyMakeBorder(\n resized_img,\n top_padding,\n bottom_padding,\n left_padding,\n right_padding,\n cv2.BORDER_CONSTANT,\n value=color,\n )\n elif USE_PYTORCH_FOR_PREPROCESSING:\n return torch.nn.functional.pad(\n resized_img,\n (left_padding, right_padding, top_padding, bottom_padding),\n \"constant\",\n color[0],\n )\n else:\n raise ValueError(\n f\"Received an image of unknown type, {type(resized_img)}; \"\n \"This is most likely a bug. Contact Roboflow team through github issues \"\n \"(https://github.com/roboflow/inference/issues) providing full context of the problem\"\n )"
}
] |
[
"function_empty"
] |
[
"inference.core.utils.preprocess.resize_image_keeping_aspect_ratio",
"inference.core.utils.preprocess.letterbox_image"
] |
Python
| 2 | 2 |
{
"total_num": 10,
"base_passed_num": 2
}
|
[
"inference.inference.core.utils.image_utils.validate_numpy_image",
"inference.inference.core.utils.image_utils.load_image_from_numpy_str",
"inference.inference.core.utils.image_utils.load_image_base64",
"inference.inference.core.utils.image_utils.load_image_from_encoded_bytes",
"inference.inference.core.utils.image_utils.attempt_loading_image_from_string",
"inference.inference.core.utils.image_utils.load_image_with_inferred_type"
] |
inference
|
[
"inference/core/utils/image_utils.py",
"inference/core/utils/image_utils.py",
"inference/core/utils/image_utils.py",
"inference/core/utils/image_utils.py",
"inference/core/utils/image_utils.py",
"inference/core/utils/image_utils.py"
] |
[
"tests/inference/unit_tests/core/utils/test_image_utils.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 599,
"func_start_lineno": 353,
"func_end_lineno": 377,
"func_code": "def validate_numpy_image(data: np.ndarray) -> None:\n \"\"\"\n Validate if the provided data is a valid numpy image.\n\n Args:\n data (np.ndarray): The numpy array representing an image.\n\n Raises:\n InvalidNumpyInput: If the provided data is not a valid numpy image.\n \"\"\"\n if not issubclass(type(data), np.ndarray):\n raise InvalidNumpyInput(\n message=f\"Data provided as input could not be decoded into np.ndarray object.\",\n public_message=f\"Data provided as input could not be decoded into np.ndarray object.\",\n )\n if len(data.shape) != 3 and len(data.shape) != 2:\n raise InvalidNumpyInput(\n message=f\"For image given as np.ndarray expected 2 or 3 dimensions, got {len(data.shape)} dimensions.\",\n public_message=f\"For image given as np.ndarray expected 2 or 3 dimensions.\",\n )\n if data.shape[-1] != 3 and data.shape[-1] != 1:\n raise InvalidNumpyInput(\n message=f\"For image given as np.ndarray expected 1 or 3 channels, got {data.shape[-1]} channels.\",\n public_message=\"For image given as np.ndarray expected 1 or 3 channels.\",\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 599,
"func_start_lineno": 318,
"func_end_lineno": 345,
"func_code": "def load_image_from_numpy_str(value: Union[bytes, str]) -> np.ndarray:\n \"\"\"Loads an image from a numpy array string.\n\n Args:\n value (Union[bytes, str]): Base64 string or byte sequence representing the pickled numpy array of the image.\n\n Returns:\n Image.Image: The loaded PIL image.\n\n Raises:\n InvalidNumpyInput: If the numpy data is invalid.\n \"\"\"\n if not ALLOW_NUMPY_INPUT:\n raise InvalidImageTypeDeclared(\n message=f\"NumPy image type is not supported in this configuration of `inference`.\",\n public_message=f\"NumPy image type is not supported in this configuration of `inference`.\",\n )\n try:\n if isinstance(value, str):\n value = pybase64.b64decode(value)\n data = pickle.loads(value)\n except (EOFError, TypeError, pickle.UnpicklingError, binascii.Error) as error:\n raise InvalidNumpyInput(\n message=f\"Could not unpickle image data. Cause: {error}\",\n public_message=\"Could not deserialize pickle payload.\",\n ) from error\n validate_numpy_image(data=data)\n return data"
},
{
"class_start_lineno": 1,
"class_end_lineno": 599,
"func_start_lineno": 258,
"func_end_lineno": 292,
"func_code": "def load_image_base64(\n value: Union[str, bytes], cv_imread_flags=cv2.IMREAD_COLOR\n) -> np.ndarray:\n \"\"\"Loads an image from a base64 encoded string using OpenCV.\n\n Args:\n value (str): Base64 encoded string representing the image.\n\n Returns:\n np.ndarray: The loaded image as a numpy array.\n \"\"\"\n # New routes accept images via json body (str), legacy routes accept bytes which need to be decoded as strings\n if not isinstance(value, str):\n value = value.decode(\"utf-8\")\n value = BASE64_DATA_TYPE_PATTERN.sub(\"\", value)\n try:\n value = pybase64.b64decode(value)\n except binascii.Error as error:\n raise InputImageLoadError(\n message=\"Could not load valid image from base64 string.\",\n public_message=\"Malformed base64 input image.\",\n ) from error\n if len(value) == 0:\n raise InputImageLoadError(\n message=\"Could not load valid image from base64 string.\",\n public_message=\"Empty image payload.\",\n )\n image_np = np.frombuffer(value, np.uint8)\n result = cv2.imdecode(image_np, cv_imread_flags)\n if result is None:\n raise InputImageLoadError(\n message=\"Could not load valid image from base64 string.\",\n public_message=\"Malformed base64 input image.\",\n )\n return result"
},
{
"class_start_lineno": 1,
"class_end_lineno": 599,
"func_start_lineno": 496,
"func_end_lineno": 516,
"func_code": "def load_image_from_encoded_bytes(\n value: bytes, cv_imread_flags: int = cv2.IMREAD_COLOR\n) -> np.ndarray:\n \"\"\"\n Load an image from encoded bytes.\n\n Args:\n value (bytes): The byte sequence representing the image.\n cv_imread_flags (int): OpenCV flags used for image reading.\n\n Returns:\n np.ndarray: The loaded image as a numpy array.\n \"\"\"\n image_np = np.asarray(bytearray(value), dtype=np.uint8)\n image = cv2.imdecode(image_np, cv_imread_flags)\n if image is None:\n raise InputImageLoadError(\n message=f\"Could not decode bytes as image.\",\n public_message=\"Data is not image.\",\n )\n return image"
},
{
"class_start_lineno": 1,
"class_end_lineno": 599,
"func_start_lineno": 215,
"func_end_lineno": 255,
"func_code": "def attempt_loading_image_from_string(\n value: Union[str, bytes, bytearray, _IOBase],\n cv_imread_flags: int = cv2.IMREAD_COLOR,\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Attempt to load an image from a string.\n\n Args:\n value (Union[str, bytes, bytearray, _IOBase]): The image data in string format.\n cv_imread_flags (int): OpenCV flags used for image reading.\n\n Returns:\n Tuple[np.ndarray, bool]: A tuple of the loaded image in numpy array format and a boolean flag indicating if the image is in BGR format.\n \"\"\"\n try:\n return load_image_base64(value=value, cv_imread_flags=cv_imread_flags), True\n except:\n pass\n try:\n return (\n load_image_from_encoded_bytes(value=value, cv_imread_flags=cv_imread_flags),\n True,\n )\n except:\n pass\n try:\n return (\n load_image_from_buffer(value=value, cv_imread_flags=cv_imread_flags),\n True,\n )\n except:\n pass\n try:\n return load_image_from_numpy_str(value=value), True\n except InvalidImageTypeDeclared as error:\n raise error\n except InvalidNumpyInput as error:\n raise InputFormatInferenceFailed(\n message=\"Input image format could not be inferred from string.\",\n public_message=\"Input image format could not be inferred from string.\",\n ) from error"
},
{
"class_start_lineno": 1,
"class_end_lineno": 599,
"func_start_lineno": 180,
"func_end_lineno": 212,
"func_code": "def load_image_with_inferred_type(\n value: Any,\n cv_imread_flags: int = cv2.IMREAD_COLOR,\n) -> Tuple[np.ndarray, bool]:\n \"\"\"Load an image by inferring its type.\n\n Args:\n value (Any): The image data.\n cv_imread_flags (int): Flags used for OpenCV's imread function.\n\n Returns:\n Tuple[np.ndarray, bool]: Loaded image as a numpy array and a boolean indicating if the image is in BGR format.\n\n Raises:\n NotImplementedError: If the image type could not be inferred.\n \"\"\"\n if isinstance(value, (np.ndarray, np.generic)):\n validate_numpy_image(data=value)\n return value, True\n elif isinstance(value, Image.Image):\n return np.asarray(value.convert(\"RGB\")), False\n elif isinstance(value, str) and (value.startswith(\"http\")):\n return load_image_from_url(value=value, cv_imread_flags=cv_imread_flags), True\n elif (\n isinstance(value, str)\n and ALLOW_LOADING_IMAGES_FROM_LOCAL_FILESYSTEM\n and os.path.isfile(value)\n ):\n return cv2.imread(value, cv_imread_flags), True\n else:\n return attempt_loading_image_from_string(\n value=value, cv_imread_flags=cv_imread_flags\n )"
}
] |
[
"function_empty",
"Development"
] |
[
"inference.core.utils.image_utils.validate_numpy_image",
"inference.core.utils.image_utils.load_image_from_numpy_str",
"inference.core.utils.image_utils.load_image_base64",
"inference.core.utils.image_utils.load_image_from_encoded_bytes",
"inference.core.utils.image_utils.attempt_loading_image_from_string",
"inference.core.utils.image_utils.load_image_with_inferred_type"
] |
Python
| 5 | 6 |
{
"total_num": 152,
"base_passed_num": 77
}
|
[
"inference.inference.core.utils.postprocess.get_static_crop_dimensions",
"inference.inference.core.utils.postprocess.post_process_bboxes",
"inference.inference.core.utils.postprocess.post_process_polygons",
"inference.inference.core.utils.postprocess.post_process_keypoints"
] |
inference
|
[
"inference/core/utils/postprocess.py",
"inference/core/utils/postprocess.py",
"inference/core/utils/postprocess.py",
"inference/core/utils/postprocess.py"
] |
[
"tests/inference/unit_tests/core/utils/test_postprocess.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 658,
"func_start_lineno": 473,
"func_end_lineno": 513,
"func_code": "def get_static_crop_dimensions(\n orig_shape: Tuple[int, int],\n preproc: dict,\n disable_preproc_static_crop: bool = False,\n) -> Tuple[Tuple[int, int], Tuple[int, int]]:\n \"\"\"\n Generates a transformation based on preprocessing configuration.\n\n Args:\n orig_shape (tuple): The original shape of the object (e.g., image) - (height, width).\n preproc (dict): Preprocessing configuration dictionary, containing information such as static cropping.\n disable_preproc_static_crop (bool, optional): If true, the static crop preprocessing step is disabled for this call. Default is False.\n\n Returns:\n tuple: A tuple containing the shift in the x and y directions, and the updated original shape after cropping.\n \"\"\"\n try:\n if static_crop_should_be_applied(\n preprocessing_config=preproc,\n disable_preproc_static_crop=disable_preproc_static_crop,\n ):\n x_min, y_min, x_max, y_max = standardise_static_crop(\n static_crop_config=preproc[STATIC_CROP_KEY]\n )\n else:\n x_min, y_min, x_max, y_max = 0, 0, 1, 1\n crop_shift_x, crop_shift_y = (\n round(x_min * orig_shape[1]),\n round(y_min * orig_shape[0]),\n )\n cropped_percent_x = x_max - x_min\n cropped_percent_y = y_max - y_min\n orig_shape = (\n round(orig_shape[0] * cropped_percent_y),\n round(orig_shape[1] * cropped_percent_x),\n )\n return (crop_shift_x, crop_shift_y), orig_shape\n except KeyError as error:\n raise PostProcessingError(\n f\"Could not find a proper configuration key {error} in post-processing.\"\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 658,
"func_start_lineno": 98,
"func_end_lineno": 163,
"func_code": "def post_process_bboxes(\n predictions: List[List[List[float]]],\n infer_shape: Tuple[int, int],\n img_dims: List[Tuple[int, int]],\n preproc: dict,\n disable_preproc_static_crop: bool = False,\n resize_method: str = \"Stretch to\",\n) -> List[List[List[float]]]:\n \"\"\"\n Postprocesses each patch of detections by scaling them to the original image coordinates and by shifting them based on a static crop preproc (if applied).\n\n Args:\n predictions (List[List[List[float]]]): The predictions output from NMS, indices are: batch x prediction x [x1, y1, x2, y2, ...].\n infer_shape (Tuple[int, int]): The shape of the inference image.\n img_dims (List[Tuple[int, int]]): The dimensions of the original image for each batch, indices are: batch x [height, width].\n preproc (dict): Preprocessing configuration dictionary.\n disable_preproc_static_crop (bool, optional): If true, the static crop preprocessing step is disabled for this call. Default is False.\n resize_method (str, optional): Resize method for image. Defaults to \"Stretch to\".\n\n Returns:\n List[List[List[float]]]: The scaled and shifted predictions, indices are: batch x prediction x [x1, y1, x2, y2, ...].\n \"\"\"\n\n # Get static crop params\n scaled_predictions = []\n # Loop through batches\n for i, batch_predictions in enumerate(predictions):\n if len(batch_predictions) == 0:\n scaled_predictions.append([])\n continue\n np_batch_predictions = np.array(batch_predictions)\n # Get bboxes from predictions (x1,y1,x2,y2)\n predicted_bboxes = np_batch_predictions[:, :4]\n (crop_shift_x, crop_shift_y), origin_shape = get_static_crop_dimensions(\n img_dims[i],\n preproc,\n disable_preproc_static_crop=disable_preproc_static_crop,\n )\n if resize_method == \"Stretch to\":\n predicted_bboxes = stretch_bboxes(\n predicted_bboxes=predicted_bboxes,\n infer_shape=infer_shape,\n origin_shape=origin_shape,\n )\n elif (\n resize_method == \"Fit (black edges) in\"\n or resize_method == \"Fit (white edges) in\"\n or resize_method == \"Fit (grey edges) in\"\n ):\n predicted_bboxes = undo_image_padding_for_predicted_boxes(\n predicted_bboxes=predicted_bboxes,\n infer_shape=infer_shape,\n origin_shape=origin_shape,\n )\n predicted_bboxes = clip_boxes_coordinates(\n predicted_bboxes=predicted_bboxes,\n origin_shape=origin_shape,\n )\n predicted_bboxes = shift_bboxes(\n bboxes=predicted_bboxes,\n shift_x=crop_shift_x,\n shift_y=crop_shift_y,\n )\n np_batch_predictions[:, :4] = predicted_bboxes\n scaled_predictions.append(np_batch_predictions.tolist())\n return scaled_predictions"
},
{
"class_start_lineno": 1,
"class_end_lineno": 658,
"func_start_lineno": 393,
"func_end_lineno": 441,
"func_code": "def post_process_polygons(\n origin_shape: Tuple[int, int],\n polys: List[List[Tuple[float, float]]],\n infer_shape: Tuple[int, int],\n preproc: dict,\n resize_method: str = \"Stretch to\",\n) -> List[List[Tuple[float, float]]]:\n \"\"\"Scales and shifts polygons based on the given image shapes and preprocessing method.\n\n This function performs polygon scaling and shifting based on the specified resizing method and\n pre-processing steps. The polygons are transformed according to the ratio and padding between two images.\n\n Args:\n origin_shape (tuple of int): Shape of the source image (height, width).\n infer_shape (tuple of int): Shape of the target image (height, width).\n polys (list of list of tuple): List of polygons, where each polygon is represented by a list of (x, y) coordinates.\n preproc (object): Preprocessing details used for generating the transformation.\n resize_method (str, optional): Resizing method, either \"Stretch to\", \"Fit (black edges) in\", \"Fit (white edges) in\", or \"Fit (grey edges) in\". Defaults to \"Stretch to\".\n\n Returns:\n list of list of tuple: A list of shifted and scaled polygons.\n \"\"\"\n (crop_shift_x, crop_shift_y), origin_shape = get_static_crop_dimensions(\n origin_shape, preproc\n )\n new_polys = []\n if resize_method == \"Stretch to\":\n width_ratio = origin_shape[1] / infer_shape[1]\n height_ratio = origin_shape[0] / infer_shape[0]\n new_polys = scale_polygons(\n polygons=polys,\n x_scale=width_ratio,\n y_scale=height_ratio,\n )\n elif resize_method in {\n \"Fit (black edges) in\",\n \"Fit (white edges) in\",\n \"Fit (grey edges) in\",\n }:\n new_polys = undo_image_padding_for_predicted_polygons(\n polygons=polys,\n infer_shape=infer_shape,\n origin_shape=origin_shape,\n )\n shifted_polys = []\n for poly in new_polys:\n poly = [(p[0] + crop_shift_x, p[1] + crop_shift_y) for p in poly]\n shifted_polys.append(poly)\n return shifted_polys"
},
{
"class_start_lineno": 1,
"class_end_lineno": 658,
"func_start_lineno": 522,
"func_end_lineno": 585,
"func_code": "def post_process_keypoints(\n predictions: List[List[List[float]]],\n keypoints_start_index: int,\n infer_shape: Tuple[int, int],\n img_dims: List[Tuple[int, int]],\n preproc: dict,\n disable_preproc_static_crop: bool = False,\n resize_method: str = \"Stretch to\",\n) -> List[List[List[float]]]:\n \"\"\"Scales and shifts keypoints based on the given image shapes and preprocessing method.\n\n This function performs polygon scaling and shifting based on the specified resizing method and\n pre-processing steps. The polygons are transformed according to the ratio and padding between two images.\n\n Args:\n predictions: predictions from model\n keypoints_start_index: offset in the 3rd dimension pointing where in the prediction start keypoints [(x, y, cfg), ...] for each keypoint class\n img_dims list of (tuple of int): Shape of the source image (height, width).\n infer_shape (tuple of int): Shape of the target image (height, width).\n preproc (object): Preprocessing details used for generating the transformation.\n resize_method (str, optional): Resizing method, either \"Stretch to\", \"Fit (black edges) in\", \"Fit (white edges) in\", or \"Fit (grey edges) in\". Defaults to \"Stretch to\".\n disable_preproc_static_crop: flag to disable static crop\n Returns:\n list of list of list: predictions with post-processed keypoints\n \"\"\"\n # Get static crop params\n scaled_predictions = []\n # Loop through batches\n for i, batch_predictions in enumerate(predictions):\n if len(batch_predictions) == 0:\n scaled_predictions.append([])\n continue\n np_batch_predictions = np.array(batch_predictions)\n keypoints = np_batch_predictions[:, keypoints_start_index:]\n (crop_shift_x, crop_shift_y), origin_shape = get_static_crop_dimensions(\n img_dims[i],\n preproc,\n disable_preproc_static_crop=disable_preproc_static_crop,\n )\n if resize_method == \"Stretch to\":\n keypoints = stretch_keypoints(\n keypoints=keypoints,\n infer_shape=infer_shape,\n origin_shape=origin_shape,\n )\n elif (\n resize_method == \"Fit (black edges) in\"\n or resize_method == \"Fit (white edges) in\"\n or resize_method == \"Fit (grey edges) in\"\n ):\n keypoints = undo_image_padding_for_predicted_keypoints(\n keypoints=keypoints,\n infer_shape=infer_shape,\n origin_shape=origin_shape,\n )\n keypoints = clip_keypoints_coordinates(\n keypoints=keypoints, origin_shape=origin_shape\n )\n keypoints = shift_keypoints(\n keypoints=keypoints, shift_x=crop_shift_x, shift_y=crop_shift_y\n )\n np_batch_predictions[:, keypoints_start_index:] = keypoints\n scaled_predictions.append(np_batch_predictions.tolist())\n return scaled_predictions"
}
] |
[
"function_empty",
"Development"
] |
[
"inference.core.utils.postprocess.get_static_crop_dimensions",
"inference.core.utils.postprocess.post_process_bboxes",
"inference.core.utils.postprocess.post_process_polygons",
"inference.core.utils.postprocess.post_process_keypoints"
] |
Python
| 3 | 4 |
{
"total_num": 54,
"base_passed_num": 24
}
|
[
"langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_action_agent",
"langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_perform_agent_action",
"langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_iter_next_step",
"langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_take_next_step"
] |
langchain
|
[
"langchain/agents/agent.py",
"langchain/agents/agent.py",
"langchain/agents/agent.py",
"langchain/agents/agent.py"
] |
[
"libs/langchain/tests/unit_tests/agents/test_agent.py"
] |
[
{
"class_start_lineno": 1047,
"class_end_lineno": 1806,
"func_start_lineno": 1177,
"func_end_lineno": 1189,
"func_code": " def _action_agent(self) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:\n \"\"\"Type cast self.agent.\n\n If the `agent` attribute is a Runnable, it will be converted one of\n RunnableAgentType in the validate_runnable_agent root_validator.\n\n To support instantiating with a Runnable, here we explicitly cast the type\n to reflect the changes made in the root_validator.\n \"\"\"\n if isinstance(self.agent, Runnable):\n return cast(RunnableAgentType, self.agent)\n else:\n return self.agent"
},
{
"class_start_lineno": 1047,
"class_end_lineno": 1806,
"func_start_lineno": 1419,
"func_end_lineno": 1456,
"func_code": " def _perform_agent_action(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n agent_action: AgentAction,\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> AgentStep:\n if run_manager:\n run_manager.on_agent_action(agent_action, color=\"green\")\n # Otherwise we lookup the tool\n if agent_action.tool in name_to_tool_map:\n tool = name_to_tool_map[agent_action.tool]\n return_direct = tool.return_direct\n color = color_mapping[agent_action.tool]\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n if return_direct:\n tool_run_kwargs[\"llm_prefix\"] = \"\"\n # We then call the tool on the tool input to get an observation\n observation = tool.run(\n agent_action.tool_input,\n verbose=self.verbose,\n color=color,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n else:\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n observation = InvalidTool().run(\n {\n \"requested_tool_name\": agent_action.tool,\n \"available_tool_names\": list(name_to_tool_map.keys()),\n },\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n return AgentStep(action=agent_action, observation=observation)"
},
{
"class_start_lineno": 1047,
"class_end_lineno": 1806,
"func_start_lineno": 1342,
"func_end_lineno": 1417,
"func_code": " def _iter_next_step(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n inputs: Dict[str, str],\n intermediate_steps: List[Tuple[AgentAction, str]],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Iterator[Union[AgentFinish, AgentAction, AgentStep]]:\n \"\"\"Take a single step in the thought-action-observation loop.\n\n Override this to take control of how the agent makes and acts on choices.\n \"\"\"\n try:\n intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)\n\n # Call the LLM to see what to do.\n output = self._action_agent.plan(\n intermediate_steps,\n callbacks=run_manager.get_child() if run_manager else None,\n **inputs,\n )\n except OutputParserException as e:\n if isinstance(self.handle_parsing_errors, bool):\n raise_error = not self.handle_parsing_errors\n else:\n raise_error = False\n if raise_error:\n raise ValueError(\n \"An output parsing error occurred. \"\n \"In order to pass this error back to the agent and have it try \"\n \"again, pass `handle_parsing_errors=True` to the AgentExecutor. \"\n f\"This is the error: {str(e)}\"\n )\n text = str(e)\n if isinstance(self.handle_parsing_errors, bool):\n if e.send_to_llm:\n observation = str(e.observation)\n text = str(e.llm_output)\n else:\n observation = \"Invalid or incomplete response\"\n elif isinstance(self.handle_parsing_errors, str):\n observation = self.handle_parsing_errors\n elif callable(self.handle_parsing_errors):\n observation = self.handle_parsing_errors(e)\n else:\n raise ValueError(\"Got unexpected type of `handle_parsing_errors`\")\n output = AgentAction(\"_Exception\", observation, text)\n if run_manager:\n run_manager.on_agent_action(output, color=\"green\")\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n observation = ExceptionTool().run(\n output.tool_input,\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n yield AgentStep(action=output, observation=observation)\n return\n\n # If the tool chosen is the finishing tool, then we end and return.\n if isinstance(output, AgentFinish):\n yield output\n return\n\n actions: List[AgentAction]\n if isinstance(output, AgentAction):\n actions = [output]\n else:\n actions = output\n for agent_action in actions:\n yield agent_action\n for agent_action in actions:\n yield self._perform_agent_action(\n name_to_tool_map, color_mapping, agent_action, run_manager\n )"
},
{
"class_start_lineno": 1047,
"class_end_lineno": 1806,
"func_start_lineno": 1321,
"func_end_lineno": 1340,
"func_code": " def _take_next_step(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n inputs: Dict[str, str],\n intermediate_steps: List[Tuple[AgentAction, str]],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:\n return self._consume_next_step(\n [\n a\n for a in self._iter_next_step(\n name_to_tool_map,\n color_mapping,\n inputs,\n intermediate_steps,\n run_manager,\n )\n ]\n )"
}
] |
[
"function_empty",
"Development"
] |
[
"langchain.agents.agent.AgentExecutor._action_agent",
"langchain.agents.agent.AgentExecutor._perform_agent_action",
"langchain.agents.agent.AgentExecutor._iter_next_step",
"langchain.agents.agent.AgentExecutor._take_next_step"
] |
Python
| 1 | 4 |
{
"total_num": 14,
"base_passed_num": 2
}
|
[
"langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_action_agent",
"langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_perform_agent_action",
"langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_iter_next_step"
] |
langchain
|
[
"langchain/agents/agent.py",
"langchain/agents/agent.py",
"langchain/agents/agent.py"
] |
[
"libs/langchain/tests/unit_tests/agents/test_agent_iterator.py"
] |
[
{
"class_start_lineno": 1047,
"class_end_lineno": 1806,
"func_start_lineno": 1177,
"func_end_lineno": 1189,
"func_code": " def _action_agent(self) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:\n \"\"\"Type cast self.agent.\n\n If the `agent` attribute is a Runnable, it will be converted one of\n RunnableAgentType in the validate_runnable_agent root_validator.\n\n To support instantiating with a Runnable, here we explicitly cast the type\n to reflect the changes made in the root_validator.\n \"\"\"\n if isinstance(self.agent, Runnable):\n return cast(RunnableAgentType, self.agent)\n else:\n return self.agent"
},
{
"class_start_lineno": 1047,
"class_end_lineno": 1806,
"func_start_lineno": 1419,
"func_end_lineno": 1456,
"func_code": " def _perform_agent_action(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n agent_action: AgentAction,\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> AgentStep:\n if run_manager:\n run_manager.on_agent_action(agent_action, color=\"green\")\n # Otherwise we lookup the tool\n if agent_action.tool in name_to_tool_map:\n tool = name_to_tool_map[agent_action.tool]\n return_direct = tool.return_direct\n color = color_mapping[agent_action.tool]\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n if return_direct:\n tool_run_kwargs[\"llm_prefix\"] = \"\"\n # We then call the tool on the tool input to get an observation\n observation = tool.run(\n agent_action.tool_input,\n verbose=self.verbose,\n color=color,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n else:\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n observation = InvalidTool().run(\n {\n \"requested_tool_name\": agent_action.tool,\n \"available_tool_names\": list(name_to_tool_map.keys()),\n },\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n return AgentStep(action=agent_action, observation=observation)"
},
{
"class_start_lineno": 1047,
"class_end_lineno": 1806,
"func_start_lineno": 1342,
"func_end_lineno": 1417,
"func_code": " def _iter_next_step(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n inputs: Dict[str, str],\n intermediate_steps: List[Tuple[AgentAction, str]],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Iterator[Union[AgentFinish, AgentAction, AgentStep]]:\n \"\"\"Take a single step in the thought-action-observation loop.\n\n Override this to take control of how the agent makes and acts on choices.\n \"\"\"\n try:\n intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)\n\n # Call the LLM to see what to do.\n output = self._action_agent.plan(\n intermediate_steps,\n callbacks=run_manager.get_child() if run_manager else None,\n **inputs,\n )\n except OutputParserException as e:\n if isinstance(self.handle_parsing_errors, bool):\n raise_error = not self.handle_parsing_errors\n else:\n raise_error = False\n if raise_error:\n raise ValueError(\n \"An output parsing error occurred. \"\n \"In order to pass this error back to the agent and have it try \"\n \"again, pass `handle_parsing_errors=True` to the AgentExecutor. \"\n f\"This is the error: {str(e)}\"\n )\n text = str(e)\n if isinstance(self.handle_parsing_errors, bool):\n if e.send_to_llm:\n observation = str(e.observation)\n text = str(e.llm_output)\n else:\n observation = \"Invalid or incomplete response\"\n elif isinstance(self.handle_parsing_errors, str):\n observation = self.handle_parsing_errors\n elif callable(self.handle_parsing_errors):\n observation = self.handle_parsing_errors(e)\n else:\n raise ValueError(\"Got unexpected type of `handle_parsing_errors`\")\n output = AgentAction(\"_Exception\", observation, text)\n if run_manager:\n run_manager.on_agent_action(output, color=\"green\")\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n observation = ExceptionTool().run(\n output.tool_input,\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n yield AgentStep(action=output, observation=observation)\n return\n\n # If the tool chosen is the finishing tool, then we end and return.\n if isinstance(output, AgentFinish):\n yield output\n return\n\n actions: List[AgentAction]\n if isinstance(output, AgentAction):\n actions = [output]\n else:\n actions = output\n for agent_action in actions:\n yield agent_action\n for agent_action in actions:\n yield self._perform_agent_action(\n name_to_tool_map, color_mapping, agent_action, run_manager\n )"
}
] |
[
"function_empty",
"Development"
] |
[
"langchain.agents.agent.AgentExecutor._action_agent",
"langchain.agents.agent.AgentExecutor._perform_agent_action",
"langchain.agents.agent.AgentExecutor._iter_next_step"
] |
Python
| 1 | 3 |
{
"total_num": 14,
"base_passed_num": 0
}
|
[
"langchain.libs.langchain.langchain.agents.format_scratchpad.openai_functions._create_function_message",
"langchain.libs.langchain.langchain.agents.format_scratchpad.openai_functions._convert_agent_action_to_messages",
"langchain.libs.langchain.langchain.agents.format_scratchpad.openai_functions.format_to_openai_function_messages"
] |
langchain
|
[
"langchain/agents/format_scratchpad/openai_functions.py",
"langchain/agents/format_scratchpad/openai_functions.py",
"langchain/agents/format_scratchpad/openai_functions.py"
] |
[
"libs/langchain/tests/unit_tests/agents/format_scratchpad/test_openai_functions.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 78,
"func_start_lineno": 30,
"func_end_lineno": 53,
"func_code": "def _create_function_message(\n agent_action: AgentAction, observation: str\n) -> FunctionMessage:\n \"\"\"Convert agent action and observation into a function message.\n Args:\n agent_action: the tool invocation request from the agent.\n observation: the result of the tool invocation.\n Returns:\n FunctionMessage that corresponds to the original tool invocation.\n\n Raises:\n ValueError: if the observation cannot be converted to a string.\n \"\"\"\n if not isinstance(observation, str):\n try:\n content = json.dumps(observation, ensure_ascii=False)\n except Exception:\n content = str(observation)\n else:\n content = observation\n return FunctionMessage(\n name=agent_action.tool,\n content=content,\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 78,
"func_start_lineno": 8,
"func_end_lineno": 27,
"func_code": "def _convert_agent_action_to_messages(\n agent_action: AgentAction, observation: str\n) -> List[BaseMessage]:\n \"\"\"Convert an agent action to a message.\n\n This code is used to reconstruct the original AI message from the agent action.\n\n Args:\n agent_action: Agent action to convert.\n\n Returns:\n AIMessage or the previous messages plus a FunctionMessage that corresponds to\n the original tool invocation\n \"\"\"\n if isinstance(agent_action, AgentActionMessageLog):\n return list(agent_action.message_log) + [\n _create_function_message(agent_action, observation)\n ]\n else:\n return [AIMessage(content=agent_action.log)]"
},
{
"class_start_lineno": 1,
"class_end_lineno": 78,
"func_start_lineno": 56,
"func_end_lineno": 74,
"func_code": "def format_to_openai_function_messages(\n intermediate_steps: Sequence[Tuple[AgentAction, str]],\n) -> List[BaseMessage]:\n \"\"\"Convert (AgentAction, tool output) tuples into FunctionMessages.\n\n Args:\n intermediate_steps: Steps the LLM has taken to date, along with observations\n\n Returns:\n list of messages to send to the LLM for the next prediction\n Raises:\n ValueError: if the observation cannot be converted to a string.\n \"\"\"\n messages = []\n\n for agent_action, observation in intermediate_steps:\n messages.extend(_convert_agent_action_to_messages(agent_action, observation))\n\n return messages"
}
] |
[
"function_empty"
] |
[
"langchain.agents.format_scratchpad.openai_functions._create_function_message",
"langchain.agents.format_scratchpad.openai_functions._convert_agent_action_to_messages",
"langchain.agents.format_scratchpad.openai_functions.format_to_openai_function_messages"
] |
Python
| 3 | 3 |
{
"total_num": 2,
"base_passed_num": 0
}
|
[
"langchain.libs.langchain.langchain.agents.format_scratchpad.tools._create_tool_message",
"langchain.libs.langchain.langchain.agents.format_scratchpad.tools.format_to_tool_messages"
] |
langchain
|
[
"langchain/agents/format_scratchpad/tools.py",
"langchain/agents/format_scratchpad/tools.py"
] |
[
"libs/langchain/tests/unit_tests/agents/format_scratchpad/test_openai_tools.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 63,
"func_start_lineno": 14,
"func_end_lineno": 39,
"func_code": "def _create_tool_message(\n agent_action: ToolAgentAction, observation: str\n) -> ToolMessage:\n \"\"\"Convert agent action and observation into a tool message.\n\n Args:\n agent_action: the tool invocation request from the agent.\n observation: the result of the tool invocation.\n Returns:\n ToolMessage that corresponds to the original tool invocation.\n\n Raises:\n ValueError: if the observation cannot be converted to a string.\n \"\"\"\n if not isinstance(observation, str):\n try:\n content = json.dumps(observation, ensure_ascii=False)\n except Exception:\n content = str(observation)\n else:\n content = observation\n return ToolMessage(\n tool_call_id=agent_action.tool_call_id,\n content=content,\n additional_kwargs={\"name\": agent_action.tool},\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 63,
"func_start_lineno": 42,
"func_end_lineno": 63,
"func_code": "def format_to_tool_messages(\n intermediate_steps: Sequence[Tuple[AgentAction, str]],\n) -> List[BaseMessage]:\n \"\"\"Convert (AgentAction, tool output) tuples into ToolMessages.\n\n Args:\n intermediate_steps: Steps the LLM has taken to date, along with observations.\n\n Returns:\n list of messages to send to the LLM for the next prediction.\n\n \"\"\"\n messages = []\n for agent_action, observation in intermediate_steps:\n if isinstance(agent_action, ToolAgentAction):\n new_messages = list(agent_action.message_log) + [\n _create_tool_message(agent_action, observation)\n ]\n messages.extend([new for new in new_messages if new not in messages])\n else:\n messages.append(AIMessage(content=agent_action.log))\n return messages"
}
] |
[
"function_empty"
] |
[
"langchain.agents.format_scratchpad.tools._create_tool_message",
"langchain.agents.format_scratchpad.tools.format_to_tool_messages"
] |
Python
| 2 | 2 |
{
"total_num": 2,
"base_passed_num": 0
}
|
[
"langchain.libs.langchain.langchain.evaluation.comparison.eval_chain.PairwiseStringEvalChain::_prepare_input",
"langchain.libs.langchain.langchain.evaluation.comparison.eval_chain.PairwiseStringEvalChain::_evaluate_string_pairs"
] |
langchain
|
[
"langchain/evaluation/comparison/eval_chain.py",
"langchain/evaluation/comparison/eval_chain.py",
"langchain/evaluation/schema.py"
] |
[
"libs/langchain/tests/unit_tests/evaluation/comparison/test_eval_chain.py"
] |
[
{
"class_start_lineno": 154,
"class_end_lineno": 391,
"func_start_lineno": 274,
"func_end_lineno": 300,
"func_code": " def _prepare_input(\n self,\n prediction: str,\n prediction_b: str,\n input: Optional[str],\n reference: Optional[str],\n ) -> dict:\n \"\"\"Prepare the input for the chain.\n\n Args:\n prediction (str): The output string from the first model.\n prediction_b (str): The output string from the second model.\n input (str, optional): The input or task string.\n reference (str, optional): The reference string, if any.\n\n Returns:\n dict: The prepared input for the chain.\n\n \"\"\"\n input_ = {\n \"prediction\": prediction,\n \"prediction_b\": prediction_b,\n \"input\": input,\n }\n if self.requires_reference:\n input_[\"reference\"] = reference\n return input_"
},
{
"class_start_lineno": 154,
"class_end_lineno": 391,
"func_start_lineno": 309,
"func_end_lineno": 349,
"func_code": " def _evaluate_string_pairs(\n self,\n *,\n prediction: str,\n prediction_b: str,\n input: Optional[str] = None,\n reference: Optional[str] = None,\n callbacks: Callbacks = None,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n include_run_info: bool = False,\n **kwargs: Any,\n ) -> dict:\n \"\"\"Evaluate whether output A is preferred to output B.\n\n Args:\n prediction (str): The output string from the first model.\n prediction_b (str): The output string from the second model.\n input (str, optional): The input or task string.\n callbacks (Callbacks, optional): The callbacks to use.\n reference (str, optional): The reference string, if any.\n **kwargs (Any): Additional keyword arguments.\n\n Returns:\n dict: A dictionary containing:\n - reasoning: The reasoning for the preference.\n - value: The preference value, which is either 'A', 'B', or None\n for no preference.\n - score: The preference score, which is 1 for 'A', 0 for 'B',\n and 0.5 for None.\n\n \"\"\"\n input_ = self._prepare_input(prediction, prediction_b, input, reference)\n result = self(\n inputs=input_,\n callbacks=callbacks,\n tags=tags,\n metadata=metadata,\n include_run_info=include_run_info,\n )\n return self._prepare_output(result)"
},
{
"class_start_lineno": null,
"class_end_lineno": null,
"func_start_lineno": null,
"func_end_lineno": null,
"func_code": "未找到 PairwiseStringEvalChain::evaluate_string_pairs"
}
] |
[
"function_empty"
] |
[
"langchain.evaluation.comparison.eval_chain.PairwiseStringEvalChain._prepare_input",
"langchain.evaluation.comparison.eval_chain.PairwiseStringEvalChain._evaluate_string_pairs",
"langchain.evaluation.schema.PairwiseStringEvalChain.evaluate_string_pairs"
] |
Python
| 2 | 2 |
{
"total_num": 18,
"base_passed_num": 17
}
|
[
"langchain.libs.langchain.langchain.evaluation.scoring.eval_chain.ScoreStringEvalChain::_prepare_input",
"langchain.libs.langchain.langchain.evaluation.scoring.eval_chain.ScoreStringEvalChain::_evaluate_strings"
] |
langchain
|
[
"langchain/evaluation/scoring/eval_chain.py",
"langchain/evaluation/scoring/eval_chain.py",
"langchain/evaluation/schema.py"
] |
[
"libs/langchain/tests/unit_tests/evaluation/scoring/test_eval_chain.py"
] |
[
{
"class_start_lineno": 147,
"class_end_lineno": 396,
"func_start_lineno": 289,
"func_end_lineno": 313,
"func_code": " def _prepare_input(\n self,\n prediction: str,\n input: Optional[str],\n reference: Optional[str],\n ) -> dict:\n \"\"\"Prepare the input for the chain.\n\n Args:\n prediction (str): The output string from the first model.\n prediction_b (str): The output string from the second model.\n input (str, optional): The input or task string.\n reference (str, optional): The reference string, if any.\n\n Returns:\n dict: The prepared input for the chain.\n\n \"\"\"\n input_ = {\n \"prediction\": prediction,\n \"input\": input,\n }\n if self.requires_reference:\n input_[\"reference\"] = reference\n return input_"
},
{
"class_start_lineno": 147,
"class_end_lineno": 396,
"func_start_lineno": 324,
"func_end_lineno": 359,
"func_code": " def _evaluate_strings(\n self,\n *,\n prediction: str,\n input: Optional[str] = None,\n reference: Optional[str] = None,\n callbacks: Callbacks = None,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n include_run_info: bool = False,\n **kwargs: Any,\n ) -> dict:\n \"\"\"Score the output string.\n\n Args:\n prediction (str): The output string from the first model.\n input (str, optional): The input or task string.\n callbacks (Callbacks, optional): The callbacks to use.\n reference (str, optional): The reference string, if any.\n **kwargs (Any): Additional keyword arguments.\n\n Returns:\n dict: A dictionary containing:\n - reasoning: The reasoning for the preference.\n - score: A score between 1 and 10.\n\n \"\"\"\n input_ = self._prepare_input(prediction, input, reference)\n result = self(\n inputs=input_,\n callbacks=callbacks,\n tags=tags,\n metadata=metadata,\n include_run_info=include_run_info,\n )\n return self._prepare_output(result)"
},
{
"class_start_lineno": null,
"class_end_lineno": null,
"func_start_lineno": null,
"func_end_lineno": null,
"func_code": "未找到 ScoreStringEvalChain::evaluate_strings"
}
] |
[
"function_empty"
] |
[
"langchain.evaluation.scoring.eval_chain.ScoreStringEvalChain._prepare_input",
"langchain.evaluation.scoring.eval_chain.ScoreStringEvalChain._evaluate_strings",
"langchain.evaluation.schema.ScoreStringEvalChain.evaluate_strings"
] |
Python
| 2 | 2 |
{
"total_num": 3,
"base_passed_num": 2
}
|
[
"langchain.libs.langchain.langchain.retrievers.ensemble.unique_by_key",
"langchain.libs.langchain.langchain.retrievers.ensemble.EnsembleRetriever::weighted_reciprocal_rank"
] |
langchain
|
[
"langchain/retrievers/ensemble.py",
"langchain/retrievers/ensemble.py"
] |
[
"libs/langchain/tests/unit_tests/retrievers/test_ensemble.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 337,
"func_start_lineno": 40,
"func_end_lineno": 54,
"func_code": "def unique_by_key(iterable: Iterable[T], key: Callable[[T], H]) -> Iterator[T]:\n \"\"\"Yield unique elements of an iterable based on a key function.\n\n Args:\n iterable: The iterable to filter.\n key: A function that returns a hashable key for each element.\n\n Yields:\n Unique elements of the iterable based on the key function.\n \"\"\"\n seen = set()\n for e in iterable:\n if (k := key(e)) not in seen:\n seen.add(k)\n yield e"
},
{
"class_start_lineno": 57,
"class_end_lineno": 337,
"func_start_lineno": 288,
"func_end_lineno": 337,
"func_code": " def weighted_reciprocal_rank(\n self, doc_lists: List[List[Document]]\n ) -> List[Document]:\n \"\"\"\n Perform weighted Reciprocal Rank Fusion on multiple rank lists.\n You can find more details about RRF here:\n https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf\n\n Args:\n doc_lists: A list of rank lists, where each rank list contains unique items.\n\n Returns:\n list: The final aggregated list of items sorted by their weighted RRF\n scores in descending order.\n \"\"\"\n if len(doc_lists) != len(self.weights):\n raise ValueError(\n \"Number of rank lists must be equal to the number of weights.\"\n )\n\n # Associate each doc's content with its RRF score for later sorting by it\n # Duplicated contents across retrievers are collapsed & scored cumulatively\n rrf_score: Dict[str, float] = defaultdict(float)\n for doc_list, weight in zip(doc_lists, self.weights):\n for rank, doc in enumerate(doc_list, start=1):\n rrf_score[\n (\n doc.page_content\n if self.id_key is None\n else doc.metadata[self.id_key]\n )\n ] += weight / (rank + self.c)\n\n # Docs are deduplicated by their contents then sorted by their scores\n all_docs = chain.from_iterable(doc_lists)\n sorted_docs = sorted(\n unique_by_key(\n all_docs,\n lambda doc: (\n doc.page_content\n if self.id_key is None\n else doc.metadata[self.id_key]\n ),\n ),\n reverse=True,\n key=lambda doc: rrf_score[\n doc.page_content if self.id_key is None else doc.metadata[self.id_key]\n ],\n )\n return sorted_docs"
}
] |
[
"function_empty",
"Development"
] |
[
"langchain.retrievers.ensemble.unique_by_key",
"langchain.retrievers.ensemble.EnsembleRetriever.weighted_reciprocal_rank"
] |
Python
| 1 | 2 |
{
"total_num": 1,
"base_passed_num": 0
}
|
[
"langchain.libs.langchain.langchain.smith.evaluation.runner_utils._get_prompt",
"langchain.libs.langchain.langchain.smith.evaluation.runner_utils._get_messages",
"langchain.libs.langchain.langchain.smith.evaluation.runner_utils._run_llm"
] |
langchain
|
[
"langchain/smith/evaluation/runner_utils.py",
"langchain/smith/evaluation/runner_utils.py",
"langchain/smith/evaluation/runner_utils.py"
] |
[
"libs/langchain/tests/unit_tests/smith/evaluation/test_runner_utils.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 1523,
"func_start_lineno": 232,
"func_end_lineno": 279,
"func_code": "def _get_prompt(inputs: Dict[str, Any]) -> str:\n \"\"\"Get prompt from inputs.\n\n Args:\n inputs: The input dictionary.\n\n Returns:\n A string prompt.\n Raises:\n InputFormatError: If the input format is invalid.\n \"\"\"\n if not inputs:\n raise InputFormatError(\"Inputs should not be empty.\")\n\n prompts = []\n if \"prompt\" in inputs:\n if not isinstance(inputs[\"prompt\"], str):\n raise InputFormatError(\n f\"Expected string for 'prompt', got {type(inputs['prompt']).__name__}\"\n )\n prompts = [inputs[\"prompt\"]]\n elif \"prompts\" in inputs:\n if not isinstance(inputs[\"prompts\"], list) or not all(\n isinstance(i, str) for i in inputs[\"prompts\"]\n ):\n raise InputFormatError(\n \"Expected list of strings for 'prompts',\"\n f\" got {type(inputs['prompts']).__name__}\"\n )\n prompts = inputs[\"prompts\"]\n elif len(inputs) == 1:\n prompt_ = next(iter(inputs.values()))\n if isinstance(prompt_, str):\n prompts = [prompt_]\n elif isinstance(prompt_, list) and all(isinstance(i, str) for i in prompt_):\n prompts = prompt_\n else:\n raise InputFormatError(f\"LLM Run expects string prompt input. Got {inputs}\")\n else:\n raise InputFormatError(\n f\"LLM Run expects 'prompt' or 'prompts' in inputs. Got {inputs}\"\n )\n if len(prompts) == 1:\n return prompts[0]\n else:\n raise InputFormatError(\n f\"LLM Run expects single prompt input. Got {len(prompts)} prompts.\"\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 1523,
"func_start_lineno": 292,
"func_end_lineno": 328,
"func_code": "def _get_messages(inputs: Dict[str, Any]) -> dict:\n \"\"\"Get Chat Messages from inputs.\n\n Args:\n inputs: The input dictionary.\n\n Returns:\n A list of chat messages.\n Raises:\n InputFormatError: If the input format is invalid.\n \"\"\"\n if not inputs:\n raise InputFormatError(\"Inputs should not be empty.\")\n input_copy = inputs.copy()\n if \"messages\" in inputs:\n input_copy[\"input\"] = input_copy.pop(\"messages\")\n elif len(inputs) == 1:\n input_copy[\"input\"] = next(iter(inputs.values()))\n if \"input\" in input_copy:\n raw_messages = input_copy[\"input\"]\n if isinstance(raw_messages, list) and all(\n isinstance(i, dict) for i in raw_messages\n ):\n raw_messages = [raw_messages]\n if len(raw_messages) == 1:\n input_copy[\"input\"] = messages_from_dict(raw_messages[0])\n else:\n raise InputFormatError(\n \"Batch messages not supported. Please provide a\"\n \" single list of messages.\"\n )\n return input_copy\n else:\n raise InputFormatError(\n f\"Chat Run expects single List[dict] or List[List[dict]] 'messages'\"\n f\" input. Got {inputs}\"\n )"
},
{
"class_start_lineno": 1,
"class_end_lineno": 1523,
"func_start_lineno": 816,
"func_end_lineno": 875,
"func_code": "def _run_llm(\n llm: BaseLanguageModel,\n inputs: Dict[str, Any],\n callbacks: Callbacks,\n *,\n tags: Optional[List[str]] = None,\n input_mapper: Optional[Callable[[Dict], Any]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n) -> Union[str, BaseMessage]:\n \"\"\"\n Run the language model on the example.\n\n Args:\n llm: The language model to run.\n inputs: The input dictionary.\n callbacks: The callbacks to use during the run.\n tags: Optional tags to add to the run.\n input_mapper: function to map to the inputs dictionary from an Example\n Returns:\n The LLMResult or ChatResult.\n Raises:\n ValueError: If the LLM type is unsupported.\n InputFormatError: If the input format is invalid.\n \"\"\"\n # Most of this is legacy code; we could probably remove a lot of it.\n if input_mapper is not None:\n prompt_or_messages = input_mapper(inputs)\n if (\n isinstance(prompt_or_messages, str)\n or isinstance(prompt_or_messages, list)\n and all(isinstance(msg, BaseMessage) for msg in prompt_or_messages)\n ):\n llm_output: Union[str, BaseMessage] = llm.invoke(\n prompt_or_messages,\n config=RunnableConfig(\n callbacks=callbacks, tags=tags or [], metadata=metadata or {}\n ),\n )\n else:\n raise InputFormatError(\n \"Input mapper returned invalid format: \"\n f\" {prompt_or_messages}\"\n \"\\nExpected a single string or list of chat messages.\"\n )\n else:\n try:\n llm_prompts = _get_prompt(inputs)\n llm_output = llm.invoke(\n llm_prompts,\n config=RunnableConfig(\n callbacks=callbacks, tags=tags or [], metadata=metadata or {}\n ),\n )\n except InputFormatError:\n llm_inputs = _get_messages(inputs)\n llm_output = llm.invoke(\n **llm_inputs,\n config=RunnableConfig(callbacks=callbacks, metadata=metadata or {}),\n )\n return llm_output"
}
] |
[
"function_empty"
] |
[
"langchain.smith.evaluation.runner_utils._get_prompt",
"langchain.smith.evaluation.runner_utils._get_messages",
"langchain.smith.evaluation.runner_utils._run_llm"
] |
Python
| 3 | 3 |
{
"total_num": 43,
"base_passed_num": 36
}
|
[
"open-iris.src.iris.utils.math.estimate_diameter",
"open-iris.src.iris.nodes.normalization.nonlinear_normalization.NonlinearNormalization::_generate_correspondences"
] |
open-iris
|
[
"iris/utils/math.py",
"iris/nodes/normalization/nonlinear_normalization.py"
] |
[
"tests/unit_tests/nodes/normalization/test_nonlinear_normalization.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 203,
"func_start_lineno": 38,
"func_end_lineno": 50,
"func_code": "def estimate_diameter(polygon: np.ndarray) -> float:\n \"\"\"Estimates the diameter of an arbitrary arc by evaluating the maximum distance between any two points on the arc.\n\n Args:\n polygon (np.ndarray): Polygon points.\n\n Returns:\n float: Estimated diameter length.\n\n Reference:\n [1] https://sparrow.dev/pairwise-distance-in-numpy/\n \"\"\"\n return float(np.linalg.norm(polygon[:, None, :] - polygon[None, :, :], axis=-1).max())"
},
{
"class_start_lineno": 13,
"class_end_lineno": 113,
"func_start_lineno": 89,
"func_end_lineno": 113,
"func_code": " def _generate_correspondences(self, pupil_points: np.ndarray, iris_points: np.ndarray) -> np.ndarray:\n \"\"\"Generate corresponding positions in original image.\n\n Args:\n pupil_points (np.ndarray): Pupil bounding points. NumPy array of shape (num_points x 2).\n iris_points (np.ndarray): Iris bounding points. NumPy array of shape (num_points x 2).\n\n Returns:\n np.ndarray: generated corresponding points.\n \"\"\"\n pupil_diameter = math.estimate_diameter(pupil_points)\n iris_diameter = math.estimate_diameter(iris_points)\n p2i_ratio = pupil_diameter / iris_diameter\n\n if p2i_ratio <= 0 or p2i_ratio >= 1:\n raise NormalizationError(f\"Invalid pupil to iris ratio, not in the range (0,1): {p2i_ratio}.\")\n\n src_points = np.array(\n [\n pupil_points + x * (iris_points - pupil_points)\n for x in self.params.intermediate_radiuses[round(100 * (p2i_ratio))]\n ]\n )\n\n return np.round(src_points).astype(int)"
}
] |
[
"function_empty"
] |
[
"iris.utils.math.estimate_diameter",
"iris.nodes.normalization.nonlinear_normalization.NonlinearNormalization._generate_correspondences"
] |
Python
| 2 | 2 |
{
"total_num": 3,
"base_passed_num": 2
}
|
[
"open-iris.src.iris.utils.math.area",
"open-iris.src.iris.nodes.vectorization.contouring.filter_polygon_areas",
"open-iris.src.iris.nodes.vectorization.contouring.ContouringAlgorithm::_filter_contours"
] |
open-iris
|
[
"iris/utils/math.py",
"iris/nodes/vectorization/contouring.py",
"iris/nodes/vectorization/contouring.py"
] |
[
"tests/unit_tests/nodes/vectorization/test_contouring.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 203,
"func_start_lineno": 7,
"func_end_lineno": 35,
"func_code": "def area(array: np.ndarray, signed: bool = False) -> float:\n \"\"\"Shoelace formula for simple polygon area calculation.\n\n WARNING: This formula only works for \"simple polygons\", i.e planar polygon without self-intersection nor holes.\n These conditions are not checked within this function.\n\n Args:\n array (np.ndarray): np array representing a polygon as a list of points, i.e. of shape (_, 2).\n signed (bool): If True, the area is signed, i.e. negative if the polygon is oriented clockwise.\n\n Returns:\n float: Polygon area\n\n Raises:\n ValueError: if the input array does not have shape (_, 2)\n\n References:\n [1] https://en.wikipedia.org/wiki/Shoelace_formula\n [2] https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates\n \"\"\"\n if len(array.shape) != 2 or array.shape[1] != 2:\n raise ValueError(f\"Unable to determine the area of a polygon with shape {array.shape}. Expecting (_, 2).\")\n\n xs, ys = array.T\n area = 0.5 * (np.dot(xs, np.roll(ys, 1)) - np.dot(ys, np.roll(xs, 1)))\n if not signed:\n area = abs(area)\n\n return float(area)"
},
{
"class_start_lineno": 1,
"class_end_lineno": 133,
"func_start_lineno": 13,
"func_end_lineno": 35,
"func_code": "def filter_polygon_areas(\n polygons: List[np.ndarray], rel_tr: NonNegativeFloat = 0.03, abs_tr: NonNegativeFloat = 0.0\n) -> List[np.ndarray]:\n \"\"\"Filter out polygons whose area is below either an absolute threshold or a fraction of the largest area.\n\n Args:\n polygons (List[np.ndarray]): List of polygons to filter.\n rel_tr (NonNegativeFloat, optional): Relative threshold. Defaults to 0.03.\n abs_tr (NonNegativeFloat, optional): Absolute threshold. Defaults to 0.0.\n\n Returns:\n List[np.ndarray]: Filtered polygons' list.\n \"\"\"\n areas = [area(polygon) if len(polygon) > 2 else 1.0 for polygon in polygons]\n area_factors = np.array(areas) / np.max(areas)\n\n filtered_polygons = [\n polygon\n for area, area_factor, polygon in zip(areas, area_factors, polygons)\n if area > abs_tr and area_factor > rel_tr\n ]\n\n return filtered_polygons"
},
{
"class_start_lineno": 38,
"class_end_lineno": 133,
"func_start_lineno": 121,
"func_end_lineno": 133,
"func_code": " def _filter_contours(self, contours: List[np.ndarray]) -> List[np.ndarray]:\n \"\"\"Filter contours based on predefined filters.\n\n Args:\n contours (List[np.ndarray]): Contours list.\n\n Returns:\n List[np.ndarray]: Filtered list of contours.\n \"\"\"\n for filter_func in self.params.contour_filters:\n contours = filter_func(contours)\n\n return contours"
}
] |
[
"function_empty"
] |
[
"iris.utils.math.area",
"iris.nodes.vectorization.contouring.filter_polygon_areas",
"iris.nodes.vectorization.contouring.ContouringAlgorithm._filter_contours"
] |
Python
| 3 | 3 |
{
"total_num": 12,
"base_passed_num": 9
}
|
[
"open-iris.src.iris.utils.math.cartesian2polar",
"open-iris.src.iris.nodes.geometry_estimation.linear_extrapolation.LinearExtrapolation::_estimate"
] |
open-iris
|
[
"iris/utils/math.py",
"iris/nodes/geometry_estimation/linear_extrapolation.py"
] |
[
"tests/unit_tests/nodes/geometry_estimation/test_linear_extrapolation.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 203,
"func_start_lineno": 53,
"func_end_lineno": 73,
"func_code": "def cartesian2polar(xs: np.ndarray, ys: np.ndarray, center_x: float, center_y: float) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Convert xs and ys cartesian coordinates to polar coordinates.\n\n Args:\n xs (np.ndarray): x values.\n ys (np.ndarray): y values.\n center_x (float): center's x.\n center_y (float): center's y.\n\n Returns:\n Tuple[np.ndarray, np.ndarray]: Converted coordinates (rhos, phis).\n \"\"\"\n x_rel: np.ndarray = xs - center_x\n y_rel: np.ndarray = ys - center_y\n\n C = np.vectorize(complex)(x_rel, y_rel)\n\n rho = np.abs(C)\n phi = np.angle(C) % (2 * np.pi)\n\n return rho, phi"
},
{
"class_start_lineno": 12,
"class_end_lineno": 82,
"func_start_lineno": 58,
"func_end_lineno": 82,
"func_code": " def _estimate(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:\n \"\"\"Estimate a circle fit for a single contour.\n\n Args:\n vertices (np.ndarray): Contour's vertices.\n center_xy (Tuple[float, float]): Contour's center position.\n\n Returns:\n np.ndarray: Estimated polygon.\n \"\"\"\n rhos, phis = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)\n\n padded_rhos = np.concatenate([rhos, rhos, rhos])\n padded_phis = np.concatenate([phis - 2 * np.pi, phis, phis + 2 * np.pi])\n\n interpolated_phis = np.arange(padded_phis.min(), padded_phis.max(), np.radians(self.params.dphi))\n interpolated_rhos = np.interp(interpolated_phis, xp=padded_phis, fp=padded_rhos, period=2 * np.pi)\n\n mask = (interpolated_phis >= 0) & (interpolated_phis < 2 * np.pi)\n interpolated_phis, interpolated_rhos = interpolated_phis[mask], interpolated_rhos[mask]\n\n xs, ys = math.polar2cartesian(interpolated_rhos, interpolated_phis, *center_xy)\n estimated_vertices = np.column_stack([xs, ys])\n\n return estimated_vertices"
}
] |
[
"function_empty",
"Development"
] |
[
"iris.utils.math.cartesian2polar",
"iris.nodes.geometry_estimation.linear_extrapolation.LinearExtrapolation._estimate"
] |
Python
| 1 | 2 |
{
"total_num": 12,
"base_passed_num": 0
}
|
[
"open-iris.src.iris.callbacks.pipeline_trace.PipelineCallTraceStorage::get",
"open-iris.src.iris.callbacks.pipeline_trace.PipelineCallTraceStorage::__getitem__"
] |
open-iris
|
[
"iris/callbacks/pipeline_trace.py",
"iris/callbacks/pipeline_trace.py"
] |
[
"tests/unit_tests/callbacks/test_pipeline_trace.py"
] |
[
{
"class_start_lineno": 16,
"class_end_lineno": 146,
"func_start_lineno": 52,
"func_end_lineno": 67,
"func_code": " def get(self, result_name: str) -> Any:\n \"\"\"Get result_name result.\n\n Args:\n result_name (str): Result name.\n\n Raises:\n PipelineCallTraceStorageError: Raised if result_name is not found.\n\n Returns:\n Any: Result object.\n \"\"\"\n if result_name not in self._storage.keys():\n raise PipelineCallTraceStorageError(f\"Unknown result name: {result_name}\")\n\n return self._storage[result_name]"
},
{
"class_start_lineno": 16,
"class_end_lineno": 146,
"func_start_lineno": 30,
"func_end_lineno": 42,
"func_code": " def __getitem__(self, result_name: str) -> Any:\n \"\"\"Get result_name result.\n\n Args:\n result_name (str): Result name.\n\n Raises:\n PipelineCallTraceStorageError: Raised if result_name is not found.\n\n Returns:\n Any: Result object.\n \"\"\"\n return self.get(result_name)"
}
] |
[
"function_empty"
] |
[
"iris.callbacks.pipeline_trace.PipelineCallTraceStorage.get",
"iris.callbacks.pipeline_trace.PipelineCallTraceStorage.__getitem__"
] |
Python
| 2 | 2 |
{
"total_num": 8,
"base_passed_num": 2
}
|
[
"open-iris.src.iris.utils.math.cartesian2polar",
"open-iris.src.iris.nodes.eye_properties_estimation.occlusion_calculator.OcclusionCalculator::_get_quantile_points"
] |
open-iris
|
[
"iris/utils/math.py",
"iris/nodes/eye_properties_estimation/occlusion_calculator.py"
] |
[
"tests/unit_tests/nodes/eye_properties_estimation/test_occlusion_calculator.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 203,
"func_start_lineno": 53,
"func_end_lineno": 73,
"func_code": "def cartesian2polar(xs: np.ndarray, ys: np.ndarray, center_x: float, center_y: float) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Convert xs and ys cartesian coordinates to polar coordinates.\n\n Args:\n xs (np.ndarray): x values.\n ys (np.ndarray): y values.\n center_x (float): center's x.\n center_y (float): center's y.\n\n Returns:\n Tuple[np.ndarray, np.ndarray]: Converted coordinates (rhos, phis).\n \"\"\"\n x_rel: np.ndarray = xs - center_x\n y_rel: np.ndarray = ys - center_y\n\n C = np.vectorize(complex)(x_rel, y_rel)\n\n rho = np.abs(C)\n phi = np.angle(C) % (2 * np.pi)\n\n return rho, phi"
},
{
"class_start_lineno": 12,
"class_end_lineno": 142,
"func_start_lineno": 99,
"func_end_lineno": 142,
"func_code": " def _get_quantile_points(\n self, iris_coords: np.ndarray, eye_orientation: EyeOrientation, eye_centers: EyeCenters\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Get those iris's points which fall into a specified quantile.\n\n Args:\n iris_coords (np.ndarray): Iris polygon coordinates.\n eye_orientation: (EyeOrientation): Eye orientation.\n eye_centers: (EyeCenters): Eye centers.\n\n Returns:\n Tuple[np.ndarray, np.ndarray]: Tuple with xs and ys that falls into quantile region.\n \"\"\"\n orientation_angle = np.degrees(eye_orientation.angle)\n num_rotations = -round(orientation_angle * len(iris_coords) / 360.0)\n\n iris_xs, iris_ys = iris_coords[:, 0], iris_coords[:, 1]\n iris_rhos, iris_phis = math.cartesian2polar(iris_xs, iris_ys, eye_centers.iris_x, eye_centers.iris_y)\n\n iris_phis = np.roll(iris_phis, num_rotations, axis=0)\n iris_rhos = np.roll(iris_rhos, num_rotations, axis=0)\n\n scaled_quantile = round(self.params.quantile_angle * len(iris_coords) / 360.0)\n\n phis2mask = np.concatenate(\n [\n iris_phis[:scaled_quantile],\n iris_phis[-scaled_quantile:],\n iris_phis[len(iris_phis) // 2 : len(iris_phis) // 2 + scaled_quantile],\n iris_phis[len(iris_phis) // 2 - scaled_quantile : len(iris_phis) // 2],\n ]\n )\n rhos2mask = np.concatenate(\n [\n iris_rhos[:scaled_quantile],\n iris_rhos[-scaled_quantile:],\n iris_rhos[len(iris_rhos) // 2 : len(iris_rhos) // 2 + scaled_quantile],\n iris_rhos[len(iris_rhos) // 2 - scaled_quantile : len(iris_rhos) // 2],\n ]\n )\n phis2mask, rhos2mask = zip(*sorted(zip(phis2mask, rhos2mask)))\n xs2mask, ys2mask = math.polar2cartesian(rhos2mask, phis2mask, eye_centers.iris_x, eye_centers.iris_y)\n\n return xs2mask, ys2mask"
}
] |
[
"function_empty",
"Development"
] |
[
"iris.utils.math.cartesian2polar",
"iris.nodes.eye_properties_estimation.occlusion_calculator.OcclusionCalculator._get_quantile_points"
] |
Python
| 1 | 2 |
{
"total_num": 19,
"base_passed_num": 1
}
|
[
"open-iris.src.iris.nodes.eye_properties_estimation.bisectors_method.BisectorsMethod::_calculate_perpendicular_bisectors",
"open-iris.src.iris.nodes.eye_properties_estimation.bisectors_method.BisectorsMethod::_find_center_coords"
] |
open-iris
|
[
"iris/nodes/eye_properties_estimation/bisectors_method.py",
"iris/nodes/eye_properties_estimation/bisectors_method.py"
] |
[
"tests/unit_tests/nodes/eye_properties_estimation/test_bisectors_method.py"
] |
[
{
"class_start_lineno": 11,
"class_end_lineno": 170,
"func_start_lineno": 84,
"func_end_lineno": 140,
"func_code": " def _calculate_perpendicular_bisectors(\n self, polygon: np.ndarray, min_distance_between_sector_points_in_px: float\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Calculate the perpendicular bisector of self.params.num_bisectors randomly chosen points from a polygon's vertices.\n A pair of points is used if their distance is larger then min_distance_between_sector_points_in_px.\n\n Args:\n polygon (np.ndarray): np.ndarray based on which we are searching the center of a circular shape.\n min_distance_between_sector_points_in_px (float): Minimum distance between sector points.\n\n Raises:\n EyeCentersEstimationError: Raised if not able to find enough random pairs of points on the arc with a large enough distance!\n\n Returns:\n Tuple[np.ndarray, np.ndarray]: Calculated perpendicular bisectors.\n \"\"\"\n np.random.seed(142857)\n\n bisectors_first_points = np.empty([0, 2])\n bisectors_second_points = np.empty([0, 2])\n for _ in range(self.params.max_iterations):\n random_indices = np.random.choice(len(polygon), size=(self.params.num_bisectors, 2))\n\n first_drawn_points = polygon[random_indices[:, 0]]\n second_drawn_points = polygon[random_indices[:, 1]]\n\n norms = np.linalg.norm(first_drawn_points - second_drawn_points, axis=1)\n mask = norms > min_distance_between_sector_points_in_px\n\n bisectors_first_points = np.vstack([bisectors_first_points, first_drawn_points[mask]])\n bisectors_second_points = np.vstack([bisectors_second_points, second_drawn_points[mask]])\n\n if len(bisectors_first_points) >= self.params.num_bisectors:\n break\n else:\n raise EyeCentersEstimationError(\n \"Not able to find enough random pairs of points on the arc with a large enough distance!\"\n )\n\n bisectors_first_points = bisectors_first_points[: self.params.num_bisectors]\n bisectors_second_points = bisectors_second_points[: self.params.num_bisectors]\n\n bisectors_center = (bisectors_first_points + bisectors_second_points) / 2\n\n # Flip xs with ys and flip sign of on of them to create a 90deg rotation\n inv_bisectors_center_slope = np.fliplr(bisectors_second_points - bisectors_first_points)\n inv_bisectors_center_slope[:, 1] = -inv_bisectors_center_slope[:, 1]\n\n # Add perpendicular vector to center and normalize\n norm = np.linalg.norm(inv_bisectors_center_slope, axis=1)\n inv_bisectors_center_slope[:, 0] /= norm\n inv_bisectors_center_slope[:, 1] /= norm\n\n first_bisectors_point = bisectors_center - inv_bisectors_center_slope\n second_bisectors_point = bisectors_center + inv_bisectors_center_slope\n\n return first_bisectors_point, second_bisectors_point"
},
{
"class_start_lineno": 11,
"class_end_lineno": 170,
"func_start_lineno": 66,
"func_end_lineno": 82,
"func_code": " def _find_center_coords(self, polygon: np.ndarray, diameter: float) -> Tuple[float, float]:\n \"\"\"Find center coordinates of a polygon.\n\n Args:\n polygon (np.ndarray): np.ndarray.\n diameter (float): diameter of the polygon.\n\n Returns:\n Tuple[float, float]: Tuple with the center location coordinates (x, y).\n \"\"\"\n min_distance_between_sector_points_in_px = self.params.min_distance_between_sector_points * diameter\n\n first_bisectors_point, second_bisectors_point = self._calculate_perpendicular_bisectors(\n polygon, min_distance_between_sector_points_in_px\n )\n\n return self._find_best_intersection(first_bisectors_point, second_bisectors_point)"
}
] |
[
"function_empty",
"Development"
] |
[
"iris.nodes.eye_properties_estimation.bisectors_method.BisectorsMethod._calculate_perpendicular_bisectors",
"iris.nodes.eye_properties_estimation.bisectors_method.BisectorsMethod._find_center_coords"
] |
Python
| 1 | 2 |
{
"total_num": 7,
"base_passed_num": 4
}
|
[
"open-iris.src.iris.utils.math.cartesian2polar",
"open-iris.src.iris.nodes.geometry_refinement.smoothing.Smoothing::_cut_into_arcs",
"open-iris.src.iris.nodes.geometry_refinement.smoothing.Smoothing::_smooth_arc",
"open-iris.src.iris.nodes.geometry_refinement.smoothing.Smoothing::_smooth_circular_shape"
] |
open-iris
|
[
"iris/utils/math.py",
"iris/nodes/geometry_refinement/smoothing.py",
"iris/nodes/geometry_refinement/smoothing.py",
"iris/nodes/geometry_refinement/smoothing.py"
] |
[
"tests/unit_tests/nodes/geometry_refinement/test_smoothing.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 203,
"func_start_lineno": 53,
"func_end_lineno": 73,
"func_code": "def cartesian2polar(xs: np.ndarray, ys: np.ndarray, center_x: float, center_y: float) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Convert xs and ys cartesian coordinates to polar coordinates.\n\n Args:\n xs (np.ndarray): x values.\n ys (np.ndarray): y values.\n center_x (float): center's x.\n center_y (float): center's y.\n\n Returns:\n Tuple[np.ndarray, np.ndarray]: Converted coordinates (rhos, phis).\n \"\"\"\n x_rel: np.ndarray = xs - center_x\n y_rel: np.ndarray = ys - center_y\n\n C = np.vectorize(complex)(x_rel, y_rel)\n\n rho = np.abs(C)\n phi = np.angle(C) % (2 * np.pi)\n\n return rho, phi"
},
{
"class_start_lineno": 12,
"class_end_lineno": 256,
"func_start_lineno": 84,
"func_end_lineno": 119,
"func_code": " def _cut_into_arcs(self, polygon: np.ndarray, center_xy: Tuple[float, float]) -> Tuple[List[np.ndarray], int]:\n \"\"\"Cut contour into arcs.\n\n Args:\n polygon (np.ndarray): Contour polygon.\n center_xy (Tuple[float, float]): Polygon's center.\n\n Returns:\n Tuple[List[np.ndarray], int]: Tuple with: (list of list of vertices, number of gaps detected in a contour).\n \"\"\"\n rho, phi = math.cartesian2polar(polygon[:, 0], polygon[:, 1], *center_xy)\n phi, rho = self._sort_two_arrays(phi, rho)\n\n differences = np.abs(phi - np.roll(phi, -1))\n # True distance between first and last point\n differences[-1] = 2 * np.pi - differences[-1]\n\n gap_indices = np.argwhere(differences > np.radians(self.params.gap_threshold)).flatten()\n\n if gap_indices.size < 2:\n return [polygon], gap_indices.size\n\n gap_indices += 1\n phi, rho = np.split(phi, gap_indices), np.split(rho, gap_indices)\n\n arcs = [\n np.column_stack(math.polar2cartesian(rho_coords, phi_coords, *center_xy))\n for rho_coords, phi_coords in zip(rho, phi)\n ]\n\n # Connect arc which lies between 0 and 2π.\n if len(arcs) == gap_indices.size + 1:\n arcs[0] = np.vstack([arcs[0], arcs[-1]])\n arcs = arcs[:-1]\n\n return arcs, gap_indices.size"
},
{
"class_start_lineno": 12,
"class_end_lineno": 256,
"func_start_lineno": 121,
"func_end_lineno": 144,
"func_code": " def _smooth_arc(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:\n \"\"\"Smooth a single contour arc.\n\n Args:\n vertices (np.ndarray): Arc's vertices.\n center_xy (Tuple[float, float]): Center of an entire contour.\n\n Returns:\n np.ndarray: Smoothed arc's vertices.\n \"\"\"\n rho, phi = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)\n phi, rho = self._sort_two_arrays(phi, rho)\n\n idx = self._find_start_index(phi)\n offset = phi[idx]\n relative_phi = (phi - offset) % (2 * np.pi)\n\n smoothed_relative_phi, smoothed_rho = self._smooth_array(relative_phi, rho)\n\n smoothed_phi = (smoothed_relative_phi + offset) % (2 * np.pi)\n\n x_smoothed, y_smoothed = math.polar2cartesian(smoothed_rho, smoothed_phi, *center_xy)\n\n return np.column_stack([x_smoothed, y_smoothed])"
},
{
"class_start_lineno": 12,
"class_end_lineno": 256,
"func_start_lineno": 146,
"func_end_lineno": 168,
"func_code": " def _smooth_circular_shape(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:\n \"\"\"Smooth arc in a form of a circular shape.\n\n Args:\n vertices (np.ndarray): Arc's vertices.\n center_xy (Tuple[float, float]): Center of an entire contour.\n\n Returns:\n np.ndarray: Smoothed arc's vertices.\n \"\"\"\n rho, phi = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)\n\n padded_phi = np.concatenate([phi - 2 * np.pi, phi, phi + 2 * np.pi])\n padded_rho = np.concatenate([rho, rho, rho])\n\n smoothed_phi, smoothed_rho = self._smooth_array(padded_phi, padded_rho)\n\n mask = (smoothed_phi >= 0) & (smoothed_phi < 2 * np.pi)\n rho_smoothed, phi_smoothed = smoothed_rho[mask], smoothed_phi[mask]\n\n x_smoothed, y_smoothed = math.polar2cartesian(rho_smoothed, phi_smoothed, *center_xy)\n\n return np.column_stack([x_smoothed, y_smoothed])"
}
] |
[
"function_empty",
"Development"
] |
[
"iris.utils.math.cartesian2polar",
"iris.nodes.geometry_refinement.smoothing.Smoothing._cut_into_arcs",
"iris.nodes.geometry_refinement.smoothing.Smoothing._smooth_arc",
"iris.nodes.geometry_refinement.smoothing.Smoothing._smooth_circular_shape"
] |
Python
| 3 | 4 |
{
"total_num": 12,
"base_passed_num": 5
}
|
[
"open-iris.src.iris.pipelines.iris_pipeline.IRISPipeline::instanciate_class",
"open-iris.src.iris.pipelines.iris_pipeline.IRISPipeline::instanciate_pipeline",
"open-iris.src.iris.pipelines.iris_pipeline.IRISPipeline::instanciate_node",
"open-iris.src.iris.pipelines.iris_pipeline.IRISPipeline::instanciate_nodes"
] |
open-iris
|
[
"iris/pipelines/iris_pipeline.py",
"iris/pipelines/iris_pipeline.py",
"iris/pipelines/iris_pipeline.py",
"iris/pipelines/iris_pipeline.py"
] |
[
"tests/unit_tests/pipelines/test_iris_pipeline.py"
] |
[
{
"class_start_lineno": 27,
"class_end_lineno": 324,
"func_start_lineno": 225,
"func_end_lineno": 245,
"func_code": " def instanciate_class(self, class_name: str, kwargs: Dict[str, Any]) -> Callable:\n \"\"\"Instanciate a class from its string definition and its kwargs.\n\n This function relies on pydoc.locate, a safe way to instanciate a class from its string definition, which itself relies on pydoc.safe_import.\n\n Args:\n class_name (str): name of the class.\n kwargs (Dict): kwargs to pass to the class at instanciation time\n\n Returns:\n Callable: the instanciated class\n\n Raises:\n IRISPipelineError: Raised if the class cannot be located.\n \"\"\"\n object_class = pydoc.locate(class_name)\n\n if object_class is None:\n raise IRISPipelineError(f\"Could not locate class {class_name}\")\n\n return object_class(**kwargs)"
},
{
"class_start_lineno": 27,
"class_end_lineno": 324,
"func_start_lineno": 179,
"func_end_lineno": 200,
"func_code": " def instanciate_pipeline(self) -> List[PipelineNode]:\n \"\"\"Given a list of PipelineNodes, crawl the parameters and instanciate the PipelineClass available.\n\n Returns:\n List[PipelineNode]: pipeline with instanciated parameters\n \"\"\"\n instanciated_pipeline = []\n for node in self.params.pipeline:\n current_node = node\n for param_name, param_value in node.algorithm.params.items():\n if isinstance(param_value, (tuple, list)):\n for i, value in enumerate(param_value):\n if isinstance(value, PipelineClass):\n current_node.algorithm.params[param_name][i] = self.instanciate_class(\n class_name=value.class_name, kwargs=value.params\n )\n elif isinstance(param_value, PipelineClass):\n current_node.algorithm.params[param_name] = self.instanciate_class(\n class_name=param_value.class_name, kwargs=param_value.params\n )\n instanciated_pipeline.append(current_node)\n return instanciated_pipeline"
},
{
"class_start_lineno": 27,
"class_end_lineno": 324,
"func_start_lineno": 202,
"func_end_lineno": 223,
"func_code": " def instanciate_node(\n self, node_class: str, algorithm_params: Dict[str, Any], callbacks: Optional[List[PipelineClass]]\n ) -> Algorithm:\n \"\"\"Instanciate an Algorithm from its class, kwargs and optional Callbacks.\n\n NOTE: All callbacks of type listed in self.env.disabled_qa will be filtered out. This allows one config file to be used in various QA standards levels.\n\n Args:\n node_class (str): Node's class.\n algorithm_params (Dict[str, Any]): Node's kwargs.\n callbacks (Optional[List[PipelineClass]]): list of callbacks.\n\n Returns:\n Algorithm: instanciated node.\n \"\"\"\n if callbacks is not None and len(callbacks):\n instanciated_callbacks = [self.instanciate_class(cb.class_name, cb.params) for cb in callbacks]\n instanciated_callbacks = [cb for cb in instanciated_callbacks if type(cb) not in self.env.disabled_qa]\n\n algorithm_params = {**algorithm_params, **{\"callbacks\": instanciated_callbacks}}\n\n return self.instanciate_class(node_class, algorithm_params)"
},
{
"class_start_lineno": 27,
"class_end_lineno": 324,
"func_start_lineno": 159,
"func_end_lineno": 177,
"func_code": " def instanciate_nodes(self) -> Dict[str, Algorithm]:\n \"\"\"Given a list of PipelineNode, return the associated instanciated nodes.\n\n NOTE: All nodes of type listed in self.env.disabled_qa will be filtered out. This allows one config file to be used in various QA standards levels.\n\n Returns:\n Dict[str, Algorithm]: instanciated nodes.\n \"\"\"\n instanciated_pipeline = self.instanciate_pipeline()\n nodes = {\n node.name: self.instanciate_node(\n node_class=node.algorithm.class_name,\n algorithm_params=node.algorithm.params,\n callbacks=node.callbacks,\n )\n for node in instanciated_pipeline\n }\n nodes = {node_name: node for node_name, node in nodes.items() if type(node) not in self.env.disabled_qa}\n return nodes"
}
] |
[
"function_empty",
"Development"
] |
[
"iris.pipelines.iris_pipeline.IRISPipeline.instanciate_class",
"iris.pipelines.iris_pipeline.IRISPipeline.instanciate_pipeline",
"iris.pipelines.iris_pipeline.IRISPipeline.instanciate_node",
"iris.pipelines.iris_pipeline.IRISPipeline.instanciate_nodes"
] |
Python
| 3 | 4 |
{
"total_num": 33,
"base_passed_num": 0
}
|
[
"open-iris.src.iris.io.validators.is_binary",
"open-iris.src.iris.nodes.binarization.specular_reflection_detection.SpecularReflectionDetection::run"
] |
open-iris
|
[
"iris/io/validators.py",
"iris/nodes/binarization/specular_reflection_detection.py"
] |
[
"tests/unit_tests/nodes/binarization/test_specular_reflection_detection.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 277,
"func_start_lineno": 40,
"func_end_lineno": 57,
"func_code": "def is_binary(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:\n \"\"\"Check if array has only boolean values, i.e. is binary.\n\n Args:\n cls (type): Class type.\n v (np.ndarray): Value to check.\n field (fields.ModelField): Field descriptor.\n\n Raises:\n ValueError: Exception raised if array doesn't contain bool datatypes.\n\n Returns:\n np.ndarray: `v` sent for further processing.\n \"\"\"\n if v.dtype != np.dtype(\"bool\"):\n raise ValueError(f\"{cls.__name__}: {field.name} must be binary. got dtype {v.dtype}\")\n\n return v"
},
{
"class_start_lineno": 8,
"class_end_lineno": 40,
"func_start_lineno": 26,
"func_end_lineno": 40,
"func_code": " def run(self, ir_image: IRImage) -> NoiseMask:\n \"\"\"Thresholds an IRImage to detect Specular Reflection.\n\n Args:\n ir_image (IRImage): Infrared image object.\n\n Returns:\n NoiseMask: a binary map of the thresholded IRImage.\n \"\"\"\n _, reflection_segmap = cv2.threshold(\n ir_image.img_data, self.params.reflection_threshold, 255, cv2.THRESH_BINARY\n )\n reflection_segmap = (reflection_segmap / 255.0).astype(bool)\n\n return NoiseMask(mask=reflection_segmap)"
}
] |
[
"function_empty"
] |
[
"iris.io.validators.is_binary",
"iris.nodes.binarization.specular_reflection_detection.SpecularReflectionDetection.run"
] |
Python
| 2 | 2 |
{
"total_num": 4,
"base_passed_num": 0
}
|
[
"open-iris.src.iris.nodes.eye_properties_estimation.bisectors_method.BisectorsMethod::_calculate_perpendicular_bisectors",
"open-iris.src.iris.nodes.eye_properties_estimation.bisectors_method.BisectorsMethod::_find_center_coords"
] |
open-iris
|
[
"iris/nodes/eye_properties_estimation/bisectors_method.py",
"iris/nodes/eye_properties_estimation/bisectors_method.py"
] |
[
"tests/unit_tests/nodes/eye_properties_estimation/test_pupil_iris_property_calculator.py"
] |
[
{
"class_start_lineno": 11,
"class_end_lineno": 170,
"func_start_lineno": 84,
"func_end_lineno": 140,
"func_code": " def _calculate_perpendicular_bisectors(\n self, polygon: np.ndarray, min_distance_between_sector_points_in_px: float\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Calculate the perpendicular bisector of self.params.num_bisectors randomly chosen points from a polygon's vertices.\n A pair of points is used if their distance is larger then min_distance_between_sector_points_in_px.\n\n Args:\n polygon (np.ndarray): np.ndarray based on which we are searching the center of a circular shape.\n min_distance_between_sector_points_in_px (float): Minimum distance between sector points.\n\n Raises:\n EyeCentersEstimationError: Raised if not able to find enough random pairs of points on the arc with a large enough distance!\n\n Returns:\n Tuple[np.ndarray, np.ndarray]: Calculated perpendicular bisectors.\n \"\"\"\n np.random.seed(142857)\n\n bisectors_first_points = np.empty([0, 2])\n bisectors_second_points = np.empty([0, 2])\n for _ in range(self.params.max_iterations):\n random_indices = np.random.choice(len(polygon), size=(self.params.num_bisectors, 2))\n\n first_drawn_points = polygon[random_indices[:, 0]]\n second_drawn_points = polygon[random_indices[:, 1]]\n\n norms = np.linalg.norm(first_drawn_points - second_drawn_points, axis=1)\n mask = norms > min_distance_between_sector_points_in_px\n\n bisectors_first_points = np.vstack([bisectors_first_points, first_drawn_points[mask]])\n bisectors_second_points = np.vstack([bisectors_second_points, second_drawn_points[mask]])\n\n if len(bisectors_first_points) >= self.params.num_bisectors:\n break\n else:\n raise EyeCentersEstimationError(\n \"Not able to find enough random pairs of points on the arc with a large enough distance!\"\n )\n\n bisectors_first_points = bisectors_first_points[: self.params.num_bisectors]\n bisectors_second_points = bisectors_second_points[: self.params.num_bisectors]\n\n bisectors_center = (bisectors_first_points + bisectors_second_points) / 2\n\n # Flip xs with ys and flip sign of on of them to create a 90deg rotation\n inv_bisectors_center_slope = np.fliplr(bisectors_second_points - bisectors_first_points)\n inv_bisectors_center_slope[:, 1] = -inv_bisectors_center_slope[:, 1]\n\n # Add perpendicular vector to center and normalize\n norm = np.linalg.norm(inv_bisectors_center_slope, axis=1)\n inv_bisectors_center_slope[:, 0] /= norm\n inv_bisectors_center_slope[:, 1] /= norm\n\n first_bisectors_point = bisectors_center - inv_bisectors_center_slope\n second_bisectors_point = bisectors_center + inv_bisectors_center_slope\n\n return first_bisectors_point, second_bisectors_point"
},
{
"class_start_lineno": 11,
"class_end_lineno": 170,
"func_start_lineno": 66,
"func_end_lineno": 82,
"func_code": " def _find_center_coords(self, polygon: np.ndarray, diameter: float) -> Tuple[float, float]:\n \"\"\"Find center coordinates of a polygon.\n\n Args:\n polygon (np.ndarray): np.ndarray.\n diameter (float): diameter of the polygon.\n\n Returns:\n Tuple[float, float]: Tuple with the center location coordinates (x, y).\n \"\"\"\n min_distance_between_sector_points_in_px = self.params.min_distance_between_sector_points * diameter\n\n first_bisectors_point, second_bisectors_point = self._calculate_perpendicular_bisectors(\n polygon, min_distance_between_sector_points_in_px\n )\n\n return self._find_best_intersection(first_bisectors_point, second_bisectors_point)"
}
] |
[
"function_empty",
"Development"
] |
[
"iris.nodes.eye_properties_estimation.bisectors_method.BisectorsMethod._calculate_perpendicular_bisectors",
"iris.nodes.eye_properties_estimation.bisectors_method.BisectorsMethod._find_center_coords"
] |
Python
| 1 | 2 |
{
"total_num": 13,
"base_passed_num": 8
}
|
[
"rdt.rdt.transformers.utils.strings_from_regex",
"rdt.rdt.transformers.id.RegexGenerator::__setstate__"
] |
rdt
|
[
"rdt/transformers/utils.py",
"rdt/transformers/id.py"
] |
[
"tests/unit/transformers/test___init__.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 385,
"func_start_lineno": 141,
"func_end_lineno": 171,
"func_code": "def strings_from_regex(regex, max_repeat=16):\n \"\"\"Generate strings that match the given regular expression.\n\n The output is a generator that produces regular expressions that match\n the indicated regular expressions alongside an integer indicating the\n total length of the generator.\n\n WARNING: Subpatterns are currently not supported.\n\n Args:\n regex (str):\n String representing a valid python regular expression.\n max_repeat (int):\n Maximum number of repetitions to produce when the regular\n expression allows an infinte amount. Defaults to 16.\n\n Returns:\n tuple:\n * Generator that produces strings that match the given regex.\n * Total length of the generator.\n \"\"\"\n parsed = sre_parse.parse(regex, flags=sre_parse.SRE_FLAG_UNICODE)\n generators = []\n sizes = []\n for option, args in reversed(parsed):\n if option != sre_parse.AT:\n generator, size = _GENERATORS[option](args, max_repeat)\n generators.append((generator, option, args))\n sizes.append(size)\n\n return _from_generators(generators, max_repeat), np.prod(sizes, dtype=np.complex128).real"
},
{
"class_start_lineno": 93,
"class_end_lineno": 285,
"func_start_lineno": 127,
"func_end_lineno": 142,
"func_code": " def __setstate__(self, state):\n \"\"\"Set the generator when pickling.\"\"\"\n generator_size = state.get('generator_size')\n generated = state.get('generated')\n generator, size = strings_from_regex(state.get('regex_format'))\n if generator_size is None:\n state['generator_size'] = size\n if generated is None:\n state['generated'] = 0\n\n if generated:\n for _ in range(generated):\n next(generator)\n\n state['generator'] = generator\n self.__dict__ = state"
}
] |
[
"Development"
] |
[
"rdt.transformers.utils.strings_from_regex",
"rdt.transformers.id.RegexGenerator.__setstate__"
] |
Python
| 0 | 2 |
{
"total_num": 5,
"base_passed_num": 4
}
|
[
"rdt.rdt.transformers.null.NullTransformer::_get_missing_value_replacement",
"rdt.rdt.transformers.null.NullTransformer::fit",
"rdt.rdt.transformers.boolean.BinaryEncoder::_fit"
] |
rdt
|
[
"rdt/transformers/null.py",
"rdt/transformers/null.py",
"rdt/transformers/boolean.py"
] |
[
"tests/unit/transformers/test_boolean.py"
] |
[
{
"class_start_lineno": 13,
"class_end_lineno": 194,
"func_start_lineno": 60,
"func_end_lineno": 93,
"func_code": " def _get_missing_value_replacement(self, data):\n \"\"\"Get the fill value to use for the given data.\n\n Args:\n data (pd.Series):\n The data that is being transformed.\n\n Return:\n object:\n The fill value that needs to be used.\n\n Raise:\n TransformerInputError:\n Error raised when data only contains nans and ``_missing_value_replacement``\n is set to 'mean' or 'mode'.\n \"\"\"\n if self._missing_value_replacement is None:\n return None\n\n if self._missing_value_replacement in {'mean', 'mode', 'random'} and pd.isna(data).all():\n msg = (\n f\"'missing_value_replacement' cannot be set to '{self._missing_value_replacement}'\"\n ' when the provided data only contains NaNs. Using 0 instead.'\n )\n LOGGER.info(msg)\n return 0\n\n if self._missing_value_replacement == 'mean':\n return data.mean()\n\n if self._missing_value_replacement == 'mode':\n return data.mode(dropna=True)[0]\n\n return self._missing_value_replacement"
},
{
"class_start_lineno": 13,
"class_end_lineno": 194,
"func_start_lineno": 95,
"func_end_lineno": 122,
"func_code": " def fit(self, data):\n \"\"\"Fit the transformer to the data.\n\n Evaluate if the transformer has to create the null column or not.\n\n Args:\n data (pandas.Series):\n Data to transform.\n \"\"\"\n self._missing_value_replacement = self._get_missing_value_replacement(data)\n if self._missing_value_replacement == 'random':\n self._min_value = data.min()\n self._max_value = data.max()\n\n if self._missing_value_generation is not None:\n null_values = data.isna().to_numpy()\n self.nulls = null_values.any()\n\n if not self.nulls and self.models_missing_values():\n self._missing_value_generation = None\n guidance_message = (\n f'Guidance: There are no missing values in column {data.name}. '\n 'Extra column not created.'\n )\n LOGGER.info(guidance_message)\n\n if self._missing_value_generation == 'random':\n self._null_percentage = null_values.sum() / len(data)"
},
{
"class_start_lineno": 10,
"class_end_lineno": 129,
"func_start_lineno": 54,
"func_end_lineno": 69,
"func_code": " def _fit(self, data):\n \"\"\"Fit the transformer to the data.\n\n Args:\n data (pandas.Series):\n Data to fit to.\n \"\"\"\n self.null_transformer = NullTransformer(\n self.missing_value_replacement, self.missing_value_generation\n )\n self.null_transformer.fit(data)\n if self.null_transformer.models_missing_values():\n self.output_properties['is_null'] = {\n 'sdtype': 'float',\n 'next_transformer': None,\n }"
}
] |
[
"function_empty",
"Development"
] |
[
"rdt.transformers.null.NullTransformer._get_missing_value_replacement",
"rdt.transformers.null.NullTransformer.fit",
"rdt.transformers.boolean.BinaryEncoder._fit"
] |
Python
| 2 | 3 |
{
"total_num": 16,
"base_passed_num": 13
}
|
[
"rdt.rdt.transformers.utils.check_nan_in_transform",
"rdt.rdt.transformers.categorical.UniformEncoder::_reverse_transform",
"rdt.rdt.transformers.categorical.OneHotEncoder::_reverse_transform",
"rdt.rdt.transformers.categorical.LabelEncoder::_reverse_transform"
] |
rdt
|
[
"rdt/transformers/utils.py",
"rdt/transformers/categorical.py",
"rdt/transformers/categorical.py",
"rdt/transformers/categorical.py"
] |
[
"tests/unit/transformers/test_categorical.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 385,
"func_start_lineno": 208,
"func_end_lineno": 228,
"func_code": "def check_nan_in_transform(data, dtype):\n \"\"\"Check if there are null values in the transformed data.\n\n Args:\n data (pd.Series or numpy.ndarray):\n Data that has been transformed.\n dtype (str):\n Data type of the transformed data.\n \"\"\"\n if pd.isna(data).any().any():\n message = (\n 'There are null values in the transformed data. The reversed '\n 'transformed data will contain null values'\n )\n is_integer = pd.api.types.is_integer_dtype(dtype)\n if is_integer:\n message += \" of type 'float'.\"\n else:\n message += '.'\n\n warnings.warn(message)"
},
{
"class_start_lineno": 21,
"class_end_lineno": 223,
"func_start_lineno": 192,
"func_end_lineno": 223,
"func_code": " def _reverse_transform(self, data):\n \"\"\"Convert float values back to the original categorical values.\n\n Args:\n data (pandas.Series):\n Data to revert.\n\n Returns:\n pandas.Series\n \"\"\"\n check_nan_in_transform(data, self.dtype)\n data = data.clip(0, 1)\n bins = [0]\n labels = []\n nan_name = 'NaN'\n while nan_name in self.intervals.keys():\n nan_name += '_'\n\n for key, interval in self.intervals.items():\n bins.append(interval[1])\n if pd.isna(key):\n labels.append(nan_name)\n else:\n labels.append(key)\n\n result = pd.cut(data, bins=bins, labels=labels, include_lowest=True)\n if nan_name in result.cat.categories:\n result = result.cat.remove_categories(nan_name)\n\n result = try_convert_to_dtype(result, self.dtype)\n\n return result"
},
{
"class_start_lineno": 562,
"class_end_lineno": 705,
"func_start_lineno": 684,
"func_end_lineno": 705,
"func_code": " def _reverse_transform(self, data):\n \"\"\"Convert float values back to the original categorical values.\n\n Args:\n data (pd.Series or numpy.ndarray):\n Data to revert.\n\n Returns:\n pandas.Series\n \"\"\"\n check_nan_in_transform(data, self.dtype)\n if not isinstance(data, np.ndarray):\n data = data.to_numpy()\n\n if data.ndim == 1:\n data = data.reshape(-1, 1)\n\n indices = np.argmax(data, axis=1)\n result = pd.Series(indices).map(self.dummies.__getitem__)\n result = try_convert_to_dtype(result, self.dtype)\n\n return result"
},
{
"class_start_lineno": 708,
"class_end_lineno": 845,
"func_start_lineno": 827,
"func_end_lineno": 845,
"func_code": " def _reverse_transform(self, data):\n \"\"\"Convert float values back to the original categorical values.\n\n Args:\n data (pd.Series or numpy.ndarray):\n Data to revert.\n\n Returns:\n pandas.Series\n \"\"\"\n check_nan_in_transform(data, self.dtype)\n if self.add_noise:\n data = np.floor(data)\n\n data = data.clip(min(self.values_to_categories), max(self.values_to_categories))\n data = data.round().map(self.values_to_categories)\n data = try_convert_to_dtype(data, self.dtype)\n\n return data"
}
] |
[
"function_empty",
"Development"
] |
[
"rdt.transformers.utils.check_nan_in_transform",
"rdt.transformers.categorical.UniformEncoder._reverse_transform",
"rdt.transformers.categorical.OneHotEncoder._reverse_transform",
"rdt.transformers.categorical.LabelEncoder._reverse_transform"
] |
Python
| 3 | 4 |
{
"total_num": 95,
"base_passed_num": 85
}
|
[
"rdt.rdt.transformers.null.NullTransformer::_get_missing_value_replacement",
"rdt.rdt.transformers.null.NullTransformer::fit",
"rdt.rdt.transformers.datetime.UnixTimestampEncoder::_convert_to_datetime",
"rdt.rdt.transformers.datetime.UnixTimestampEncoder::_transform"
] |
rdt
|
[
"rdt/transformers/null.py",
"rdt/transformers/null.py",
"rdt/transformers/datetime.py",
"rdt/transformers/datetime.py",
"rdt/transformers/datetime.py"
] |
[
"tests/unit/transformers/test_datetime.py"
] |
[
{
"class_start_lineno": 13,
"class_end_lineno": 194,
"func_start_lineno": 60,
"func_end_lineno": 93,
"func_code": " def _get_missing_value_replacement(self, data):\n \"\"\"Get the fill value to use for the given data.\n\n Args:\n data (pd.Series):\n The data that is being transformed.\n\n Return:\n object:\n The fill value that needs to be used.\n\n Raise:\n TransformerInputError:\n Error raised when data only contains nans and ``_missing_value_replacement``\n is set to 'mean' or 'mode'.\n \"\"\"\n if self._missing_value_replacement is None:\n return None\n\n if self._missing_value_replacement in {'mean', 'mode', 'random'} and pd.isna(data).all():\n msg = (\n f\"'missing_value_replacement' cannot be set to '{self._missing_value_replacement}'\"\n ' when the provided data only contains NaNs. Using 0 instead.'\n )\n LOGGER.info(msg)\n return 0\n\n if self._missing_value_replacement == 'mean':\n return data.mean()\n\n if self._missing_value_replacement == 'mode':\n return data.mode(dropna=True)[0]\n\n return self._missing_value_replacement"
},
{
"class_start_lineno": 13,
"class_end_lineno": 194,
"func_start_lineno": 95,
"func_end_lineno": 122,
"func_code": " def fit(self, data):\n \"\"\"Fit the transformer to the data.\n\n Evaluate if the transformer has to create the null column or not.\n\n Args:\n data (pandas.Series):\n Data to transform.\n \"\"\"\n self._missing_value_replacement = self._get_missing_value_replacement(data)\n if self._missing_value_replacement == 'random':\n self._min_value = data.min()\n self._max_value = data.max()\n\n if self._missing_value_generation is not None:\n null_values = data.isna().to_numpy()\n self.nulls = null_values.any()\n\n if not self.nulls and self.models_missing_values():\n self._missing_value_generation = None\n guidance_message = (\n f'Guidance: There are no missing values in column {data.name}. '\n 'Extra column not created.'\n )\n LOGGER.info(guidance_message)\n\n if self._missing_value_generation == 'random':\n self._null_percentage = null_values.sum() / len(data)"
},
{
"class_start_lineno": 13,
"class_end_lineno": 230,
"func_start_lineno": 72,
"func_end_lineno": 107,
"func_code": " def _convert_to_datetime(self, data):\n \"\"\"Convert datetime column into datetime dtype.\n\n Convert the datetime column to datetime dtype using the ``datetime_format``.\n All non-numeric columns will automatically be cast to datetimes. Numeric columns\n with a ``datetime_format`` will be treated as strings and cast to datetime. Numeric\n columns without a ``datetime_format`` will be treated as already converted datetimes.\n\n Args:\n data (pandas.Series):\n The datetime column.\n\n Raises:\n - ``TypeError`` if data cannot be converted to datetime.\n - ``ValueError`` if data does not match the specified datetime format\n\n Returns:\n pandas.Series:\n The datetime column converted to the datetime dtype.\n \"\"\"\n if self.datetime_format or not is_numeric_dtype(data):\n try:\n pandas_datetime_format = None\n if self.datetime_format:\n pandas_datetime_format = self.datetime_format.replace('%-', '%')\n\n data = pd.to_datetime(data, format=pandas_datetime_format)\n\n except ValueError as error:\n if 'Unknown string' in str(error) or 'Unknown datetime string' in str(error):\n message = 'Data must be of dtype datetime, or castable to datetime.'\n raise TypeError(message) from None\n\n raise ValueError('Data does not match specified datetime format.') from None\n\n return data"
},
{
"class_start_lineno": 13,
"class_end_lineno": 230,
"func_start_lineno": 109,
"func_end_lineno": 117,
"func_code": " def _transform_helper(self, datetimes):\n \"\"\"Transform datetime values to integer.\"\"\"\n datetimes = self._convert_to_datetime(datetimes)\n nulls = datetimes.isna()\n integers = pd.to_numeric(datetimes, errors='coerce').to_numpy().astype(np.float64)\n integers[nulls] = np.nan\n transformed = pd.Series(integers)\n\n return transformed"
},
{
"class_start_lineno": 13,
"class_end_lineno": 230,
"func_start_lineno": 190,
"func_end_lineno": 201,
"func_code": " def _transform(self, data):\n \"\"\"Transform datetime values to float values.\n\n Args:\n data (pandas.Series):\n Data to transform.\n\n Returns:\n numpy.ndarray\n \"\"\"\n data = self._transform_helper(data)\n return self.null_transformer.transform(data)"
}
] |
[
"function_empty",
"Development"
] |
[
"rdt.transformers.null.NullTransformer._get_missing_value_replacement",
"rdt.transformers.null.NullTransformer.fit",
"rdt.transformers.datetime.UnixTimestampEncoder._convert_to_datetime",
"rdt.transformers.datetime.UnixTimestampEncoder._transform_helper",
"rdt.transformers.datetime.UnixTimestampEncoder._transform"
] |
Python
| 3 | 4 |
{
"total_num": 32,
"base_passed_num": 21
}
|
[
"rdt.rdt.transformers.null.NullTransformer::_get_missing_value_replacement",
"rdt.rdt.transformers.null.NullTransformer::fit"
] |
rdt
|
[
"rdt/transformers/null.py",
"rdt/transformers/null.py"
] |
[
"tests/unit/transformers/test_null.py"
] |
[
{
"class_start_lineno": 13,
"class_end_lineno": 194,
"func_start_lineno": 60,
"func_end_lineno": 93,
"func_code": " def _get_missing_value_replacement(self, data):\n \"\"\"Get the fill value to use for the given data.\n\n Args:\n data (pd.Series):\n The data that is being transformed.\n\n Return:\n object:\n The fill value that needs to be used.\n\n Raise:\n TransformerInputError:\n Error raised when data only contains nans and ``_missing_value_replacement``\n is set to 'mean' or 'mode'.\n \"\"\"\n if self._missing_value_replacement is None:\n return None\n\n if self._missing_value_replacement in {'mean', 'mode', 'random'} and pd.isna(data).all():\n msg = (\n f\"'missing_value_replacement' cannot be set to '{self._missing_value_replacement}'\"\n ' when the provided data only contains NaNs. Using 0 instead.'\n )\n LOGGER.info(msg)\n return 0\n\n if self._missing_value_replacement == 'mean':\n return data.mean()\n\n if self._missing_value_replacement == 'mode':\n return data.mode(dropna=True)[0]\n\n return self._missing_value_replacement"
},
{
"class_start_lineno": 13,
"class_end_lineno": 194,
"func_start_lineno": 95,
"func_end_lineno": 122,
"func_code": " def fit(self, data):\n \"\"\"Fit the transformer to the data.\n\n Evaluate if the transformer has to create the null column or not.\n\n Args:\n data (pandas.Series):\n Data to transform.\n \"\"\"\n self._missing_value_replacement = self._get_missing_value_replacement(data)\n if self._missing_value_replacement == 'random':\n self._min_value = data.min()\n self._max_value = data.max()\n\n if self._missing_value_generation is not None:\n null_values = data.isna().to_numpy()\n self.nulls = null_values.any()\n\n if not self.nulls and self.models_missing_values():\n self._missing_value_generation = None\n guidance_message = (\n f'Guidance: There are no missing values in column {data.name}. '\n 'Extra column not created.'\n )\n LOGGER.info(guidance_message)\n\n if self._missing_value_generation == 'random':\n self._null_percentage = null_values.sum() / len(data)"
}
] |
[
"function_empty"
] |
[
"rdt.transformers.null.NullTransformer._get_missing_value_replacement",
"rdt.transformers.null.NullTransformer.fit"
] |
Python
| 2 | 2 |
{
"total_num": 24,
"base_passed_num": 13
}
|
[
"rdt.rdt.transformers.null.NullTransformer::_get_missing_value_replacement",
"rdt.rdt.transformers.null.NullTransformer::fit",
"rdt.rdt.transformers.null.NullTransformer::reverse_transform",
"rdt.rdt.transformers.numerical.FloatFormatter::_reverse_transform"
] |
rdt
|
[
"rdt/transformers/null.py",
"rdt/transformers/null.py",
"rdt/transformers/null.py",
"rdt/transformers/numerical.py"
] |
[
"tests/unit/transformers/test_numerical.py"
] |
[
{
"class_start_lineno": 13,
"class_end_lineno": 194,
"func_start_lineno": 60,
"func_end_lineno": 93,
"func_code": " def _get_missing_value_replacement(self, data):\n \"\"\"Get the fill value to use for the given data.\n\n Args:\n data (pd.Series):\n The data that is being transformed.\n\n Return:\n object:\n The fill value that needs to be used.\n\n Raise:\n TransformerInputError:\n Error raised when data only contains nans and ``_missing_value_replacement``\n is set to 'mean' or 'mode'.\n \"\"\"\n if self._missing_value_replacement is None:\n return None\n\n if self._missing_value_replacement in {'mean', 'mode', 'random'} and pd.isna(data).all():\n msg = (\n f\"'missing_value_replacement' cannot be set to '{self._missing_value_replacement}'\"\n ' when the provided data only contains NaNs. Using 0 instead.'\n )\n LOGGER.info(msg)\n return 0\n\n if self._missing_value_replacement == 'mean':\n return data.mean()\n\n if self._missing_value_replacement == 'mode':\n return data.mode(dropna=True)[0]\n\n return self._missing_value_replacement"
},
{
"class_start_lineno": 13,
"class_end_lineno": 194,
"func_start_lineno": 95,
"func_end_lineno": 122,
"func_code": " def fit(self, data):\n \"\"\"Fit the transformer to the data.\n\n Evaluate if the transformer has to create the null column or not.\n\n Args:\n data (pandas.Series):\n Data to transform.\n \"\"\"\n self._missing_value_replacement = self._get_missing_value_replacement(data)\n if self._missing_value_replacement == 'random':\n self._min_value = data.min()\n self._max_value = data.max()\n\n if self._missing_value_generation is not None:\n null_values = data.isna().to_numpy()\n self.nulls = null_values.any()\n\n if not self.nulls and self.models_missing_values():\n self._missing_value_generation = None\n guidance_message = (\n f'Guidance: There are no missing values in column {data.name}. '\n 'Extra column not created.'\n )\n LOGGER.info(guidance_message)\n\n if self._missing_value_generation == 'random':\n self._null_percentage = null_values.sum() / len(data)"
},
{
"class_start_lineno": 13,
"class_end_lineno": 194,
"func_start_lineno": 165,
"func_end_lineno": 194,
"func_code": " def reverse_transform(self, data):\n \"\"\"Restore null values to the data.\n\n If a null indicator column was created during fit, use it as a reference.\n Otherwise, randomly replace values with ``np.nan``. The percentage of values\n that will be replaced is the percentage of null values seen in the fitted data.\n\n Args:\n data (numpy.ndarray):\n Data to transform.\n\n Returns:\n pandas.Series\n \"\"\"\n data = data.copy()\n if self._missing_value_generation == 'from_column':\n if self.nulls:\n isna = data[:, 1] > 0.5\n\n data = data[:, 0]\n\n elif self.nulls:\n isna = np.random.random((len(data),)) < self._null_percentage\n\n data = pd.Series(data)\n\n if self.nulls and isna.any():\n data.loc[isna] = np.nan\n\n return data"
},
{
"class_start_lineno": 29,
"class_end_lineno": 245,
"func_start_lineno": 170,
"func_end_lineno": 201,
"func_code": " def _reverse_transform(self, data):\n \"\"\"Convert data back into the original format.\n\n Args:\n data (pd.Series or numpy.ndarray):\n Data to transform.\n\n Returns:\n numpy.ndarray\n \"\"\"\n if not isinstance(data, np.ndarray):\n data = data.to_numpy()\n\n data = self.null_transformer.reverse_transform(data)\n if self.enforce_min_max_values:\n data = data.clip(self._min_value, self._max_value)\n elif not self.computer_representation.startswith('Float'):\n min_bound, max_bound = INTEGER_BOUNDS[self.computer_representation]\n data = data.clip(min_bound, max_bound)\n\n is_integer = pd.api.types.is_integer_dtype(self._dtype)\n np_integer_with_nans = (\n not pd.api.types.is_extension_array_dtype(self._dtype)\n and is_integer\n and pd.isna(data).any()\n )\n if self.learn_rounding_scheme and self._rounding_digits is not None:\n data = data.round(self._rounding_digits)\n elif is_integer:\n data = data.round(0)\n\n return data.astype(self._dtype if not np_integer_with_nans else 'float64')"
}
] |
[
"function_empty"
] |
[
"rdt.transformers.null.NullTransformer._get_missing_value_replacement",
"rdt.transformers.null.NullTransformer.fit",
"rdt.transformers.null.NullTransformer.reverse_transform",
"rdt.transformers.numerical.FloatFormatter._reverse_transform"
] |
Python
| 4 | 4 |
{
"total_num": 90,
"base_passed_num": 61
}
|
[
"transformers.src.transformers.image_utils.infer_channel_dimension_format",
"transformers.src.transformers.image_transforms.to_channel_dimension_format",
"transformers.src.transformers.image_utils.get_image_size"
] |
transformers
|
[
"transformers/image_utils.py",
"transformers/image_transforms.py",
"transformers/image_utils.py",
"transformers/image_transforms.py",
"transformers/image_transforms.py"
] |
[
"tests/test_image_transforms.py"
] |
[
{
"class_start_lineno": 1,
"class_end_lineno": 811,
"func_start_lineno": 220,
"func_end_lineno": 254,
"func_code": "def infer_channel_dimension_format(\n image: np.ndarray, num_channels: Optional[Union[int, Tuple[int, ...]]] = None\n) -> ChannelDimension:\n \"\"\"\n Infers the channel dimension format of `image`.\n\n Args:\n image (`np.ndarray`):\n The image to infer the channel dimension of.\n num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`):\n The number of channels of the image.\n\n Returns:\n The channel dimension of the image.\n \"\"\"\n num_channels = num_channels if num_channels is not None else (1, 3)\n num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels\n\n if image.ndim == 3:\n first_dim, last_dim = 0, 2\n elif image.ndim == 4:\n first_dim, last_dim = 1, 3\n else:\n raise ValueError(f\"Unsupported number of image dimensions: {image.ndim}\")\n\n if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels:\n logger.warning(\n f\"The channel dimension is ambiguous. Got image shape {image.shape}. Assuming channels are the first dimension.\"\n )\n return ChannelDimension.FIRST\n elif image.shape[first_dim] in num_channels:\n return ChannelDimension.FIRST\n elif image.shape[last_dim] in num_channels:\n return ChannelDimension.LAST\n raise ValueError(\"Unable to infer channel dimension format\")"
},
{
"class_start_lineno": 1,
"class_end_lineno": 854,
"func_start_lineno": 58,
"func_end_lineno": 94,
"func_code": "def to_channel_dimension_format(\n image: np.ndarray,\n channel_dim: Union[ChannelDimension, str],\n input_channel_dim: Optional[Union[ChannelDimension, str]] = None,\n) -> np.ndarray:\n \"\"\"\n Converts `image` to the channel dimension format specified by `channel_dim`.\n\n Args:\n image (`numpy.ndarray`):\n The image to have its channel dimension set.\n channel_dim (`ChannelDimension`):\n The channel dimension format to use.\n input_channel_dim (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred from the input image.\n\n Returns:\n `np.ndarray`: The image with the channel dimension set to `channel_dim`.\n \"\"\"\n if not isinstance(image, np.ndarray):\n raise TypeError(f\"Input image must be of type np.ndarray, got {type(image)}\")\n\n if input_channel_dim is None:\n input_channel_dim = infer_channel_dimension_format(image)\n\n target_channel_dim = ChannelDimension(channel_dim)\n if input_channel_dim == target_channel_dim:\n return image\n\n if target_channel_dim == ChannelDimension.FIRST:\n image = image.transpose((2, 0, 1))\n elif target_channel_dim == ChannelDimension.LAST:\n image = image.transpose((1, 2, 0))\n else:\n raise ValueError(\"Unsupported channel dimension format: {}\".format(channel_dim))\n\n return image"
},
{
"class_start_lineno": 1,
"class_end_lineno": 811,
"func_start_lineno": 281,
"func_end_lineno": 302,
"func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")"
},
{
"class_start_lineno": 1,
"class_end_lineno": 854,
"func_start_lineno": 774,
"func_end_lineno": 809,
"func_code": "def flip_channel_order(\n image: np.ndarray,\n data_format: Optional[ChannelDimension] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Flips the channel order of the image.\n\n If the image is in RGB format, it will be converted to BGR and vice versa.\n\n Args:\n image (`np.ndarray`):\n The image to flip.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format for the input image. Can be one of:\n - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n \"\"\"\n input_data_format = infer_channel_dimension_format(image) if input_data_format is None else input_data_format\n\n if input_data_format == ChannelDimension.LAST:\n image = image[..., ::-1]\n elif input_data_format == ChannelDimension.FIRST:\n image = image[::-1, ...]\n else:\n raise ValueError(f\"Unsupported channel dimension: {input_data_format}\")\n\n if data_format is not None:\n image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)\n return image"
},
{
"class_start_lineno": 1,
"class_end_lineno": 854,
"func_start_lineno": 214,
"func_end_lineno": 278,
"func_code": "def get_resize_output_image_size(\n input_image: np.ndarray,\n size: Union[int, Tuple[int, int], List[int], Tuple[int]],\n default_to_square: bool = True,\n max_size: Optional[int] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> tuple:\n \"\"\"\n Find the target (height, width) dimension of the output image after resizing given the input image and the desired\n size.\n\n Args:\n input_image (`np.ndarray`):\n The image to resize.\n size (`int` or `Tuple[int, int]` or List[int] or `Tuple[int]`):\n The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to\n this.\n\n If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If\n `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this\n number. i.e, if height > width, then image will be rescaled to (size * height / width, size).\n default_to_square (`bool`, *optional*, defaults to `True`):\n How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square\n (`size`,`size`). If set to `False`, will replicate\n [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)\n with support for resizing only the smallest edge and providing an optional `max_size`.\n max_size (`int`, *optional*):\n The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater\n than `max_size` after being resized according to `size`, then the image is resized again so that the longer\n edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter\n than `size`. Only used if `default_to_square` is `False`.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. If unset, will use the inferred format from the input.\n\n Returns:\n `tuple`: The target (height, width) dimension of the output image after resizing.\n \"\"\"\n if isinstance(size, (tuple, list)):\n if len(size) == 2:\n return tuple(size)\n elif len(size) == 1:\n # Perform same logic as if size was an int\n size = size[0]\n else:\n raise ValueError(\"size must have 1 or 2 elements if it is a list or tuple\")\n\n if default_to_square:\n return (size, size)\n\n height, width = get_image_size(input_image, input_data_format)\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size\n\n new_short, new_long = requested_new_short, int(requested_new_short * long / short)\n\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(\n f\"max_size = {max_size} must be strictly greater than the requested \"\n f\"size for the smaller edge size = {size}\"\n )\n if new_long > max_size:\n new_short, new_long = int(max_size * new_short / new_long), max_size\n\n return (new_long, new_short) if width <= height else (new_short, new_long)"
}
] |
[
"function_empty"
] |
[
"transformers.image_utils.infer_channel_dimension_format",
"transformers.image_transforms.to_channel_dimension_format",
"transformers.image_utils.get_image_size",
"transformers.image_transforms.flip_channel_order",
"transformers.image_transforms.get_resize_output_image_size"
] |
Python
| 3 | 3 |
{
"total_num": 24,
"base_passed_num": 5
}
|