id: list
project: string
origin_file: list
test_list: list
prob_info: list
type: list
node: list
language: string
toolfunc_count: int64
func_count: int64
pytest_info: dict
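The records that follow all conform to the schema above. As a reading aid, here is a minimal, hypothetical sketch of one record expressed as Python typed dictionaries; the class names (`Record`, `ProbInfo`) and the concrete Python types are assumptions inferred from the rows below, not part of the dataset itself.

```python
# Hypothetical shape of a single record; field names follow the schema listing,
# Python types are assumed equivalents of the dtypes shown (list, string, int64, dict).
from typing import TypedDict


class ProbInfo(TypedDict):
    # Line spans and reference implementation of one target function.
    class_start_lineno: int
    class_end_lineno: int
    func_start_lineno: int
    func_end_lineno: int
    func_code: str


class Record(TypedDict):
    id: list[str]              # qualified targets, e.g. "finam.src.finam.sdk.output.Output::push_info"
    project: str               # repository name, e.g. "finam" or "skfolio"
    origin_file: list[str]     # source files containing the target functions
    test_list: list[str]       # pytest files that exercise the task
    prob_info: list[ProbInfo]  # reference implementations with line spans
    type: list[str]            # task tags, e.g. ["function_empty", "TDD"]
    node: list[str]            # dotted import paths of the target functions
    language: str              # "Python" for all records shown here
    toolfunc_count: int
    func_count: int
    pytest_info: dict          # e.g. {"total_num": 16, "base_passed_num": 1}
```

For example, the first finam record below would populate `pytest_info` with `{"total_num": 16, "base_passed_num": 1}` and carry the task tag `["function_empty"]`.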
[ "finam.src.finam.sdk.output.Output::push_info", "finam.src.finam.sdk.component.IOList::add", "finam.src.finam.data.tools.mask.mask_specified", "finam.src.finam.data.tools.mask.masks_compatible", "finam.src.finam.data.tools.info.Info::accepts" ]
finam
[ "finam/sdk/output.py", "finam/sdk/output.py", "finam/sdk/component.py", "finam/data/tools/mask.py", "finam/data/tools/info.py", "finam/data/tools/mask.py", "finam/data/tools/info.py" ]
[ "tests/components/test_control.py", "tests/components/test_parametric.py", "tests/core/test_units.py" ]
[ { "class_start_lineno": 25, "class_end_lineno": 461, "func_start_lineno": 204, "func_end_lineno": 216, "func_code": " def push_info(self, info):\n \"\"\"Push data info into the output.\n\n Parameters\n ----------\n info : :class:`.Info`\n Delivered data info\n \"\"\"\n self.logger.trace(\"push info\")\n if not isinstance(info, Info):\n with ErrorLogger(self.logger):\n raise FinamMetaDataError(\"Metadata must be of type Info\")\n self._output_info = info" }, { "class_start_lineno": 25, "class_end_lineno": 461, "func_start_lineno": 28, "func_end_lineno": 53, "func_code": " def __init__(self, name=None, info=None, static=False, **info_kwargs):\n Loggable.__init__(self)\n self._targets = []\n self.data = []\n self._output_info = None\n self.base_logger_name = None\n if name is None:\n raise ValueError(\"Output: needs a name.\")\n self._name = name\n self._static = static\n\n if info_kwargs:\n if info is not None:\n raise ValueError(\"Output: can't use **kwargs in combination with info\")\n info = Info(**info_kwargs)\n if info is not None:\n self.push_info(info)\n\n self._connected_inputs = {}\n self._out_infos_exchanged = 0\n\n self._time = None\n self._mem_limit = None\n self._mem_location = None\n self._total_mem = 0\n self._mem_counter = 0" }, { "class_start_lineno": 572, "class_end_lineno": 711, "func_start_lineno": 602, "func_end_lineno": 635, "func_code": " def add(self, io=None, *, name=None, info=None, static=False, **info_kwargs):\n \"\"\"\n Add a new IO object either directly ob by attributes.\n\n Parameters\n ----------\n io : :class:`.IInput` or :class:`.IOutput`, optional\n IO object to add, by default None\n name : str, optional\n Name of the new IO object to add, by default None\n info : :class:`.Info`, optional\n Info of the new IO object to add, by default None\n static : bool, optional\n Whether the new IO object in static, by default False\n **info_kwargs\n Optional keyword arguments to instantiate an Info object\n\n Raises\n ------\n ValueError\n If io is not of the correct type.\n \"\"\"\n if self.frozen:\n raise ValueError(\"IO.add: list is frozen.\")\n io = (\n self.cls(name=name, info=info, static=static, **info_kwargs)\n if io is None\n else io\n )\n if not isinstance(io, self.icls):\n raise ValueError(f\"IO.add: {self.name} is not of type {self.iname}\")\n if io.name in self._dict:\n raise ValueError(f\"IO.add: {self.name} '{io.name}' already exists.\")\n self._dict[io.name] = io" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 364, "func_end_lineno": 378, "func_code": "def mask_specified(mask):\n \"\"\"\n Determine whether given mask selection indicates a masked array.\n\n Parameters\n ----------\n mask : :any:`Mask` value or valid boolean mask for :any:`MaskedArray`\n mask to check\n\n Returns\n -------\n bool\n False if mask is Mask.FLEX or Mask.NONE, True otherwise\n \"\"\"\n return not any(mask is val for val in list(Mask))" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 87, "func_end_lineno": 89, "func_code": " def mask(self):\n \"\"\"Mask or ndarray: data mask.\"\"\"\n return self._mask" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 243, "func_end_lineno": 285, "func_code": "def masks_compatible(\n this, incoming, incoming_donwstream, this_grid=None, incoming_grid=None\n):\n \"\"\"\n Check if an incoming mask is compatible with a given mask.\n\n Parameters\n ----------\n this : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n mask specification to 
check against\n incoming : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n incoming mask to check for compatibility\n incoming_donwstream : bool\n Whether the incoming mask is from downstream data\n this_grid : Grid or NoGrid or None, optional\n grid for first mask (to check shape and value equality)\n incoming_grid : Grid or NoGrid or None, optional\n grid for second mask (to check shape and value equality)\n\n Returns\n -------\n bool\n mask compatibility\n \"\"\"\n if incoming_donwstream:\n upstream, downstream = this, incoming\n up_grid, down_grid = this_grid, incoming_grid\n else:\n upstream, downstream = incoming, this\n up_grid, down_grid = incoming_grid, this_grid\n # None is incompatible\n if upstream is None:\n return False\n # Mask.FLEX accepts anything, Mask.NONE only Mask.NONE\n if not mask_specified(downstream):\n if not mask_specified(upstream):\n return downstream == Mask.FLEX or upstream == Mask.NONE\n return downstream == Mask.FLEX\n # if mask is specified, upstream mask must also be specified\n if not mask_specified(upstream):\n return False\n # if both mask given, compare them\n return masks_equal(downstream, upstream, down_grid, up_grid)" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 157, "func_end_lineno": 201, "func_code": " def accepts(self, incoming, fail_info, incoming_donwstream=False):\n \"\"\"\n Tests whether this info can accept/is compatible with an incoming info.\n\n Tested attributes are: \"grid\", \"mask\" and \"units\"\n\n Parameters\n ----------\n incoming : Info\n Incoming/source info to check. This is the info from upstream.\n fail_info : dict\n Dictionary that will be filled with failed properties; name: (source, target).\n incoming_donwstream : bool, optional\n Whether the incoming info is from downstream data. Default: False\n\n Returns\n -------\n bool\n Whether the incoming info is accepted\n \"\"\"\n if not isinstance(incoming, Info):\n fail_info[\"type\"] = (incoming.__class__, self.__class__)\n return False\n\n success = True\n if self.grid is not None and not self.grid.compatible_with(incoming.grid):\n if not (incoming_donwstream and incoming.grid is None):\n fail_info[\"grid\"] = (incoming.grid, self.grid)\n success = False\n\n if self.mask is not None and not masks_compatible(\n self.mask, incoming.mask, incoming_donwstream, self.grid, incoming.grid\n ):\n if not (incoming_donwstream and incoming.mask is None):\n fail_info[\"mask\"] = (incoming.mask, self.mask)\n success = False\n\n u1_none = (u1 := self.units) is None\n u2_none = (u2 := incoming.units) is None\n if not u1_none and (u2_none or not compatible_units(u1, u2)):\n if not (incoming_donwstream and u2_none):\n fail_info[\"units\"] = (u2, u1)\n success = False\n\n return success" } ]
[ "function_empty" ]
[ "finam.sdk.output.Output.push_info", "finam.sdk.output.Output.__init__", "finam.sdk.component.IOList.add", "finam.data.tools.mask.mask_specified", "finam.data.tools.info.Info.mask", "finam.data.tools.mask.masks_compatible", "finam.data.tools.info.Info.accepts" ]
Python
5
5
{ "total_num": 16, "base_passed_num": 1 }
[ "finam.src.finam.data.grid_tools.gen_axes", "finam.src.finam.data.grid_spec.EsriGrid::to_uniform", "finam.src.finam.data.grid_tools.prepare_vtk_data", "finam.src.finam.data.grid_tools.prepare_vtk_kwargs", "finam.src.finam.data.grid_spec.UniformGrid::export_vtk" ]
finam
[ "finam/data/grid_tools.py", "finam/data/grid_spec.py", "finam/data/grid_spec.py", "finam/data/grid_tools.py", "finam/data/grid_tools.py", "finam/data/grid_spec.py" ]
[ "tests/data/test_grid_spec.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 526, "func_start_lineno": 78, "func_end_lineno": 107, "func_code": "def gen_axes(dims, spacing, origin, axes_increase=None):\n \"\"\"\n Generate uniform axes.\n\n Parameters\n ----------\n dims : iterable\n Dimensions of the uniform grid for each direction.\n spacing : iterable\n Spacing of the uniform in each dimension. Must be positive.\n origin : iterable\n Origin of the uniform grid.\n axes_increase : arraylike or None, optional\n False to indicate a bottom up axis (in xyz order), by default None\n\n Returns\n -------\n list of np.ndarray\n Axes of the uniform grid.\n \"\"\"\n if axes_increase is None:\n axes_increase = np.full(len(dims), True, dtype=bool)\n if len(axes_increase) != len(dims):\n raise ValueError(\"gen_axes: wrong length of 'axes_increase'\")\n axes = []\n for i, d in enumerate(dims):\n axes.append(np.arange(d) * spacing[i] + origin[i])\n if not axes_increase[i]:\n axes[i] = axes[i][::-1]\n return axes" }, { "class_start_lineno": 236, "class_end_lineno": 364, "func_start_lineno": 267, "func_end_lineno": 296, "func_code": " def __init__(\n self,\n dims,\n spacing=(1.0, 1.0, 1.0),\n origin=(0.0, 0.0, 0.0),\n data_location=Location.CELLS,\n order=\"F\",\n axes_reversed=False,\n axes_increase=None,\n axes_attributes=None,\n axes_names=None,\n crs=None,\n ):\n # at most 3 axes\n dims = tuple(dims)[:3]\n self.spacing = tuple(spacing)[: len(dims)]\n if len(self.spacing) < len(dims):\n raise ValueError(\"UniformGrid: wrong length of 'spacing'\")\n self.origin = tuple(origin)[: len(dims)]\n if len(self.origin) < len(dims):\n raise ValueError(\"UniformGrid: wrong length of 'origin'\")\n super().__init__(\n axes=gen_axes(dims, self.spacing, self.origin, axes_increase),\n data_location=data_location,\n order=order,\n axes_reversed=axes_reversed,\n axes_attributes=axes_attributes,\n axes_names=axes_names,\n crs=crs,\n )" }, { "class_start_lineno": 367, "class_end_lineno": 471, "func_start_lineno": 451, "func_end_lineno": 471, "func_code": " def to_uniform(self):\n \"\"\"\n Cast grid to an uniform grid.\n\n Returns\n -------\n UniformGrid\n Grid as uniform grid.\n \"\"\"\n return UniformGrid(\n dims=self.dims,\n spacing=self.spacing,\n origin=self.origin,\n data_location=self.data_location,\n order=self.order,\n axes_reversed=self.axes_reversed,\n axes_increase=self.axes_increase,\n axes_attributes=self.axes_attributes,\n axes_names=self.axes_names,\n crs=self.crs,\n )" }, { "class_start_lineno": 1, "class_end_lineno": 526, "func_start_lineno": 332, "func_end_lineno": 363, "func_code": "def prepare_vtk_data(\n data, axes_reversed=False, axes_increase=None, flat=False, order=\"F\"\n):\n \"\"\"\n Prepare data dictionary for VTK export.\n\n Parameters\n ----------\n data : dict or None\n Dictionary containing data arrays by name.\n axes_reversed : bool, optional\n Indicate reversed axes order for the associated data, by default False\n axes_increase : arraylike or None, optional\n False to indicate a bottom up axis (xyz order), by default None\n flat : bool, optional\n True to flatten data, by default False\n order : str, optional\n Point and cell ordering.\n Either Fortran-like (\"F\") or C-like (\"C\"), by default \"F\"\n\n Returns\n -------\n dict or None\n Prepared data.\n \"\"\"\n if data is not None:\n data = dict(data)\n for name, value in data.items():\n data[name] = np.ascontiguousarray(\n _prepare(value, axes_reversed, axes_increase, flat, order)\n )\n return data" }, { "class_start_lineno": 1, "class_end_lineno": 526, 
"func_start_lineno": 295, "func_end_lineno": 329, "func_code": "def prepare_vtk_kwargs(data_location, data, cell_data, point_data, field_data):\n \"\"\"\n Prepare keyword arguments for evtk routines.\n\n Parameters\n ----------\n data_location : Location\n Data location in the grid, by default Location.CELLS\n data : dict or None\n Data in the corresponding shape given by name\n cell_data : dict or None\n Additional cell data\n point_data : dict or None\n Additional point data\n field_data : dict or None\n Additional field data\n\n Returns\n -------\n dict\n Keyword arguments.\n \"\"\"\n cdat = data_location == Location.CELLS\n kw = {\"cellData\": None, \"pointData\": None, \"fieldData\": None}\n kw[\"cellData\" if cdat else \"pointData\"] = data\n if kw[\"cellData\"]:\n kw[\"cellData\"].update(cell_data if cell_data is not None else {})\n else:\n kw[\"cellData\"] = cell_data\n if kw[\"pointData\"]:\n kw[\"pointData\"].update(point_data if point_data is not None else {})\n else:\n kw[\"pointData\"] = point_data\n kw[\"fieldData\"] = field_data\n return kw" }, { "class_start_lineno": 236, "class_end_lineno": 364, "func_start_lineno": 298, "func_end_lineno": 342, "func_code": " def export_vtk(\n self,\n path,\n data=None,\n cell_data=None,\n point_data=None,\n field_data=None,\n mesh_type=\"uniform\",\n ):\n \"\"\"\n Export grid and data to a VTK file.\n\n Parameters\n ----------\n path : pathlike\n File path.\n Suffix will be replaced according to mesh type (.vti, .vtr, .vtu)\n data : dict or None, optional\n Data in the corresponding shape given by name, by default None\n cell_data : dict or None, optional\n Additional cell data, by default None\n point_data : dict or None, optional\n Additional point data, by default None\n field_data : dict or None, optional\n Additional field data, by default None\n mesh_type : str, optional\n Mesh type (\"uniform\"/\"structured\"/\"unstructured\"),\n by default \"structured\"\n\n Raises\n ------\n ValueError\n If mesh type is not supported.\n \"\"\"\n if mesh_type != \"uniform\":\n super().export_vtk(path, data, cell_data, point_data, field_data, mesh_type)\n else:\n data = prepare_vtk_data(data, self.axes_reversed, self.axes_increase)\n kw = prepare_vtk_kwargs(\n self.data_location, data, cell_data, point_data, field_data\n )\n path = str(Path(path).with_suffix(\"\"))\n origin = self.origin + (0.0,) * (3 - self.dim)\n spacing = self.spacing + (0.0,) * (3 - self.dim)\n imageToVTK(path, origin, spacing, **kw)" } ]
[ "function_empty", "TDD" ]
[ "finam.data.grid_tools.gen_axes", "finam.data.grid_spec.UniformGrid.__init__", "finam.data.grid_spec.EsriGrid.to_uniform", "finam.data.grid_tools.prepare_vtk_data", "finam.data.grid_tools.prepare_vtk_kwargs", "finam.data.grid_spec.UniformGrid.export_vtk" ]
Python
2
5
{ "total_num": 10, "base_passed_num": 2 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.utils.stats.assert_is_square", "skfolio.src.skfolio.utils.stats.assert_is_symmetric", "skfolio.src.skfolio.utils.stats.assert_is_distance" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/utils/stats.py", "skfolio/utils/stats.py", "skfolio/utils/stats.py" ]
[ "tests/test_cluster/test_hierarchical.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 577, "func_start_lineno": 208, "func_end_lineno": 221, "func_code": "def assert_is_square(x: np.ndarray) -> None:\n \"\"\"Raises an error if the matrix is not square.\n\n Parameters\n ----------\n x : ndarray of shape (n, n)\n The matrix.\n\n Raises\n ------\n ValueError: if the matrix is not square.\n \"\"\"\n if x.ndim != 2 or x.shape[0] != x.shape[1]:\n raise ValueError(\"The matrix must be square\")" }, { "class_start_lineno": 1, "class_end_lineno": 577, "func_start_lineno": 224, "func_end_lineno": 238, "func_code": "def assert_is_symmetric(x: np.ndarray) -> None:\n \"\"\"Raises an error if the matrix is not symmetric.\n\n Parameters\n ----------\n x : ndarray of shape (n, m)\n The matrix.\n\n Raises\n ------\n ValueError: if the matrix is not symmetric.\n \"\"\"\n assert_is_square(x)\n if not np.allclose(x, x.T):\n raise ValueError(\"The matrix must be symmetric\")" }, { "class_start_lineno": 1, "class_end_lineno": 577, "func_start_lineno": 241, "func_end_lineno": 257, "func_code": "def assert_is_distance(x: np.ndarray) -> None:\n \"\"\"Raises an error if the matrix is not a distance matrix.\n\n Parameters\n ----------\n x : ndarray of shape (n, n)\n The matrix.\n\n Raises\n ------\n ValueError: if the matrix is a distance matrix.\n \"\"\"\n assert_is_symmetric(x)\n if not np.allclose(np.diag(x), np.zeros(x.shape[0]), atol=1e-5):\n raise ValueError(\n \"The distance matrix must have diagonal elements close to zeros\"\n )" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.utils.stats.assert_is_square", "skfolio.utils.stats.assert_is_symmetric", "skfolio.utils.stats.assert_is_distance" ]
Python
5
5
{ "total_num": 65, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.utils.stats.assert_is_square", "skfolio.src.skfolio.utils.stats.assert_is_symmetric", "skfolio.src.skfolio.utils.stats.cov_nearest" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/utils/stats.py", "skfolio/utils/stats.py", "skfolio/utils/stats.py" ]
[ "tests/test_distance/test_distance.py", "tests/test_metrics/test_scorer.py", "tests/test_moment/test_expected_returns/test_expected_returns.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 577, "func_start_lineno": 208, "func_end_lineno": 221, "func_code": "def assert_is_square(x: np.ndarray) -> None:\n \"\"\"Raises an error if the matrix is not square.\n\n Parameters\n ----------\n x : ndarray of shape (n, n)\n The matrix.\n\n Raises\n ------\n ValueError: if the matrix is not square.\n \"\"\"\n if x.ndim != 2 or x.shape[0] != x.shape[1]:\n raise ValueError(\"The matrix must be square\")" }, { "class_start_lineno": 1, "class_end_lineno": 577, "func_start_lineno": 224, "func_end_lineno": 238, "func_code": "def assert_is_symmetric(x: np.ndarray) -> None:\n \"\"\"Raises an error if the matrix is not symmetric.\n\n Parameters\n ----------\n x : ndarray of shape (n, m)\n The matrix.\n\n Raises\n ------\n ValueError: if the matrix is not symmetric.\n \"\"\"\n assert_is_square(x)\n if not np.allclose(x, x.T):\n raise ValueError(\"The matrix must be symmetric\")" }, { "class_start_lineno": 1, "class_end_lineno": 577, "func_start_lineno": 308, "func_end_lineno": 400, "func_code": "def cov_nearest(\n cov: np.ndarray,\n higham: bool = False,\n higham_max_iteration: int = 100,\n warn: bool = False,\n):\n \"\"\"Compute the nearest covariance matrix that is positive definite and with a\n cholesky decomposition than can be computed. The variance is left unchanged.\n A covariance matrix that is not positive definite often occurs in high\n dimensional problems. It can be due to multicollinearity, floating-point\n inaccuracies, or when the number of observations is smaller than the number of\n assets.\n\n First, it converts the covariance matrix to a correlation matrix.\n Then, it finds the nearest correlation matrix and converts it back to a covariance\n matrix using the initial standard deviation.\n\n Cholesky decomposition can fail for symmetric positive definite (SPD) matrix due\n to floating point error and inversely, Cholesky decomposition can success for\n non-SPD matrix. Therefore, we need to test for both. We always start by testing\n for Cholesky decomposition which is significantly faster than checking for positive\n eigenvalues.\n\n Parameters\n ----------\n cov : ndarray of shape (n, n)\n Covariance matrix.\n\n higham : bool, default=False\n If this is set to True, the Higham & Nick (2002) algorithm [1]_ is used,\n otherwise the eigenvalues are clipped to threshold above zeros (1e-13).\n The default (`False`) is to use the clipping method as the Higham & Nick\n algorithm can be slow for large datasets.\n\n higham_max_iteration : int, default=100\n Maximum number of iteration of the Higham & Nick (2002) algorithm.\n The default value is `100`.\n\n warn : bool, default=False\n If this is set to True, a user warning is emitted when the covariance matrix\n is not positive definite and replaced by the nearest. The default is False.\n\n Returns\n -------\n cov : ndarray\n The nearest covariance matrix.\n\n References\n ----------\n .. [1] \"Computing the nearest correlation matrix - a problem from finance\"\n IMA Journal of Numerical Analysis\n Higham & Nick (2002)\n \"\"\"\n assert_is_square(cov)\n assert_is_symmetric(cov)\n\n # Around 100 times faster than checking eigenvalues with np.linalg.eigh\n if is_cholesky_dec(cov) and is_positive_definite(cov):\n return cov\n\n if warn:\n warnings.warn(\n \"The covariance matrix is not positive definite. 
\"\n f\"The {'Higham' if higham else 'Clipping'} algorithm will be used to find \"\n \"the nearest positive definite covariance.\",\n stacklevel=2,\n )\n corr, std = cov_to_corr(cov)\n\n if higham:\n eps = np.finfo(np.float64).eps * 5\n diff = np.zeros(corr.shape)\n x = corr.copy()\n for _ in range(higham_max_iteration):\n x_adj = x - diff\n eig_vals, eig_vecs = np.linalg.eigh(x_adj)\n x = eig_vecs * np.maximum(eig_vals, eps) @ eig_vecs.T\n diff = x - x_adj\n np.fill_diagonal(x, 1)\n cov = corr_to_cov(x, std)\n if is_cholesky_dec(cov) and is_positive_definite(cov):\n break\n else:\n raise ValueError(\"Unable to find the nearest positive definite matrix\")\n else:\n eig_vals, eig_vecs = np.linalg.eigh(corr)\n # Clipping the eigenvalues with a value smaller than 1e-13 can cause scipy to\n # consider the matrix non-psd is some corner cases (see test/test_stats.py)\n x = eig_vecs * np.maximum(eig_vals, _CLIPPING_VALUE) @ eig_vecs.T\n x, _ = cov_to_corr(x)\n cov = corr_to_cov(x, std)\n\n return cov" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.utils.stats.assert_is_square", "skfolio.utils.stats.assert_is_symmetric", "skfolio.utils.stats.cov_nearest" ]
Python
5
5
{ "total_num": 26, "base_passed_num": 6 }
[ "skfolio.src.skfolio.distribution.copula._clayton._base_sample_scores", "skfolio.src.skfolio.distribution.copula._clayton._neg_log_likelihood", "skfolio.src.skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.src.skfolio.distribution.copula._utils._apply_margin_swap", "skfolio.src.skfolio.distribution.copula._clayton._base_partial_derivative", "skfolio.src.skfolio.distribution.copula._utils._apply_rotation_partial_derivatives" ]
skfolio
[ "skfolio/distribution/copula/_clayton.py", "skfolio/distribution/copula/_clayton.py", "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_clayton.py", "skfolio/distribution/copula/_utils.py" ]
[ "tests/test_distribution/test_copula/test_clayton.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 539, "func_start_lineno": 416, "func_end_lineno": 448, "func_code": "def _base_sample_scores(X: np.ndarray, theta: float) -> np.ndarray:\n r\"\"\"Compute the log-likelihood of each sample (log-pdf) under the bivariate Clayton\n copula.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n Bivariate samples `(u, v)`, with each component in [0,1].\n\n theta : float\n The dependence parameter (must be greater than 0).\n\n Returns\n -------\n logpdf : ndarray of shape (n_observations,)\n Log-likelihood values for each observation.\n\n Raises\n ------\n ValueError\n If theta is not greater than 0.\n \"\"\"\n if theta <= 0:\n raise ValueError(\"Theta must be greater than 1 for the Clayton copula.\")\n\n x, y = np.log(X).T\n\n log_density = (\n np.log1p(theta)\n - (2.0 + 1.0 / theta) * np.log1p(np.expm1(-theta * x) + np.expm1(-theta * y))\n - (1.0 + theta) * (x + y)\n )\n return log_density" }, { "class_start_lineno": 1, "class_end_lineno": 539, "func_start_lineno": 395, "func_end_lineno": 413, "func_code": "def _neg_log_likelihood(theta: float, X: np.ndarray) -> float:\n \"\"\"Negative log-likelihood function for the Clayton copula.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n theta : float\n The dependence parameter (must be greater than 0).\n\n Returns\n -------\n value : float\n The negative log-likelihood value.\n \"\"\"\n return -np.sum(_base_sample_scores(X=X, theta=theta))" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 341, "func_end_lineno": 380, "func_code": "def _apply_copula_rotation(X: npt.ArrayLike, rotation: CopulaRotation) -> np.ndarray:\n r\"\"\"Apply a bivariate copula rotation using the standard (clockwise) convention.\n\n The transformations are defined as follows:\n\n - `CopulaRotation.R0` (0°): :math:`(u, v) \\mapsto (u, v)`\n - `CopulaRotation.R90` (90°): :math:`(u, v) \\mapsto (v,\\, 1 - u)`\n - `CopulaRotation.R180` (180°): :math:`(u, v) \\mapsto (1 - u,\\, 1 - v)`\n - `CopulaRotation.R270` (270°): :math:`(u, v) \\mapsto (1 - v,\\, u)`\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation.\n\n rotation : CopulaRotation\n The rotation to apply to the copula (default is no rotation).\n\n Returns\n -------\n rotated_X: ndarray of shape (n_observations, 2)\n The rotated data array.\n \"\"\"\n match rotation:\n case CopulaRotation.R0:\n # No rotation\n pass\n case CopulaRotation.R90:\n # (u, v) -> (v, 1 - u)\n X = np.column_stack([X[:, 1], 1.0 - X[:, 0]])\n case CopulaRotation.R180:\n # (u, v) -> (1 - u, 1 - v)\n X = 1.0 - X\n case CopulaRotation.R270:\n # (u, v) -> (1 - v, u)\n X = np.column_stack([1.0 - X[:, 1], X[:, 0]])\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return X" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 383, "func_end_lineno": 406, "func_code": "def _apply_margin_swap(X: np.ndarray, first_margin: bool) -> np.ndarray:\n \"\"\"\n Swap the columns of X if first_margin is False.\n\n If first_margin is True, X is returned unchanged; otherwise, the columns\n of X are swapped.\n\n Parameters\n ----------\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs 
(u, v).\n first_margin : bool\n If True, no swap is performed; if False, the columns of X are swapped.\n\n Returns\n -------\n X_swapped : ndarray of shape (n_observations, 2)\n The data array with columns swapped if first_margin is False.\n \"\"\"\n assert X.ndim == 2\n assert X.shape[1] == 2\n if first_margin:\n return X[:, [1, 0]]\n return X" }, { "class_start_lineno": 1, "class_end_lineno": 539, "func_start_lineno": 461, "func_end_lineno": 498, "func_code": "def _base_partial_derivative(\n X: np.ndarray, first_margin: bool, theta: float\n) -> np.ndarray:\n r\"\"\"\n Compute the partial derivative (h-function) for the unrotated Clayton copula.\n\n For Clayton, the copula is defined as:\n\n .. math::\n C(u,v)=\\Bigl(u^{-\\theta}+v^{-\\theta}-1\\Bigr)^{-1/\\theta}.\n\n The partial derivative with respect to v is:\n\n .. math::\n \\frac{\\partial C(u,v)}{\\partial v} = \\Bigl(u^{-\\theta}+v^{-\\theta}-1\\Bigr)^{-1/\\theta-1}\\,v^{-\\theta-1}.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` with values in [0, 1].\n\n first_margin : bool, default=False\n If True, compute with respect to u (by swapping margins); otherwise\n compute with respect to v.\n\n theta : float\n The dependence parameter (must be > 0).\n\n Returns\n -------\n p : ndarray of shape (n_observations,)\n The computed h-function values.\n \"\"\"\n X = _apply_margin_swap(X, first_margin=first_margin)\n x = np.power(X[:, 0], -theta)\n y = np.power(X[:, 1], theta)\n p = np.power(1.0 + y * (x - 1.0), -(1.0 + 1.0 / theta))\n return p" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 452, "func_end_lineno": 509, "func_code": "def _apply_rotation_partial_derivatives(\n func: Callable,\n X: np.ndarray,\n rotation: CopulaRotation,\n first_margin: bool,\n **kwargs,\n) -> np.ndarray:\n \"\"\"\n Apply a copula rotation to X and compute the corresponding partial derivatives.\n\n This function rotates the data X using the specified rotation and then computes\n the partial derivative (h-function) using the provided function. 
The result is then\n adjusted according to the rotation and the margin of interest.\n\n Parameters\n ----------\n func : Callable\n A function that computes the partial derivative (h-function) given X, the\n margin, and any additional keyword arguments.\n\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs.\n\n rotation : CopulaRotation\n The rotation to apply.\n\n first_margin : bool\n If True, compute the partial derivative with respect to the first margin;\n otherwise, compute it with respect to the second margin.\n\n **kwargs\n Additional keyword arguments to pass to the partial derivative function.\n\n Returns\n -------\n z : ndarray of shape (n_observations,)\n The transformed partial derivative values after applying the rotation.\n \"\"\"\n rotated_X = _apply_copula_rotation(X, rotation=rotation)\n\n match rotation:\n case CopulaRotation.R0:\n z = func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R90:\n if first_margin:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case CopulaRotation.R180:\n z = 1 - func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R270:\n if first_margin:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return z" } ]
[ "function_empty", "TDD" ]
[ "skfolio.distribution.copula._clayton._base_sample_scores", "skfolio.distribution.copula._clayton._neg_log_likelihood", "skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.distribution.copula._utils._apply_margin_swap", "skfolio.distribution.copula._clayton._base_partial_derivative", "skfolio.distribution.copula._utils._apply_rotation_partial_derivatives" ]
Python
2
6
{ "total_num": 69, "base_passed_num": 5 }
[ "skfolio.src.skfolio.distribution.copula._gaussian._base_sample_scores", "skfolio.src.skfolio.distribution.copula._gaussian._neg_log_likelihood" ]
skfolio
[ "skfolio/distribution/copula/_gaussian.py", "skfolio/distribution/copula/_gaussian.py" ]
[ "tests/test_distribution/test_copula/test_gaussian.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 407, "func_start_lineno": 373, "func_end_lineno": 407, "func_code": "def _base_sample_scores(X: np.ndarray, rho: float) -> np.ndarray:\n \"\"\"Compute the log-likelihood of each sample (log-pdf) under the bivariate\n Gaussian copula model.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n rho : float\n Gaussian copula parameter.\n\n Returns\n -------\n density : ndarray of shape (n_observations,)\n The log-likelihood of each sample under the fitted copula.\n\n Raises\n ------\n ValueError\n If rho is not in (-1, 1)\n \"\"\"\n if not (-1.0 <= rho <= 1.0):\n raise ValueError(\"rho must be between -1 and 1.\")\n\n # Inverse CDF (ppf) using stdtrit for better performance\n u_inv, v_inv = sp.ndtri(X).T\n\n # Using np.log1p to avoid loss of precision\n log_density = -0.5 * np.log1p(-(rho**2)) - rho * (\n 0.5 * rho * (u_inv**2 + v_inv**2) - u_inv * v_inv\n ) / (1 - rho**2)\n return log_density" }, { "class_start_lineno": 1, "class_end_lineno": 407, "func_start_lineno": 352, "func_end_lineno": 370, "func_code": "def _neg_log_likelihood(rho: float, X: np.ndarray) -> float:\n \"\"\"Negative log-likelihood function for optimization.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n rho : float\n Correlation copula parameter.\n\n Returns\n -------\n value : float\n The negative log-likelihood value.\n \"\"\"\n return -np.sum(_base_sample_scores(X=X, rho=rho))" } ]
[ "function_empty", "TDD" ]
[ "skfolio.distribution.copula._gaussian._base_sample_scores", "skfolio.distribution.copula._gaussian._neg_log_likelihood" ]
Python
1
2
{ "total_num": 38, "base_passed_num": 26 }
[ "skfolio.src.skfolio.distribution.copula._gumbel._base_sample_scores", "skfolio.src.skfolio.distribution.copula._gumbel._neg_log_likelihood", "skfolio.src.skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.src.skfolio.distribution.copula._utils._apply_margin_swap", "skfolio.src.skfolio.distribution.copula._gumbel._base_partial_derivative", "skfolio.src.skfolio.distribution.copula._utils._apply_rotation_partial_derivatives" ]
skfolio
[ "skfolio/distribution/copula/_gumbel.py", "skfolio/distribution/copula/_gumbel.py", "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_gumbel.py", "skfolio/distribution/copula/_utils.py" ]
[ "tests/test_distribution/test_copula/test_gumbel.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 422, "func_end_lineno": 451, "func_code": "def _base_sample_scores(X: np.ndarray, theta: float) -> np.ndarray:\n r\"\"\"Compute the log-likelihood of each sample (log-pdf) under the bivariate Gumbel\n copula.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n Bivariate samples `(u, v)`, with each component in [0,1].\n\n theta : float\n The dependence parameter (must be greater than 1).\n\n Returns\n -------\n logpdf : ndarray of shape (n_observations,)\n Log-likelihood values for each observation.\n \"\"\"\n if theta <= 1:\n raise ValueError(\"Theta must be greater than 1 for the Gumbel copula.\")\n Z = -np.log(X)\n s = np.power(np.power(Z, theta).sum(axis=1), 1 / theta)\n s = np.clip(s, a_min=1e-10, a_max=None)\n log_density = (\n -s\n + np.log(s + theta - 1)\n + (1 - 2 * theta) * np.log(s)\n + (theta - 1) * np.log(Z.prod(axis=1))\n + Z.sum(axis=1)\n )\n return log_density" }, { "class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 401, "func_end_lineno": 419, "func_code": "def _neg_log_likelihood(theta: float, X: np.ndarray) -> float:\n \"\"\"Negative log-likelihood function for the Gumbel copula.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval [0, 1],\n having been transformed to uniform marginals.\n\n theta : float\n The dependence parameter (must be greater than 1).\n\n Returns\n -------\n value : float\n The negative log-likelihood value.\n \"\"\"\n return -np.sum(_base_sample_scores(X=X, theta=theta))" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 341, "func_end_lineno": 380, "func_code": "def _apply_copula_rotation(X: npt.ArrayLike, rotation: CopulaRotation) -> np.ndarray:\n r\"\"\"Apply a bivariate copula rotation using the standard (clockwise) convention.\n\n The transformations are defined as follows:\n\n - `CopulaRotation.R0` (0°): :math:`(u, v) \\mapsto (u, v)`\n - `CopulaRotation.R90` (90°): :math:`(u, v) \\mapsto (v,\\, 1 - u)`\n - `CopulaRotation.R180` (180°): :math:`(u, v) \\mapsto (1 - u,\\, 1 - v)`\n - `CopulaRotation.R270` (270°): :math:`(u, v) \\mapsto (1 - v,\\, u)`\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation.\n\n rotation : CopulaRotation\n The rotation to apply to the copula (default is no rotation).\n\n Returns\n -------\n rotated_X: ndarray of shape (n_observations, 2)\n The rotated data array.\n \"\"\"\n match rotation:\n case CopulaRotation.R0:\n # No rotation\n pass\n case CopulaRotation.R90:\n # (u, v) -> (v, 1 - u)\n X = np.column_stack([X[:, 1], 1.0 - X[:, 0]])\n case CopulaRotation.R180:\n # (u, v) -> (1 - u, 1 - v)\n X = 1.0 - X\n case CopulaRotation.R270:\n # (u, v) -> (1 - v, u)\n X = np.column_stack([1.0 - X[:, 1], X[:, 0]])\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return X" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 383, "func_end_lineno": 406, "func_code": "def _apply_margin_swap(X: np.ndarray, first_margin: bool) -> np.ndarray:\n \"\"\"\n Swap the columns of X if first_margin is False.\n\n If first_margin is True, X is returned unchanged; otherwise, the columns\n of X are swapped.\n\n Parameters\n ----------\n X : ndarray of shape (n_observations, 2)\n A 2D array of 
bivariate inputs (u, v).\n first_margin : bool\n If True, no swap is performed; if False, the columns of X are swapped.\n\n Returns\n -------\n X_swapped : ndarray of shape (n_observations, 2)\n The data array with columns swapped if first_margin is False.\n \"\"\"\n assert X.ndim == 2\n assert X.shape[1] == 2\n if first_margin:\n return X[:, [1, 0]]\n return X" }, { "class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 464, "func_end_lineno": 507, "func_code": "def _base_partial_derivative(\n X: np.ndarray, first_margin: bool, theta: float\n) -> np.ndarray:\n r\"\"\"\n Compute the partial derivative (h-function) for the unrotated Gumbel copula.\n\n For Gumbel, the copula is defined as:\n\n .. math::\n C(u,v)=\\exp\\Bigl(-\\Bigl[(-\\ln u)^{\\theta}+(-\\ln v)^{\\theta}\\Bigr]^{1/\\theta}\\Bigr).\n\n The partial derivative with respect to v is:\n\n .. math::\n \\frac{\\partial C(u,v)}{\\partial v}\n = C(u,v)\\,\\Bigl[(-\\ln u)^{\\theta}+(-\\ln v)^{\\theta}\\Bigr]^{\\frac{1}{\\theta}-1}\n \\,(-\\ln v)^{\\theta-1}\\,\\frac{1}{v}.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` with values in [0, 1].\n\n first_margin : bool, default=False\n If True, compute with respect to u (by swapping margins); otherwise,\n compute with respect to v.\n\n theta : float\n The dependence parameter (must be > 1).\n\n Returns\n -------\n p : ndarray of shape (n_observations,)\n The computed h-function values.\n \"\"\"\n X = _apply_margin_swap(X, first_margin=first_margin)\n _, v = X.T\n x, y = -np.log(X).T\n p = (\n np.exp(-np.power(np.power(x, theta) + np.power(y, theta), 1.0 / theta))\n * np.power(np.power(x / y, theta) + 1.0, 1.0 / theta - 1.0)\n / v\n )\n return p" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 452, "func_end_lineno": 509, "func_code": "def _apply_rotation_partial_derivatives(\n func: Callable,\n X: np.ndarray,\n rotation: CopulaRotation,\n first_margin: bool,\n **kwargs,\n) -> np.ndarray:\n \"\"\"\n Apply a copula rotation to X and compute the corresponding partial derivatives.\n\n This function rotates the data X using the specified rotation and then computes\n the partial derivative (h-function) using the provided function. 
The result is then\n adjusted according to the rotation and the margin of interest.\n\n Parameters\n ----------\n func : Callable\n A function that computes the partial derivative (h-function) given X, the\n margin, and any additional keyword arguments.\n\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs.\n\n rotation : CopulaRotation\n The rotation to apply.\n\n first_margin : bool\n If True, compute the partial derivative with respect to the first margin;\n otherwise, compute it with respect to the second margin.\n\n **kwargs\n Additional keyword arguments to pass to the partial derivative function.\n\n Returns\n -------\n z : ndarray of shape (n_observations,)\n The transformed partial derivative values after applying the rotation.\n \"\"\"\n rotated_X = _apply_copula_rotation(X, rotation=rotation)\n\n match rotation:\n case CopulaRotation.R0:\n z = func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R90:\n if first_margin:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case CopulaRotation.R180:\n z = 1 - func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R270:\n if first_margin:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return z" } ]
[ "function_empty", "TDD" ]
[ "skfolio.distribution.copula._gumbel._base_sample_scores", "skfolio.distribution.copula._gumbel._neg_log_likelihood", "skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.distribution.copula._utils._apply_margin_swap", "skfolio.distribution.copula._gumbel._base_partial_derivative", "skfolio.distribution.copula._utils._apply_rotation_partial_derivatives" ]
Python
2
6
{ "total_num": 69, "base_passed_num": 5 }
[ "skfolio.src.skfolio.distribution.copula._joe._base_sample_scores", "skfolio.src.skfolio.distribution.copula._joe._neg_log_likelihood", "skfolio.src.skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.src.skfolio.distribution.copula._utils._apply_margin_swap", "skfolio.src.skfolio.distribution.copula._joe._base_partial_derivative", "skfolio.src.skfolio.distribution.copula._utils._apply_rotation_partial_derivatives" ]
skfolio
[ "skfolio/distribution/copula/_joe.py", "skfolio/distribution/copula/_joe.py", "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_joe.py", "skfolio/distribution/copula/_utils.py" ]
[ "tests/test_distribution/test_copula/test_joe.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 609, "func_start_lineno": 439, "func_end_lineno": 473, "func_code": "def _base_sample_scores(X: np.ndarray, theta: float) -> np.ndarray:\n \"\"\"Compute the log-likelihood of each sample (log-pdf) under the bivariate\n Joe copula model.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n theta : float\n The dependence parameter (must be greater than 1).\n\n Returns\n -------\n density : ndarray of shape (n_observations,)\n The log-likelihood of each sample under the fitted copula.\n\n Raises\n ------\n ValueError\n If rho is not in (-1, 1) or dof is not positive.\n \"\"\"\n if theta <= 1.0:\n raise ValueError(\"Theta must be greater than 1 for the Joe copula.\")\n\n # log-space transformation to improve stability near 0 or 1\n x, y = np.log1p(-X).T\n x_y = x + y\n d = np.exp(x * theta) + np.exp(y * theta) - np.exp(x_y * theta)\n log_density = (\n (1.0 / theta - 2.0) * np.log(d) + x_y * (theta - 1.0) + np.log(theta - 1.0 + d)\n )\n return log_density" }, { "class_start_lineno": 1, "class_end_lineno": 609, "func_start_lineno": 418, "func_end_lineno": 436, "func_code": "def _neg_log_likelihood(theta: float, X: np.ndarray) -> float:\n \"\"\"Negative log-likelihood function for optimization.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n theta : float\n The dependence parameter (must be greater than 1).\n\n Returns\n -------\n value : float\n The negative log-likelihood value.\n \"\"\"\n return -np.sum(_base_sample_scores(X=X, theta=theta))" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 341, "func_end_lineno": 380, "func_code": "def _apply_copula_rotation(X: npt.ArrayLike, rotation: CopulaRotation) -> np.ndarray:\n r\"\"\"Apply a bivariate copula rotation using the standard (clockwise) convention.\n\n The transformations are defined as follows:\n\n - `CopulaRotation.R0` (0°): :math:`(u, v) \\mapsto (u, v)`\n - `CopulaRotation.R90` (90°): :math:`(u, v) \\mapsto (v,\\, 1 - u)`\n - `CopulaRotation.R180` (180°): :math:`(u, v) \\mapsto (1 - u,\\, 1 - v)`\n - `CopulaRotation.R270` (270°): :math:`(u, v) \\mapsto (1 - v,\\, u)`\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation.\n\n rotation : CopulaRotation\n The rotation to apply to the copula (default is no rotation).\n\n Returns\n -------\n rotated_X: ndarray of shape (n_observations, 2)\n The rotated data array.\n \"\"\"\n match rotation:\n case CopulaRotation.R0:\n # No rotation\n pass\n case CopulaRotation.R90:\n # (u, v) -> (v, 1 - u)\n X = np.column_stack([X[:, 1], 1.0 - X[:, 0]])\n case CopulaRotation.R180:\n # (u, v) -> (1 - u, 1 - v)\n X = 1.0 - X\n case CopulaRotation.R270:\n # (u, v) -> (1 - v, u)\n X = np.column_stack([1.0 - X[:, 1], X[:, 0]])\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return X" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 383, "func_end_lineno": 406, "func_code": "def _apply_margin_swap(X: np.ndarray, first_margin: bool) -> np.ndarray:\n 
\"\"\"\n Swap the columns of X if first_margin is False.\n\n If first_margin is True, X is returned unchanged; otherwise, the columns\n of X are swapped.\n\n Parameters\n ----------\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs (u, v).\n first_margin : bool\n If True, no swap is performed; if False, the columns of X are swapped.\n\n Returns\n -------\n X_swapped : ndarray of shape (n_observations, 2)\n The data array with columns swapped if first_margin is False.\n \"\"\"\n assert X.ndim == 2\n assert X.shape[1] == 2\n if first_margin:\n return X[:, [1, 0]]\n return X" }, { "class_start_lineno": 1, "class_end_lineno": 609, "func_start_lineno": 517, "func_end_lineno": 546, "func_code": "def _base_partial_derivative(\n X: np.ndarray, first_margin: bool, theta: float\n) -> np.ndarray:\n r\"\"\"Compute the h-function (partial derivative) for the bivariate unrotated\n Joe copula with respect to a specified margin.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n first_margin : bool, default=False\n If True, compute the partial derivative with respect to the first\n margin `u`; otherwise, compute the partial derivative with respect to the\n second margin `v`.\n\n theta : float\n The dependence parameter (must be greater than 1).\n\n Returns\n -------\n : ndarray of shape (n_observations,)\n h-function values :math:`h(u \\mid v) \\;=\\; p` for each observation in X.\n \"\"\"\n X = _apply_margin_swap(X, first_margin=first_margin)\n x, y = np.power(1 - X, theta).T\n p = np.power(1 + x / y - x, 1 / theta - 1) * (1.0 - x)\n return p" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 452, "func_end_lineno": 509, "func_code": "def _apply_rotation_partial_derivatives(\n func: Callable,\n X: np.ndarray,\n rotation: CopulaRotation,\n first_margin: bool,\n **kwargs,\n) -> np.ndarray:\n \"\"\"\n Apply a copula rotation to X and compute the corresponding partial derivatives.\n\n This function rotates the data X using the specified rotation and then computes\n the partial derivative (h-function) using the provided function. 
The result is then\n adjusted according to the rotation and the margin of interest.\n\n Parameters\n ----------\n func : Callable\n A function that computes the partial derivative (h-function) given X, the\n margin, and any additional keyword arguments.\n\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs.\n\n rotation : CopulaRotation\n The rotation to apply.\n\n first_margin : bool\n If True, compute the partial derivative with respect to the first margin;\n otherwise, compute it with respect to the second margin.\n\n **kwargs\n Additional keyword arguments to pass to the partial derivative function.\n\n Returns\n -------\n z : ndarray of shape (n_observations,)\n The transformed partial derivative values after applying the rotation.\n \"\"\"\n rotated_X = _apply_copula_rotation(X, rotation=rotation)\n\n match rotation:\n case CopulaRotation.R0:\n z = func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R90:\n if first_margin:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case CopulaRotation.R180:\n z = 1 - func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R270:\n if first_margin:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return z" } ]
[ "function_empty", "TDD" ]
[ "skfolio.distribution.copula._joe._base_sample_scores", "skfolio.distribution.copula._joe._neg_log_likelihood", "skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.distribution.copula._utils._apply_margin_swap", "skfolio.distribution.copula._joe._base_partial_derivative", "skfolio.distribution.copula._utils._apply_rotation_partial_derivatives" ]
Python
3
6
{ "total_num": 69, "base_passed_num": 5 }
[ "skfolio.src.skfolio.distribution.copula._clayton._base_sample_scores", "skfolio.src.skfolio.distribution.copula._clayton._neg_log_likelihood" ]
skfolio
[ "skfolio/distribution/copula/_clayton.py", "skfolio/distribution/copula/_clayton.py" ]
[ "tests/test_distribution/test_copula/test_selection.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 539, "func_start_lineno": 416, "func_end_lineno": 448, "func_code": "def _base_sample_scores(X: np.ndarray, theta: float) -> np.ndarray:\n r\"\"\"Compute the log-likelihood of each sample (log-pdf) under the bivariate Clayton\n copula.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n Bivariate samples `(u, v)`, with each component in [0,1].\n\n theta : float\n The dependence parameter (must be greater than 0).\n\n Returns\n -------\n logpdf : ndarray of shape (n_observations,)\n Log-likelihood values for each observation.\n\n Raises\n ------\n ValueError\n If theta is not greater than 0.\n \"\"\"\n if theta <= 0:\n raise ValueError(\"Theta must be greater than 1 for the Clayton copula.\")\n\n x, y = np.log(X).T\n\n log_density = (\n np.log1p(theta)\n - (2.0 + 1.0 / theta) * np.log1p(np.expm1(-theta * x) + np.expm1(-theta * y))\n - (1.0 + theta) * (x + y)\n )\n return log_density" }, { "class_start_lineno": 1, "class_end_lineno": 539, "func_start_lineno": 395, "func_end_lineno": 413, "func_code": "def _neg_log_likelihood(theta: float, X: np.ndarray) -> float:\n \"\"\"Negative log-likelihood function for the Clayton copula.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n theta : float\n The dependence parameter (must be greater than 0).\n\n Returns\n -------\n value : float\n The negative log-likelihood value.\n \"\"\"\n return -np.sum(_base_sample_scores(X=X, theta=theta))" } ]
[ "function_empty", "TDD" ]
[ "skfolio.distribution.copula._clayton._base_sample_scores", "skfolio.distribution.copula._clayton._neg_log_likelihood" ]
Python
1
2
{ "total_num": 4, "base_passed_num": 3 }
[ "skfolio.src.skfolio.distribution.copula._student_t._sample_scores", "skfolio.src.skfolio.distribution.copula._student_t._neg_log_likelihood" ]
skfolio
[ "skfolio/distribution/copula/_student_t.py", "skfolio/distribution/copula/_student_t.py" ]
[ "tests/test_distribution/test_copula/test_student_t.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 486, "func_start_lineno": 445, "func_end_lineno": 486, "func_code": "def _sample_scores(X: np.ndarray, rho: float, dof: float) -> np.ndarray:\n \"\"\"Compute the log-likelihood of each sample (log-pdf) under the bivariate\n Gaussian copula model.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n rho : float\n Gaussian copula parameter.\n\n Returns\n -------\n density : ndarray of shape (n_observations,)\n The log-likelihood of each sample under the fitted copula.\n\n Raises\n ------\n ValueError\n If rho is not in (-1, 1) or dof is not positive.\n \"\"\"\n if not (-1.0 <= rho <= 1.0):\n raise ValueError(\"rho must be between -1 and 1.\")\n if not 1.0 <= dof <= 50:\n raise ValueError(\"Degrees of freedom `dof` must be between 1 and 50.\")\n\n # Inverse CDF (ppf) using stdtrit for better performance\n x, y = sp.stdtrit(dof, X).T\n\n a = 1.0 - rho**2\n log_density = (\n sp.gammaln((dof + 2.0) / 2.0)\n + sp.gammaln(dof / 2.0)\n - 2.0 * sp.gammaln((dof + 1.0) / 2.0)\n - np.log(a) / 2\n + (dof + 1.0) / 2.0 * (np.log1p(x**2 / dof) + np.log1p(y**2 / dof))\n - (dof + 2.0) / 2.0 * np.log1p((x**2 - 2 * rho * x * y + y**2) / a / dof)\n )\n return log_density" }, { "class_start_lineno": 1, "class_end_lineno": 486, "func_start_lineno": 421, "func_end_lineno": 442, "func_code": "def _neg_log_likelihood(dof: float, rho: float, X: np.ndarray) -> float:\n \"\"\"Negative log-likelihood function for optimization.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n rho : float\n Correlation copula parameter.\n\n dof : float\n Degree of freedom copula parameter.\n\n Returns\n -------\n value : float\n The negative log-likelihood value.\n \"\"\"\n return -np.sum(_sample_scores(X=X, rho=rho, dof=dof))" } ]
[ "function_empty", "TDD" ]
[ "skfolio.distribution.copula._student_t._sample_scores", "skfolio.distribution.copula._student_t._neg_log_likelihood" ]
Python
1
2
{ "total_num": 40, "base_passed_num": 17 }
[ "skfolio.src.skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.src.skfolio.distribution.copula._utils._apply_rotation_cdf", "skfolio.src.skfolio.distribution.copula._utils._apply_rotation_partial_derivatives" ]
skfolio
[ "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_utils.py" ]
[ "tests/test_distribution/test_copula/test_utils.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 341, "func_end_lineno": 380, "func_code": "def _apply_copula_rotation(X: npt.ArrayLike, rotation: CopulaRotation) -> np.ndarray:\n r\"\"\"Apply a bivariate copula rotation using the standard (clockwise) convention.\n\n The transformations are defined as follows:\n\n - `CopulaRotation.R0` (0°): :math:`(u, v) \\mapsto (u, v)`\n - `CopulaRotation.R90` (90°): :math:`(u, v) \\mapsto (v,\\, 1 - u)`\n - `CopulaRotation.R180` (180°): :math:`(u, v) \\mapsto (1 - u,\\, 1 - v)`\n - `CopulaRotation.R270` (270°): :math:`(u, v) \\mapsto (1 - v,\\, u)`\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation.\n\n rotation : CopulaRotation\n The rotation to apply to the copula (default is no rotation).\n\n Returns\n -------\n rotated_X: ndarray of shape (n_observations, 2)\n The rotated data array.\n \"\"\"\n match rotation:\n case CopulaRotation.R0:\n # No rotation\n pass\n case CopulaRotation.R90:\n # (u, v) -> (v, 1 - u)\n X = np.column_stack([X[:, 1], 1.0 - X[:, 0]])\n case CopulaRotation.R180:\n # (u, v) -> (1 - u, 1 - v)\n X = 1.0 - X\n case CopulaRotation.R270:\n # (u, v) -> (1 - v, u)\n X = np.column_stack([1.0 - X[:, 1], X[:, 0]])\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return X" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 409, "func_end_lineno": 449, "func_code": "def _apply_rotation_cdf(\n func: Callable, X: np.ndarray, rotation: CopulaRotation, **kwargs\n) -> np.ndarray:\n \"\"\"\n Apply a copula rotation to X and compute the corresponding CDF values.\n\n Parameters\n ----------\n func : Callable\n A function that computes the CDF given data X and additional keyword arguments.\n\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs.\n\n rotation : CopulaRotation\n The rotation to apply.\n\n **kwargs\n Additional keyword arguments to pass to the CDF function.\n\n Returns\n -------\n rotated_cdf : ndarray of shape (n_observations,)\n The transformed CDF values after applying the rotation.\n \"\"\"\n rotated_X = _apply_copula_rotation(X, rotation=rotation)\n cdf = func(X=rotated_X, **kwargs)\n\n match rotation:\n case CopulaRotation.R0:\n pass\n case CopulaRotation.R90:\n cdf = X[:, 1] - cdf\n case CopulaRotation.R180:\n cdf = np.sum(X, axis=1) - 1 + cdf\n case CopulaRotation.R270:\n cdf = X[:, 0] - cdf\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n\n return cdf" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 452, "func_end_lineno": 509, "func_code": "def _apply_rotation_partial_derivatives(\n func: Callable,\n X: np.ndarray,\n rotation: CopulaRotation,\n first_margin: bool,\n **kwargs,\n) -> np.ndarray:\n \"\"\"\n Apply a copula rotation to X and compute the corresponding partial derivatives.\n\n This function rotates the data X using the specified rotation and then computes\n the partial derivative (h-function) using the provided function. 
The result is then\n adjusted according to the rotation and the margin of interest.\n\n Parameters\n ----------\n func : Callable\n A function that computes the partial derivative (h-function) given X, the\n margin, and any additional keyword arguments.\n\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs.\n\n rotation : CopulaRotation\n The rotation to apply.\n\n first_margin : bool\n If True, compute the partial derivative with respect to the first margin;\n otherwise, compute it with respect to the second margin.\n\n **kwargs\n Additional keyword arguments to pass to the partial derivative function.\n\n Returns\n -------\n z : ndarray of shape (n_observations,)\n The transformed partial derivative values after applying the rotation.\n \"\"\"\n rotated_X = _apply_copula_rotation(X, rotation=rotation)\n\n match rotation:\n case CopulaRotation.R0:\n z = func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R90:\n if first_margin:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case CopulaRotation.R180:\n z = 1 - func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R270:\n if first_margin:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return z" } ]
[ "function_empty", "TDD" ]
[ "skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.distribution.copula._utils._apply_rotation_cdf", "skfolio.distribution.copula._utils._apply_rotation_partial_derivatives" ]
Python
2
3
{ "total_num": 10, "base_passed_num": 6 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py" ]
[ "tests/test_distribution/test_multivariate/test_utils.py", "tests/test_model_selection/test_walk_forward.py", "tests/test_utils/test_bootstrap.py", "tests/test_utils/test_validation.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset" ]
Python
2
2
{ "total_num": 24, "base_passed_num": 5 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.measures._measures.get_cumulative_returns", "skfolio.src.skfolio.measures._measures.get_drawdowns" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/measures/_measures.py", "skfolio/measures/_measures.py" ]
[ "tests/test_measures/test_measures.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 633, "func_start_lineno": 405, "func_end_lineno": 428, "func_code": "def get_cumulative_returns(returns: np.ndarray, compounded: bool = False) -> np.ndarray:\n \"\"\"Compute the cumulative returns from the returns.\n Non-compounded cumulative returns start at 0.\n Compounded cumulative returns are rescaled to start at 1000.\n\n Parameters\n ----------\n returns : ndarray of shape (n_observations,)\n Vector of returns.\n\n compounded : bool, default=False\n If this is set to True, the cumulative returns are compounded otherwise they\n are uncompounded.\n\n Returns\n -------\n values: ndarray of shape (n_observations,)\n Cumulative returns.\n \"\"\"\n if compounded:\n cumulative_returns = 1000 * np.cumprod(1 + returns) # Rescaled to start at 1000\n else:\n cumulative_returns = np.cumsum(returns)\n return cumulative_returns" }, { "class_start_lineno": 1, "class_end_lineno": 633, "func_start_lineno": 431, "func_end_lineno": 453, "func_code": "def get_drawdowns(returns: np.ndarray, compounded: bool = False) -> np.ndarray:\n \"\"\"Compute the drawdowns' series from the returns.\n\n Parameters\n ----------\n returns : ndarray of shape (n_observations,)\n Vector of returns.\n\n compounded : bool, default=False\n If this is set to True, the cumulative returns are compounded otherwise they\n are uncompounded.\n\n Returns\n -------\n values: ndarray of shape (n_observations,)\n Drawdowns.\n \"\"\"\n cumulative_returns = get_cumulative_returns(returns=returns, compounded=compounded)\n if compounded:\n drawdowns = cumulative_returns / np.maximum.accumulate(cumulative_returns) - 1\n else:\n drawdowns = cumulative_returns - np.maximum.accumulate(cumulative_returns)\n return drawdowns" } ]
[ "function_empty", "TDD" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.measures._measures.get_cumulative_returns", "skfolio.measures._measures.get_drawdowns" ]
Python
2
4
{ "total_num": 17, "base_passed_num": 0 }
[ "skfolio.src.skfolio.model_selection._combinatorial._n_splits", "skfolio.src.skfolio.model_selection._combinatorial._n_test_paths" ]
skfolio
[ "skfolio/model_selection/_combinatorial.py", "skfolio/model_selection/_combinatorial.py", "skfolio/model_selection/_combinatorial.py" ]
[ "tests/test_model_selection/test_combinatorial.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 564, "func_start_lineno": 415, "func_end_lineno": 431, "func_code": "def _n_splits(n_folds: int, n_test_folds: int) -> int:\n \"\"\"Number of splits.\n\n Parameters\n ----------\n n_folds : int\n Number of folds.\n\n n_test_folds : int\n Number of test folds.\n\n Returns\n -------\n n_splits : int\n Number of splits\n \"\"\"\n return math.comb(n_folds, n_test_folds)" }, { "class_start_lineno": 1, "class_end_lineno": 564, "func_start_lineno": 434, "func_end_lineno": 453, "func_code": "def _n_test_paths(n_folds: int, n_test_folds: int) -> int:\n \"\"\"Number of test paths that can be reconstructed from the train/test\n combinations.\n\n Parameters\n ----------\n n_folds : int\n Number of folds.\n\n n_test_folds : int\n Number of test folds.\n\n Returns\n -------\n n_splits : int\n Number of test paths.\n \"\"\"\n return (\n _n_splits(n_folds=n_folds, n_test_folds=n_test_folds) * n_test_folds // n_folds\n )" }, { "class_start_lineno": 46, "class_end_lineno": 412, "func_start_lineno": 203, "func_end_lineno": 207, "func_code": " def n_test_paths(self) -> int:\n \"\"\"Number of test paths that can be reconstructed from the train/test\n combinations.\n \"\"\"\n return _n_test_paths(n_folds=self.n_folds, n_test_folds=self.n_test_folds)" } ]
[ "function_empty" ]
[ "skfolio.model_selection._combinatorial._n_splits", "skfolio.model_selection._combinatorial._n_test_paths", "skfolio.model_selection._combinatorial.CombinatorialPurgedCV.n_test_paths" ]
Python
2
2
{ "total_num": 8, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.utils.tools.safe_indexing", "skfolio.src.skfolio.utils.tools.safe_split", "skfolio.src.skfolio.model_selection._validation.cross_val_predict" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/utils/tools.py", "skfolio/utils/tools.py", "skfolio/model_selection/_validation.py" ]
[ "tests/test_model_selection/test_validation.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 190, "func_end_lineno": 219, "func_code": "def safe_indexing(\n X: npt.ArrayLike | pd.DataFrame, indices: npt.ArrayLike | None, axis: int = 0\n):\n \"\"\"Return rows, items or columns of X using indices.\n\n Parameters\n ----------\n X : array-like\n Data from which to sample rows.\n\n indices : array-like, optional\n Indices of rows or columns.\n The default (`None`) is to select the entire data.\n\n axis : int, default=0\n The axis along which `X` will be sub-sampled. `axis=0` will select\n rows while `axis=1` will select columns.\n\n Returns\n -------\n subset :\n Subset of X on axis 0.\n \"\"\"\n if indices is None:\n return X\n if hasattr(X, \"iloc\"):\n return X.take(indices, axis=axis)\n if axis == 0:\n return X[indices]\n return X[:, indices]" }, { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 222, "func_end_lineno": 261, "func_code": "def safe_split(\n X: npt.ArrayLike,\n y: npt.ArrayLike | None = None,\n indices: np.ndarray | None = None,\n axis: int = 0,\n):\n \"\"\"Create subset of dataset.\n\n Slice X, y according to indices for cross-validation.\n\n Parameters\n ----------\n X : array-like\n Data to be indexed.\n\n y : array-like\n Data to be indexed.\n\n indices : ndarray of int, optional\n Rows or columns to select from X and y.\n The default (`None`) is to select the entire data.\n\n axis : int, default=0\n The axis along which `X` will be sub-sampled. `axis=0` will select\n rows while `axis=1` will select columns.\n\n Returns\n -------\n X_subset : array-like\n Indexed data.\n\n y_subset : array-like\n Indexed targets.\n \"\"\"\n X_subset = safe_indexing(X, indices=indices, axis=axis)\n if y is not None:\n y_subset = safe_indexing(y, indices=indices, axis=axis)\n else:\n y_subset = None\n return X_subset, y_subset" }, { "class_start_lineno": 1, "class_end_lineno": 254, "func_start_lineno": 38, "func_end_lineno": 254, "func_code": "def cross_val_predict(\n estimator: skb.BaseEstimator,\n X: npt.ArrayLike,\n y: npt.ArrayLike = None,\n cv: sks.BaseCrossValidator | BaseCombinatorialCV | int | None = None,\n n_jobs: int | None = None,\n method: str = \"predict\",\n verbose: int = 0,\n params: dict | None = None,\n pre_dispatch: str = \"2*n_jobs\",\n column_indices: np.ndarray | None = None,\n portfolio_params: dict | None = None,\n) -> MultiPeriodPortfolio | Population:\n \"\"\"Generate cross-validated `Portfolios` estimates.\n\n The data is split according to the `cv` parameter.\n The optimization estimator is fitted on the training set and portfolios are\n predicted on the corresponding test set.\n\n For non-combinatorial cross-validation like `Kfold`, the output is the predicted\n :class:`~skfolio.portfolio.MultiPeriodPortfolio` where\n each :class:`~skfolio.portfolio.Portfolio` corresponds to the prediction on each\n train/test pair (`k` portfolios for `Kfold`).\n\n For combinatorial cross-validation\n like :class:`~skfolio.model_selection.CombinatorialPurgedCV`, the output is the\n predicted :class:`~skfolio.population.Population` of multiple\n :class:`~skfolio.portfolio.MultiPeriodPortfolio` (each test outputs are a\n collection of multiple paths instead of one single path).\n\n Parameters\n ----------\n estimator : BaseOptimization\n :ref:`Optimization estimators <optimization>` use to fit the data.\n\n X : array-like of shape 
(n_observations, n_assets)\n Price returns of the assets.\n\n y : array-like of shape (n_observations, n_targets), optional\n Target data (optional).\n For example, the price returns of the factors.\n\n cv : int | cross-validation generator, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n * None, to use the default 5-fold cross validation,\n * int, to specify the number of folds in a `(Stratified)KFold`,\n * `CV splitter`,\n * An iterable that generates (train, test) splits as arrays of indices.\n\n n_jobs : int, optional\n The number of jobs to run in parallel for `fit` of all `estimators`.\n `None` means 1 unless in a `joblib.parallel_backend` context. -1 means\n using all processors.\n\n method : str\n Invokes the passed method name of the passed estimator.\n\n verbose : int, default=0\n The verbosity level.\n\n params : dict, optional\n Parameters to pass to the underlying estimator's ``fit`` and the CV splitter.\n\n pre_dispatch : int or str, default='2*n_jobs'\n Controls the number of jobs that get dispatched during parallel\n execution. Reducing this number can be useful to avoid an\n explosion of memory consumption when more jobs get dispatched\n than CPUs can process. This parameter can be:\n\n * None, in which case all the jobs are immediately\n created and spawned. Use this for lightweight and\n fast-running jobs, to avoid delays due to on-demand\n spawning of the jobs\n\n * An int, giving the exact number of total jobs that are\n spawned\n\n * A str, giving an expression as a function of n_jobs,\n as in '2*n_jobs'\n\n column_indices : ndarray, optional\n Indices of the `X` columns to cross-validate on.\n\n portfolio_params : dict, optional\n Additional portfolio parameters passed to `MultiPeriodPortfolio`.\n\n Returns\n -------\n predictions : MultiPeriodPortfolio | Population\n This is the result of calling `predict`\n \"\"\"\n params = {} if params is None else params\n\n X, y = safe_split(X, y, indices=column_indices, axis=1)\n X, y = sku.indexable(X, y)\n\n if _routing_enabled():\n # For estimators, a MetadataRouter is created in get_metadata_routing\n # methods. For these router methods, we create the router to use\n # `process_routing` on it.\n # noinspection PyTypeChecker\n router = (\n skm.MetadataRouter(owner=\"cross_validate\")\n .add(\n splitter=cv,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"split\"),\n )\n .add(\n estimator=estimator,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n )\n try:\n routed_params = skm.process_routing(router, \"fit\", **params)\n except ske.UnsetMetadataPassedError as e:\n # The default exception would mention `fit` since in the above\n # `process_routing` code, we pass `fit` as the caller. However,\n # the user is not calling `fit` directly, so we change the message\n # to make it more suitable for this case.\n unrequested_params = sorted(e.unrequested_params)\n raise ske.UnsetMetadataPassedError(\n message=(\n f\"{unrequested_params} are passed to `cross_val_predict` but are\"\n \" not explicitly set as requested or not requested for\"\n f\" cross_validate's estimator: {estimator.__class__.__name__} Call\"\n \" `.set_fit_request({{metadata}}=True)` on the estimator for\"\n f\" each metadata in {unrequested_params} that you want to use and\"\n \" `metadata=False` for not using it. 
See the Metadata Routing User\"\n \" guide <https://scikit-learn.org/stable/metadata_routing.html>\"\n \" for more information.\"\n ),\n unrequested_params=e.unrequested_params,\n routed_params=e.routed_params,\n ) from None\n else:\n routed_params = sku.Bunch()\n routed_params.splitter = sku.Bunch(split={})\n routed_params.estimator = sku.Bunch(fit=params)\n\n cv = sks.check_cv(cv, y)\n splits = list(cv.split(X, y, **routed_params.splitter.split))\n\n portfolio_params = {} if portfolio_params is None else portfolio_params.copy()\n\n # We ensure that the folds are not shuffled\n if not isinstance(cv, BaseCombinatorialCV):\n try:\n if cv.shuffle:\n raise ValueError(\n \"`cross_val_predict` only works with cross-validation setting\"\n \" `shuffle=False`\"\n )\n except AttributeError:\n # If we cannot find the attribute shuffle, we check if the first folds\n # are shuffled\n for fold in splits[0]:\n if not np.all(np.diff(fold) > 0):\n raise ValueError(\n \"`cross_val_predict` only works with un-shuffled folds\"\n ) from None\n\n # We clone the estimator to make sure that all the folds are independent\n # and that it is pickle-able.\n parallel = skp.Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)\n # TODO remove when https://github.com/joblib/joblib/issues/1071 is fixed\n # noinspection PyCallingNonCallable\n predictions = parallel(\n skp.delayed(fit_and_predict)(\n sk.clone(estimator),\n X,\n y,\n train=train,\n test=test,\n fit_params=routed_params.estimator.fit,\n method=method,\n )\n for train, test in splits\n )\n\n if isinstance(cv, BaseCombinatorialCV):\n path_ids = cv.get_path_ids()\n path_nb = np.max(path_ids) + 1\n portfolios = [[] for _ in range(path_nb)]\n for i, prediction in enumerate(predictions):\n for j, p in enumerate(prediction):\n path_id = path_ids[i, j]\n portfolios[path_id].append(p)\n name = portfolio_params.pop(\"name\", \"path\")\n pred = Population(\n [\n MultiPeriodPortfolio(\n name=f\"{name}_{i}\", portfolios=portfolios[i], **portfolio_params\n )\n for i in range(path_nb)\n ]\n )\n else:\n # We need to re-order the test folds in case they were un-ordered by the\n # CV generator.\n # Because the tests folds are not shuffled, we use the first index of each\n # fold to order them.\n test_indices = np.concatenate([test for _, test in splits])\n if np.unique(test_indices, axis=0).shape[0] != test_indices.shape[0]:\n raise ValueError(\n \"`cross_val_predict` only works with non-duplicated test indices\"\n )\n test_indices = [test for _, test in splits]\n sorted_fold_id = np.argsort([x[0] for x in test_indices])\n pred = MultiPeriodPortfolio(\n portfolios=[predictions[fold_id] for fold_id in sorted_fold_id],\n check_observations_order=False,\n **portfolio_params,\n )\n\n return pred" } ]
[ "function_empty", "TDD" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.utils.tools.safe_indexing", "skfolio.utils.tools.safe_split", "skfolio.model_selection._validation.cross_val_predict" ]
Python
3
5
{ "total_num": 3, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.datasets._base.get_data_home", "skfolio.src.skfolio.datasets._base.download_dataset", "skfolio.src.skfolio.datasets._base.load_sp500_implied_vol_dataset" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/datasets/_base.py" ]
[ "tests/test_moment/test_covariance/test_implied_covariance.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 25, "func_end_lineno": 55, "func_code": "def get_data_home(data_home: str | Path | None = None) -> str:\n \"\"\"Return the path of the skfolio data directory.\n\n This folder is used by some large dataset loaders to avoid downloading the\n data several times.\n\n By default, the data directory is set to a folder named 'skfolio_data' in the\n user home folder.\n\n Alternatively, it can be set by the 'SKFOLIO_DATA' environment\n variable or programmatically by giving an explicit folder path. The '~'\n symbol is expanded to the user home folder.\n\n If the folder does not already exist, it is automatically created.\n\n Parameters\n ----------\n data_home : str, optional\n The path to skfolio data directory. If `None`, the default path\n is `~/skfolio_data`.\n\n Returns\n -------\n data_home: str or path-like, optional\n The path to skfolio data directory.\n \"\"\"\n if data_home is None:\n data_home = os.environ.get(\"SKFOLIO_DATA\", os.path.join(\"~\", \"skfolio_data\"))\n data_home = os.path.expanduser(data_home)\n os.makedirs(data_home, exist_ok=True)\n return data_home" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 116, "func_end_lineno": 165, "func_code": "def download_dataset(\n data_filename: str,\n data_home: str | Path | None = None,\n download_if_missing: bool = True,\n) -> pd.DataFrame:\n \"\"\"Download and save locally a dataset from the remote GitHub dataset folder.\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from a remote\n GitHub dataset folder.\n\n data_home : str or path-like, optional\n Specify another download and cache folder for the datasets. 
By default,\n all skfolio data is stored in `~/skfolio_data` sub-folders.\n\n download_if_missing : bool, default=True\n If False, raise an OSError if the data is not locally available\n instead of trying to download the data from the source site.\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n # Use a CORS proxy when triggering requests from the browser\n url_prefix = \"https://corsproxy.io/?\" if sys.platform == \"emscripten\" else \"\"\n url = url_prefix + (\n f\"https://github.com/skfolio/skfolio-datasets/raw/main/\"\n f\"datasets/{data_filename}.csv.gz\"\n )\n\n data_home = get_data_home(data_home=data_home)\n filepath = os.path.join(data_home, f\"{data_filename}.pkz\")\n\n if os.path.exists(filepath):\n return joblib.load(filepath)\n\n if not download_if_missing:\n raise OSError(\"Data not found and `download_if_missing` is False\")\n\n archive_path = os.path.join(data_home, os.path.basename(url))\n ur.urlretrieve(url, archive_path)\n df = load_gzip_compressed_csv_data(archive_path)\n joblib.dump(df, filepath, compress=6)\n os.remove(archive_path)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 400, "func_end_lineno": 448, "func_code": "def load_sp500_implied_vol_dataset(\n data_home=None, download_if_missing=True\n) -> pd.DataFrame:\n \"\"\"Load the 3 months ATM implied volatility of the 20 assets from the\n SP500 dataset.\n\n This dataset is composed of the 3 months ATM implied volatility of 20 assets\n from the S&P 500 composition starting from 2010-01-04 up to 2022-12-28.\n\n The data comes from the Yahoo public API option chains.\n\n ============== ==================\n Observations 3270\n Assets 20\n ============== ==================\n\n Parameters\n ----------\n data_home : str, optional\n Specify another download and cache folder for the datasets.\n By default, all skfolio data is stored in `~/skfolio_data` subfolders.\n\n download_if_missing : bool, default=True\n If False, raise an OSError if the data is not locally available\n instead of trying to download the data from the source site.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Implied volatility DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_implied_vol_dataset\n >>> implied_vol = load_sp500_implied_vol_dataset()\n >>> implied_vol.head()\n AAPL AMD BAC ... UNH WMT XOM\n Date ...\n 2010-01-04 0.364353 0.572056 0.382926 ... 0.362751 0.171737 0.201485\n 2010-01-05 0.371865 0.568791 0.374699 ... 0.368504 0.174764 0.203852\n 2010-01-06 0.356746 0.558054 0.349220 ... 0.368514 0.171892 0.197475\n 2010-01-07 0.361084 0.560475 0.354942 ... 0.355792 0.169083 0.200046\n 2010-01-08 0.348085 0.543932 0.360345 ... 0.351130 0.170897 0.204832\n \"\"\"\n data_filename = \"sp500_implied_vol_dataset\"\n df = download_dataset(\n data_filename, data_home=data_home, download_if_missing=download_if_missing\n )\n return df" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.datasets._base.get_data_home", "skfolio.datasets._base.download_dataset", "skfolio.datasets._base.load_sp500_implied_vol_dataset" ]
Python
5
5
{ "total_num": 25, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.optimization.cluster._nco.NestedClustersOptimization::get_metadata_routing", "skfolio.src.skfolio.optimization.cluster._nco.NestedClustersOptimization::fit" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/optimization/cluster/_nco.py", "skfolio/optimization/cluster/_nco.py" ]
[ "tests/test_optimization/test_cluster/test_nco.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 33, "class_end_lineno": 392, "func_start_lineno": 197, "func_end_lineno": 214, "func_code": " def get_metadata_routing(self):\n # noinspection PyTypeChecker\n router = (\n skm.MetadataRouter(owner=self.__class__.__name__)\n .add(\n distance_estimator=self.distance_estimator,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n .add(\n clustering_estimator=self.clustering_estimator,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n .add(\n inner_estimator=self.inner_estimator,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n )\n return router" }, { "class_start_lineno": 33, "class_end_lineno": 392, "func_start_lineno": 216, "func_end_lineno": 392, "func_code": " def fit(\n self, X: npt.ArrayLike, y: npt.ArrayLike | None = None, **fit_params\n ) -> \"NestedClustersOptimization\":\n \"\"\"Fit the Nested Clusters Optimization estimator.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, n_assets)\n Price returns of the assets.\n\n y : array-like of shape (n_observations, n_targets), optional\n Price returns of factors or a target benchmark.\n The default is `None`.\n\n **fit_params : dict\n Parameters to pass to the underlying estimators.\n Only available if `enable_metadata_routing=True`, which can be\n set by using ``sklearn.set_config(enable_metadata_routing=True)``.\n See :ref:`Metadata Routing User Guide <metadata_routing>` for\n more details.\n\n Returns\n -------\n self : NestedClustersOptimization\n Fitted estimator.\n \"\"\"\n routed_params = skm.process_routing(self, \"fit\", **fit_params)\n\n self.distance_estimator_ = check_estimator(\n self.distance_estimator,\n default=PearsonDistance(),\n check_type=BaseDistance,\n )\n self.clustering_estimator_ = check_estimator(\n self.clustering_estimator,\n default=HierarchicalClustering(),\n check_type=skb.BaseEstimator,\n )\n self.outer_estimator_ = check_estimator(\n self.outer_estimator,\n default=MeanRisk(),\n check_type=BaseOptimization,\n )\n _inner_estimator = check_estimator(\n self.inner_estimator,\n default=MeanRisk(),\n check_type=BaseOptimization,\n )\n\n # noinspection PyArgumentList\n self.distance_estimator_.fit(X, y, **routed_params.distance_estimator.fit)\n distance = self.distance_estimator_.distance_\n n_assets = distance.shape[0]\n\n # To keep the asset_names --> used for visualisation\n if isinstance(X, pd.DataFrame):\n distance = pd.DataFrame(distance, columns=X.columns)\n\n # noinspection PyUnresolvedReferences\n self.clustering_estimator_.fit(\n X=distance, y=None, **routed_params.clustering_estimator.fit\n )\n # noinspection PyUnresolvedReferences\n labels = self.clustering_estimator_.labels_\n n_clusters = max(labels) + 1\n clusters = [np.argwhere(labels == i).flatten() for i in range(n_clusters)]\n\n # Intra cluster weights\n # Fit the inner estimator on the whole training data. 
Those\n # base estimators will be used to retrieve the inner weights.\n # They are exposed publicly.\n # noinspection PyCallingNonCallable\n fitted_inner_estimators = skp.Parallel(n_jobs=self.n_jobs)(\n skp.delayed(fit_single_estimator)(\n sk.clone(_inner_estimator),\n X,\n y,\n routed_params.inner_estimator.fit,\n indices=cluster_ids,\n axis=1,\n )\n for cluster_ids in clusters\n if len(cluster_ids) != 1\n )\n fitted_inner_estimators = iter(fitted_inner_estimators)\n\n self.inner_estimators_ = []\n inner_weights = []\n for cluster_ids in clusters:\n w = np.zeros(n_assets)\n # For single assets, we don't run the inner optimization estimator.\n if len(cluster_ids) == 1:\n w[cluster_ids] = 1\n else:\n fitted_inner_estimator = next(fitted_inner_estimators)\n self.inner_estimators_.append(fitted_inner_estimator)\n w[cluster_ids] = fitted_inner_estimator.weights_\n inner_weights.append(w)\n inner_weights = np.array(inner_weights)\n assert not any(fitted_inner_estimators), (\n \"fitted_inner_estimator iterator must be empty\"\n )\n\n # Outer cluster weights\n # To train the outer-estimator using the most data as possible, we use\n # a cross-validation to obtain the output of the cluster estimators.\n # To ensure that the data provided to each estimator are the same,\n # we need to set the random state of the cv if there is one and we\n # need to take a copy.\n if self.cv == \"ignore\":\n cv_predictions = None\n test_indices = slice(None)\n else:\n cv = sks.check_cv(self.cv)\n if hasattr(cv, \"random_state\") and cv.random_state is None:\n cv.random_state = np.random.RandomState()\n # noinspection PyCallingNonCallable\n cv_predictions = skp.Parallel(n_jobs=self.n_jobs)(\n skp.delayed(cross_val_predict)(\n sk.clone(_inner_estimator),\n X,\n y,\n cv=deepcopy(cv),\n n_jobs=self.n_jobs,\n verbose=self.verbose,\n column_indices=cluster_ids,\n method=\"predict\",\n params=routed_params.inner_estimator.fit,\n )\n for cluster_ids in clusters\n if len(cluster_ids) != 1\n )\n cv_predictions = iter(cv_predictions)\n if isinstance(self.cv, BaseCombinatorialCV):\n test_indices = slice(None)\n else:\n test_indices = np.sort(\n np.concatenate([test for _, test in cv.split(X, y)])\n )\n\n # We validate and convert to numpy array only after inner-estimator fitting to\n # keep the assets names in case they are used in the estimator.\n if y is not None:\n X, y = skv.validate_data(self, X, y)\n y_pred = y[test_indices]\n else:\n X = skv.validate_data(self, X)\n y_pred = None\n\n X_pred = []\n fitted_inner_estimators = iter(self.inner_estimators_)\n for cluster_ids in clusters:\n if len(cluster_ids) == 1:\n pred = X[test_indices, cluster_ids[0]]\n else:\n if cv_predictions is None:\n fitted_inner_estimator = next(fitted_inner_estimators)\n pred = fitted_inner_estimator.predict(X[test_indices, cluster_ids])\n else:\n pred = next(cv_predictions)\n if isinstance(self.cv, BaseCombinatorialCV):\n pred = pred.quantile(\n measure=self.quantile_measure, q=self.quantile\n )\n X_pred.append(np.asarray(pred))\n X_pred = np.array(X_pred).T\n if cv_predictions is None:\n assert not any(fitted_inner_estimators), (\n \"fitted_inner_estimator iterator must be empty\"\n )\n else:\n assert not any(cv_predictions), \"cv_predictions iterator must be empty\"\n\n fit_single_estimator(self.outer_estimator_, X_pred, y_pred, fit_params={})\n outer_weights = self.outer_estimator_.weights_\n self.weights_ = outer_weights @ inner_weights\n return self" } ]
[ "function_empty", "TDD" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.optimization.cluster._nco.NestedClustersOptimization.get_metadata_routing", "skfolio.optimization.cluster._nco.NestedClustersOptimization.fit" ]
Python
3
4
{ "total_num": 15, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.prior._empirical.EmpiricalPrior::get_metadata_routing", "skfolio.src.skfolio.prior._empirical.EmpiricalPrior::fit" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/prior/_empirical.py", "skfolio/prior/_empirical.py" ]
[ "tests/test_optimization/test_cluster/test_hierarchical/test_herc.py", "tests/test_optimization/test_cluster/test_hierarchical/test_hrp.py", "tests/test_optimization/test_convex/test_maximum_diversification.py", "tests/test_optimization/test_convex/test_risk_budgeting.py", "tests/test_prior/test_empirical.py", "tests/test_uncertainty_set/test_bootstrap.py", "tests/test_uncertainty_set/test_empirical.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 17, "class_end_lineno": 202, "func_start_lineno": 93, "func_end_lineno": 106, "func_code": " def get_metadata_routing(self):\n # noinspection PyTypeChecker\n router = (\n skm.MetadataRouter(owner=self.__class__.__name__)\n .add(\n mu_estimator=self.mu_estimator,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n .add(\n covariance_estimator=self.covariance_estimator,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n )\n return router" }, { "class_start_lineno": 17, "class_end_lineno": 202, "func_start_lineno": 108, "func_end_lineno": 202, "func_code": " def fit(self, X: npt.ArrayLike, y=None, **fit_params) -> \"EmpiricalPrior\":\n \"\"\"Fit the Empirical Prior estimator.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, n_assets)\n Price returns of the assets.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n **fit_params : dict\n Parameters to pass to the underlying estimators.\n Only available if `enable_metadata_routing=True`, which can be\n set by using ``sklearn.set_config(enable_metadata_routing=True)``.\n See :ref:`Metadata Routing User Guide <metadata_routing>` for\n more details.\n\n Returns\n -------\n self : EmpiricalPrior\n Fitted estimator.\n \"\"\"\n routed_params = skm.process_routing(self, \"fit\", **fit_params)\n\n self.mu_estimator_ = check_estimator(\n self.mu_estimator,\n default=EmpiricalMu(),\n check_type=BaseMu,\n )\n self.covariance_estimator_ = check_estimator(\n self.covariance_estimator,\n default=EmpiricalCovariance(),\n check_type=BaseCovariance,\n )\n # fitting estimators\n if not self.is_log_normal:\n if self.investment_horizon is not None:\n raise ValueError(\n \"`investment_horizon` must be `None` when \"\n \"`is_log_normal` is `False`\"\n )\n # Expected returns\n # noinspection PyArgumentList\n self.mu_estimator_.fit(X, y, **routed_params.mu_estimator.fit)\n mu = self.mu_estimator_.mu_\n\n # Covariance\n # noinspection PyArgumentList\n self.covariance_estimator_.fit(\n X, y, **routed_params.covariance_estimator.fit\n )\n covariance = self.covariance_estimator_.covariance_\n else:\n if self.investment_horizon is None:\n raise ValueError(\n \"`investment_horizon` must be provided when \"\n \"`is_log_normal` is `True`\"\n )\n # Convert linear returns to log returns\n X_log = np.log(1 + X)\n y_log = np.log(1 + y) if y is not None else None\n\n # Estimates the moments on the log returns\n # Expected returns\n # noinspection PyArgumentList\n self.mu_estimator_.fit(X_log, y_log, **routed_params.mu_estimator.fit)\n mu = self.mu_estimator_.mu_\n\n # Covariance\n # noinspection PyArgumentList\n self.covariance_estimator_.fit(\n X_log, y_log, **routed_params.covariance_estimator.fit\n )\n covariance = self.covariance_estimator_.covariance_\n\n # Using the property of aggregation across time we scale this distribution\n # to the investment horizon by the “square-root rule”.\n mu *= self.investment_horizon\n covariance *= self.investment_horizon\n\n # We convert it into a distribution of linear returns over the investment\n # horizon\n mu = np.exp(mu + 0.5 * np.diag(covariance))\n covariance = np.outer(mu, mu) * (np.exp(covariance) - 1)\n\n # we validate and convert to numpy after all models have been fitted to keep\n # features names information.\n X = skv.validate_data(self, X)\n self.prior_model_ 
= PriorModel(\n mu=mu,\n covariance=covariance,\n returns=X,\n )\n return self" } ]
[ "function_empty", "TDD" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.prior._empirical.EmpiricalPrior.get_metadata_routing", "skfolio.prior._empirical.EmpiricalPrior.fit" ]
Python
3
4
{ "total_num": 398, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.optimization.ensemble._stacking.StackingOptimization::get_metadata_routing", "skfolio.src.skfolio.optimization.ensemble._stacking.StackingOptimization::fit" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/optimization/ensemble/_stacking.py", "skfolio/optimization/ensemble/_stacking.py" ]
[ "tests/test_optimization/test_ensemble/test_stacking.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 30, "class_end_lineno": 355, "func_start_lineno": 233, "func_end_lineno": 241, "func_code": " def get_metadata_routing(self):\n # noinspection PyTypeChecker\n router = skm.MetadataRouter(owner=self.__class__.__name__)\n for name, estimator in self.estimators:\n router.add(\n **{name: estimator},\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n return router" }, { "class_start_lineno": 30, "class_end_lineno": 355, "func_start_lineno": 243, "func_end_lineno": 355, "func_code": " def fit(\n self, X: npt.ArrayLike, y: npt.ArrayLike | None = None, **fit_params\n ) -> \"StackingOptimization\":\n \"\"\"Fit the Stacking Optimization estimator.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, n_assets)\n Price returns of the assets.\n\n y : array-like of shape (n_observations, n_targets), optional\n Price returns of factors or a target benchmark.\n The default is `None`.\n\n **fit_params : dict\n Parameters to pass to the underlying estimators.\n Only available if `enable_metadata_routing=True`, which can be\n set by using ``sklearn.set_config(enable_metadata_routing=True)``.\n See :ref:`Metadata Routing User Guide <metadata_routing>` for\n more details.\n\n Returns\n -------\n self : StackingOptimization\n Fitted estimator.\n \"\"\"\n routed_params = skm.process_routing(self, \"fit\", **fit_params)\n\n names, all_estimators = self._validate_estimators()\n self.final_estimator_ = check_estimator(\n self.final_estimator,\n default=MeanRisk(),\n check_type=BaseOptimization,\n )\n\n if self.cv == \"prefit\":\n self.estimators_ = []\n for estimator in all_estimators:\n skv.check_is_fitted(estimator)\n self.estimators_.append(estimator)\n else:\n # Fit the base estimators on the whole training data. 
Those\n # base estimators will be used to retrieve the inner weights.\n # They are exposed publicly.\n # noinspection PyCallingNonCallable\n self.estimators_ = skp.Parallel(n_jobs=self.n_jobs)(\n skp.delayed(fit_single_estimator)(\n sk.clone(est), X, y, routed_params[name][\"fit\"]\n )\n for name, est in zip(names, all_estimators, strict=True)\n )\n\n self.named_estimators_ = {\n name: estimator\n for name, estimator in zip(names, self.estimators_, strict=True)\n }\n\n inner_weights = np.array([estimator.weights_ for estimator in self.estimators_])\n\n # To train the final-estimator using the most data as possible, we use\n # a cross-validation to obtain the output of the stacked estimators.\n # To ensure that the data provided to each estimator are the same,\n # we need to set the random state of the cv if there is one and we\n # need to take a copy.\n if self.cv in [\"prefit\", \"ignore\"]:\n X_pred = np.array(\n [estimator.predict(X) for estimator in self.estimators_]\n ).T\n else:\n cv = sks.check_cv(self.cv)\n if hasattr(cv, \"random_state\") and cv.random_state is None:\n cv.random_state = np.random.RandomState()\n # noinspection PyCallingNonCallable\n cv_predictions = skp.Parallel(n_jobs=self.n_jobs)(\n skp.delayed(cross_val_predict)(\n sk.clone(est),\n X,\n y,\n cv=deepcopy(cv),\n method=\"predict\",\n n_jobs=self.n_jobs,\n params=routed_params[name][\"fit\"],\n verbose=self.verbose,\n )\n for name, est in zip(names, all_estimators, strict=True)\n )\n\n # We validate and convert to numpy array only after base-estimator fitting\n # to keep the assets names in case they are used in the estimator.\n if y is not None:\n _, y = skv.validate_data(self, X, y, multi_output=True)\n else:\n _ = skv.validate_data(self, X)\n\n if isinstance(self.cv, BaseCombinatorialCV):\n X_pred = np.array(\n [\n pred.quantile(measure=self.quantile_measure, q=self.quantile)\n for pred in cv_predictions\n ]\n ).T\n else:\n X_pred = np.array(cv_predictions).T\n if y is not None:\n test_indices = np.sort(\n np.concatenate([test for _, test in cv.split(X, y)])\n )\n y = y[test_indices]\n\n fit_single_estimator(self.final_estimator_, X_pred, y, {})\n outer_weights = self.final_estimator_.weights_\n self.weights_ = outer_weights @ inner_weights\n return self" } ]
[ "function_empty", "TDD" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.optimization.ensemble._stacking.StackingOptimization.get_metadata_routing", "skfolio.optimization.ensemble._stacking.StackingOptimization.fit" ]
Python
3
4
{ "total_num": 5, "base_passed_num": 1 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.datasets._base.load_factors_dataset", "skfolio.src.skfolio.prior._empirical.EmpiricalPrior::get_metadata_routing", "skfolio.src.skfolio.prior._empirical.EmpiricalPrior::fit" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/prior/_empirical.py", "skfolio/prior/_empirical.py" ]
[ "tests/test_optimization/test_naive/test_naive.py", "tests/test_prior/test_factor_model.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 247, "func_end_lineno": 292, "func_code": "def load_factors_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 5 factor ETFs.\n\n This dataset is composed of the daily prices of 5 ETF representing common factors\n starting from 2014-01-02 up to 2022-12-28.\n\n The factors are:\n\n * \"MTUM\": Momentum\n * \"QUAL\": Quality\n * \"SIZE\": Size\n * \"VLUE\": Value\n * \"USMV\": low volatility\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 2264\n Assets 5\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_factors_dataset\n >>> prices = load_factors_dataset()\n >>> prices.head()\n MTUM QUAL SIZE USMV VLUE\n Date\n 2014-01-02 52.704 48.351 48.986 29.338 47.054\n 2014-01-03 52.792 48.256 48.722 29.330 46.999\n 2014-01-06 52.677 48.067 48.722 29.263 46.991\n 2014-01-07 53.112 48.455 48.731 29.430 47.253\n 2014-01-08 53.502 48.437 48.731 29.422 47.253\n \"\"\"\n data_filename = \"factors_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 17, "class_end_lineno": 202, "func_start_lineno": 93, "func_end_lineno": 106, "func_code": " def get_metadata_routing(self):\n # noinspection PyTypeChecker\n router = (\n skm.MetadataRouter(owner=self.__class__.__name__)\n .add(\n mu_estimator=self.mu_estimator,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n .add(\n covariance_estimator=self.covariance_estimator,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n )\n return router" }, { "class_start_lineno": 17, "class_end_lineno": 202, "func_start_lineno": 108, "func_end_lineno": 202, "func_code": " def fit(self, X: npt.ArrayLike, y=None, **fit_params) -> \"EmpiricalPrior\":\n \"\"\"Fit the Empirical Prior estimator.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, n_assets)\n Price returns of the assets.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n **fit_params : dict\n Parameters to pass to the underlying estimators.\n Only available if `enable_metadata_routing=True`, which can be\n set by using ``sklearn.set_config(enable_metadata_routing=True)``.\n See :ref:`Metadata Routing User Guide <metadata_routing>` for\n more details.\n\n Returns\n -------\n self : EmpiricalPrior\n Fitted estimator.\n \"\"\"\n routed_params = skm.process_routing(self, \"fit\", **fit_params)\n\n self.mu_estimator_ = check_estimator(\n self.mu_estimator,\n default=EmpiricalMu(),\n check_type=BaseMu,\n )\n self.covariance_estimator_ = check_estimator(\n self.covariance_estimator,\n default=EmpiricalCovariance(),\n check_type=BaseCovariance,\n )\n # fitting estimators\n if not self.is_log_normal:\n if self.investment_horizon is not None:\n raise ValueError(\n \"`investment_horizon` must be `None` when \"\n \"`is_log_normal` is `False`\"\n )\n # Expected returns\n # noinspection 
PyArgumentList\n self.mu_estimator_.fit(X, y, **routed_params.mu_estimator.fit)\n mu = self.mu_estimator_.mu_\n\n # Covariance\n # noinspection PyArgumentList\n self.covariance_estimator_.fit(\n X, y, **routed_params.covariance_estimator.fit\n )\n covariance = self.covariance_estimator_.covariance_\n else:\n if self.investment_horizon is None:\n raise ValueError(\n \"`investment_horizon` must be provided when \"\n \"`is_log_normal` is `True`\"\n )\n # Convert linear returns to log returns\n X_log = np.log(1 + X)\n y_log = np.log(1 + y) if y is not None else None\n\n # Estimates the moments on the log returns\n # Expected returns\n # noinspection PyArgumentList\n self.mu_estimator_.fit(X_log, y_log, **routed_params.mu_estimator.fit)\n mu = self.mu_estimator_.mu_\n\n # Covariance\n # noinspection PyArgumentList\n self.covariance_estimator_.fit(\n X_log, y_log, **routed_params.covariance_estimator.fit\n )\n covariance = self.covariance_estimator_.covariance_\n\n # Using the property of aggregation across time we scale this distribution\n # to the investment horizon by the “square-root rule”.\n mu *= self.investment_horizon\n covariance *= self.investment_horizon\n\n # We convert it into a distribution of linear returns over the investment\n # horizon\n mu = np.exp(mu + 0.5 * np.diag(covariance))\n covariance = np.outer(mu, mu) * (np.exp(covariance) - 1)\n\n # we validate and convert to numpy after all models have been fitted to keep\n # features names information.\n X = skv.validate_data(self, X)\n self.prior_model_ = PriorModel(\n mu=mu,\n covariance=covariance,\n returns=X,\n )\n return self" } ]
[ "function_empty", "TDD" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.datasets._base.load_factors_dataset", "skfolio.prior._empirical.EmpiricalPrior.get_metadata_routing", "skfolio.prior._empirical.EmpiricalPrior.fit" ]
Python
4
5
{ "total_num": 8, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.measures._measures.variance", "skfolio.src.skfolio.measures._measures.standard_deviation" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/measures/_measures.py", "skfolio/measures/_measures.py" ]
[ "tests/test_portfolio/test_multi_period_portfolio.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 633, "func_start_lineno": 82, "func_end_lineno": 95, "func_code": "def variance(returns: np.ndarray) -> float:\n \"\"\"Compute the variance (second moment).\n\n Parameters\n ----------\n returns : ndarray of shape (n_observations,)\n Vector of returns.\n\n Returns\n -------\n value : float\n Variance.\n \"\"\"\n return returns.var(ddof=1)" }, { "class_start_lineno": 1, "class_end_lineno": 633, "func_start_lineno": 127, "func_end_lineno": 140, "func_code": "def standard_deviation(returns: np.ndarray) -> float:\n \"\"\"Compute the standard-deviation (square root of the second moment).\n\n Parameters\n ----------\n returns : ndarray of shape (n_observations,)\n Vector of returns.\n\n Returns\n -------\n value : float\n Standard-deviation.\n \"\"\"\n return np.sqrt(variance(returns=returns))" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.measures._measures.variance", "skfolio.measures._measures.standard_deviation" ]
Python
4
4
{ "total_num": 106, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.utils.tools.input_to_array", "skfolio.src.skfolio.portfolio._portfolio.Portfolio::__init__" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/utils/tools.py", "skfolio/portfolio/_portfolio.py" ]
[ "tests/test_pre_selection/test_select_k_extremes.py", "tests/test_pre_selection/test_select_non_dominated.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 357, "func_end_lineno": 442, "func_code": "def input_to_array(\n items: dict | npt.ArrayLike,\n n_assets: int,\n fill_value: Any,\n dim: int,\n assets_names: np.ndarray | None,\n name: str,\n) -> np.ndarray:\n \"\"\"Convert a collection of items (array-like or dictionary) into\n a numpy array and verify its shape.\n\n Parameters\n ----------\n items : np.ndarray | dict | list\n Items to verify and convert to array.\n\n n_assets : int\n Expected number of assets.\n Used to verify the shape of the converted array.\n\n fill_value : Any\n When `items` is a dictionary, elements that are not in `asset_names` are filled\n with `fill_value` in the converted array.\n\n dim : int\n Dimension of the final array.\n Possible values are `1` or `2`.\n\n assets_names : ndarray, optional\n Asset names used when `items` is a dictionary.\n\n name : str\n Name of the items used for error messages.\n\n Returns\n -------\n values : ndarray of shape (n_assets) for dim=1 or (n_groups, n_assets) for dim=2\n Converted array.\n \"\"\"\n if dim not in [1, 2]:\n raise ValueError(f\"dim must be 1 or 2, got {dim}\")\n if isinstance(items, dict):\n if assets_names is None:\n raise ValueError(\n f\"If `{name}` is provided as a dictionary, you must input `X` as a\"\n \" DataFrame with assets names in columns\"\n )\n if dim == 1:\n arr = np.array([items.get(asset, fill_value) for asset in assets_names])\n else:\n # add assets and convert dict to ordered array\n arr = {}\n for asset in assets_names:\n elem = items.get(asset)\n if elem is None:\n elem = [asset]\n elif np.isscalar(elem):\n elem = [asset, elem]\n else:\n elem = [asset, *elem]\n arr[asset] = elem\n arr = (\n pd.DataFrame.from_dict(arr, orient=\"index\")\n .loc[assets_names]\n .to_numpy()\n .T\n )\n else:\n arr = np.asarray(items)\n\n if arr.ndim != dim:\n raise ValueError(f\"`{name}` must be a {dim}D array, got a {arr.ndim}D array\")\n\n if not isinstance(fill_value, str) and np.isnan(arr).any():\n raise ValueError(f\"`{name}` contains NaN\")\n\n if arr.shape[-1] != n_assets:\n if dim == 1:\n s = \"(n_assets,)\"\n else:\n s = \"(n_groups, n_assets)\"\n raise ValueError(\n f\"`{name}` must be a of shape {s} with n_assets={n_assets}, \"\n f\"got {arr.shape[0]}\"\n )\n return arr" }, { "class_start_lineno": 29, "class_end_lineno": 861, "func_start_lineno": 432, "func_end_lineno": 568, "func_code": " def __init__(\n self,\n X: npt.ArrayLike,\n weights: skt.MultiInput,\n previous_weights: skt.MultiInput = None,\n transaction_costs: skt.MultiInput = None,\n management_fees: skt.MultiInput = None,\n risk_free_rate: float = 0,\n name: str | None = None,\n tag: str | None = None,\n annualized_factor: float = 252,\n fitness_measures: list[skt.Measure] | None = None,\n compounded: bool = False,\n min_acceptable_return: float | None = None,\n value_at_risk_beta: float = 0.95,\n entropic_risk_measure_theta: float = 1,\n entropic_risk_measure_beta: float = 0.95,\n cvar_beta: float = 0.95,\n evar_beta: float = 0.95,\n drawdown_at_risk_beta: float = 0.95,\n cdar_beta: float = 0.95,\n edar_beta: float = 0.95,\n ):\n # extract assets names from X\n assets = None\n observations = None\n if hasattr(X, \"columns\"):\n assets = np.asarray(X.columns, dtype=object)\n observations = np.asarray(X.index)\n\n # We don't perform extensive checks (like in check_X) for faster 
instantiation.\n rets = np.asarray(X)\n if rets.ndim != 2:\n raise ValueError(\"`X` must be a 2D array-like\")\n\n n_observations, n_assets = rets.shape\n\n weights = input_to_array(\n items=weights,\n n_assets=n_assets,\n fill_value=0,\n dim=1,\n assets_names=assets,\n name=\"weights\",\n )\n\n if previous_weights is None:\n previous_weights = np.zeros(n_assets)\n else:\n previous_weights = input_to_array(\n items=previous_weights,\n n_assets=n_assets,\n fill_value=0,\n dim=1,\n assets_names=assets,\n name=\"previous_weights\",\n )\n\n if transaction_costs is None:\n transaction_costs = 0\n elif not np.isscalar(transaction_costs):\n transaction_costs = input_to_array(\n items=transaction_costs,\n n_assets=n_assets,\n fill_value=0,\n dim=1,\n assets_names=assets,\n name=\"transaction_costs\",\n )\n\n if management_fees is None:\n management_fees = 0\n elif not np.isscalar(management_fees):\n management_fees = input_to_array(\n items=management_fees,\n n_assets=n_assets,\n fill_value=0,\n dim=1,\n assets_names=assets,\n name=\"management_fees\",\n )\n\n # Default observations and assets if X is not a DataFrame\n if observations is None or len(observations) == 0:\n observations = np.arange(n_observations)\n\n if assets is None or len(assets) == 0:\n assets = default_asset_names(n_assets=n_assets)\n\n # Computing portfolio returns\n if np.isscalar(transaction_costs) and transaction_costs == 0:\n total_cost = 0\n else:\n total_cost = (transaction_costs * abs(previous_weights - weights)).sum()\n\n if np.isscalar(management_fees) and management_fees == 0:\n total_fee = 0\n else:\n total_fee = (management_fees * weights).sum()\n\n returns = weights @ rets.T - total_cost - total_fee\n\n if np.any(np.isnan(returns)):\n raise ValueError(\"NaN found in `returns`\")\n\n super().__init__(\n returns=returns,\n observations=observations,\n name=name,\n tag=tag,\n fitness_measures=fitness_measures,\n compounded=compounded,\n risk_free_rate=risk_free_rate,\n annualized_factor=annualized_factor,\n min_acceptable_return=min_acceptable_return,\n value_at_risk_beta=value_at_risk_beta,\n cvar_beta=cvar_beta,\n entropic_risk_measure_theta=entropic_risk_measure_theta,\n entropic_risk_measure_beta=entropic_risk_measure_beta,\n evar_beta=evar_beta,\n drawdown_at_risk_beta=drawdown_at_risk_beta,\n cdar_beta=cdar_beta,\n edar_beta=edar_beta,\n )\n self._loaded = False\n # We save the original array-like object and not the numpy copy for improved\n # memory\n self.X = X\n self.assets = assets\n self.n_assets = n_assets\n self.weights = weights\n self.transaction_costs = transaction_costs\n self.management_fees = management_fees\n self.previous_weights = previous_weights\n self.total_cost = total_cost\n self.total_fee = total_fee\n self._loaded = True" } ]
[ "function_empty", "TDD" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.utils.tools.input_to_array", "skfolio.portfolio._portfolio.Portfolio.__init__" ]
Python
2
4
{ "total_num": 2, "base_passed_num": 0 }
[ "skfolio.src.skfolio.utils.tools.safe_indexing", "skfolio.src.skfolio.utils.tools.safe_split", "skfolio.src.skfolio.model_selection._validation.cross_val_predict", "skfolio.src.skfolio.utils.tools._check_method_params", "skfolio.src.skfolio.utils.tools.fit_and_predict" ]
skfolio
[ "skfolio/utils/tools.py", "skfolio/utils/tools.py", "skfolio/model_selection/_validation.py", "skfolio/utils/tools.py", "skfolio/utils/tools.py" ]
[ "tests/test_pre_selection/test_select_non_expiring.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 190, "func_end_lineno": 219, "func_code": "def safe_indexing(\n X: npt.ArrayLike | pd.DataFrame, indices: npt.ArrayLike | None, axis: int = 0\n):\n \"\"\"Return rows, items or columns of X using indices.\n\n Parameters\n ----------\n X : array-like\n Data from which to sample rows.\n\n indices : array-like, optional\n Indices of rows or columns.\n The default (`None`) is to select the entire data.\n\n axis : int, default=0\n The axis along which `X` will be sub-sampled. `axis=0` will select\n rows while `axis=1` will select columns.\n\n Returns\n -------\n subset :\n Subset of X on axis 0.\n \"\"\"\n if indices is None:\n return X\n if hasattr(X, \"iloc\"):\n return X.take(indices, axis=axis)\n if axis == 0:\n return X[indices]\n return X[:, indices]" }, { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 222, "func_end_lineno": 261, "func_code": "def safe_split(\n X: npt.ArrayLike,\n y: npt.ArrayLike | None = None,\n indices: np.ndarray | None = None,\n axis: int = 0,\n):\n \"\"\"Create subset of dataset.\n\n Slice X, y according to indices for cross-validation.\n\n Parameters\n ----------\n X : array-like\n Data to be indexed.\n\n y : array-like\n Data to be indexed.\n\n indices : ndarray of int, optional\n Rows or columns to select from X and y.\n The default (`None`) is to select the entire data.\n\n axis : int, default=0\n The axis along which `X` will be sub-sampled. `axis=0` will select\n rows while `axis=1` will select columns.\n\n Returns\n -------\n X_subset : array-like\n Indexed data.\n\n y_subset : array-like\n Indexed targets.\n \"\"\"\n X_subset = safe_indexing(X, indices=indices, axis=axis)\n if y is not None:\n y_subset = safe_indexing(y, indices=indices, axis=axis)\n else:\n y_subset = None\n return X_subset, y_subset" }, { "class_start_lineno": 1, "class_end_lineno": 254, "func_start_lineno": 38, "func_end_lineno": 254, "func_code": "def cross_val_predict(\n estimator: skb.BaseEstimator,\n X: npt.ArrayLike,\n y: npt.ArrayLike = None,\n cv: sks.BaseCrossValidator | BaseCombinatorialCV | int | None = None,\n n_jobs: int | None = None,\n method: str = \"predict\",\n verbose: int = 0,\n params: dict | None = None,\n pre_dispatch: str = \"2*n_jobs\",\n column_indices: np.ndarray | None = None,\n portfolio_params: dict | None = None,\n) -> MultiPeriodPortfolio | Population:\n \"\"\"Generate cross-validated `Portfolios` estimates.\n\n The data is split according to the `cv` parameter.\n The optimization estimator is fitted on the training set and portfolios are\n predicted on the corresponding test set.\n\n For non-combinatorial cross-validation like `Kfold`, the output is the predicted\n :class:`~skfolio.portfolio.MultiPeriodPortfolio` where\n each :class:`~skfolio.portfolio.Portfolio` corresponds to the prediction on each\n train/test pair (`k` portfolios for `Kfold`).\n\n For combinatorial cross-validation\n like :class:`~skfolio.model_selection.CombinatorialPurgedCV`, the output is the\n predicted :class:`~skfolio.population.Population` of multiple\n :class:`~skfolio.portfolio.MultiPeriodPortfolio` (each test outputs are a\n collection of multiple paths instead of one single path).\n\n Parameters\n ----------\n estimator : BaseOptimization\n :ref:`Optimization estimators <optimization>` use to fit the data.\n\n X : array-like of shape (n_observations, n_assets)\n Price returns of the assets.\n\n y : array-like of shape (n_observations, n_targets), optional\n Target data 
(optional).\n For example, the price returns of the factors.\n\n cv : int | cross-validation generator, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n * None, to use the default 5-fold cross validation,\n * int, to specify the number of folds in a `(Stratified)KFold`,\n * `CV splitter`,\n * An iterable that generates (train, test) splits as arrays of indices.\n\n n_jobs : int, optional\n The number of jobs to run in parallel for `fit` of all `estimators`.\n `None` means 1 unless in a `joblib.parallel_backend` context. -1 means\n using all processors.\n\n method : str\n Invokes the passed method name of the passed estimator.\n\n verbose : int, default=0\n The verbosity level.\n\n params : dict, optional\n Parameters to pass to the underlying estimator's ``fit`` and the CV splitter.\n\n pre_dispatch : int or str, default='2*n_jobs'\n Controls the number of jobs that get dispatched during parallel\n execution. Reducing this number can be useful to avoid an\n explosion of memory consumption when more jobs get dispatched\n than CPUs can process. This parameter can be:\n\n * None, in which case all the jobs are immediately\n created and spawned. Use this for lightweight and\n fast-running jobs, to avoid delays due to on-demand\n spawning of the jobs\n\n * An int, giving the exact number of total jobs that are\n spawned\n\n * A str, giving an expression as a function of n_jobs,\n as in '2*n_jobs'\n\n column_indices : ndarray, optional\n Indices of the `X` columns to cross-validate on.\n\n portfolio_params : dict, optional\n Additional portfolio parameters passed to `MultiPeriodPortfolio`.\n\n Returns\n -------\n predictions : MultiPeriodPortfolio | Population\n This is the result of calling `predict`\n \"\"\"\n params = {} if params is None else params\n\n X, y = safe_split(X, y, indices=column_indices, axis=1)\n X, y = sku.indexable(X, y)\n\n if _routing_enabled():\n # For estimators, a MetadataRouter is created in get_metadata_routing\n # methods. For these router methods, we create the router to use\n # `process_routing` on it.\n # noinspection PyTypeChecker\n router = (\n skm.MetadataRouter(owner=\"cross_validate\")\n .add(\n splitter=cv,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"split\"),\n )\n .add(\n estimator=estimator,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n )\n try:\n routed_params = skm.process_routing(router, \"fit\", **params)\n except ske.UnsetMetadataPassedError as e:\n # The default exception would mention `fit` since in the above\n # `process_routing` code, we pass `fit` as the caller. However,\n # the user is not calling `fit` directly, so we change the message\n # to make it more suitable for this case.\n unrequested_params = sorted(e.unrequested_params)\n raise ske.UnsetMetadataPassedError(\n message=(\n f\"{unrequested_params} are passed to `cross_val_predict` but are\"\n \" not explicitly set as requested or not requested for\"\n f\" cross_validate's estimator: {estimator.__class__.__name__} Call\"\n \" `.set_fit_request({{metadata}}=True)` on the estimator for\"\n f\" each metadata in {unrequested_params} that you want to use and\"\n \" `metadata=False` for not using it. 
See the Metadata Routing User\"\n \" guide <https://scikit-learn.org/stable/metadata_routing.html>\"\n \" for more information.\"\n ),\n unrequested_params=e.unrequested_params,\n routed_params=e.routed_params,\n ) from None\n else:\n routed_params = sku.Bunch()\n routed_params.splitter = sku.Bunch(split={})\n routed_params.estimator = sku.Bunch(fit=params)\n\n cv = sks.check_cv(cv, y)\n splits = list(cv.split(X, y, **routed_params.splitter.split))\n\n portfolio_params = {} if portfolio_params is None else portfolio_params.copy()\n\n # We ensure that the folds are not shuffled\n if not isinstance(cv, BaseCombinatorialCV):\n try:\n if cv.shuffle:\n raise ValueError(\n \"`cross_val_predict` only works with cross-validation setting\"\n \" `shuffle=False`\"\n )\n except AttributeError:\n # If we cannot find the attribute shuffle, we check if the first folds\n # are shuffled\n for fold in splits[0]:\n if not np.all(np.diff(fold) > 0):\n raise ValueError(\n \"`cross_val_predict` only works with un-shuffled folds\"\n ) from None\n\n # We clone the estimator to make sure that all the folds are independent\n # and that it is pickle-able.\n parallel = skp.Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)\n # TODO remove when https://github.com/joblib/joblib/issues/1071 is fixed\n # noinspection PyCallingNonCallable\n predictions = parallel(\n skp.delayed(fit_and_predict)(\n sk.clone(estimator),\n X,\n y,\n train=train,\n test=test,\n fit_params=routed_params.estimator.fit,\n method=method,\n )\n for train, test in splits\n )\n\n if isinstance(cv, BaseCombinatorialCV):\n path_ids = cv.get_path_ids()\n path_nb = np.max(path_ids) + 1\n portfolios = [[] for _ in range(path_nb)]\n for i, prediction in enumerate(predictions):\n for j, p in enumerate(prediction):\n path_id = path_ids[i, j]\n portfolios[path_id].append(p)\n name = portfolio_params.pop(\"name\", \"path\")\n pred = Population(\n [\n MultiPeriodPortfolio(\n name=f\"{name}_{i}\", portfolios=portfolios[i], **portfolio_params\n )\n for i in range(path_nb)\n ]\n )\n else:\n # We need to re-order the test folds in case they were un-ordered by the\n # CV generator.\n # Because the tests folds are not shuffled, we use the first index of each\n # fold to order them.\n test_indices = np.concatenate([test for _, test in splits])\n if np.unique(test_indices, axis=0).shape[0] != test_indices.shape[0]:\n raise ValueError(\n \"`cross_val_predict` only works with non-duplicated test indices\"\n )\n test_indices = [test for _, test in splits]\n sorted_fold_id = np.argsort([x[0] for x in test_indices])\n pred = MultiPeriodPortfolio(\n portfolios=[predictions[fold_id] for fold_id in sorted_fold_id],\n check_observations_order=False,\n **portfolio_params,\n )\n\n return pred" }, { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 147, "func_end_lineno": 187, "func_code": "def _check_method_params(\n X: npt.ArrayLike, params: dict, indices: np.ndarray = None, axis: int = 0\n):\n \"\"\"Check and validate the parameters passed to a specific\n method like `fit`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data array.\n\n params : dict\n Dictionary containing the parameters passed to the method.\n\n indices : ndarray of shape (n_samples,), default=None\n Indices to be selected if the parameter has the same size as `X`.\n\n axis : int, default=0\n The axis along which `X` will be sub-sampled. 
`axis=0` will select\n rows while `axis=1` will select columns.\n\n Returns\n -------\n method_params_validated : dict\n Validated parameters. We ensure that the values support indexing.\n \"\"\"\n # noinspection PyUnresolvedReferences\n n_observations = X.shape[0]\n method_params_validated = {}\n for param_key, param_value in params.items():\n if param_value.shape[0] != n_observations:\n raise ValueError(\n f\"param_key has wrong number of observations, \"\n f\"received={param_value.shape[0]}, \"\n f\"expected={n_observations}\"\n )\n method_params_validated[param_key] = _make_indexable(param_value)\n method_params_validated[param_key] = safe_indexing(\n X=method_params_validated[param_key], indices=indices, axis=axis\n )\n return method_params_validated" }, { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 618, "func_end_lineno": 685, "func_code": "def fit_and_predict(\n estimator: Any,\n X: npt.ArrayLike,\n y: npt.ArrayLike | None,\n train: np.ndarray,\n test: np.ndarray | list[np.ndarray],\n fit_params: dict,\n method: str,\n column_indices: np.ndarray | None = None,\n) -> npt.ArrayLike | list[npt.ArrayLike]:\n \"\"\"Fit the estimator and predict values for a given dataset split.\n\n Parameters\n ----------\n estimator : estimator object implementing 'fit' and 'predict'\n The object to use to fit the data.\n\n X : array-like of shape (n_observations, n_assets)\n The data to fit.\n\n y : array-like of shape (n_observations, n_factors) or None\n The factor array if provided\n\n train : ndarray of int of shape (n_train_observations,)\n Indices of training samples.\n\n test : ndarray of int of shape (n_test_samples,) or list of ndarray\n Indices of test samples or list of indices.\n\n fit_params : dict\n Parameters that will be passed to `estimator.fit`.\n\n method : str\n Invokes the passed method name of the passed estimator.\n\n column_indices : ndarray, optional\n Indices of columns to select.\n The default (`None`) is to select all columns.\n\n Returns\n -------\n predictions : array-like or list of array-like\n If `test` is an array, it returns the array-like result of calling\n 'estimator.method' on `test`.\n Otherwise, if `test` is a list of arrays, it returns the list of array-like\n results of calling 'estimator.method' on each test set in `test`.\n \"\"\"\n fit_params = fit_params if fit_params is not None else {}\n fit_params = _check_method_params(X, params=fit_params, indices=train)\n\n X, y = safe_split(X, y, indices=column_indices, axis=1)\n X_train, y_train = safe_split(X, y, indices=train, axis=0)\n if y_train is None:\n estimator.fit(X_train, **fit_params)\n else:\n estimator.fit(X_train, y_train, **fit_params)\n func = getattr(estimator, method)\n\n if isinstance(test, list):\n predictions = []\n for t in test:\n X_test, _ = safe_split(X, indices=t, axis=0)\n predictions.append(func(X_test))\n else:\n X_test, _ = safe_split(X, indices=test, axis=0)\n predictions = func(X_test)\n\n return predictions" } ]
[ "function_empty", "TDD" ]
[ "skfolio.utils.tools.safe_indexing", "skfolio.utils.tools.safe_split", "skfolio.model_selection._validation.cross_val_predict", "skfolio.utils.tools._check_method_params", "skfolio.utils.tools.fit_and_predict" ]
Python
2
5
{ "total_num": 2, "base_passed_num": 1 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_factors_dataset", "skfolio.src.skfolio.datasets._base.load_sp500_dataset" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/datasets/_base.py" ]
[ "tests/test_preprocessing/test_returns.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 247, "func_end_lineno": 292, "func_code": "def load_factors_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 5 factor ETFs.\n\n This dataset is composed of the daily prices of 5 ETF representing common factors\n starting from 2014-01-02 up to 2022-12-28.\n\n The factors are:\n\n * \"MTUM\": Momentum\n * \"QUAL\": Quality\n * \"SIZE\": Size\n * \"VLUE\": Value\n * \"USMV\": low volatility\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 2264\n Assets 5\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_factors_dataset\n >>> prices = load_factors_dataset()\n >>> prices.head()\n MTUM QUAL SIZE USMV VLUE\n Date\n 2014-01-02 52.704 48.351 48.986 29.338 47.054\n 2014-01-03 52.792 48.256 48.722 29.330 46.999\n 2014-01-06 52.677 48.067 48.722 29.263 46.991\n 2014-01-07 53.112 48.455 48.731 29.430 47.253\n 2014-01-08 53.502 48.437 48.731 29.422 47.253\n \"\"\"\n data_filename = \"factors_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 
2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_factors_dataset", "skfolio.datasets._base.load_sp500_dataset" ]
Python
3
3
{ "total_num": 2, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.utils.equations._validate_groups", "skfolio.src.skfolio.utils.equations.equations_to_matrix" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/utils/equations.py", "skfolio/utils/equations.py" ]
[ "tests/test_prior/test_black_litterman.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 499, "func_start_lineno": 195, "func_end_lineno": 226, "func_code": "def _validate_groups(groups: npt.ArrayLike, name: str = \"groups\") -> np.ndarray:\n \"\"\"Validate groups by checking its dim and if group names don't appear in multiple\n levels and convert to numpy array.\n\n Parameters\n ----------\n groups : array-like of shape (n_groups, n_assets)\n 2D-array of strings.\n\n Returns\n -------\n groups : ndarray of shape (n_groups, n_assets)\n 2D-array of strings.\n \"\"\"\n groups = np.asarray(groups)\n if groups.ndim != 2:\n raise ValueError(\n f\"`{name} must be a 2D array, got {groups.ndim}D array instead.\"\n )\n n = len(groups)\n group_sets = [set(groups[i]) for i in range(n)]\n for i in range(n - 1):\n for e in group_sets[i]:\n for j in range(i + 1, n):\n if e in group_sets[j]:\n raise DuplicateGroupsError(\n f\"'{e}' appear in two levels: {list(groups[i])} \"\n f\"and {list(groups[i])}. \"\n f\"{name} must be in only one level.\"\n )\n\n return groups" }, { "class_start_lineno": 1, "class_end_lineno": 499, "func_start_lineno": 32, "func_end_lineno": 134, "func_code": "def equations_to_matrix(\n groups: npt.ArrayLike,\n equations: npt.ArrayLike,\n sum_to_one: bool = False,\n raise_if_group_missing: bool = False,\n names: tuple[str, str] = (\"groups\", \"equations\"),\n) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"Convert a list of linear equations into the left and right matrices of the\n inequality A <= B and equality A == B.\n\n Parameters\n ----------\n groups : array-like of shape (n_groups, n_assets)\n 2D array of assets groups.\n\n For example:\n\n groups = np.array(\n [\n [\"SPX\", \"SX5E\", \"NKY\", \"TLT\"],\n [\"Equity\", \"Equity\", \"Equity\", \"Bond\"],\n [\"US\", \"Europe\", \"Japan\", \"US\"],\n ]\n )\n\n equations : array-like of shape (n_equations,)\n 1D array of equations.\n\n Example of valid equation patterns:\n * \"number_1 * group_1 + number_3 <= number_4 * group_3 + number_5\"\n * \"group_1 == number * group_2\"\n * \"group_1 <= number\"\n * \"group_1 == number\"\n\n \"group_1\" and \"group_2\" are the group names defined in `groups`.\n The second expression means that the sum of all assets in \"group_1\" should be\n less or equal to \"number\" times the sum of all assets in \"group_2\".\n\n For example:\n\n equations = [\n \"Equity <= 3 * Bond\",\n \"US >= 1.5\",\n \"Europe >= 0.5 * Japan\",\n \"Japan == 1\",\n \"3*SPX + 5*SX5E == 2*TLT + 3\",\n ]\n\n sum_to_one : bool\n If this is set to True, all elements in a group sum to one (used in the `views`\n of the Black-Litterman model).\n\n raise_if_group_missing : bool, default=False\n If this is set to True, an error is raised when a group is not found in the\n groups, otherwise only a warning is shown.\n The default is False.\n\n names : tuple[str, str], default=('groups', 'equations')\n The group and equation names used in error messages.\n The default is `('groups', 'equations')`.\n\n Returns\n -------\n left_equality: ndarray of shape (n_equations_equality, n_assets)\n right_equality: ndarray of shape (n_equations_equality,)\n The left and right matrices of the inequality A <= B.\n\n left_inequality: ndarray of shape (n_equations_inequality, n_assets)\n right_inequality: ndarray of shape (n_equations_inequality,)\n The left and right matrices of the equality A == B.\n \"\"\"\n groups = 
_validate_groups(groups, name=names[0])\n equations = _validate_equations(equations, name=names[1])\n\n a_equality = []\n b_equality = []\n\n a_inequality = []\n b_inequality = []\n\n for string in equations:\n try:\n left, right, is_inequality = _string_to_equation(\n groups=groups,\n string=string,\n sum_to_one=sum_to_one,\n )\n if is_inequality:\n a_inequality.append(left)\n b_inequality.append(right)\n else:\n a_equality.append(left)\n b_equality.append(right)\n except GroupNotFoundError as e:\n if raise_if_group_missing:\n raise\n warnings.warn(str(e), stacklevel=2)\n return (\n np.array(a_equality),\n np.array(b_equality),\n np.array(a_inequality),\n np.array(b_inequality),\n )" } ]
[ "function_empty", "TDD" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.utils.equations._validate_groups", "skfolio.utils.equations.equations_to_matrix" ]
Python
3
4
{ "total_num": 4, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.distribution.multivariate._utils.ChildNode::central", "skfolio.src.skfolio.distribution.multivariate._utils.Edge::share_one_node", "skfolio.src.skfolio.distribution.multivariate._utils.Tree::set_edges_from_mst" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/distribution/multivariate/_utils.py", "skfolio/distribution/multivariate/_utils.py", "skfolio/distribution/multivariate/_utils.py", "skfolio/distribution/multivariate/_utils.py" ]
[ "tests/test_prior/test_synthetic_data.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 160, "class_end_lineno": 324, "func_start_lineno": 191, "func_end_lineno": 202, "func_code": " def central(self) -> bool:\n \"\"\"Determine whether this node is considered central.\n It is inherited from the associated edge's centrality.\n\n Returns\n -------\n central: bool\n True if the node is central; otherwise, False.\n \"\"\"\n if self._central is None:\n self._central = self.ref.strongly_central\n return self._central" }, { "class_start_lineno": 327, "class_end_lineno": 479, "func_start_lineno": 365, "func_end_lineno": 369, "func_code": " def weakly_central(self) -> bool:\n \"\"\"Determine if the edge is weakly central.\n An edge is weakly central if at least one of its two nodes is central.\n \"\"\"\n return self.node1.central or self.node2.central" }, { "class_start_lineno": 327, "class_end_lineno": 479, "func_start_lineno": 460, "func_end_lineno": 473, "func_code": " def share_one_node(self, other: \"Edge\") -> bool:\n \"\"\"Check whether two edges share exactly one node.\n\n Parameters\n ----------\n other : Edge\n Another edge to compare with.\n\n Returns\n -------\n bool\n True if the two edges share exactly one node; otherwise, False.\n \"\"\"\n return len({self.node1, self.node2} & {other.node1, other.node2}) == 1" }, { "class_start_lineno": 482, "class_end_lineno": 591, "func_start_lineno": 520, "func_end_lineno": 582, "func_code": " def set_edges_from_mst(self, dependence_method: DependenceMethod) -> None:\n \"\"\"Construct the Maximum Spanning Tree (MST) from the current nodes using\n the specified dependence method.\n\n The MST is built based on pairwise dependence measures computed between nodes.\n If any edge is (weakly) central, a central factor is added to the dependence\n measure to favor edges connected to central nodes.\n\n Parameters\n ----------\n dependence_method : DependenceMethod\n The method used to compute the dependence measure between nodes (e.g.,\n Kendall's tau).\n\n Returns\n -------\n None\n \"\"\"\n n = len(self.nodes)\n dependence_matrix = np.zeros((n, n))\n eligible_edges = {}\n central = False\n for i, j in combinations(range(n), 2):\n node1 = self.nodes[i]\n node2 = self.nodes[j]\n if self.level == 0 or node1.ref.share_one_node(node2.ref):\n edge = Edge(\n node1=node1, node2=node2, dependence_method=dependence_method\n )\n if not central and edge.weakly_central:\n central = True\n # Negate the matrix to use minimum_spanning_tree for maximum spanning\n # Add a cst to ensure that even if dep is 0, we still build a valid MST\n dep = abs(edge.dependence) + 1e-5\n dependence_matrix[i, j] = dep\n eligible_edges[(i, j)] = edge\n\n if np.any(np.isnan(dependence_matrix)):\n raise RuntimeError(\"dependence_matrix contains NaNs\")\n\n if central:\n max_dep = np.max(dependence_matrix)\n for (i, j), edge in eligible_edges.items():\n if edge.weakly_central:\n if edge.strongly_central:\n central_factor = 3 * max_dep\n else:\n central_factor = 2 * max_dep\n dep = dependence_matrix[i, j] + central_factor\n dependence_matrix[i, j] = dep\n\n # Compute the minimum spanning tree\n mst = ssc.minimum_spanning_tree(-dependence_matrix, overwrite=True)\n\n edges = []\n # Extract the indices of the non-zero entries (edges)\n for i, j in zip(*mst.nonzero(), strict=True):\n edge = eligible_edges[(i, j)]\n # connect Nodes to Edges\n edge.ref_to_nodes()\n edges.append(edge)\n\n self.edges = edges" } ]
[ "function_empty", "TDD" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.distribution.multivariate._utils.ChildNode.central", "skfolio.distribution.multivariate._utils.Edge.weakly_central", "skfolio.distribution.multivariate._utils.Edge.share_one_node", "skfolio.distribution.multivariate._utils.Tree.set_edges_from_mst" ]
Python
4
5
{ "total_num": 4, "base_passed_num": 0 }
[ "skfolio.src.skfolio.utils.equations._split_equation_string", "skfolio.src.skfolio.utils.equations._string_to_equation", "skfolio.src.skfolio.utils.equations._validate_groups", "skfolio.src.skfolio.utils.equations.equations_to_matrix" ]
skfolio
[ "skfolio/utils/equations.py", "skfolio/utils/equations.py", "skfolio/utils/equations.py", "skfolio/utils/equations.py" ]
[ "tests/test_utils/test_equations.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 499, "func_start_lineno": 347, "func_end_lineno": 371, "func_code": "def _split_equation_string(string: str) -> list[str]:\n \"\"\"Split an equation strings by operators.\"\"\"\n comp_pattern = \"(?=\" + \"|\".join([\".+\\\\\" + e for e in _COMPARISON_OPERATORS]) + \")\"\n if not bool(re.match(comp_pattern, string)):\n raise EquationToMatrixError(\n f\"The string must contains a comparison operator: \"\n f\"{list(_COMPARISON_OPERATORS)}\"\n )\n\n # Regex to match only '>' and '<' but not '<=' or '>='\n invalid_pattern = r\"(?<!<)(?<!<=)>(?!=)|(?<!>)<(?!=)\"\n invalid_matches = re.findall(invalid_pattern, string)\n\n if len(invalid_matches) > 0:\n raise EquationToMatrixError(\n f\"{invalid_matches[0]} is an invalid comparison operator. \"\n f\"Valid comparison operators are: {list(_COMPARISON_OPERATORS)}\"\n )\n\n # '==' needs to be before '='\n operators = sorted(_OPERATORS, reverse=True)\n pattern = \"((?:\" + \"|\".join([\"\\\\\" + e for e in operators]) + \"))\"\n res = [x.strip() for x in re.split(pattern, string)]\n res = [x for x in res if x != \"\"]\n return res" }, { "class_start_lineno": 1, "class_end_lineno": 499, "func_start_lineno": 374, "func_end_lineno": 499, "func_code": "def _string_to_equation(\n groups: np.ndarray,\n string: str,\n sum_to_one: bool,\n) -> tuple[np.ndarray, float, bool]:\n \"\"\"Convert a string to a left 1D-array and right float of the form:\n `groups @ left <= right` or `groups @ left == right` and return whether it's an\n equality or inequality.\n\n Parameters\n ----------\n groups : ndarray of shape (n_groups, n_assets)\n Groups 2D-array\n\n string : str\n String to convert\n\n sum_to_one : bool\n If this is set to True, the 1D-array is scaled to have a sum of one.\n\n Returns\n -------\n left : 1D-array of shape (n_assets,)\n right : float\n is_inequality : bool\n \"\"\"\n n = groups.shape[1]\n err_msg = f\"Wrong pattern encountered while converting the string '{string}'\"\n\n iterator = iter(_split_equation_string(string))\n group_names = set(groups.flatten())\n\n def is_group(name: str) -> bool:\n return name in group_names\n\n left = np.zeros(n)\n right = 0\n main_sign = 1\n comparison_sign = None\n is_inequality = None\n e = next(iterator, None)\n i = 0\n while True:\n i += 1\n if i > 1e6:\n raise RecursionError(err_msg)\n if e is None:\n break\n sign = 1\n if e in _COMPARISON_OPERATORS:\n if e in _INEQUALITY_OPERATORS:\n is_inequality = True\n else:\n is_inequality = False\n main_sign = -1\n comparison_sign = _comparison_operator_sign(e)\n e = next(iterator, None)\n if e in _SUB_ADD_OPERATORS:\n sign *= _sub_add_operator_sign(e)\n e = next(iterator, None)\n elif e in _SUB_ADD_OPERATORS:\n sign *= _sub_add_operator_sign(e)\n e = next(iterator, None)\n elif e in _MUL_OPERATORS:\n raise EquationToMatrixError(\n f\"{err_msg}: the character '{e}' is wrongly positioned\"\n )\n sign *= main_sign\n # next can only be a number or a group\n if e is None or e in _OPERATORS:\n raise EquationToMatrixError(\n f\"{err_msg}: the character '{e}' is wrongly positioned\"\n )\n if is_group(e):\n arr = _matching_array(values=groups, key=e, sum_to_one=sum_to_one)\n # next can only be a '*' or an ['-', '+', '>=', '<=', '==', '='] or None\n e = next(iterator, None)\n if e is None or e in _NON_MUL_OPERATORS:\n left += sign * arr\n elif e in _MUL_OPERATORS:\n # next can only a number\n e = next(iterator, None)\n try:\n number = float(e)\n except ValueError:\n raise GroupNotFoundError(\n f\"{err_msg}: the group '{e}' is 
missing from the groups\"\n f\" {groups}\"\n ) from None\n\n left += number * sign * arr\n e = next(iterator, None)\n else:\n raise EquationToMatrixError(\n f\"{err_msg}: the character '{e}' is wrongly positioned\"\n )\n else:\n try:\n number = float(e)\n except ValueError:\n raise GroupNotFoundError(\n f\"{err_msg}: the group '{e}' is missing from the groups {groups}\"\n ) from None\n # next can only be a '*' or an operator or None\n e = next(iterator, None)\n if e in _MUL_OPERATORS:\n # next can only a group\n e = next(iterator, None)\n if not is_group(e):\n raise EquationToMatrixError(\n f\"{err_msg}: the character '{e}' is wrongly positioned\"\n )\n arr = _matching_array(values=groups, key=e, sum_to_one=sum_to_one)\n left += number * sign * arr\n e = next(iterator, None)\n elif e is None or e in _NON_MUL_OPERATORS:\n right += number * sign\n else:\n raise EquationToMatrixError(\n f\"{err_msg}: the character '{e}' is wrongly positioned\"\n )\n\n left *= comparison_sign\n right *= -comparison_sign\n\n return left, right, is_inequality" }, { "class_start_lineno": 1, "class_end_lineno": 499, "func_start_lineno": 195, "func_end_lineno": 226, "func_code": "def _validate_groups(groups: npt.ArrayLike, name: str = \"groups\") -> np.ndarray:\n \"\"\"Validate groups by checking its dim and if group names don't appear in multiple\n levels and convert to numpy array.\n\n Parameters\n ----------\n groups : array-like of shape (n_groups, n_assets)\n 2D-array of strings.\n\n Returns\n -------\n groups : ndarray of shape (n_groups, n_assets)\n 2D-array of strings.\n \"\"\"\n groups = np.asarray(groups)\n if groups.ndim != 2:\n raise ValueError(\n f\"`{name} must be a 2D array, got {groups.ndim}D array instead.\"\n )\n n = len(groups)\n group_sets = [set(groups[i]) for i in range(n)]\n for i in range(n - 1):\n for e in group_sets[i]:\n for j in range(i + 1, n):\n if e in group_sets[j]:\n raise DuplicateGroupsError(\n f\"'{e}' appear in two levels: {list(groups[i])} \"\n f\"and {list(groups[i])}. 
\"\n f\"{name} must be in only one level.\"\n )\n\n return groups" }, { "class_start_lineno": 1, "class_end_lineno": 499, "func_start_lineno": 32, "func_end_lineno": 134, "func_code": "def equations_to_matrix(\n groups: npt.ArrayLike,\n equations: npt.ArrayLike,\n sum_to_one: bool = False,\n raise_if_group_missing: bool = False,\n names: tuple[str, str] = (\"groups\", \"equations\"),\n) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"Convert a list of linear equations into the left and right matrices of the\n inequality A <= B and equality A == B.\n\n Parameters\n ----------\n groups : array-like of shape (n_groups, n_assets)\n 2D array of assets groups.\n\n For example:\n\n groups = np.array(\n [\n [\"SPX\", \"SX5E\", \"NKY\", \"TLT\"],\n [\"Equity\", \"Equity\", \"Equity\", \"Bond\"],\n [\"US\", \"Europe\", \"Japan\", \"US\"],\n ]\n )\n\n equations : array-like of shape (n_equations,)\n 1D array of equations.\n\n Example of valid equation patterns:\n * \"number_1 * group_1 + number_3 <= number_4 * group_3 + number_5\"\n * \"group_1 == number * group_2\"\n * \"group_1 <= number\"\n * \"group_1 == number\"\n\n \"group_1\" and \"group_2\" are the group names defined in `groups`.\n The second expression means that the sum of all assets in \"group_1\" should be\n less or equal to \"number\" times the sum of all assets in \"group_2\".\n\n For example:\n\n equations = [\n \"Equity <= 3 * Bond\",\n \"US >= 1.5\",\n \"Europe >= 0.5 * Japan\",\n \"Japan == 1\",\n \"3*SPX + 5*SX5E == 2*TLT + 3\",\n ]\n\n sum_to_one : bool\n If this is set to True, all elements in a group sum to one (used in the `views`\n of the Black-Litterman model).\n\n raise_if_group_missing : bool, default=False\n If this is set to True, an error is raised when a group is not found in the\n groups, otherwise only a warning is shown.\n The default is False.\n\n names : tuple[str, str], default=('groups', 'equations')\n The group and equation names used in error messages.\n The default is `('groups', 'equations')`.\n\n Returns\n -------\n left_equality: ndarray of shape (n_equations_equality, n_assets)\n right_equality: ndarray of shape (n_equations_equality,)\n The left and right matrices of the inequality A <= B.\n\n left_inequality: ndarray of shape (n_equations_inequality, n_assets)\n right_inequality: ndarray of shape (n_equations_inequality,)\n The left and right matrices of the equality A == B.\n \"\"\"\n groups = _validate_groups(groups, name=names[0])\n equations = _validate_equations(equations, name=names[1])\n\n a_equality = []\n b_equality = []\n\n a_inequality = []\n b_inequality = []\n\n for string in equations:\n try:\n left, right, is_inequality = _string_to_equation(\n groups=groups,\n string=string,\n sum_to_one=sum_to_one,\n )\n if is_inequality:\n a_inequality.append(left)\n b_inequality.append(right)\n else:\n a_equality.append(left)\n b_equality.append(right)\n except GroupNotFoundError as e:\n if raise_if_group_missing:\n raise\n warnings.warn(str(e), stacklevel=2)\n return (\n np.array(a_equality),\n np.array(b_equality),\n np.array(a_inequality),\n np.array(b_inequality),\n )" } ]
[ "function_empty", "TDD" ]
[ "skfolio.utils.equations._split_equation_string", "skfolio.utils.equations._string_to_equation", "skfolio.utils.equations._validate_groups", "skfolio.utils.equations.equations_to_matrix" ]
Python
2
4
{ "total_num": 12, "base_passed_num": 3 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.datasets._base.get_data_home", "skfolio.src.skfolio.datasets._base.download_dataset", "skfolio.src.skfolio.datasets._base.load_nasdaq_dataset" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/datasets/_base.py" ]
[ "tests/test_utils/test_stats.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 25, "func_end_lineno": 55, "func_code": "def get_data_home(data_home: str | Path | None = None) -> str:\n \"\"\"Return the path of the skfolio data directory.\n\n This folder is used by some large dataset loaders to avoid downloading the\n data several times.\n\n By default, the data directory is set to a folder named 'skfolio_data' in the\n user home folder.\n\n Alternatively, it can be set by the 'SKFOLIO_DATA' environment\n variable or programmatically by giving an explicit folder path. The '~'\n symbol is expanded to the user home folder.\n\n If the folder does not already exist, it is automatically created.\n\n Parameters\n ----------\n data_home : str, optional\n The path to skfolio data directory. If `None`, the default path\n is `~/skfolio_data`.\n\n Returns\n -------\n data_home: str or path-like, optional\n The path to skfolio data directory.\n \"\"\"\n if data_home is None:\n data_home = os.environ.get(\"SKFOLIO_DATA\", os.path.join(\"~\", \"skfolio_data\"))\n data_home = os.path.expanduser(data_home)\n os.makedirs(data_home, exist_ok=True)\n return data_home" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 116, "func_end_lineno": 165, "func_code": "def download_dataset(\n data_filename: str,\n data_home: str | Path | None = None,\n download_if_missing: bool = True,\n) -> pd.DataFrame:\n \"\"\"Download and save locally a dataset from the remote GitHub dataset folder.\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from a remote\n GitHub dataset folder.\n\n data_home : str or path-like, optional\n Specify another download and cache folder for the datasets. 
By default,\n all skfolio data is stored in `~/skfolio_data` sub-folders.\n\n download_if_missing : bool, default=True\n If False, raise an OSError if the data is not locally available\n instead of trying to download the data from the source site.\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n # Use a CORS proxy when triggering requests from the browser\n url_prefix = \"https://corsproxy.io/?\" if sys.platform == \"emscripten\" else \"\"\n url = url_prefix + (\n f\"https://github.com/skfolio/skfolio-datasets/raw/main/\"\n f\"datasets/{data_filename}.csv.gz\"\n )\n\n data_home = get_data_home(data_home=data_home)\n filepath = os.path.join(data_home, f\"{data_filename}.pkz\")\n\n if os.path.exists(filepath):\n return joblib.load(filepath)\n\n if not download_if_missing:\n raise OSError(\"Data not found and `download_if_missing` is False\")\n\n archive_path = os.path.join(data_home, os.path.basename(url))\n ur.urlretrieve(url, archive_path)\n df = load_gzip_compressed_csv_data(archive_path)\n joblib.dump(df, filepath, compress=6)\n os.remove(archive_path)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 348, "func_end_lineno": 397, "func_code": "def load_nasdaq_dataset(data_home=None, download_if_missing=True) -> pd.DataFrame:\n \"\"\"Load the prices of 1455 assets from the NASDAQ Composite Index.\n\n This dataset is composed of the daily prices of 1455 assets from the NASDAQ\n Composite starting from 2018-01-02 up to 2023-05-31.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 1362\n Assets 1455\n ============== ==================\n\n Parameters\n ----------\n data_home : str, optional\n Specify another download and cache folder for the datasets.\n By default, all skfolio data is stored in `~/skfolio_data` subfolders.\n\n download_if_missing : bool, default=True\n If False, raise an OSError if the data is not locally available\n instead of trying to download the data from the source site.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_nasdaq_dataset\n >>> prices = load_nasdaq_dataset()\n >>> prices.head()\n AAL AAOI AAON AAPL ... ZVRA ZYME ZYNE ZYXI\n Date ...\n 2018-01-02 51.648 37.91 35.621 41.310 ... 66.4 7.933 12.995 2.922\n 2018-01-03 51.014 37.89 36.247 41.303 ... 72.8 7.965 13.460 2.913\n 2018-01-04 51.336 38.38 36.103 41.495 ... 78.4 8.430 12.700 2.869\n 2018-01-05 51.316 38.89 36.681 41.967 ... 77.6 8.400 12.495 2.780\n 2018-01-08 50.809 38.37 36.103 41.811 ... 82.4 8.310 12.550 2.825\n \"\"\"\n data_filename = \"nasdaq_dataset\"\n df = download_dataset(\n data_filename, data_home=data_home, download_if_missing=download_if_missing\n )\n return df" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.datasets._base.get_data_home", "skfolio.datasets._base.download_dataset", "skfolio.datasets._base.load_nasdaq_dataset" ]
Python
5
5
{ "total_num": 37, "base_passed_num": 33 }
[ "skfolio.src.skfolio.utils.tools.optimal_rounding_decimals", "skfolio.src.skfolio.utils.tools.format_measure", "skfolio.src.skfolio.utils.tools.safe_indexing", "skfolio.src.skfolio.utils.tools.safe_split" ]
skfolio
[ "skfolio/utils/tools.py", "skfolio/utils/tools.py", "skfolio/utils/tools.py", "skfolio/utils/tools.py" ]
[ "tests/test_utils/test_tools.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 537, "func_end_lineno": 550, "func_code": "def optimal_rounding_decimals(x: float) -> int:\n \"\"\"Return the optimal rounding decimal number for a user-friendly formatting.\n\n Parameters\n ----------\n x : float\n Number to round.\n\n Returns\n -------\n n : int\n Rounding decimal number.\n \"\"\"\n return min(6, max(int(-np.log10(abs(x))) + 2, 2))" }, { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 506, "func_end_lineno": 534, "func_code": "def format_measure(x: float, percent: bool = False) -> str:\n \"\"\"Format a measure number into a user-friendly string.\n\n Parameters\n ----------\n x : float\n Number to format.\n\n percent : bool, default=False\n If this is set to True, the number is formatted in percentage.\n\n Returns\n -------\n formatted : str\n Formatted string.\n \"\"\"\n if np.isnan(x):\n return str(x)\n if percent:\n xn = x * 100\n f = \"%\"\n else:\n xn = x\n f = \"f\"\n if xn == 0:\n n = 0\n else:\n n = optimal_rounding_decimals(xn)\n return \"{value:{fmt}}\".format(value=x, fmt=f\".{n}{f}\")" }, { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 190, "func_end_lineno": 219, "func_code": "def safe_indexing(\n X: npt.ArrayLike | pd.DataFrame, indices: npt.ArrayLike | None, axis: int = 0\n):\n \"\"\"Return rows, items or columns of X using indices.\n\n Parameters\n ----------\n X : array-like\n Data from which to sample rows.\n\n indices : array-like, optional\n Indices of rows or columns.\n The default (`None`) is to select the entire data.\n\n axis : int, default=0\n The axis along which `X` will be sub-sampled. `axis=0` will select\n rows while `axis=1` will select columns.\n\n Returns\n -------\n subset :\n Subset of X on axis 0.\n \"\"\"\n if indices is None:\n return X\n if hasattr(X, \"iloc\"):\n return X.take(indices, axis=axis)\n if axis == 0:\n return X[indices]\n return X[:, indices]" }, { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 222, "func_end_lineno": 261, "func_code": "def safe_split(\n X: npt.ArrayLike,\n y: npt.ArrayLike | None = None,\n indices: np.ndarray | None = None,\n axis: int = 0,\n):\n \"\"\"Create subset of dataset.\n\n Slice X, y according to indices for cross-validation.\n\n Parameters\n ----------\n X : array-like\n Data to be indexed.\n\n y : array-like\n Data to be indexed.\n\n indices : ndarray of int, optional\n Rows or columns to select from X and y.\n The default (`None`) is to select the entire data.\n\n axis : int, default=0\n The axis along which `X` will be sub-sampled. `axis=0` will select\n rows while `axis=1` will select columns.\n\n Returns\n -------\n X_subset : array-like\n Indexed data.\n\n y_subset : array-like\n Indexed targets.\n \"\"\"\n X_subset = safe_indexing(X, indices=indices, axis=axis)\n if y is not None:\n y_subset = safe_indexing(y, indices=indices, axis=axis)\n else:\n y_subset = None\n return X_subset, y_subset" } ]
[ "function_empty", "TDD" ]
[ "skfolio.utils.tools.optimal_rounding_decimals", "skfolio.utils.tools.format_measure", "skfolio.utils.tools.safe_indexing", "skfolio.utils.tools.safe_split" ]
Python
1
4
{ "total_num": 21, "base_passed_num": 16 }