#!/usr/bin/env python3
"""Setup script"""
from pathlib import Path
import re
import os
import setuptools
if __name__ == "__main__":
# Read metadata from version.py
with Path("autofaiss/version.py").open(encoding="utf-8") as file:
metadata = dict(re.findall(r'__([a-z]+)__\s*=\s*"([^"]+)"', file.read()))
# Read description from README
with Path(Path(__file__).parent, "README.md").open(encoding="utf-8") as file:
long_description = file.read()
def _read_reqs(relpath):
fullpath = os.path.join(os.path.dirname(__file__), relpath)
with open(fullpath) as f:
return [s.strip() for s in f.readlines() if (s.strip() and not s.startswith("#"))]
_INSTALL_REQUIRES = _read_reqs("requirements.txt")
_TEST_REQUIRE = ["pytest"]
# Run setup
setuptools.setup(
name="autofaiss",
version=metadata["version"],
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Intended Audience :: Developers",
],
long_description=long_description,
long_description_content_type="text/markdown",
description=long_description.split("\n")[0],
author=metadata["author"],
install_requires=_INSTALL_REQUIRES,
tests_require=_TEST_REQUIRE,
dependency_links=[],
entry_points={"console_scripts": ["autofaiss = autofaiss.external.quantize:main"]},
data_files=[(".", ["requirements.txt", "README.md"])],
packages=setuptools.find_packages(),
url="https://github.com/criteo/autofaiss",
)
|
"""Check version and git tag script."""
from pathlib import Path
import re
import sys
import subprocess
if __name__ == "__main__":
# Read package version
with Path("autofaiss/version.py").open(encoding="utf-8") as file:
metadata = dict(re.findall(r'__([a-z]+)__\s*=\s*"([^"]+)"', file.read()))
version = metadata["version"]
# Read git tag
with subprocess.Popen(["git", "describe", "--tags"], stdout=subprocess.PIPE) as process:
tagged_version = process.communicate()[0].strip().decode(encoding="utf-8")
# Exit depending on version and tagged_version
if version == tagged_version:
print(f"Tag and version are the same ({version}) !")
sys.exit(0)
else:
print(f"Tag {tagged_version} and version {version} are not the same !")
sys.exit(1)
|
"""Test version."""
from autofaiss import version
def test_version():
"""Test version."""
assert len(version.__version__.split(".")) == 3
assert isinstance(version.__author__, str)
|
""" test utils functions """
# pylint: disable= invalid-name
import numpy as np
import pytest
from autofaiss.utils.array_functions import multi_array_split
def test_multi_array_split():
"""test multi_array_split fct number 1"""
assert len(list(multi_array_split([np.zeros((123, 2)), np.zeros((123, 5))], 41))) == 41
@pytest.mark.parametrize("seed", list(range(1, 10)))
def test_multi_array_split_2(seed):
"""test multi_array_split fct number 2"""
np.random.seed(seed)
length = np.random.randint(1, 100)
nb_chunk = np.random.randint(1, length + 1)
dim1 = np.random.randint(10)
dim2 = np.random.randint(10)
a = np.random.randint(0, 10000, (length, dim1))
b = np.random.randint(0, 10000, (length, dim2))
c = list(multi_array_split([a, b], nb_chunk))
a2 = np.concatenate([x[0] for x in c])
b2 = np.concatenate([x[1] for x in c])
assert np.all(a == a2)
assert np.all(b == b2)
|
import numpy as np
from autofaiss import build_index, tune_index, score_index
def test_scoring_tuning():
embs = np.ones((100, 512), "float32")
index, index_infos = build_index(embs, save_on_disk=False)
index = tune_index(index, index_infos["index_key"], save_on_disk=False)
infos = score_index(index, embs, save_on_disk=False)
|
import logging
import faiss
import numpy as np
import pytest
from autofaiss.external.optimize import (
get_min_param_value_for_best_neighbors_coverage,
get_optimal_hyperparameters,
get_optimal_index_keys_v2,
)
from autofaiss.external.quantize import build_index
from autofaiss.indices.index_factory import index_factory
from autofaiss.indices.index_utils import set_search_hyperparameters, speed_test_ms_per_query
LOGGER = logging.getLogger(__name__)
@pytest.mark.parametrize("nb_vectors", [10, 900, 9_000, 90_000, 900_000, 9_000_000])
@pytest.mark.parametrize("dim_vector", [10, 100])
@pytest.mark.parametrize("max_index_memory_usage", ["1K", "1M", "1G"])
def test_get_optimal_index_keys_v2(nb_vectors: int, dim_vector: int, max_index_memory_usage: str) -> None:
# Check that should_be_memory_mappable=True restricts the results to IVF indices
for index_key in get_optimal_index_keys_v2(
nb_vectors, dim_vector, max_index_memory_usage, should_be_memory_mappable=True
):
# LOGGER.debug(f"nb_vectors={nb_vectors}, max_mem={max_index_memory_usage} -> {index_key}")
assert "IVF" in index_key
@pytest.mark.parametrize(
"nb_vectors, use_gpu, expected",
[
(999_999, False, "IVF4096,Flat"),
(1_000_000, False, "OPQ256_768,IVF16384_HNSW32,PQ256x8"),
(1_000_000, True, "IVF16384,Flat"),
],
)
def test_get_optimal_index_keys_v2_with_large_nb_vectors(nb_vectors: int, use_gpu, expected: str):
assert (
get_optimal_index_keys_v2(
nb_vectors=nb_vectors,
dim_vector=512,
max_index_memory_usage="50G",
should_be_memory_mappable=True,
ivf_flat_threshold=1_000_000,
use_gpu=use_gpu,
)[0]
== expected
)
def test_get_min_param_value_for_best_neighbors_coverage() -> None:
"""
Check that get_min_param_value_for_best_neighbors_coverage works as expected.
"""
# We only test on hnsw because this index is fast to build
embeddings = np.float32(np.random.rand(30001, 512))
hyperparameter_str_from_param = lambda ef_search: f"efSearch={ef_search}"
parameter_range = list(range(16, 2 ** 14))
index, _ = build_index(embeddings, save_on_disk=False, index_key="HNSW15")
embeddings = np.float32(np.random.rand(66, 512))
for targeted_nb_neighbors_to_query in [10, 3000, 31000]:
for targeted_coverage in [0.99, 0.5]:
# Compute max coverage ratio
param_str = hyperparameter_str_from_param(parameter_range[-1])
set_search_hyperparameters(index, param_str)
ind = index.search(embeddings, targeted_nb_neighbors_to_query)[1]
max_coverage = 1 - np.sum(ind == -1) / ind.size
# Compute optimal param value
param = get_min_param_value_for_best_neighbors_coverage(
index, parameter_range, hyperparameter_str_from_param, targeted_nb_neighbors_to_query
)
set_search_hyperparameters(index, hyperparameter_str_from_param(param))
# Compute coverage for optimal param value
ind = index.search(embeddings, targeted_nb_neighbors_to_query)[1]
coverage = 1 - np.sum(ind == -1) / ind.size
epsilon = 0.02
# Check that the coverage is close to the targeted coverage
if max_coverage == 1:
assert coverage >= targeted_coverage - epsilon
else:
assert coverage >= 0.95 * max_coverage - epsilon
@pytest.mark.skip(reason="This test takes too long to run (11m)")
@pytest.mark.parametrize(
"index_key", ["OPQ64_128,IVF1024_HNSW32,PQ64x8", "OPQ64_128,IVF1024,PQ64x8", "IVF256,Flat", "HNSW15"]
)
@pytest.mark.parametrize("d", [100])
def test_get_optimal_hyperparameters(index_key: str, d: int) -> None:
"""
Check that get_optimal_hyperparameters returns a hyperparameter string that
matches the speed constraint of the index.
"""
# commented out because slow to run
# nb_vectors_list = [1000, 100000]
# target_speed_ms_list = [0.5, 1, 10, 50]
nb_vectors_list = [10000]
target_speed_ms_list = [0.5]
min_ef_search = 32
use_gpu = False
embeddings = np.float32(np.random.rand(max(nb_vectors_list), d))
index = index_factory(d, index_key, faiss.METRIC_INNER_PRODUCT)
index.train(embeddings[:10000])
for nb_vec_in, target_nb_vec in zip([0] + nb_vectors_list, nb_vectors_list):
index.add(embeddings[nb_vec_in:target_nb_vec])
assert index.ntotal == target_nb_vec
for target_speed_ms in target_speed_ms_list:
hyperparameters_str = get_optimal_hyperparameters(
index, index_key, target_speed_ms, use_gpu, max_timeout_per_iteration_s=1.0, min_ef_search=min_ef_search
)
set_search_hyperparameters(index, hyperparameters_str, use_gpu)
avg_query_time_ms = speed_test_ms_per_query(index)
LOGGER.debug(
f"nb_vectors={target_nb_vec}, max_mem={index_key}, target_speed_ms {target_speed_ms} -> avg_query_time_ms: {avg_query_time_ms}, {hyperparameters_str}"
)
if (
"nprobe=1" == hyperparameters_str
or "nprobe=1," in hyperparameters_str
or "efSearch=1" == hyperparameters_str
or "efSearch=1," in hyperparameters_str
or f"efSearch={min_ef_search}," in hyperparameters_str
or f"efSearch={min_ef_search}" == hyperparameters_str
):
# The target speed is too constraining
assert avg_query_time_ms >= target_speed_ms * 0.90 - 0.25
continue
assert avg_query_time_ms <= 1.05 * target_speed_ms + 0.25 # ms
|
import logging
import os
import py
import random
from tempfile import TemporaryDirectory, NamedTemporaryFile
from typing import Tuple, List
import faiss
import numpy as np
import pandas as pd
import pyarrow.parquet as pq
import pytest
from numpy.testing import assert_array_equal
LOGGER = logging.getLogger(__name__)
from autofaiss import build_index, build_partitioned_indexes
logging.basicConfig(level=logging.DEBUG)
# hide py4j DEBUG from pyspark, otherwise there is too much useless debugging output
# https://stackoverflow.com/questions/37252527/how-to-hide-py4j-java-gatewayreceived-command-c-on-object-id-p0
logging.getLogger("py4j").setLevel(logging.ERROR)
def build_test_collection_numpy(
tmpdir: py.path, min_size=2, max_size=10000, dim=512, nb_files=5, tmpdir_name: str = "autofaiss_numpy"
):
tmp_path = tmpdir.mkdir(tmpdir_name)
sizes = [random.randint(min_size, max_size) for _ in range(nb_files)]
all_arrays = []
file_paths = []
for i, size in enumerate(sizes):
arr = np.random.rand(size, dim).astype("float32")
all_arrays.append(arr)
file_path = os.path.join(tmp_path, f"{str(i)}.npy")
file_paths.append(file_path)
np.save(file_path, arr)
all_arrays = np.vstack(all_arrays)
return str(tmp_path), sizes, dim, all_arrays, file_paths
def build_test_collection_parquet(
tmpdir: py.path,
min_size=2,
max_size=10000,
dim=512,
nb_files=5,
tmpdir_name: str = "autofaiss_parquet",
consecutive_ids=False,
):
tmp_path = tmpdir.mkdir(tmpdir_name)
sizes = [random.randint(min_size, max_size) for _ in range(nb_files)]
dim = dim
all_dfs = []
file_paths = []
n = 0
for i, size in enumerate(sizes):
arr = np.random.rand(size, dim).astype("float32")
if consecutive_ids:
# ids would be consecutive from 0 to N-1
ids = list(range(n, n + size))
else:
ids = np.random.randint(max_size * nb_files * 10, size=size)
df = pd.DataFrame({"embedding": list(arr), "id": ids})
all_dfs.append(df)
file_path = os.path.join(tmp_path, f"{str(i)}.parquet")
df.to_parquet(file_path)
file_paths.append(file_path)
n += len(df)
all_dfs = pd.concat(all_dfs)
return str(tmp_path), sizes, dim, all_dfs, file_paths
def test_quantize(tmpdir):
min_size = random.randint(1, 100)
max_size = random.randint(min_size, 10240)
dim = random.randint(1, 100)
nb_files = random.randint(1, 5)
tmp_dir, sizes, dim, expected_array, _ = build_test_collection_numpy(
tmpdir, min_size=min_size, max_size=max_size, dim=dim, nb_files=nb_files
)
output_numpy_index = os.path.join(tmpdir.strpath, "numpy_knn.index")
output_numpy_index_infos = os.path.join(tmpdir.strpath, "numpy_knn_infos.json")
build_index(
embeddings=tmp_dir,
file_format="npy",
index_path=output_numpy_index,
index_infos_path=output_numpy_index_infos,
max_index_query_time_ms=10.0,
max_index_memory_usage="1G",
current_memory_available="2G",
)
output_numpy_index_faiss = faiss.read_index(output_numpy_index)
assert output_numpy_index_faiss.ntotal == len(expected_array)
tmp_dir, sizes, dim, expected_df, _ = build_test_collection_parquet(
tmpdir, min_size=min_size, max_size=max_size, dim=dim, nb_files=nb_files
)
index_path = os.path.join(tmpdir.strpath, "parquet_knn.index")
index_infos_path = os.path.join(tmpdir.strpath, "infos.json")
build_index(
embeddings=tmp_dir,
file_format="parquet",
embedding_column_name="embedding",
index_path=index_path,
index_infos_path=index_infos_path,
max_index_query_time_ms=10.0,
max_index_memory_usage="1G",
current_memory_available="2G",
)
output_parquet_index_faiss = faiss.read_index(index_path)
assert output_parquet_index_faiss.ntotal == len(expected_df)
def test_quantize_with_ids(tmpdir):
min_size = random.randint(1, 100)
max_size = random.randint(min_size, 10240)
dim = random.randint(1, 100)
nb_files = random.randint(1, 5)
tmp_dir, sizes, dim, expected_df, _ = build_test_collection_parquet(
tmpdir, min_size=min_size, max_size=max_size, dim=dim, nb_files=nb_files
)
index_path = os.path.join(tmpdir.strpath, "parquet_knn.index")
index_infos_path = os.path.join(tmpdir.strpath, "infos.json")
ids_path = os.path.join(tmpdir.strpath, "ids")
build_index(
embeddings=tmp_dir,
file_format="parquet",
embedding_column_name="embedding",
index_path=index_path,
index_infos_path=index_infos_path,
ids_path=ids_path,
max_index_query_time_ms=10.0,
max_index_memory_usage="1G",
current_memory_available="2G",
id_columns=["id"],
)
output_parquet_index_faiss = faiss.read_index(index_path)
output_parquet_ids = pq.read_table(ids_path).to_pandas()
assert output_parquet_index_faiss.ntotal == len(expected_df)
expected_df["i"] = np.arange(start=0, stop=len(expected_df))
pd.testing.assert_frame_equal(
output_parquet_ids.reset_index(drop=True), expected_df[["id", "i"]].reset_index(drop=True)
)
def test_quantize_with_pyspark(tmpdir):
min_size = random.randint(1, 100)
max_size = random.randint(min_size, 10240)
dim = random.randint(1, 100)
nb_files = random.randint(1, 5)
tmp_dir, _, _, expected_df, _ = build_test_collection_parquet(
tmpdir, min_size=min_size, max_size=max_size, dim=dim, nb_files=nb_files
)
index_parquet_path = os.path.join(tmpdir.strpath, "parquet_knn.index")
output_parquet_index_infos = os.path.join(tmpdir.strpath, "infos.json")
ids_path = os.path.join(tmpdir.strpath, "ids")
temporary_indices_folder = os.path.join(tmpdir.strpath, "distributed_autofaiss_indices")
build_index(
embeddings=tmp_dir,
distributed="pyspark",
file_format="parquet",
temporary_indices_folder=temporary_indices_folder,
index_infos_path=output_parquet_index_infos,
ids_path=ids_path,
max_index_memory_usage="1G",
current_memory_available="2G",
id_columns=["id"],
embedding_column_name="embedding",
index_path=index_parquet_path,
)
output_parquet_index_faiss = faiss.read_index(index_parquet_path)
output_parquet_ids = pq.read_table(ids_path).to_pandas()
assert output_parquet_index_faiss.ntotal == len(expected_df)
pd.testing.assert_frame_equal(
output_parquet_ids[["id"]].reset_index(drop=True), expected_df[["id"]].reset_index(drop=True)
)
tmp_dir, _, _, expected_array, _ = build_test_collection_numpy(
tmpdir, min_size=min_size, max_size=max_size, dim=dim, nb_files=nb_files
)
output_numpy_index = os.path.join(tmpdir.strpath, "numpy_knn.index")
output_numpy_index_infos = os.path.join(tmpdir.strpath, "numpy_knn_infos.json")
build_index(
embeddings=tmp_dir,
distributed="pyspark",
file_format="npy",
temporary_indices_folder=temporary_indices_folder,
index_infos_path=output_numpy_index_infos,
max_index_memory_usage="1G",
current_memory_available="2G",
embedding_column_name="embedding",
index_path=output_numpy_index,
)
output_numpy_index_faiss = faiss.read_index(output_numpy_index)
assert output_numpy_index_faiss.ntotal == len(expected_array)
def test_quantize_with_multiple_inputs(tmpdir):
min_size = random.randint(1, 100)
max_size = random.randint(min_size, 10240)
dim = random.randint(1, 100)
nb_files = random.randint(1, 5)
tmp_dir1, _, _, expected_df1, _ = build_test_collection_parquet(
tmpdir, min_size=min_size, max_size=max_size, dim=dim, nb_files=nb_files, tmpdir_name="autofaiss_parquet1"
)
tmp_dir2, _, _, expected_df2, _ = build_test_collection_parquet(
tmpdir, min_size=min_size, max_size=max_size, dim=dim, nb_files=nb_files, tmpdir_name="autofaiss_parquet2"
)
expected_df = pd.concat([expected_df1, expected_df2])
index_parquet_path = os.path.join(tmpdir.strpath, "parquet_knn.index")
output_parquet_index_infos = os.path.join(tmpdir.strpath, "infos.json")
build_index(
embeddings=[tmp_dir1, tmp_dir2],
file_format="parquet",
embedding_column_name="embedding",
index_path=index_parquet_path,
index_infos_path=output_parquet_index_infos,
max_index_query_time_ms=10.0,
max_index_memory_usage="1G",
current_memory_available="2G",
)
output_parquet_index_faiss = faiss.read_index(index_parquet_path)
assert output_parquet_index_faiss.ntotal == len(expected_df)
def test_quantize_with_empty_file():
with TemporaryDirectory() as tmp_dir:
with NamedTemporaryFile() as tmp_file:
df = pd.DataFrame({"embedding": [], "id": []})
df.to_parquet(os.path.join(tmp_dir, tmp_file.name))
with pytest.raises(ValueError):
build_index(embeddings=tmp_dir, file_format="parquet", embedding_column_name="embedding")
def test_quantize_with_empty_and_non_empty_files(tmpdir):
with TemporaryDirectory() as tmp_empty_dir:
with NamedTemporaryFile() as tmp_file:
df = pd.DataFrame({"embedding": [], "id": []})
df.to_parquet(os.path.join(tmp_empty_dir, tmp_file.name))
min_size = random.randint(1, 100)
max_size = random.randint(min_size, 10240)
dim = random.randint(1, 100)
nb_files = random.randint(1, 5)
tmp_non_empty_dir, _, _, expected_df, _ = build_test_collection_parquet(
tmpdir,
min_size=min_size,
max_size=max_size,
dim=dim,
nb_files=nb_files,
tmpdir_name="autofaiss_parquet1",
)
index_parquet_path = os.path.join(tmpdir.strpath, "parquet_knn.index")
output_parquet_index_infos = os.path.join(tmpdir.strpath, "infos.json")
build_index(
embeddings=[tmp_empty_dir, tmp_non_empty_dir],
file_format="parquet",
embedding_column_name="embedding",
index_path=index_parquet_path,
index_infos_path=output_parquet_index_infos,
max_index_query_time_ms=10.0,
max_index_memory_usage="1G",
current_memory_available="2G",
)
output_parquet_index_faiss = faiss.read_index(index_parquet_path)
assert output_parquet_index_faiss.ntotal == len(expected_df)
def test_index_correctness_in_distributed_mode(tmpdir):
min_size = 8000
max_size = 10240
dim = 512
nb_files = 5
# parquet
tmp_dir, _, _, expected_df, _ = build_test_collection_parquet(
tmpdir, min_size=min_size, max_size=max_size, dim=dim, nb_files=nb_files, consecutive_ids=True
)
temporary_indices_folder = os.path.join(tmpdir.strpath, "distributed_autofaiss_indices")
ids_path = os.path.join(tmpdir.strpath, "ids")
index, _ = build_index(
embeddings=tmp_dir,
distributed="pyspark",
file_format="parquet",
temporary_indices_folder=temporary_indices_folder,
max_index_memory_usage="600MB",
current_memory_available="700MB",
embedding_column_name="embedding",
index_key="IVF1,Flat",
should_be_memory_mappable=True,
metric_type="l2",
ids_path=ids_path,
save_on_disk=True,
id_columns=["id"],
)
query = faiss.rand((1, dim))
distances, ids = index.search(query, k=9)
ground_truth_index = faiss.index_factory(dim, "IVF1,Flat")
expected_array = np.stack(expected_df["embedding"])
ground_truth_index.train(expected_array)
ground_truth_index.add(expected_array)
ground_truth_distances, ground_truth_ids = ground_truth_index.search(query, k=9)
ids_mappings = pd.read_parquet(ids_path)["id"]
assert len(ids_mappings) == len(expected_df)
assert_array_equal(ids_mappings.iloc[ids[0, :]].to_numpy(), ids[0, :])
assert_array_equal(ids, ground_truth_ids)
# numpy
tmp_dir, _, _, expected_array, _ = build_test_collection_numpy(
tmpdir, min_size=min_size, max_size=max_size, dim=dim, nb_files=nb_files
)
index, _ = build_index(
embeddings=tmp_dir,
distributed="pyspark",
file_format="npy",
temporary_indices_folder=temporary_indices_folder,
max_index_memory_usage="400MB",
current_memory_available="500MB",
embedding_column_name="embedding",
index_key="IVF1,Flat",
should_be_memory_mappable=True,
metric_type="l2",
)
query = faiss.rand((1, dim))
distances, ids = index.search(query, k=9)
ground_truth_index = faiss.index_factory(dim, "IVF1,Flat")
ground_truth_index.train(expected_array)
ground_truth_index.add(expected_array)
ground_truth_distances, ground_truth_ids = ground_truth_index.search(query, k=9)
assert_array_equal(ids, ground_truth_ids)
def _search_from_multiple_indices(index_paths, query, k):
all_distances, all_ids, NB_QUERIES = [], [], 1
for rest_index_file in index_paths:
index = faiss.read_index(rest_index_file)
distances, ids = index.search(query, k=k)
all_distances.append(distances)
all_ids.append(ids)
dists_arr = np.stack(all_distances, axis=1).reshape(NB_QUERIES, -1)
knn_ids_arr = np.stack(all_ids, axis=1).reshape(NB_QUERIES, -1)
sorted_k_indices = np.argsort(-dists_arr)[:, :k]
sorted_k_dists = np.take_along_axis(dists_arr, sorted_k_indices, axis=1)
sorted_k_ids = np.take_along_axis(knn_ids_arr, sorted_k_indices, axis=1)
return sorted_k_dists, sorted_k_ids
def _merge_indices(index_paths):
merged = faiss.read_index(index_paths[0])
for rest_index_file in index_paths[1:]:
index = faiss.read_index(rest_index_file)
faiss.merge_into(merged, index, shift_ids=False)
return merged
def test_index_correctness_in_distributed_mode_with_multiple_indices(tmpdir):
min_size = 20000
max_size = 40000
dim = 512
nb_files = 5
# parquet
tmp_dir, _, _, expected_df, _ = build_test_collection_parquet(
tmpdir, min_size=min_size, max_size=max_size, dim=dim, nb_files=nb_files, consecutive_ids=True
)
temporary_indices_folder = os.path.join(tmpdir.strpath, "distributed_autofaiss_indices")
ids_path = os.path.join(tmpdir.strpath, "ids")
_, index_path2_metric_infos = build_index(
embeddings=tmp_dir,
distributed="pyspark",
file_format="parquet",
temporary_indices_folder=temporary_indices_folder,
max_index_memory_usage="2GB",
current_memory_available="500MB",
embedding_column_name="embedding",
index_key="IVF1,Flat",
should_be_memory_mappable=True,
ids_path=ids_path,
nb_indices_to_keep=2,
save_on_disk=True,
id_columns=["id"],
)
index_paths = sorted(index_path2_metric_infos.keys())
K, NB_QUERIES = 5, 1
query = faiss.rand((NB_QUERIES, dim))
ground_truth_index = faiss.index_factory(dim, "IVF1,Flat", faiss.METRIC_INNER_PRODUCT)
expected_array = np.stack(expected_df["embedding"])
ground_truth_index.train(expected_array)
ground_truth_index.add(expected_array)
_, ground_truth_ids = ground_truth_index.search(query, k=K)
ids_mappings = pd.read_parquet(ids_path)["id"]
assert len(ids_mappings) == len(expected_df)
assert_array_equal(ids_mappings.iloc[ground_truth_ids[0, :]].to_numpy(), ground_truth_ids[0, :])
_, sorted_k_ids = _search_from_multiple_indices(index_paths=index_paths, query=query, k=K)
merged = _merge_indices(index_paths)
_, ids = merged.search(query, k=K)
assert_array_equal(ids, ground_truth_ids)
assert_array_equal(sorted_k_ids, ground_truth_ids)
# numpy
tmp_dir, _, _, expected_array, _ = build_test_collection_numpy(
tmpdir, min_size=min_size, max_size=max_size, dim=dim, nb_files=nb_files
)
temporary_indices_folder = os.path.join(tmpdir.strpath, "distributed_autofaiss_indices")
_, index_path2_metric_infos = build_index(
embeddings=tmp_dir,
distributed="pyspark",
file_format="npy",
temporary_indices_folder=temporary_indices_folder,
max_index_memory_usage="2GB",
current_memory_available="500MB",
embedding_column_name="embedding",
index_key="IVF1,Flat",
should_be_memory_mappable=True,
nb_indices_to_keep=2,
)
ground_truth_index = faiss.index_factory(dim, "IVF1,Flat", faiss.METRIC_INNER_PRODUCT)
ground_truth_index.train(expected_array)
ground_truth_index.add(expected_array)
_, ground_truth_ids = ground_truth_index.search(query, k=K)
index_paths = sorted(index_path2_metric_infos.keys())
_, sorted_k_ids = _search_from_multiple_indices(index_paths=index_paths, query=query, k=K)
merged = _merge_indices(index_paths)
_, ids = merged.search(query, k=K)
assert_array_equal(ids, ground_truth_ids)
assert_array_equal(sorted_k_ids, ground_truth_ids)
def test_build_partitioned_indexes(tmpdir):
embedding_root_dir = tmpdir.mkdir("embeddings")
output_root_dir = tmpdir.mkdir("outputs")
temp_root_dir = tmpdir.strpath
small_partitions = [("partnerId=123", 1), ("partnerId=44", 2)]
big_partitions = [("partnerId=22", 3)]
all_partitions = small_partitions + big_partitions
expected_embeddings, partitions = _create_partitioned_parquet_embedding_dataset(
embedding_root_dir, all_partitions, n_dimensions=3
)
nb_splits_per_big_index = 2
metrics = build_partitioned_indexes(
partitions=partitions,
output_root_dir=str(output_root_dir),
embedding_column_name="embedding",
id_columns=["id"],
temp_root_dir=str(temp_root_dir),
nb_splits_per_big_index=nb_splits_per_big_index,
big_index_threshold=3,
should_be_memory_mappable=True,
)
assert len(all_partitions) == len(metrics)
all_ids = []
for partition_name, partition_size in small_partitions:
index_path = os.path.join(output_root_dir, partition_name, "knn.index")
index = faiss.read_index(index_path)
assert partition_size == index.ntotal
ids_path = os.path.join(output_root_dir, partition_name, "ids")
ids = pq.read_table(ids_path).to_pandas()
all_ids.append(ids)
for partition_name, partition_size in big_partitions:
n_embeddings = 0
for i in range(nb_splits_per_big_index):
index_path = os.path.join(output_root_dir, partition_name, f"knn.index{i}")
index = faiss.read_index(index_path)
n_embeddings += index.ntotal
assert partition_size == n_embeddings
ids_path = os.path.join(output_root_dir, partition_name, "ids")
ids = pq.read_table(ids_path).to_pandas()
all_ids.append(ids)
all_ids = pd.concat(all_ids)
pd.testing.assert_frame_equal(
all_ids[["id"]].reset_index(drop=True), expected_embeddings[["id"]].reset_index(drop=True)
)
def _create_partitioned_parquet_embedding_dataset(
embedding_root_dir: str, partition_sizes: List[Tuple[str, int]], n_dimensions: int = 512
):
partition_embeddings = []
partitions = []
n = 0
for i, (partition_name, partition_size) in enumerate(partition_sizes):
embeddings = np.random.rand(partition_size, n_dimensions).astype("float32")
ids = list(range(n, n + partition_size))
df = pd.DataFrame({"embedding": list(embeddings), "id": ids})
partition_embeddings.append(df)
partition_dir = os.path.join(embedding_root_dir, partition_name)
os.mkdir(partition_dir)
partitions.append(partition_dir)
file_path = os.path.join(partition_dir, f"{str(i)}.parquet")
df.to_parquet(file_path)
n += len(df)
all_embeddings = pd.concat(partition_embeddings)
return all_embeddings, partitions
|
import numpy as np
from autofaiss import build_index
def test_np_quantize():
embs = np.ones((100, 512), "float32")
index, _ = build_index(embs, save_on_disk=False)
_, I = index.search(embs, 1)
assert I[0][0] == 0
|
from autofaiss.external.build import estimate_memory_required_for_index_creation
#
# def test_estimate_memory_required_for_index_creation():
# needed_memory, _ = estimate_memory_required_for_index_creation(
# nb_vectors=4_000_000_000,
# vec_dim=512,
# index_key="OPQ4_28,IVF131072_HNSW32,PQ4x8",
# max_index_memory_usage="50G",
# )
# assert needed_memory == 100
|
""" Test that the memory efficient flat index give same results as the faiss flat index """
import time
import faiss
import numpy as np
import pytest
from autofaiss.indices.memory_efficient_flat_index import MemEfficientFlatIndex
@pytest.fixture(name="prod_emb")
def fixture_prod_emb():
"""generate random database vectors"""
np.random.seed(15)
return np.random.rand(5003, 99).astype(np.float32)
@pytest.fixture(name="user_emb")
def fixture_user_emb():
"""generate random query vectors"""
np.random.seed(17)
return np.random.rand(501, 99).astype(np.float32)
# pylint: disable=too-many-arguments, redefined-outer-name
@pytest.mark.parametrize("dataset_size", [1, 10, 3000, 5003])
@pytest.mark.parametrize("batch_size", [1000, 10000])
@pytest.mark.parametrize("nb_query_vectors", [1, 10, 100])
@pytest.mark.parametrize("k", [1, 10, 101])
def test_memory_efficient_flat_index(prod_emb, user_emb, dataset_size, batch_size, nb_query_vectors, k):
"""Test our flat index vs. FAISS flat index"""
dim = prod_emb.shape[-1] # vectors dim
# Test our flat index with faiss batches
start_time = time.time()
flat_index = MemEfficientFlatIndex(dim, "IP")
flat_index.add(prod_emb[:dataset_size])
D_our, I_our = flat_index.search(user_emb[:nb_query_vectors], k, batch_size=batch_size)
print(f"Our flat index: {time.time()-start_time:.2f} (bias if all the dataset is already in RAM)")
# Test our flat index with numpy batches
start_time = time.time()
flat_index = MemEfficientFlatIndex(dim, "IP")
flat_index.add(prod_emb[:dataset_size])
D_our_numpy, I_our_numpy = flat_index.search_numpy(user_emb[:nb_query_vectors], k, batch_size=batch_size)
print(f"Our numpy flat index: {time.time()-start_time:.2f} (bias if all the dataset is already in RAM)")
# Test FAISS flat index
start_time = time.time()
brute = faiss.IndexFlatIP(dim)
# pylint: disable=no-value-for-parameter
brute.add(prod_emb[:dataset_size])
D_faiss, I_faiss = brute.search(user_emb[:nb_query_vectors], k)
print(f"Faiss flat index: {time.time()-start_time:.2f} (no bias since all the dataset is already in RAM)")
# Check that the vectors we can't retrieve are the same
assert np.all((I_faiss == -1) == (I_our == -1))
assert np.all((I_faiss == -1) == (I_our_numpy == -1))
mask = I_faiss == -1
# Check that all the distances are equal and in the same order
assert np.all((np.abs(D_our - D_faiss) <= 2 ** -13) | mask)
assert np.all((np.abs(D_our_numpy - D_faiss) <= 2 ** -13) | mask)
# Check that the order is the same as Faiss -> it is not, but no big deal
# since the computation always gives the same results (repeatability holds)
assert np.all(I_our == I_faiss) or True
assert np.all(I_our_numpy == I_faiss) or True
|
from autofaiss.indices.distributed import _batch_loader
def test_batch_loader():
for input_size in range(2, 500):
for output_size in range(1, input_size):
batches = list(_batch_loader(nb_batches=output_size, total_size=input_size))
# test output size is expected
assert len(batches) == output_size
# test no empty batch
assert all(batch[1] <= input_size - 1 for batch in batches)
# test batches are contiguous (each batch starts where the previous one ends)
assert all(prev_end == next_start for (_, _, prev_end), (_, next_start, _) in zip(batches, batches[1:]))
# test last element is covered
assert batches[-1][2] >= input_size
# test range sum
assert sum(end - start for _, start, end in batches) == input_size
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
project = "autofaiss"
copyright = "2020, Criteo"
author = "Criteo reco team"
# The full version, including alpha/beta/rc tags
release = "1.0.0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinxcontrib.napoleon",
"sphinx.ext.viewcode",
"sphinx_autodoc_typehints",
"sphinx.ext.doctest",
"nbsphinx",
]
nbsphinx_execute = "never"
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"numpy": ("https://docs.scipy.org/doc/numpy/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
"deepr": ("https://criteo.github.io/deepr/", None),
}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build/**", ".env/**"]
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
source_suffix = [".rst"]
# -- Extension configuration -------------------------------------------------
|
"""
An example of running autofaiss by pyspark to produce N indices.
You need to install pyspark before using the following example.
"""
from typing import Dict
import faiss
import numpy as np
from autofaiss import build_index
# It is best to create a Spark session before calling build_index;
# otherwise, autofaiss creates one with a minimal default configuration.
_, index_path2_metric_infos = build_index(
embeddings="hdfs://root/path/to/your/embeddings/folder",
distributed="pyspark",
file_format="parquet",
temporary_indices_folder="hdfs://root/tmp/distributed_autofaiss_indices",
current_memory_available="10G",
max_index_memory_usage="100G",
nb_indices_to_keep=10,
)
index_paths = sorted(index_path2_metric_infos.keys())
###########################################
# Use case 1: merging 10 indices into one #
###########################################
merged = faiss.read_index(index_paths[0])
for rest_index_file in index_paths[1:]:
index = faiss.read_index(rest_index_file)
faiss.merge_into(merged, index, shift_ids=False)
with open("merged-knn.index", "wb") as f:
faiss.write_index(merged, faiss.PyCallbackIOWriter(f.write))
########################################
# Use case 2: searching from N indices #
########################################
K, DIM, all_distances, all_ids, NB_QUERIES = 5, 512, [], [], 2
queries = faiss.rand((NB_QUERIES, DIM))
for rest_index_file in index_paths:
index = faiss.read_index(rest_index_file)
distances, ids = index.search(queries, k=K)
all_distances.append(distances)
all_ids.append(ids)
dists_arr = np.stack(all_distances, axis=1).reshape(NB_QUERIES, -1)
knn_ids_arr = np.stack(all_ids, axis=1).reshape(NB_QUERIES, -1)
sorted_k_indices = np.argsort(-dists_arr)[:, :K]
sorted_k_dists = np.take_along_axis(dists_arr, sorted_k_indices, axis=1)
sorted_k_ids = np.take_along_axis(knn_ids_arr, sorted_k_indices, axis=1)
print(f"{K} nearest distances: {sorted_k_dists}")
print(f"{K} nearest ids: {sorted_k_ids}")
############################################
# Use case 3: on disk merging of N indices #
############################################
# using faiss.merge_ondisk (https://github.com/facebookresearch/faiss/blob/30abcd6a865afef7cf86df7e8b839a41b5161505/contrib/ondisk.py )
# https://github.com/facebookresearch/faiss/blob/151e3d7be54aec844b6328dc3e7dd0b83fcfa5bc/demos/demo_ondisk_ivf.py
# to merge indices on disk without using memory
# this is useful in particular to use a very large index with almost no memory usage.
from faiss.contrib.ondisk import merge_ondisk
import faiss
block_fnames = index_paths
empty_index = faiss.read_index(block_fnames[0], faiss.IO_FLAG_MMAP)
empty_index.ntotal = 0
merge_ondisk(empty_index, block_fnames, "merged_index.ivfdata")
faiss.write_index(empty_index, "populated.index")
pop = faiss.read_index("populated.index", faiss.IO_FLAG_ONDISK_SAME_DIR)
########################################################
# Use case 4: use N indices using HStackInvertedLists #
########################################################
# This allows using N indices as a single combined index
# without changing anything on disk or loading anything to memory
# it works well but it is slower than merging with merge_ondisk first,
# because each inverted list to explore is spread across N pieces
import os
class CombinedIndex:
"""
Combines a set of inverted lists into an HStackInvertedLists and attaches
them to an empty index that knows how to perform searches.
"""
def __init__(self, invlist_fnames):
ilv = faiss.InvertedListsPtrVector()
for fname in invlist_fnames:
if os.path.exists(fname):
index = faiss.read_index(fname, faiss.IO_FLAG_MMAP)
index_ivf = faiss.extract_index_ivf(index)
il = index_ivf.invlists
index_ivf.own_invlists = False
else:
raise FileNotFoundError(fname)
ilv.push_back(il)
self.big_il = faiss.HStackInvertedLists(ilv.size(), ilv.data())
ntotal = self.big_il.compute_ntotal()
self.index = faiss.read_index(invlist_fnames[0], faiss.IO_FLAG_MMAP)
index_ivf = faiss.extract_index_ivf(self.index)
index_ivf.replace_invlists(self.big_il, True)
index_ivf.ntotal = self.index.ntotal = ntotal
def search(self, x, k):
D, I = self.index.search(x, k)
return D, I
index = CombinedIndex(index_paths)
index.search(queries, K)
|
"""
Given a partitioned dataset of embeddings, create an index per partition
"""
import os
from autofaiss import build_partitioned_indexes
from pyspark.sql import SparkSession # pylint: disable=import-outside-toplevel
def create_spark_session():
# PEX file packaging your Python environment, accessible on YARN by all executors
os.environ["PYSPARK_PYTHON"] = "/home/ubuntu/autofaiss.pex"
spark = (
SparkSession.builder.config("spark.submit.deployMode", "client")
.config("spark.executorEnv.PEX_ROOT", "./.pex")
.config("spark.task.cpus", "32")
.config("spark.driver.port", "5678")
.config("spark.driver.blockManager.port", "6678")
.config("spark.driver.host", "172.31.35.188")
.config("spark.driver.bindAddress", "172.31.35.188")
.config("spark.executor.memory", "18G") # make sure to increase this if you're using more cores per executor
.config(
"spark.executor.memoryOverhead", "8G"
) # Memory overhead is needed for Faiss as indexes are built outside of the JVM/Java heap
.config(
"spark.executor.cores", "32"
) # Faiss is multi-threaded so increasing the number of cores will speed up index creation
.config("spark.task.maxFailures", "100")
.appName("Partitioned indexes")
.getOrCreate()
)
return spark
spark = create_spark_session()
partitions = [
"/root/directory/to/partitions/A",
"/root/directory/to/partitions/B",
"/root/directory/to/partitions/C",
"/root/directory/to/partitions/D",
...,
]
# Parameter `big_index_threshold` defines the minimum size of a big index.
# Partitions with at least `big_index_threshold` embeddings are built in a distributed
# way, and the resulting index is split into `nb_splits_per_big_index` smaller indexes.
# Partitions with fewer than `big_index_threshold` embeddings are not built in a
# distributed way and result in a single index.
index_metrics = build_partitioned_indexes(
partitions=partitions,
output_root_dir="/output/root/directory",
embedding_column_name="embedding",
nb_splits_per_big_index=2,
big_index_threshold=5_000_000,
)
|
import faiss
import numpy as np
from autofaiss import build_index
embeddings = np.float32(np.random.rand(5000, 100))
# Example on how to build a memory-mapped index and load it from disk
_, index_infos = build_index(
embeddings,
save_on_disk=True,
should_be_memory_mappable=True,
index_path="my_index_folder/knn.index",
max_index_memory_usage="4G",
max_index_query_time_ms=50,
)
index = faiss.read_index("my_index_folder/knn.index", faiss.IO_FLAG_MMAP | faiss.IO_FLAG_READ_ONLY)
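# Follow-up usage sketch, not part of the original example (assumes the build
# above succeeded): the memory-mapped index is queried like any other faiss
# index, with data paged in from disk on demand.
query = np.float32(np.random.rand(1, 100))
distances, ids = index.search(query, 10)
print(distances, ids)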
|
from autofaiss import build_index
import numpy as np
embeddings = np.float32(np.random.rand(100, 512))
index, index_infos = build_index(embeddings, save_on_disk=False)
_, I = index.search(embeddings, 1)
print(I)
|
import numpy as np
from autofaiss import build_index, tune_index, score_index
embs = np.float32(np.random.rand(100, 512))
index, index_infos = build_index(embs, save_on_disk=False)
index = tune_index(index, index_infos["index_key"], save_on_disk=False)
infos = score_index(index, embs, save_on_disk=False)
|
"""
An example of running autofaiss by pyspark.
You need to install pyspark before using the following example.
"""
from autofaiss import build_index
# It is best to create a Spark session before calling build_index;
# otherwise, autofaiss creates one with a minimal default configuration (see the sketch below).
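# A minimal sketch of creating that session first. The configuration values are
# illustrative assumptions (mirroring the partitioned-indexes example elsewhere in
# this repository); adapt them to your cluster. build_index below should then
# reuse this active session.
from pyspark.sql import SparkSession

spark = (
    SparkSession.builder.config("spark.executor.memory", "18G")
    .config("spark.executor.memoryOverhead", "8G")  # Faiss allocates memory outside the JVM heap
    .config("spark.executor.cores", "32")  # Faiss is multi-threaded
    .appName("distributed_autofaiss")
    .getOrCreate()
)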
index, index_infos = build_index(
embeddings="hdfs://root/path/to/your/embeddings/folder",
distributed="pyspark",
file_format="parquet",
temporary_indices_folder="hdfs://root/tmp/distributed_autofaiss_indices",
)
|
from autofaiss import build_index
build_index(
embeddings="embeddings",
index_path="knn.index",
index_infos_path="infos.json",
max_index_memory_usage="4G",
current_memory_available="5G",
)
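# Follow-up usage sketch, not part of the original snippet (assumes the build
# above succeeded and wrote "knn.index" to the current directory): load the
# index back and run a query.
import faiss
import numpy as np

index = faiss.read_index("knn.index")
query = np.float32(np.random.rand(1, index.d))
distances, ids = index.search(query, 5)
print(ids)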
|
# pylint: disable=all
__version__ = "2.15.5"
__author__ = "Criteo"
MAJOR = __version__.split(".")[0]
MINOR = __version__.split(".")[1]
PATCH = __version__.split(".")[2]
|
# pylint: disable=unused-import,missing-docstring
from autofaiss.external.quantize import build_index, score_index, tune_index, build_partitioned_indexes
from autofaiss.version import __author__, __version__
|
""" function to compute different kind of recalls """
from typing import List, Optional
import faiss
import numpy as np
def r_recall_at_r_single(
query: np.ndarray,
ground_truth: np.ndarray,
other_index: faiss.Index,
r_max: int = 40,
eval_item_ids: Optional[np.ndarray] = None,
) -> List[int]:
"""Compute an R-recall@R array for each R in range [1, R_max]"""
# O(r_max)
_, inds = other_index.search(np.expand_dims(query, 0), r_max)
res = inds[0]
recall_count = []
s_true = set()
s_pred = set()
tot = 0
for p_true, p_pred in zip(ground_truth[:r_max], res):
if eval_item_ids is not None and p_pred != -1:
p_pred = eval_item_ids[p_pred]
if p_true == p_pred and p_true != -1:
tot += 1
else:
if p_true in s_pred and p_true != -1:
tot += 1
if p_pred in s_true and p_pred != -1:
tot += 1
s_true.add(p_true)
s_pred.add(p_pred)
recall_count.append(tot)
return recall_count
def r_recall_at_r(
query: np.ndarray,
ground_truth: np.ndarray,
other_index: faiss.Index,
r_max: int = 40,
eval_item_ids: Optional[np.ndarray] = None,
) -> np.ndarray:
"""Compute an R-recall@R array for each R in range [1, R_max] for
a single query.
"""
# O(r_max)
r_lim = min(r_max, other_index.ntotal)
if r_lim <= 0:
return np.ones((max(r_max, 0),))
total = np.zeros((r_max,))
for i in range(query.shape[0]):
# If the ground truth contains -1 (missing elements), the recall definition must change.
# We should divide by the number of elements possible to retrieve, not r_lim
r_lim_fix = min(r_lim, np.min(np.where(ground_truth[i] == -1)[0])) if -1 in ground_truth[i] else r_lim
res_for_one = r_recall_at_r_single(
query[i], ground_truth[i], other_index, r_max, eval_item_ids
) / np.concatenate((np.arange(1, r_lim_fix + 1, 1), np.full(r_max - r_lim_fix, r_lim_fix)))
total += np.array(res_for_one)
return total / query.shape[0]
def one_recall_at_r_single(
query: np.ndarray,
ground_truth: np.ndarray,
other_index: faiss.Index,
r_max: int = 40,
eval_item_ids: Optional[np.ndarray] = None,
) -> List[int]:
"""
Compute a 1-recall@R array for each R in range [1, r_max] for
a single query.
"""
# O(r_max)
_, inds = other_index.search(np.expand_dims(query, 0), 1)
first = inds[0][0]
if eval_item_ids is not None and first != -1:
first = eval_item_ids[first]
# return an all-zero list if no item is found by other_index
if first == -1:
return [0 for _ in ground_truth[:r_max]]
recall_count = []
seen = False
for p_true in ground_truth[:r_max]:
if p_true == first:
seen = True
recall_count.append(1 if seen else 0)
return recall_count
def one_recall_at_r(
query: np.ndarray,
ground_truth: np.ndarray,
other_index: faiss.Index,
r_max: int = 40,
eval_item_ids: Optional[np.ndarray] = None,
) -> np.ndarray:
"""Compute an 1-recall@R array for each R in range [1, r_max]"""
# O(r_max)
if r_max <= 0:
return np.zeros((0,))
_, first = other_index.search(query, 1)
if eval_item_ids is not None:
first = np.vectorize(lambda e: eval_item_ids[e] if e != -1 else -1)(first) # type: ignore
recall_array = np.cumsum((ground_truth[:, :r_max] == first) & (first != -1), axis=-1)
avg_recall = np.mean(recall_array, axis=0)
return avg_recall
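# Hedged usage sketch, not part of the original module (sizes and index choices
# below are illustrative assumptions): compare an approximate index against a
# brute-force ground truth.
if __name__ == "__main__":
    dim = 64
    database = np.float32(np.random.rand(1000, dim))
    queries = np.float32(np.random.rand(10, dim))
    exact = faiss.IndexFlatIP(dim)
    exact.add(database)
    _, ground_truth = exact.search(queries, 40)
    approx = faiss.IndexHNSWFlat(dim, 15, faiss.METRIC_INNER_PRODUCT)
    approx.add(database)
    print("1-recall@R:", one_recall_at_r(queries, ground_truth, approx, r_max=40))
    print("R-recall@R:", r_recall_at_r(queries, ground_truth, approx, r_max=40))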
|
""" function to compute the reconstruction error """
from typing import Optional
import numpy as np
import faiss
def reconstruction_error(before, after, avg_norm_before: Optional[float] = None) -> float:
"""Computes the average reconstruction error"""
diff = np.mean(np.linalg.norm(after - before, axis=1))
if avg_norm_before is None:
avg_norm_before = np.mean(np.linalg.norm(before, axis=1))
return diff / avg_norm_before
def quantize_vec_without_modifying_index(index: faiss.Index, vecs: np.ndarray) -> np.ndarray:
"""Quantizes a batch of vectors if the index given uses quantization"""
try:
return index.sa_decode(index.sa_encode(vecs))
except (TypeError, RuntimeError): # error if the index doesn't use quantization
return vecs
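# Hedged usage sketch, not part of the original module (the IVF-PQ key and data
# below are illustrative assumptions): measure how much information a quantizing
# index loses when encoding vectors.
if __name__ == "__main__":
    data = np.float32(np.random.rand(10000, 64))
    index = faiss.index_factory(64, "IVF256,PQ8", faiss.METRIC_L2)
    index.train(data)
    reconstructed = quantize_vec_without_modifying_index(index, data[:100])
    print(reconstruction_error(data[:100], reconstructed))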
|
""" functions to compare different indices """
import time
import numpy as np
from matplotlib import pyplot as plt
from tqdm import tqdm as tq
from autofaiss.indices.index_utils import format_speed_ms_per_query, get_index_size, speed_test_ms_per_query
from autofaiss.metrics.recalls import r_recall_at_r_single, one_recall_at_r_single
from autofaiss.utils.cast import cast_bytes_to_memory_string
def avg_speed_dict_ms_per_query(indices_dict, vectors, k_closest: int = 40, timeout_s: float = 5):
"""compute the average query speed of a dictionary of indices"""
speed_dict = {}
for index_key in indices_dict:
speed = speed_test_ms_per_query(indices_dict[index_key], vectors, k_closest, timeout_s)
speed_dict[index_key] = speed
return speed_dict
def index_sizes_in_bytes_dict(indices_dict):
"""compute sizes of indices in a dictionary of indices"""
size_dict = {}
for index_key in indices_dict:
size_dict[index_key] = get_index_size(indices_dict[index_key])
return size_dict
def benchmark_index(
indices_dict, gt_test, test_points, vectors_size_in_bytes, save_path=None, speed_dict=None, size_dict=None
):
"""
Compute recall curves for the indices.
"""
perfect_index_label = "perfect index"
if perfect_index_label not in indices_dict:
indices_dict[perfect_index_label] = None
if speed_dict:
speed_dict[perfect_index_label] = vectors_size_in_bytes
k_max = gt_test.shape[1]
plt.figure(figsize=(16, 8))
k_values = np.arange(0, k_max + 1)
avg_one_recall_at_r = {}
avg_r_recall_at_r = {}
timout_s = 5.0
comp_size = vectors_size_in_bytes
for index_key in tq(list(sorted(indices_dict.keys()))):
if index_key not in indices_dict:
continue
index = indices_dict[index_key]
if index_key == "Flat" or (index is None):
y_r_recall_at_r = np.arange(1, k_max + 1)
y_one_recall_at_r = np.ones(k_max)
tot = 1
else:
y_r_recall_at_r = np.zeros(k_max)
y_one_recall_at_r = np.zeros(k_max)
tot = 0
start_time = time.time()
for i, item in enumerate(test_points):
y_r_recall_at_r += np.array(r_recall_at_r_single(item, gt_test[i], index, k_max))
y_one_recall_at_r += np.array(one_recall_at_r_single(item, gt_test[i], index, k_max))
tot += 1
if time.time() - start_time > timout_s and tot > 150:
break
avg_r_recall_at_r[index_key] = y_r_recall_at_r / tot
avg_one_recall_at_r[index_key] = y_one_recall_at_r / tot
info_string = {index_key: "" for index_key in indices_dict}
initial_size_string = cast_bytes_to_memory_string(comp_size)
for index_key in indices_dict:
if index_key in speed_dict:
info_string[index_key] += f"avg speed: {format_speed_ms_per_query(speed_dict[index_key])}, "
if index_key in size_dict:
info_string[index_key] += (
f"(Size: {cast_bytes_to_memory_string(size_dict[index_key])} "
f"({(100*size_dict[index_key]/comp_size):.1f}% of {initial_size_string})"
)
plt.subplot(121)
for index_key in sorted(indices_dict.keys()):
if index_key not in indices_dict:
continue
label = f"{index_key:<30} Index, {info_string[index_key]}"
plt.plot(k_values, np.concatenate(([0], avg_r_recall_at_r[index_key])), label=label)
plt.xlabel("k, number of nearests items")
plt.ylabel("k-recall@k")
plt.vlines(40, 0, k_max)
plt.legend()
plt.tight_layout()
plt.subplot(122)
for index_key in sorted(indices_dict.keys()):
if index_key not in indices_dict:
continue
label = f"{index_key:<30} Index, {info_string[index_key]}"
plt.plot(k_values, np.concatenate(([0], 100 * avg_one_recall_at_r[index_key])), label=label)
plt.xlabel("k, number of nearests items")
plt.ylabel("1-Recall@k")
plt.vlines(100, 0, k_max)
plt.legend()
plt.tight_layout()
if save_path:
plt.savefig(save_path)
plt.show()
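# Hedged usage sketch, not part of the original module (dataset sizes and index
# choices are illustrative assumptions): benchmark a small HNSW index against a
# brute-force baseline.
if __name__ == "__main__":
    import faiss

    dim = 64
    database = np.float32(np.random.rand(2000, dim))
    test_points = np.float32(np.random.rand(50, dim))
    flat = faiss.IndexFlatIP(dim)
    flat.add(database)
    _, gt_test = flat.search(test_points, 40)
    hnsw = faiss.IndexHNSWFlat(dim, 15, faiss.METRIC_INNER_PRODUCT)
    hnsw.add(database)
    indices_dict = {"Flat": flat, "HNSW15": hnsw}
    speed_dict = avg_speed_dict_ms_per_query(indices_dict, test_points)
    size_dict = index_sizes_in_bytes_dict(indices_dict)
    benchmark_index(
        indices_dict, gt_test, test_points, database.nbytes, speed_dict=speed_dict, size_dict=size_dict
    )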
|
# pylint: disable=unused-import,missing-docstring
|
""" Common functions to build an index """
import logging
from typing import Dict, Optional, Tuple, Union, Callable, Any
import uuid
import re
import os
import tempfile
import fsspec
import faiss
import pandas as pd
from embedding_reader import EmbeddingReader
from autofaiss.external.optimize import optimize_and_measure_index, get_optimal_batch_size
from autofaiss.indices.index_utils import set_search_hyperparameters, initialize_direct_map, load_index
from autofaiss.utils.path import make_path_absolute
from autofaiss.utils.cast import cast_bytes_to_memory_string
logger = logging.getLogger("autofaiss")
def get_write_ids_df_to_parquet_fn(ids_root_dir: str) -> Callable[[pd.DataFrame, int], None]:
"""Create function to write ids from Pandas dataframe to parquet"""
def _write_ids_df_to_parquet_fn(ids: pd.DataFrame, batch_id: int):
filename = f"part-{batch_id:08d}-{uuid.uuid1()}.parquet"
output_file = os.path.join(ids_root_dir, filename) # type: ignore
with fsspec.open(output_file, "wb") as f:
logger.debug(f"Writing id DataFrame to file {output_file}")
ids.to_parquet(f, index=False)
return _write_ids_df_to_parquet_fn
def get_optimize_index_fn(
embedding_reader: EmbeddingReader,
index_key: str,
index_path: Optional[str],
index_infos_path: Optional[str],
use_gpu: bool,
save_on_disk: bool,
max_index_query_time_ms: float,
min_nearest_neighbors_to_retrieve: int,
make_direct_map: bool,
index_param: Optional[str],
) -> Callable[[faiss.Index, str], Dict]:
"""Create function to optimize index by choosing best hyperparameters and calculating metrics"""
def _optimize_index_fn(index: faiss.Index, index_suffix: str):
if make_direct_map:
initialize_direct_map(index)
cur_index_path = make_path_absolute(index_path) + index_suffix if index_path else None
cur_index_infos_path = make_path_absolute(index_infos_path) + index_suffix if index_infos_path else None
if any(re.findall(r"OPQ\d+_\d+,IVF\d+_HNSW\d+,PQ\d+", index_key)):
set_search_hyperparameters(index, f"nprobe={64},efSearch={128},ht={2048}", use_gpu)
metric_infos = optimize_and_measure_index(
embedding_reader,
index,
cur_index_infos_path,
index_key,
index_param,
cur_index_path,
max_index_query_time_ms=max_index_query_time_ms,
min_nearest_neighbors_to_retrieve=min_nearest_neighbors_to_retrieve,
save_on_disk=save_on_disk,
use_gpu=use_gpu,
)
return metric_infos
return _optimize_index_fn
def add_embeddings_to_index_local(
embedding_reader: EmbeddingReader,
trained_index_or_path: Union[faiss.Index, str],
memory_available_for_adding: str,
embedding_ids_df_handler: Optional[Callable[[pd.DataFrame, int], Any]] = None,
index_optimizer: Optional[Callable] = None,
add_embeddings_with_ids: bool = False,
) -> Tuple[Optional[faiss.Index], Optional[Dict[str, str]]]:
"""Add embeddings to index from driver"""
vec_dim = embedding_reader.dimension
batch_size = get_optimal_batch_size(vec_dim, memory_available_for_adding)
logger.info(
f"Using a batch size of {batch_size} (memory overhead {cast_bytes_to_memory_string(batch_size * vec_dim * 4)})"
)
with tempfile.TemporaryDirectory() as tmp_dir:
if isinstance(trained_index_or_path, str):
local_index_path = os.path.join(tmp_dir, "index")
trained_index = load_index(trained_index_or_path, local_index_path)
else:
trained_index = trained_index_or_path
for batch_id, (vec_batch, ids_batch) in enumerate(embedding_reader(batch_size=batch_size)):
if add_embeddings_with_ids:
trained_index.add_with_ids(vec_batch, ids_batch["i"].to_numpy())
else:
trained_index.add(vec_batch)
if embedding_ids_df_handler:
embedding_ids_df_handler(ids_batch, batch_id)
metric_infos = index_optimizer(trained_index, "") if index_optimizer else None # type: ignore
return trained_index, metric_infos
|
""" functions that fixe faiss index_factory function """
# pylint: disable=invalid-name
import re
from typing import Optional
import faiss
def index_factory(d: int, index_key: str, metric_type: int, ef_construction: Optional[int] = None):
"""
Custom index_factory that fixes some issues of
faiss.index_factory with the inner product metric.
"""
if metric_type == faiss.METRIC_INNER_PRODUCT:
# make the index described by the key
if any(re.findall(r"OPQ\d+_\d+,IVF\d+,PQ\d+", index_key)):
params = [int(x) for x in re.findall(r"\d+", index_key)]
cs = params[3] # code size (in Bytes if nbits=8)
nbits = params[4] if len(params) == 5 else 8 # default value
ncentroids = params[2]
out_d = params[1]
M_OPQ = params[0]
quantizer = faiss.index_factory(out_d, "Flat", metric_type)
assert quantizer.metric_type == metric_type
index_ivfpq = faiss.IndexIVFPQ(quantizer, out_d, ncentroids, cs, nbits, metric_type)
assert index_ivfpq.metric_type == metric_type
index_ivfpq.own_fields = True
quantizer.this.disown() # pylint: disable = no-member
opq_matrix = faiss.OPQMatrix(d, M=M_OPQ, d2=out_d)
# opq_matrix.niter = 50 # Same as default value
index = faiss.IndexPreTransform(opq_matrix, index_ivfpq)
elif any(re.findall(r"OPQ\d+_\d+,IVF\d+_HNSW\d+,PQ\d+", index_key)):
params = [int(x) for x in re.findall(r"\d+", index_key)]
M_HNSW = params[3]
cs = params[4] # code size (in Bytes if nbits=8)
nbits = params[5] if len(params) == 6 else 8 # default value
ncentroids = params[2]
out_d = params[1]
M_OPQ = params[0]
quantizer = faiss.IndexHNSWFlat(out_d, M_HNSW, metric_type)
if ef_construction is not None and ef_construction >= 1:
quantizer.hnsw.efConstruction = ef_construction
assert quantizer.metric_type == metric_type
index_ivfpq = faiss.IndexIVFPQ(quantizer, out_d, ncentroids, cs, nbits, metric_type)
assert index_ivfpq.metric_type == metric_type
index_ivfpq.own_fields = True
quantizer.this.disown() # pylint: disable = no-member
opq_matrix = faiss.OPQMatrix(d, M=M_OPQ, d2=out_d)
# opq_matrix.niter = 50 # Same as default value
index = faiss.IndexPreTransform(opq_matrix, index_ivfpq)
elif any(re.findall(r"Pad\d+,IVF\d+_HNSW\d+,PQ\d+", index_key)):
params = [int(x) for x in re.findall(r"\d+", index_key)]
out_d = params[0]
M_HNSW = params[2]
cs = params[3] # code size (in Bytes if nbits=8)
nbits = params[4] if len(params) == 5 else 8 # default value
ncentroids = params[1]
remapper = faiss.RemapDimensionsTransform(d, out_d, True)
quantizer = faiss.IndexHNSWFlat(out_d, M_HNSW, metric_type)
if ef_construction is not None and ef_construction >= 1:
quantizer.hnsw.efConstruction = ef_construction
index_ivfpq = faiss.IndexIVFPQ(quantizer, out_d, ncentroids, cs, nbits, metric_type)
index_ivfpq.own_fields = True
quantizer.this.disown() # pylint: disable = no-member
index = faiss.IndexPreTransform(remapper, index_ivfpq)
elif any(re.findall(r"HNSW\d+", index_key)):
params = [int(x) for x in re.findall(r"\d+", index_key)]
M_HNSW = params[0]
index = faiss.IndexHNSWFlat(d, M_HNSW, metric_type)
assert index.metric_type == metric_type
elif index_key == "Flat" or any(re.findall(r"IVF\d+,Flat", index_key)):
index = faiss.index_factory(d, index_key, metric_type)
else:
index = faiss.index_factory(d, index_key, metric_type)
raise ValueError(
(
"Be careful, faiss might not create what you expect when using the "
"inner product similarity metric, remove this line to try it anyway. "
"Happened with index_key: " + str(index_key)
)
)
else:
index = faiss.index_factory(d, index_key, metric_type)
return index
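# Hedged usage sketch, not part of the original module (dimension and index key
# are illustrative assumptions): build an inner-product OPQ + IVF-PQ index with
# the custom factory above.
if __name__ == "__main__":
    import numpy as np

    d = 128
    index = index_factory(d, "OPQ16_64,IVF256,PQ16", faiss.METRIC_INNER_PRODUCT)
    training_vectors = np.float32(np.random.rand(20000, d))
    index.train(training_vectors)
    index.add(training_vectors)
    print(index.ntotal)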
|
""" useful functions to apply on an index """
import os
import time
from functools import partial
from itertools import chain, repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Dict, Optional, Union, List, Tuple
import logging
from faiss import extract_index_ivf
import faiss
import fsspec
import numpy as np
logger = logging.getLogger("autofaiss")
def get_index_size(index: faiss.Index) -> int:
"""Returns the size in RAM of a given index"""
with NamedTemporaryFile() as tmp_file:
faiss.write_index(index, tmp_file.name)
size_in_bytes = Path(tmp_file.name).stat().st_size
return size_in_bytes
def speed_test_ms_per_query(
index: faiss.Index, query: Optional[np.ndarray] = None, ksearch: int = 40, timout_s: Union[float, int] = 5.0
) -> float:
"""Evaluate the average speed in milliseconds of the index without using batch"""
nb_samples = 2_000
if query is None:
query = np.random.rand(nb_samples, index.d).astype("float32")
count = 0
nb_repeat = 1 + (nb_samples - 1) // query.shape[0]
start_time = time.perf_counter()
for one_query in chain.from_iterable(repeat(query, nb_repeat)):
_, _ = index.search(np.expand_dims(one_query, 0), ksearch)
count += 1
if time.perf_counter() - start_time > timout_s:
break
return (time.perf_counter() - start_time) / count * 1000.0
def search_speed_test(
index: faiss.Index, query: Optional[np.ndarray] = None, ksearch: int = 40, timout_s: Union[float, int] = 10.0
) -> Dict[str, float]:
"""return the average and 99p search speed"""
nb_samples = 2_000
if query is None:
query = np.random.rand(nb_samples, index.d).astype("float32")
test_start_time_s = time.perf_counter()
speed_list_ms = [] # in milliseconds
nb_repeat = 1 + (nb_samples - 1) // query.shape[0]
for one_query in chain.from_iterable(repeat(query, nb_repeat)):
start_time_s = time.perf_counter() # high precision
_, _ = index.search(np.expand_dims(one_query, 0), ksearch)
end_time_s = time.perf_counter()
search_time_ms = 1000.0 * (end_time_s - start_time_s)
speed_list_ms.append(search_time_ms)
if time.perf_counter() - test_start_time_s > timout_s:
break
speed_list_ms2 = np.array(speed_list_ms)
# avg2 = 1000 * (time.perf_counter() - test_start_time_s) / len(speed_list_ms)
speed_infos = {
"avg_search_speed_ms": np.average(speed_list_ms2),
"99p_search_speed_ms": np.quantile(speed_list_ms2, 0.99),
}
return speed_infos
def format_speed_ms_per_query(speed: float) -> str:
"""format the speed (ms/query) into a nice string"""
return f"{speed:.2f} ms/query"
def quantize_vec_without_modifying_index(index: faiss.Index, vecs: np.ndarray) -> np.ndarray:
"""qantize a batch of vectors"""
quantized_vecs = index.sa_decode(index.sa_encode(vecs))
return quantized_vecs
def set_search_hyperparameters(index: faiss.Index, param_str: str, use_gpu: bool = False) -> None:
"""set hyperparameters to an index"""
# depends on installed faiss version # pylint: disable=no-member
params = faiss.ParameterSpace() if not use_gpu else faiss.GpuParameterSpace()
params.set_index_parameters(index, param_str)
def get_index_from_bytes(index_bytes: Union[bytearray, bytes]) -> faiss.Index:
"""Transforms a bytearray containing a faiss index into the corresponding object."""
with NamedTemporaryFile(delete=False) as output_file:
output_file.write(index_bytes)
tmp_name = output_file.name
b = faiss.read_index(tmp_name)
os.remove(tmp_name)
return b
def get_bytes_from_index(index: faiss.Index) -> bytearray:
"""Transforms a faiss index into a bytearray."""
with NamedTemporaryFile(delete=False) as output_file:
faiss.write_index(index, output_file.name)
tmp_name = output_file.name
with open(tmp_name, "rb") as index_file:
b = index_file.read()
os.remove(tmp_name)
return bytearray(b)
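# Illustrative round-trip sketch (editor's addition, not part of autofaiss): shows that the two
# helpers above serialize a faiss index to bytes and restore it unchanged. The tiny IndexFlatL2
# below is only a demo fixture.
def _example_index_bytes_roundtrip() -> None:
    index = faiss.IndexFlatL2(4)
    index.add(np.zeros((10, 4), dtype="float32"))
    restored = get_index_from_bytes(get_bytes_from_index(index))
    assert restored.ntotal == index.ntotal == 10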
def parallel_download_indices_from_remote(
fs: fsspec.AbstractFileSystem, indices_file_paths: List[str], dst_folder: str
):
"""Download small indices in parallel."""
def _download_one(src_dst_path: Tuple[str, str], fs: fsspec.AbstractFileSystem):
src_path, dst_path = src_dst_path
try:
fs.get(src_path, dst_path)
except Exception as e:
raise Exception(f"Failed to download {src_path} to {dst_path}") from e
if len(indices_file_paths) == 0:
return
os.makedirs(dst_folder, exist_ok=True)
dst_paths = [os.path.join(dst_folder, os.path.split(p)[-1]) for p in indices_file_paths]
src_dest_paths = zip(indices_file_paths, dst_paths)
with ThreadPool(min(16, len(indices_file_paths))) as pool:
for _ in pool.imap_unordered(partial(_download_one, fs=fs), src_dest_paths):
pass
def initialize_direct_map(index: faiss.Index) -> None:
nested_index = extract_index_ivf(index) if isinstance(index, faiss.swigfaiss.IndexPreTransform) else index
    # make_direct_map is only implemented for IndexIVF and IndexBinaryIVF, see the generated file faiss/swigfaiss.py
if isinstance(nested_index, (faiss.swigfaiss.IndexIVF, faiss.swigfaiss.IndexBinaryIVF)):
nested_index.make_direct_map()
def save_index(index: faiss.Index, root_dir: str, index_filename: str) -> str:
"""Save index"""
fs = fsspec.core.url_to_fs(root_dir, use_listings_cache=False)[0]
fs.mkdirs(root_dir, exist_ok=True)
output_index_path = os.path.join(root_dir, index_filename)
with fsspec.open(output_index_path, "wb").open() as f:
faiss.write_index(index, faiss.PyCallbackIOWriter(f.write))
return output_index_path
def load_index(index_src_path: str, index_dst_path: str) -> faiss.Index:
fs = fsspec.core.url_to_fs(index_src_path, use_listings_cache=False)[0]
try:
fs.get(index_src_path, index_dst_path)
except Exception as e:
raise Exception(f"Failed to download index from {index_src_path} to {index_dst_path}") from e
return faiss.read_index(index_dst_path)
|
# pylint: disable=unused-import,missing-docstring
|
"""
Building the index with pyspark.
"""
import math
import multiprocessing
import os
import logging
from tempfile import TemporaryDirectory
import tempfile
from typing import Dict, Optional, Iterator, Tuple, Callable, Any, Union, List
from functools import partial
from multiprocessing.pool import ThreadPool
import faiss
import fsspec
import pandas as pd
from embedding_reader import EmbeddingReader
from tqdm import tqdm
from autofaiss.external.metadata import IndexMetadata
from autofaiss.external.optimize import get_optimal_batch_size
from autofaiss.indices.build import get_write_ids_df_to_parquet_fn, get_optimize_index_fn, add_embeddings_to_index_local
from autofaiss.indices.index_utils import (
get_index_from_bytes,
get_bytes_from_index,
parallel_download_indices_from_remote,
load_index,
save_index,
)
from autofaiss.utils.path import make_path_absolute, extract_partition_name_from_path
from autofaiss.utils.cast import cast_memory_to_bytes, cast_bytes_to_memory_string
from autofaiss.utils.decorators import Timeit
from autofaiss.indices.training import create_and_train_index_from_embedding_dir, TrainedIndex
logger = logging.getLogger("autofaiss")
def _generate_suffix(batch_id: int, nb_batches: int) -> str:
suffix_width = int(math.log10(nb_batches)) + 1
return str(batch_id).zfill(suffix_width)
def _generate_small_index_file_name(batch_id: int, nb_batches: int) -> str:
return "index_" + _generate_suffix(batch_id, nb_batches)
def _add_index(
start: int,
end: int,
broadcasted_trained_index_or_path,
memory_available_for_adding: str,
embedding_reader: EmbeddingReader,
batch_id: int,
small_indices_folder: str,
nb_batches: int,
num_cores: Optional[int] = None,
embedding_ids_df_handler: Optional[Callable[[pd.DataFrame, int], Any]] = None,
):
"""
Add a batch of embeddings on trained index and save this index.
Parameters
----------
start: int
Start position of this batch
end: int
End position of this batch
broadcasted_trained_index_or_path: pyspark.Broadcast or str
Broadcasted trained index or path to a trained index
memory_available_for_adding: str
Memory available for adding embeddings
embedding_reader: EmbeddingReader
Embedding reader
batch_id: int
Batch id
small_indices_folder: str
The folder where we save all the small indices
num_cores: int
Number of CPU cores (not Vcores)
embedding_ids_df_handler: Optional[Callable[[pd.DataFrame, int], Any]]
The function that handles the embeddings Ids when id_columns is given
"""
if num_cores is None:
num_cores = multiprocessing.cpu_count()
faiss.omp_set_num_threads(num_cores)
with tempfile.TemporaryDirectory() as tmp_dir:
# load empty trained index
if isinstance(broadcasted_trained_index_or_path, str):
local_index_path = os.path.join(tmp_dir, "index")
empty_index = load_index(broadcasted_trained_index_or_path, local_index_path)
else:
empty_index = get_index_from_bytes(broadcasted_trained_index_or_path.value)
batch_size = get_optimal_batch_size(embedding_reader.dimension, memory_available_for_adding)
ids_total = []
for (vec_batch, ids_batch) in embedding_reader(batch_size=batch_size, start=start, end=end):
consecutive_ids = ids_batch["i"].to_numpy()
# using add_with_ids makes it possible to have consecutive and unique ids over all the N indices
empty_index.add_with_ids(vec_batch, consecutive_ids)
if embedding_ids_df_handler:
ids_total.append(ids_batch)
if embedding_ids_df_handler:
embedding_ids_df_handler(pd.concat(ids_total), batch_id)
save_index(empty_index, small_indices_folder, _generate_small_index_file_name(batch_id, nb_batches))
def _get_pyspark_active_session():
"""Reproduce SparkSession.getActiveSession() available since pyspark 3.0."""
import pyspark # pylint: disable=import-outside-toplevel
# pylint: disable=protected-access
ss: Optional[pyspark.sql.SparkSession] = pyspark.sql.SparkSession._instantiatedSession # mypy: ignore
if ss is None:
logger.info("No pyspark session found, creating a new one!")
ss = (
pyspark.sql.SparkSession.builder.config("spark.driver.memory", "16G")
.master("local[1]")
.appName("Distributed autofaiss")
.config("spark.submit.deployMode", "client")
.getOrCreate()
)
return ss
def _batch_loader(nb_batches: int, total_size: int) -> Iterator[Tuple[int, int, int]]:
"""Yield [batch id, batch start position, batch end position (excluded)]"""
# Thanks to https://stackoverflow.com/a/2135920
batch_size, mod = divmod(total_size, nb_batches)
for batch_id in range(nb_batches):
start = batch_size * batch_id + min(batch_id, mod)
end = batch_size * (batch_id + 1) + min(batch_id + 1, mod)
yield batch_id, start, end
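# Illustrative sketch (editor's addition, not part of autofaiss): _batch_loader splits the total
# size into contiguous, non-overlapping [start, end) ranges whose lengths differ by at most one.
def _example_batch_loader() -> None:
    batches = list(_batch_loader(nb_batches=3, total_size=10))
    assert batches == [(0, 0, 4), (1, 4, 7), (2, 7, 10)]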
def _merge_index(
small_indices_folder: str,
nb_batches: int,
batch_id: Optional[int] = None,
start: Optional[int] = None,
end: Optional[int] = None,
max_size_on_disk: str = "50GB",
tmp_output_folder: Optional[str] = None,
index_optimizer: Callable = None,
) -> Tuple[faiss.Index, Dict[str, str]]:
"""
    Merge all the indices in `small_indices_folder` into a single one.
    Also run the optimization step when `index_optimizer` is given.
    Returns the merged index and the metric infos.
"""
fs = _get_file_system(small_indices_folder)
small_indices_files = sorted(fs.ls(small_indices_folder, detail=False))
small_indices_files = small_indices_files[start:end]
if len(small_indices_files) == 0:
raise ValueError(f"No small index is saved in {small_indices_folder}")
def _merge_from_local(merged: Optional[faiss.Index] = None) -> faiss.Index:
local_file_paths = [
os.path.join(local_indices_folder, filename) for filename in sorted(os.listdir(local_indices_folder))
]
if merged is None:
merged = faiss.read_index(local_file_paths[0])
start_index = 1
else:
start_index = 0
for rest_index_file in tqdm(local_file_paths[start_index:]):
            # if master and executor are the same machine, rest_index_file could be the stage2 folder,
            # so we have to check whether it is a file or not
if os.path.isfile(rest_index_file):
index = faiss.read_index(rest_index_file)
faiss.merge_into(merged, index, shift_ids=False)
return merged
# estimate index size by taking the first index
first_index_file = small_indices_files[0]
first_index_size = fs.size(first_index_file)
max_sizes_in_bytes = cast_memory_to_bytes(max_size_on_disk)
nb_files_each_time = max(1, int(max_sizes_in_bytes / first_index_size))
merged_index = None
n = len(small_indices_files)
nb_iterations = max(math.ceil(n / nb_files_each_time), 1)
with Timeit("-> Merging small indices", indent=4):
with tqdm(total=nb_iterations) as pbar:
for i in range(nb_iterations):
to_downloads = small_indices_files[i * nb_files_each_time : min(n, (i + 1) * nb_files_each_time)]
with TemporaryDirectory() as local_indices_folder:
parallel_download_indices_from_remote(
fs=fs, indices_file_paths=to_downloads, dst_folder=local_indices_folder
)
merged_index = _merge_from_local(merged_index)
pbar.update(1)
if batch_id is not None and tmp_output_folder is not None:
if index_optimizer is not None:
metric_infos = index_optimizer(merged_index, index_suffix=_generate_suffix(batch_id, nb_batches))
else:
metric_infos = None
save_index(merged_index, tmp_output_folder, _generate_small_index_file_name(batch_id, nb_batches))
else:
metric_infos = None
return merged_index, metric_infos
def _get_file_system(path: str) -> fsspec.AbstractFileSystem:
return fsspec.core.url_to_fs(path, use_listings_cache=False)[0]
def _merge_to_n_indices(spark_session, n: int, src_folder: str, dst_folder: str, index_optimizer: Callable = None):
"""Merge all the indices from src_folder into n indices, and return the folder for the next stage, as well as the metrics"""
fs = _get_file_system(src_folder)
nb_indices_on_src_folder = len(fs.ls(src_folder, detail=False))
if nb_indices_on_src_folder <= n and index_optimizer is None:
# no need to merge
return src_folder, None
merge_batches = _batch_loader(nb_batches=n, total_size=nb_indices_on_src_folder)
rdd = spark_session.sparkContext.parallelize(merge_batches, n)
def merge(x):
_, metrics = _merge_index(
small_indices_folder=src_folder,
nb_batches=n,
batch_id=x[0],
start=x[1],
end=x[2],
tmp_output_folder=dst_folder,
index_optimizer=index_optimizer,
) # type: ignore
return metrics
metrics_rdd = rdd.map(merge)
metrics = list(metrics_rdd.collect())
if index_optimizer is not None:
metrics_dict = {metric_info["index_path"]: metric_info for metric_info in metrics} # type: ignore
else:
metrics_dict = None # type: ignore
for file in fs.ls(src_folder, detail=False):
if fs.isfile(file):
fs.rm(file)
return dst_folder, metrics_dict
def add_embeddings_to_index_distributed(
trained_index_or_path: Union[faiss.Index, str],
embedding_reader: EmbeddingReader,
memory_available_for_adding: str,
nb_cores: Optional[int] = None,
temporary_indices_folder="hdfs://root/tmp/distributed_autofaiss_indices",
embedding_ids_df_handler: Optional[Callable[[pd.DataFrame, int], Any]] = None,
nb_indices_to_keep: int = 1,
index_optimizer: Optional[Callable] = None,
) -> Tuple[Optional[faiss.Index], Optional[Dict[str, str]]]:
"""
    Add embeddings to the trained index using pyspark, then merge the small indices produced.
Parameters
----------
trained_index_or_path: trained faiss.Index or path to a trained faiss index
Trained faiss index
embedding_reader: EmbeddingReader
Embedding reader.
memory_available_for_adding: str
Memory available for adding embeddings.
nb_cores: int
Number of CPU cores per executor
temporary_indices_folder: str
Folder to save the temporary small indices
embedding_ids_df_handler: Optional[Callable[[pd.DataFrame, int], Any]]
The function that handles the embeddings Ids when id_columns is given
nb_indices_to_keep: int
Number of indices to keep at most after the merging step
index_optimizer: Optional[Callable]
The function that optimizes the index
"""
temporary_indices_folder = make_path_absolute(temporary_indices_folder)
fs = _get_file_system(temporary_indices_folder)
if fs.exists(temporary_indices_folder):
fs.rm(temporary_indices_folder, recursive=True)
stage1_folder = temporary_indices_folder.rstrip("/") + "/stage-1"
ss = _get_pyspark_active_session()
# Broadcast index
broadcasted_trained_index_or_path = (
trained_index_or_path
if isinstance(trained_index_or_path, str)
else ss.sparkContext.broadcast(get_bytes_from_index(trained_index_or_path))
)
sc = ss._jsc.sc() # pylint: disable=protected-access
n_workers = len(sc.statusTracker().getExecutorInfos()) - 1
# maximum between the number of spark workers, 10M embeddings per task and the number of indices to keep
n_batches = min(
embedding_reader.count, max(n_workers, math.ceil(embedding_reader.count / (10 ** 7)), nb_indices_to_keep)
)
nb_indices_to_keep = min(nb_indices_to_keep, n_batches)
batches = _batch_loader(total_size=embedding_reader.count, nb_batches=n_batches)
rdd = ss.sparkContext.parallelize(batches, n_batches)
with Timeit("-> Adding indices", indent=2):
rdd.foreach(
lambda x: _add_index(
batch_id=x[0],
start=x[1],
end=x[2],
memory_available_for_adding=memory_available_for_adding,
broadcasted_trained_index_or_path=broadcasted_trained_index_or_path,
embedding_reader=embedding_reader,
small_indices_folder=stage1_folder,
num_cores=nb_cores,
embedding_ids_df_handler=embedding_ids_df_handler,
nb_batches=n_batches,
)
)
with Timeit("-> Merging indices", indent=2):
stage2_folder = temporary_indices_folder.rstrip("/") + "/stage-2"
next_stage_folder, _ = _merge_to_n_indices(
spark_session=ss, n=100, src_folder=stage1_folder, dst_folder=stage2_folder, index_optimizer=None
)
if nb_indices_to_keep == 1:
merged_index, _ = _merge_index(small_indices_folder=next_stage_folder, nb_batches=1)
if fs.exists(temporary_indices_folder):
fs.rm(temporary_indices_folder, recursive=True)
metrics = index_optimizer(merged_index, "") # type: ignore
return merged_index, metrics
else:
final_folder = temporary_indices_folder.rstrip("/") + "/final"
next_stage_folder, metrics = _merge_to_n_indices(
spark_session=ss,
n=nb_indices_to_keep,
src_folder=next_stage_folder,
dst_folder=final_folder,
index_optimizer=index_optimizer,
)
if fs.exists(temporary_indices_folder):
fs.rm(temporary_indices_folder, recursive=True)
return None, metrics
def _add_embeddings_to_index(
add_embeddings_fn: Callable,
embedding_reader: EmbeddingReader,
output_root_dir: str,
index_key: str,
current_memory_available: str,
id_columns: Optional[List[str]],
max_index_query_time_ms: float,
min_nearest_neighbors_to_retrieve: int,
use_gpu: bool,
make_direct_map: bool,
) -> Tuple[Optional[faiss.Index], Optional[Dict[str, str]]]:
"""Add embeddings to index"""
# Define output folders
partition = extract_partition_name_from_path(embedding_reader.embeddings_folder)
output_dir = os.path.join(output_root_dir, partition)
index_dest_path = os.path.join(output_dir, "knn.index")
ids_dest_dir = os.path.join(output_dir, "ids")
index_infos_dest_path = os.path.join(output_dir, "index_infos.json")
# Compute memory available for adding embeddings to index
metadata = IndexMetadata(index_key, embedding_reader.count, embedding_reader.dimension, make_direct_map)
index_size = metadata.estimated_index_size_in_bytes()
memory_available_for_adding = cast_bytes_to_memory_string(
cast_memory_to_bytes(current_memory_available) - index_size
)
write_ids_df_to_parquet_fn = get_write_ids_df_to_parquet_fn(ids_root_dir=ids_dest_dir) if id_columns else None
optimize_index_fn = get_optimize_index_fn(
embedding_reader=embedding_reader,
index_key=index_key,
index_path=index_dest_path,
index_infos_path=index_infos_dest_path,
use_gpu=use_gpu,
save_on_disk=True,
max_index_query_time_ms=max_index_query_time_ms,
min_nearest_neighbors_to_retrieve=min_nearest_neighbors_to_retrieve,
make_direct_map=make_direct_map,
index_param=None,
)
# Add embeddings to index
return add_embeddings_fn(
embedding_reader=embedding_reader,
memory_available_for_adding=memory_available_for_adding,
embedding_ids_df_handler=write_ids_df_to_parquet_fn,
index_optimizer=optimize_index_fn,
)
def _add_embeddings_from_dir_to_index(
add_embeddings_fn: Callable,
embedding_root_dir: str,
output_root_dir: str,
index_key: str,
embedding_column_name: str,
current_memory_available: str,
id_columns: Optional[List[str]],
max_index_query_time_ms: float,
min_nearest_neighbors_to_retrieve: int,
use_gpu: bool,
make_direct_map: bool,
) -> Tuple[Optional[faiss.Index], Optional[Dict[str, str]]]:
"""Add embeddings from directory to index"""
# Read embeddings
with Timeit("-> Reading embeddings", indent=2):
embedding_reader = EmbeddingReader(
embedding_root_dir, file_format="parquet", embedding_column=embedding_column_name, meta_columns=id_columns
)
# Add embeddings to index
return _add_embeddings_to_index(
add_embeddings_fn=add_embeddings_fn,
embedding_reader=embedding_reader,
output_root_dir=output_root_dir,
index_key=index_key,
current_memory_available=current_memory_available,
id_columns=id_columns,
max_index_query_time_ms=max_index_query_time_ms,
min_nearest_neighbors_to_retrieve=min_nearest_neighbors_to_retrieve,
use_gpu=use_gpu,
make_direct_map=make_direct_map,
)
def create_big_index(
embedding_root_dir: str,
ss,
output_root_dir: str,
id_columns: Optional[List[str]],
should_be_memory_mappable: bool,
max_index_query_time_ms: float,
max_index_memory_usage: str,
min_nearest_neighbors_to_retrieve: int,
embedding_column_name: str,
index_key: str,
index_path: Optional[str],
current_memory_available: str,
nb_cores: Optional[int],
use_gpu: bool,
metric_type: str,
nb_splits_per_big_index: int,
make_direct_map: bool,
temp_root_dir: str,
) -> Optional[Dict[str, str]]:
"""
Create a big index
"""
def _create_and_train_index_from_embedding_dir() -> TrainedIndex:
trained_index = create_and_train_index_from_embedding_dir(
embedding_root_dir=embedding_root_dir,
embedding_column_name=embedding_column_name,
index_key=index_key,
max_index_memory_usage=max_index_memory_usage,
make_direct_map=make_direct_map,
should_be_memory_mappable=should_be_memory_mappable,
use_gpu=use_gpu,
metric_type=metric_type,
nb_cores=nb_cores,
current_memory_available=current_memory_available,
id_columns=id_columns,
)
index_output_root_dir = os.path.join(temp_root_dir, "training", partition)
output_index_path = save_index(trained_index.index_or_path, index_output_root_dir, "trained_index")
return TrainedIndex(output_index_path, trained_index.index_key, embedding_root_dir)
partition = extract_partition_name_from_path(embedding_root_dir)
if not index_path:
# Train index
rdd = ss.sparkContext.parallelize([embedding_root_dir], 1)
trained_index_path, trained_index_key, _, = rdd.map(
lambda _: _create_and_train_index_from_embedding_dir()
).collect()[0]
else:
assert index_key, "index key of the input index must be provided because you provided an index_path"
trained_index_path = index_path
trained_index_key = index_key
# Add embeddings to index and compute metrics
partition_temp_root_dir = os.path.join(temp_root_dir, "add_embeddings", partition)
index, metrics = _add_embeddings_from_dir_to_index(
add_embeddings_fn=partial(
add_embeddings_to_index_distributed,
trained_index_or_path=trained_index_path,
nb_cores=nb_cores,
temporary_indices_folder=partition_temp_root_dir,
nb_indices_to_keep=nb_splits_per_big_index,
),
embedding_root_dir=embedding_root_dir,
output_root_dir=output_root_dir,
index_key=trained_index_key,
embedding_column_name=embedding_column_name,
current_memory_available=current_memory_available,
id_columns=id_columns,
max_index_query_time_ms=max_index_query_time_ms,
min_nearest_neighbors_to_retrieve=min_nearest_neighbors_to_retrieve,
use_gpu=use_gpu,
make_direct_map=make_direct_map,
)
# Only metrics are returned to save memory on driver
if index:
del index
return metrics
def create_small_index(
embedding_root_dir: str,
output_root_dir: str,
id_columns: Optional[List[str]] = None,
should_be_memory_mappable: bool = False,
max_index_query_time_ms: float = 10.0,
max_index_memory_usage: str = "16G",
min_nearest_neighbors_to_retrieve: int = 20,
embedding_column_name: str = "embedding",
index_key: Optional[str] = None,
index_path: Optional[str] = None,
current_memory_available: str = "32G",
use_gpu: bool = False,
metric_type: str = "ip",
nb_cores: Optional[int] = None,
make_direct_map: bool = False,
) -> Tuple[Optional[faiss.Index], Optional[Dict[str, str]]]:
"""
Create a small index
"""
if not index_path:
trained_index = create_and_train_index_from_embedding_dir(
embedding_root_dir=embedding_root_dir,
embedding_column_name=embedding_column_name,
index_key=index_key,
max_index_memory_usage=max_index_memory_usage,
make_direct_map=make_direct_map,
should_be_memory_mappable=should_be_memory_mappable,
use_gpu=use_gpu,
metric_type=metric_type,
nb_cores=nb_cores,
current_memory_available=current_memory_available,
id_columns=id_columns,
)
else:
assert index_key, "index key of the input index must be provided because you provided an index_path"
with tempfile.TemporaryDirectory() as tmp_dir:
embedding_reader = EmbeddingReader(
embedding_root_dir,
file_format="parquet",
embedding_column=embedding_column_name,
meta_columns=id_columns,
)
index = load_index(index_path, os.path.join(tmp_dir, "index"))
trained_index = TrainedIndex(index, index_key, embedding_reader)
# Add embeddings to index and compute metrics
return _add_embeddings_to_index(
add_embeddings_fn=partial(
add_embeddings_to_index_local,
trained_index_or_path=trained_index.index_or_path,
add_embeddings_with_ids=True,
),
embedding_reader=trained_index.embedding_reader_or_path,
output_root_dir=output_root_dir,
index_key=trained_index.index_key,
current_memory_available=current_memory_available,
id_columns=id_columns,
max_index_query_time_ms=max_index_query_time_ms,
min_nearest_neighbors_to_retrieve=min_nearest_neighbors_to_retrieve,
use_gpu=use_gpu,
make_direct_map=make_direct_map,
)
def create_partitioned_indexes(
partitions: List[str],
big_index_threshold: int,
output_root_dir: str,
nb_cores: Optional[int],
nb_splits_per_big_index: int,
id_columns: Optional[List[str]] = None,
max_index_query_time_ms: float = 10.0,
min_nearest_neighbors_to_retrieve: int = 20,
embedding_column_name: str = "embedding",
index_key: Optional[str] = None,
index_path: Optional[str] = None,
max_index_memory_usage: str = "16G",
current_memory_available: str = "32G",
use_gpu: bool = False,
metric_type: str = "ip",
make_direct_map: bool = False,
should_be_memory_mappable: bool = False,
temp_root_dir: str = "hdfs://root/tmp/distributed_autofaiss_indices",
maximum_nb_threads: int = 256,
) -> List[Optional[Dict[str, str]]]:
"""
Create partitioned indexes from a list of parquet partitions,
i.e. create and train one index per parquet partition
"""
def _create_small_indexes(embedding_root_dirs: List[str]) -> List[Optional[Dict[str, str]]]:
rdd = ss.sparkContext.parallelize(embedding_root_dirs, len(embedding_root_dirs))
return rdd.map(
lambda embedding_root_dir: create_small_index(
embedding_root_dir=embedding_root_dir,
output_root_dir=output_root_dir,
id_columns=id_columns,
should_be_memory_mappable=should_be_memory_mappable,
max_index_query_time_ms=max_index_query_time_ms,
max_index_memory_usage=max_index_memory_usage,
min_nearest_neighbors_to_retrieve=min_nearest_neighbors_to_retrieve,
embedding_column_name=embedding_column_name,
index_key=index_key,
index_path=index_path,
current_memory_available=current_memory_available,
use_gpu=use_gpu,
metric_type=metric_type,
nb_cores=nb_cores,
make_direct_map=make_direct_map,
)[1]
).collect()
ss = _get_pyspark_active_session()
create_big_index_fn = partial(
create_big_index,
ss=ss,
output_root_dir=output_root_dir,
id_columns=id_columns,
should_be_memory_mappable=should_be_memory_mappable,
max_index_query_time_ms=max_index_query_time_ms,
max_index_memory_usage=max_index_memory_usage,
min_nearest_neighbors_to_retrieve=min_nearest_neighbors_to_retrieve,
embedding_column_name=embedding_column_name,
index_key=index_key,
index_path=index_path,
current_memory_available=current_memory_available,
nb_cores=nb_cores,
use_gpu=use_gpu,
metric_type=metric_type,
nb_splits_per_big_index=nb_splits_per_big_index,
make_direct_map=make_direct_map,
temp_root_dir=temp_root_dir,
)
# Compute number of embeddings for each partition
rdd = ss.sparkContext.parallelize(partitions, len(partitions))
partition_sizes = rdd.map(
lambda partition: (
partition,
EmbeddingReader(partition, file_format="parquet", embedding_column=embedding_column_name).count,
)
).collect()
# Group partitions in two categories, small and big indexes
small_partitions = []
big_partitions = []
for partition, size in partition_sizes:
if size < big_index_threshold:
small_partitions.append(partition)
else:
big_partitions.append(partition)
# Create small and big indexes
all_metrics = []
n_threads = min(maximum_nb_threads, len(big_partitions) + int(len(small_partitions) > 0))
with ThreadPool(n_threads) as p:
small_index_metrics_future = (
p.apply_async(_create_small_indexes, (small_partitions,)) if small_partitions else None
)
for metrics in p.starmap(create_big_index_fn, [(p,) for p in big_partitions]):
all_metrics.append(metrics)
if small_index_metrics_future:
all_metrics.extend(small_index_metrics_future.get())
return all_metrics
|
""" function related to search on indices """
from typing import Iterable, Tuple
import numpy as np
def knn_query(index, query, ksearch: int) -> Iterable[Tuple[int, float]]:
    """Do a knn search and return a list of (item id, distance) pairs for the closest items"""
dist, ind = index.search(np.expand_dims(query, 0), ksearch)
distances = dist[0]
item_dist = list(zip(ind[0], distances))
return item_dist
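# Illustrative usage sketch (editor's addition, not part of autofaiss): queries a tiny faiss flat
# index with knn_query. The faiss import and the demo index are assumptions of this example only;
# the module itself just needs numpy.
def _example_knn_query() -> None:
    import faiss  # only needed for this demo
    index = faiss.IndexFlatL2(2)
    index.add(np.array([[0.0, 0.0], [1.0, 1.0]], dtype="float32"))
    results = knn_query(index, np.array([0.1, 0.1], dtype="float32"), ksearch=2)
    assert [int(item) for item, _ in results] == [0, 1]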
|
""" This file contain a class describing a memory efficient flat index """
import heapq
from typing import List, Optional, Tuple
from embedding_reader import EmbeddingReader
import faiss
import numpy as np
from tqdm import trange
from autofaiss.indices.faiss_index_wrapper import FaissIndexWrapper
class MemEfficientFlatIndex(FaissIndexWrapper):
"""
Faiss-like Flat index that can support any size of vectors
without memory issues.
    Two search functions are available: one that uses batches of smaller
    faiss flat indices, and one that relies fully on numpy.
"""
def __init__(self, d: int, metric_type: int):
"""
__init__ function for MemEfficientFlatIndex
Parameters:
-----------
d : int
dimension of the vectors, named d to keep Faiss notation
metric_type : int
similarity metric used in the vector space, using faiss
enumerate values (faiss.METRIC_INNER_PRODUCT and faiss.METRIC_L2)
"""
super().__init__(d, metric_type)
self.dim = d
self.prod_emb = np.zeros((0, self.dim))
self.embedding_reader: Optional[EmbeddingReader] = None
def delete_vectors(self):
"""delete the vectors of the index"""
self.prod_emb = np.zeros((0, self.dim))
# pylint: disable=missing-function-docstring, invalid-name
def add(self, x: np.ndarray):
if self.prod_emb.shape[0] == 0:
self.prod_emb = x.astype(np.float32)
else:
raise NotImplementedError("You can add vectors only once, delete them first with delete_vectors")
def add_all(self, filename: str, nb_items: int):
"""
        Function that adds vectors to the index from a memory-mapped array
Parameters
----------
filename : string
path of the 2D numpy array of shape (nb_items, vector_dim)
on the disk
nb_items : int
number of vectors in the 2D array (the dim is already known)
"""
if self.prod_emb.shape[0] == 0:
self.prod_emb = np.memmap(filename, dtype="float32", mode="r", shape=(nb_items, self.dim))
else:
raise NotImplementedError("You can add vectors only once, delete them first")
def add_files(self, embedding_reader: EmbeddingReader):
if self.embedding_reader is None:
self.embedding_reader = embedding_reader
else:
raise NotImplementedError("You can add vectors only once, delete them first with delete_vectors")
# pylint: disable too_many_locals
def search_numpy(self, xq: np.ndarray, k: int, batch_size: int = 4_000_000):
"""
        Function that searches for the k nearest neighbours of a batch of vectors.
        This implementation is based on vectorized numpy functions; it is slower than
        the search function based on batches of faiss flat indices.
        We keep this implementation because we can build new functions on top of this code.
        Moreover, the distance computation is more precise in numpy than in the faiss
        implementation, which optimizes speed over precision.
Parameters
----------
xq : 2D numpy.array of floats
Batch of vectors of shape (batch_size, vector_dim)
k : int
Number of neighbours to retrieve for every vector
batch_size : int
Size of the batch of vectors that are explored.
            A bigger value is preferred to avoid multiple loadings
of the vectors from the disk.
Returns
-------
D : 2D numpy.array of floats
Distances numpy array of shape (batch_size, k).
Contains the distances computed by the index of the k nearest neighbours.
I : 2D numpy.array of ints
Labels numpy array of shape (batch_size, k).
Contains the vectors' labels of the k nearest neighbours.
"""
assert self.metric_type == faiss.METRIC_INNER_PRODUCT
        # Instantiate several heaps (is there a way to have vectorized heaps?)
h: List[List[Tuple[float, int]]] = [[] for _ in range(xq.shape[0])]
# reshape input for vectorized distance computation
xq_reshaped = np.expand_dims(xq, 1)
# initialize index offset
offset = 0
# For each batch
for i in trange(0, self.prod_emb.shape[0], batch_size):
# compute distances in one tensor product
dist_arr = np.sum((xq_reshaped * np.expand_dims(self.prod_emb[i : i + batch_size], 0)), axis=-1)
# get index of the k biggest
# pylint: disable=unsubscriptable-object # pylint/issues/3139
max_k = min(k, dist_arr.shape[1])
ind_k_max = np.argpartition(dist_arr, -max_k)[:, -max_k:]
assert ind_k_max.shape == (xq.shape[0], max_k)
# to be vectorized if it is indeed the bottleneck, (it's not for batch_size >> 10000)
for j, inds in enumerate(ind_k_max):
for ind, distance in zip(inds, dist_arr[j, inds]):
true_ind = offset + ind if ind != -1 else -1
if len(h[j]) < k:
heapq.heappush(h[j], (distance, true_ind))
else:
heapq.heappushpop(h[j], (distance, true_ind))
offset += batch_size
        # Fill the distance and label matrices
D = np.zeros((xq.shape[0], k), dtype=np.float32)
I = np.full((xq.shape[0], k), fill_value=-1, dtype=np.int32)
for i in range(xq.shape[0]):
# case where we couldn't find enough vectors
max_k = min(k, len(h[i]))
for j in range(max_k):
x = heapq.heappop(h[i])
D[i][max_k - 1 - j] = x[0]
I[i][max_k - 1 - j] = x[1]
return D, I
# pylint: disable=too-many-locals, arguments-differ
def search(self, x: np.ndarray, k: int, batch_size: int = 4_000_000):
"""
        Function that searches for the k nearest neighbours of a batch of vectors
Parameters
----------
x : 2D numpy.array of floats
Batch of vectors of shape (batch_size, vector_dim)
k : int
Number of neighbours to retrieve for every vector
batch_size : int
Size of the batch of vectors that are explored.
            A bigger value is preferred to avoid multiple loadings
of the vectors from the disk.
Returns
-------
D : 2D numpy.array of floats
Distances numpy array of shape (batch_size, k).
Contains the distances computed by the index of the k nearest neighbours.
I : 2D numpy.array of ints
Labels numpy array of shape (batch_size, k).
Contains the vectors' labels of the k nearest neighbours.
"""
if self.prod_emb is None:
raise ValueError("The index is empty")
# Cast in the right format for Faiss
if x.dtype != np.float32:
x = x.astype(np.float32)
# xq for x query, a better name than x which is Faiss convention
xq = x
        # Instantiate several heaps (is there a way to have vectorized heaps?)
h: List[List[Tuple[float, int]]] = [[] for _ in range(xq.shape[0])]
# initialize index offset
offset = 0
# For each batch
for i in trange(0, self.prod_emb.shape[0], batch_size):
            # instantiate a Flat index
brute = faiss.IndexFlatIP(self.dim)
# pylint: disable=no-value-for-parameter
brute.add(self.prod_emb[i : i + batch_size])
D_tmp, I_tmp = brute.search(xq, k)
# to be vectorized if it is indeed the bottleneck, (it's not for batch_size >> 10000)
for j, (distances, inds) in enumerate(zip(D_tmp, I_tmp)):
for distance, ind in zip(distances, inds):
true_ind: int = offset + ind if ind != -1 else -1
if len(h[j]) < k:
heapq.heappush(h[j], (distance, true_ind))
else:
heapq.heappushpop(h[j], (distance, true_ind))
offset += batch_size
        # Fill the distance and label matrices
D = np.zeros((xq.shape[0], k), dtype=np.float32)
I = np.full((xq.shape[0], k), fill_value=-1, dtype=np.int32)
for i in range(xq.shape[0]):
# case where we couldn't find enough vectors
max_k = min(k, len(h[i]))
for j in range(max_k):
x = heapq.heappop(h[i])
D[i][max_k - 1 - j] = x[0]
I[i][max_k - 1 - j] = x[1]
return D, I
def search_files(self, x: np.ndarray, k: int, batch_size: int):
if self.embedding_reader is None:
raise ValueError("The index is empty")
# Cast in the right format for Faiss
if x.dtype != np.float32:
x = x.astype(np.float32)
# xq for x query, a better name than x which is Faiss convention
xq = x
        # Instantiate several heaps (is there a way to have vectorized heaps?)
h: List[List[Tuple[float, int]]] = [[] for _ in range(xq.shape[0])]
# initialize index offset
offset = 0
# For each batch
for emb_array, _ in self.embedding_reader(batch_size):
# for i in trange(0, self.prod_emb.shape[0], batch_size):
            # instantiate a Flat index
brute = faiss.IndexFlatIP(self.dim)
# pylint: disable=no-value-for-parameter
brute.add(emb_array)
D_tmp, I_tmp = brute.search(xq, k)
# to be vectorized if it is indeed the bottleneck, (it's not for batch_size >> 10000)
for j, (distances, inds) in enumerate(zip(D_tmp, I_tmp)):
for distance, ind in zip(distances, inds):
true_ind: int = offset + ind if ind != -1 else -1
if len(h[j]) < k:
heapq.heappush(h[j], (distance, true_ind))
else:
heapq.heappushpop(h[j], (distance, true_ind))
offset += emb_array.shape[0]
        # Fill the distance and label matrices
D = np.zeros((xq.shape[0], k), dtype=np.float32)
I = np.full((xq.shape[0], k), fill_value=-1, dtype=np.int32)
for i in range(xq.shape[0]):
# case where we couldn't find enough vectors
max_k = min(k, len(h[i]))
for j in range(max_k):
x = heapq.heappop(h[i]) # type: ignore
D[i][max_k - 1 - j] = x[0]
I[i][max_k - 1 - j] = x[1]
return D, I
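# Illustrative usage sketch (editor's addition, not part of autofaiss): builds a
# MemEfficientFlatIndex on a tiny in-memory batch and checks that the closest neighbour of the
# first vector is the vector itself.
def _example_mem_efficient_flat_index() -> None:
    embeddings = np.eye(4, dtype="float32")
    index = MemEfficientFlatIndex(d=4, metric_type="ip")
    index.add(embeddings)
    _, labels = index.search(embeddings[:1], k=2)
    assert labels[0][0] == 0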
|
""" This file contains a wrapper class to create Faiss-like indices """
from abc import ABC, abstractmethod
import faiss
import numpy as np
class FaissIndexWrapper(ABC):
"""
    This abstract class describes a Faiss-like index.
    It allows the benchmarking functions written for faiss in this library
    to be reused on custom indices.
"""
# pylint: disable=invalid-name
def __init__(self, d: int, metric_type: int):
"""
__init__ function for FaissIndexWrapper
Parameters:
-----------
d : int
dimension of the vectors, named d to keep Faiss notation
metric_type : int
similarity metric used in the vector space, using faiss
enumerate values (faiss.METRIC_INNER_PRODUCT and faiss.METRIC_L2)
"""
self.d = d
if metric_type in [faiss.METRIC_INNER_PRODUCT, "IP", "ip"]:
self.metric_type = faiss.METRIC_INNER_PRODUCT
elif metric_type in [faiss.METRIC_L2, "L2", "l2"]:
self.metric_type = faiss.METRIC_L2
else:
raise NotImplementedError
# pylint: disable=invalid-name
@abstractmethod
def search(self, x: np.ndarray, k: int):
"""
        Function that searches for the k nearest neighbours of a batch of vectors
Parameters
----------
x : 2D numpy.array of floats
Batch of vectors of shape (batch_size, vector_dim)
k : int
Number of neighbours to retrieve for every vector
Returns
-------
D : 2D numpy.array of floats
Distances numpy array of shape (batch_size, k).
Contains the distances computed by the index of the k nearest neighbours.
I : 2D numpy.array of ints
Labels numpy array of shape (batch_size, k).
Contains the vectors' labels of the k nearest neighbours.
"""
raise NotImplementedError
# pylint: disable=invalid-name
@abstractmethod
def add(self, x: np.ndarray):
"""
Function that adds vectors to the index
Parameters
----------
x : 2D numpy.array of floats
Batch of vectors of shape (batch_size, vector_dim)
"""
raise NotImplementedError
|
"""Index training"""
from typing import Union, NamedTuple, Optional, List
import logging
import multiprocessing
import faiss
from embedding_reader import EmbeddingReader
from autofaiss.external.metadata import IndexMetadata
from autofaiss.external.optimize import check_if_index_needs_training, get_optimal_train_size
from autofaiss.indices.index_factory import index_factory
from autofaiss.utils.cast import cast_bytes_to_memory_string, cast_memory_to_bytes, to_faiss_metric_type
from autofaiss.utils.decorators import Timeit
from autofaiss.external.optimize import get_optimal_index_keys_v2
logger = logging.getLogger("autofaiss")
class TrainedIndex(NamedTuple):
index_or_path: Union[faiss.Index, str]
index_key: str
embedding_reader_or_path: Union[EmbeddingReader, str]
def create_empty_index(vec_dim: int, index_key: str, metric_type: Union[str, int]) -> faiss.Index:
"""Create empty index"""
with Timeit(f"-> Instanciate the index {index_key}", indent=2):
# Convert metric_type to faiss type
metric_type = to_faiss_metric_type(metric_type)
        # Instantiate the index
return index_factory(vec_dim, index_key, metric_type)
def _train_index(
embedding_reader: EmbeddingReader,
index_key: str,
index: faiss.Index,
metadata: IndexMetadata,
current_memory_available: str,
use_gpu: bool,
) -> faiss.Index:
"""Train index"""
logger.info(
f"The index size will be approximately {cast_bytes_to_memory_string(metadata.estimated_index_size_in_bytes())}"
)
# Extract training vectors
with Timeit("-> Extract training vectors", indent=2):
memory_available_for_training = cast_bytes_to_memory_string(cast_memory_to_bytes(current_memory_available))
# Determine the number of vectors necessary to train the index
train_size = get_optimal_train_size(
embedding_reader.count, index_key, memory_available_for_training, embedding_reader.dimension
)
memory_needed_for_training = metadata.compute_memory_necessary_for_training(train_size)
logger.info(
f"Will use {train_size} vectors to train the index, "
f"that will use {cast_bytes_to_memory_string(memory_needed_for_training)} of memory"
)
# Extract training vectors
train_vectors, _ = next(embedding_reader(batch_size=train_size, start=0, end=train_size))
    # Instantiate the index and train it
# pylint: disable=no-member
if use_gpu:
        # if this fails, it means that the GPU version of faiss was not compiled or could not be loaded
assert (
faiss.StandardGpuResources
), "FAISS was not compiled with GPU support, or loading _swigfaiss_gpu.so failed"
res = faiss.StandardGpuResources()
dev_no = 0
# transfer to GPU (may be partial).
index = faiss.index_cpu_to_gpu(res, dev_no, index)
with Timeit(
f"-> Training the index with {train_vectors.shape[0]} vectors of dim {train_vectors.shape[1]}", indent=2
):
index.train(train_vectors)
del train_vectors
return index
def create_and_train_new_index(
embedding_reader: EmbeddingReader,
index_key: str,
metadata: IndexMetadata,
metric_type: Union[str, int],
current_memory_available: str,
use_gpu: bool = False,
) -> faiss.Index:
"""Create and train new index"""
    # Instantiate the index
index = create_empty_index(embedding_reader.dimension, index_key, metric_type)
# Train index if needed
if check_if_index_needs_training(index_key):
index = _train_index(embedding_reader, index_key, index, metadata, current_memory_available, use_gpu)
return index
def create_and_train_index_from_embedding_dir(
embedding_root_dir: str,
embedding_column_name: str,
max_index_memory_usage: str,
make_direct_map: bool,
should_be_memory_mappable: bool,
current_memory_available: str,
use_gpu: bool = False,
index_key: Optional[str] = None,
id_columns: Optional[List[str]] = None,
metric_type: str = "ip",
nb_cores: Optional[int] = None,
) -> TrainedIndex:
"""
Create and train index from embedding directory
"""
nb_cores = nb_cores if nb_cores else multiprocessing.cpu_count()
faiss.omp_set_num_threads(nb_cores)
# Read embeddings
with Timeit("-> Reading embeddings", indent=2):
embedding_reader = EmbeddingReader(
embedding_root_dir, file_format="parquet", embedding_column=embedding_column_name, meta_columns=id_columns
)
# Define index key
if index_key is None:
best_index_keys = get_optimal_index_keys_v2(
embedding_reader.count,
embedding_reader.dimension,
max_index_memory_usage,
make_direct_map=make_direct_map,
should_be_memory_mappable=should_be_memory_mappable,
use_gpu=use_gpu,
)
if not best_index_keys:
raise RuntimeError(f"Unable to find optimal index key from embedding directory {embedding_root_dir}")
index_key = best_index_keys[0]
# Create metadata
with Timeit("-> Reading metadata", indent=2):
metadata = IndexMetadata(index_key, embedding_reader.count, embedding_reader.dimension, make_direct_map)
# Create and train index
index = create_and_train_new_index(
embedding_reader, index_key, metadata, metric_type, current_memory_available, use_gpu
)
return TrainedIndex(index, index_key, embedding_reader)
|
""" function to cast variables in others """
import re
from math import floor
from typing import Union
import faiss
def cast_memory_to_bytes(memory_string: str) -> float:
"""
    Parse a memory string and return the number of bytes
>>> cast_memory_to_bytes("16B")
16
>>> cast_memory_to_bytes("16G") == 16*1024*1024*1024
True
"""
conversion = {unit: (2 ** 10) ** i for i, unit in enumerate("BKMGTPEZ")}
number_match = r"([0-9]*\.[0-9]+|[0-9]+)"
unit_match = "("
for unit in conversion:
if unit != "B":
unit_match += unit + "B|"
for unit in conversion:
unit_match += unit + "|"
unit_match = unit_match[:-1] + ")"
matching_groups = re.findall(number_match + unit_match, memory_string, re.IGNORECASE)
if matching_groups and len(matching_groups) == 1 and "".join(matching_groups[0]) == memory_string:
group = matching_groups[0]
return float(group[0]) * conversion[group[1][0].upper()]
raise ValueError(f"Unknown format for memory string: {memory_string}")
def cast_bytes_to_memory_string(num_bytes: float) -> str:
"""
Cast a number of bytes to a readable string
>>> from autofaiss.utils.cast import cast_bytes_to_memory_string
>>> cast_bytes_to_memory_string(16.*1024*1024*1024) == "16.0GB"
True
"""
suffix = "B"
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if abs(num_bytes) < 1024.0:
return "%3.1f%s%s" % (num_bytes, unit, suffix)
num_bytes /= 1024.0
return "%.1f%s%s" % (num_bytes, "Y", suffix)
def to_faiss_metric_type(metric_type: Union[str, int]) -> int:
"""convert metric_type string/enum to faiss enum of the distance metric"""
if metric_type in ["ip", "IP", faiss.METRIC_INNER_PRODUCT]:
return faiss.METRIC_INNER_PRODUCT
elif metric_type in ["l2", "L2", faiss.METRIC_L2]:
return faiss.METRIC_L2
else:
raise ValueError("Metric currently not supported")
def to_readable_time(seconds: float, rounding: bool = False) -> str:
"""cast time in seconds to readable string"""
initial_seconds = seconds
hours = int(floor(seconds // 3600))
seconds -= 3600 * hours
minutes = int(floor(seconds // 60))
seconds -= 60 * minutes
if rounding:
if hours:
return f"{initial_seconds/3600:3.1f} hours"
if minutes:
return f"{initial_seconds/60:3.1f} minutes"
return f"{initial_seconds:3.1f} seconds"
time_str = ""
if hours:
time_str += f"{hours:d} hours "
if hours or minutes:
time_str += f"{minutes:d} minutes "
time_str += f"{seconds:.2f} seconds"
return time_str
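# Illustrative sketch (editor's addition, not part of autofaiss): shows the two formatting modes
# of to_readable_time for the same duration.
def _example_to_readable_time() -> None:
    assert to_readable_time(3725) == "1 hours 2 minutes 5.00 seconds"
    assert to_readable_time(3725, rounding=True) == "1.0 hours"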
|
""" Various optimization algorithms """
from typing import Callable
# pylint: disable=invalid-name
def discrete_binary_search(is_ok: Callable[[int], bool], n: int) -> int:
"""
Binary search in a function domain
Parameters
----------
    is_ok : Callable[[int], bool]
Boolean monotone function defined on range(n)
n : int
length of the search scope
Returns
-------
i : int
        first index i such that is_ok(i) is True, or n if is_ok is False everywhere
:complexity: O(log(n))
"""
lo = 0
hi = n
while lo < hi:
mid = lo + (hi - lo) // 2
if mid >= n or is_ok(mid):
hi = mid
else:
lo = mid + 1
return lo
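# Illustrative sketch (editor's addition, not part of autofaiss): finds the first index whose
# square reaches a threshold, a typical monotone predicate for discrete_binary_search.
def _example_discrete_binary_search() -> None:
    first_ok = discrete_binary_search(lambda i: i * i >= 50, n=100)
    assert first_ok == 8  # 7 * 7 = 49 < 50 <= 8 * 8 = 64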
|
# pylint: disable=unused-import,missing-docstring
|
""" useful functions t apply on numpy arrays """
import numpy as np
def sanitize(x):
return np.ascontiguousarray(x, dtype="float32")
def multi_array_split(array_list, nb_chunk):
total_length = len(array_list[0])
chunk_size = (total_length - 1) // nb_chunk + 1
assert all(len(x) == total_length for x in array_list)
for i in range(0, total_length, chunk_size):
yield tuple(x[i : i + chunk_size] for x in array_list)
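# Illustrative sketch (editor's addition, not part of autofaiss): splits two aligned arrays into
# the same chunks so that rows stay paired across both arrays.
def _example_multi_array_split() -> None:
    a = np.arange(5)
    b = np.arange(5) * 10
    chunks = list(multi_array_split([a, b], nb_chunk=2))
    assert [tuple(len(x) for x in chunk) for chunk in chunks] == [(3, 3), (2, 2)]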
|
"""path"""
import os
import fsspec
def make_path_absolute(path: str) -> str:
fs, p = fsspec.core.url_to_fs(path, use_listings_cache=False)
if fs.protocol == "file":
return os.path.abspath(p)
return path
def extract_partition_name_from_path(path: str) -> str:
"""Extract partition name from path"""
return path.rstrip("/").split("/")[-1]
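# Illustrative sketch (editor's addition, not part of autofaiss): extract_partition_name_from_path
# works purely on the path string, so no filesystem access is needed; the hdfs path below is a
# made-up example.
def _example_extract_partition_name() -> None:
    assert extract_partition_name_from_path("hdfs://root/embeddings/part-00042/") == "part-00042"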
|
""" Useful decorators for fast debuging """
import functools
import time
import logging
from contextlib import ContextDecorator
from datetime import datetime
from typing import Optional
logger = logging.getLogger("autofaiss")
class Timeit(ContextDecorator):
"""Timing class, used as a context manager"""
def __init__(self, comment: Optional[str] = None, indent: int = 0, verbose: bool = True):
self.start_time = 0
self.comment = comment
self.indent = indent
self.verbose = verbose
def __enter__(self):
if self.verbose:
if self.comment is not None:
space = "\t" * self.indent
start_date = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
logger.info(f"{space}{self.comment} {start_date}")
                # flush to make sure the log is displayed on stdout before entering the wrapped function
for h in logger.handlers:
h.flush()
self.start_time = time.perf_counter()
def __exit__(self, *exc):
if self.verbose:
end_time = time.perf_counter()
run_time = end_time - self.start_time
space = "\t" * self.indent
logger.info(f'{space}>>> Finished "{self.comment}" in {run_time:.4f} secs')
timeit = Timeit()
def timer(func):
"""Print the runtime of the decorated function"""
@functools.wraps(func)
def wrapper_timer(*args, **kwargs):
start_time = time.perf_counter()
value = func(*args, **kwargs)
end_time = time.perf_counter()
run_time = end_time - start_time
logger.info(f"Finished {func.__name__!r} in {run_time:.4f} secs")
return value
return wrapper_timer
def should_run_once(func):
"""
Decorator to force a function to run only once.
    The function raises a ValueError if it is called more than once.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if wrapper.has_run:
raise ValueError("Can't call this function twice")
wrapper.has_run = True
return func(*args, **kwargs)
wrapper.has_run = False
return wrapper
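# Illustrative sketch (editor's addition, not part of autofaiss): a function decorated with
# should_run_once succeeds on the first call and raises ValueError on the second.
def _example_should_run_once() -> None:
    @should_run_once
    def _setup() -> str:
        return "done"
    assert _setup() == "done"
    try:
        _setup()
    except ValueError:
        pass
    else:
        raise AssertionError("the second call should have raised ValueError")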
|
""" gather functions necessary to build an index """
import logging
from typing import Dict, Optional, Tuple, Union, Callable, Any, List
import faiss
import pandas as pd
from embedding_reader import EmbeddingReader
from autofaiss.external.metadata import IndexMetadata
from autofaiss.external.optimize import check_if_index_needs_training, get_optimal_index_keys_v2, get_optimal_train_size
from autofaiss.utils.cast import cast_bytes_to_memory_string, cast_memory_to_bytes, to_readable_time
from autofaiss.utils.decorators import Timeit
from autofaiss.indices import distributed
from autofaiss.indices.index_utils import initialize_direct_map
from autofaiss.indices.training import create_and_train_new_index
from autofaiss.indices.build import add_embeddings_to_index_local
logger = logging.getLogger("autofaiss")
def estimate_memory_required_for_index_creation(
nb_vectors: int,
vec_dim: int,
index_key: Optional[str] = None,
max_index_memory_usage: Optional[str] = None,
make_direct_map: bool = False,
nb_indices_to_keep: int = 1,
) -> Tuple[int, str]:
"""
    Estimates the RAM necessary to create the index.
    The value returned is in bytes.
"""
if index_key is None:
if max_index_memory_usage is not None:
index_key = get_optimal_index_keys_v2(
nb_vectors, vec_dim, max_index_memory_usage, make_direct_map=make_direct_map
)[0]
else:
raise ValueError("you should give max_index_memory_usage value if no index_key is given")
metadata = IndexMetadata(index_key, nb_vectors, vec_dim, make_direct_map)
index_memory = metadata.estimated_index_size_in_bytes()
needed_for_adding = min(index_memory * 0.1, 10 ** 9)
index_needs_training = check_if_index_needs_training(index_key)
if index_needs_training:
# Compute the smallest number of vectors required to train the index given
# the maximal memory constraint
nb_vectors_train = get_optimal_train_size(nb_vectors, index_key, "1K", vec_dim)
memory_for_training = metadata.compute_memory_necessary_for_training(nb_vectors_train)
else:
memory_for_training = 0
# the calculation for max_index_memory_in_one_index comes from the way we split batches
# see _batch_loader in distributed.py
max_index_memory_in_one_index = index_memory // nb_indices_to_keep + index_memory % nb_indices_to_keep
return int(max(max_index_memory_in_one_index + needed_for_adding, memory_for_training)), index_key
def get_estimated_construction_time_infos(nb_vectors: int, vec_dim: int, indent: int = 0) -> str:
"""
Gives a general approximation of the construction time of the index
"""
size = 4 * nb_vectors * vec_dim
train = 1000 # seconds, depends on the number of points for training
add = 450 * size / (150 * 1024 ** 3) # seconds, Linear approx (450s for 150GB in classic conditions)
infos = (
f"-> Train: {to_readable_time(train, rounding=True)}\n"
f"-> Add: {to_readable_time(add, rounding=True)}\n"
f"Total: {to_readable_time(train + add, rounding=True)}"
)
tab = "\t" * indent
infos = tab + infos.replace("\n", "\n" + tab)
return infos
def add_embeddings_to_index(
embedding_reader: EmbeddingReader,
trained_index_or_path: Union[str, faiss.Index],
metadata: IndexMetadata,
current_memory_available: str,
embedding_ids_df_handler: Optional[Callable[[pd.DataFrame, int], Any]] = None,
distributed_engine: Optional[str] = None,
temporary_indices_folder: str = "hdfs://root/tmp/distributed_autofaiss_indices",
nb_indices_to_keep: int = 1,
index_optimizer: Callable = None,
) -> Tuple[Optional[faiss.Index], Optional[Dict[str, str]]]:
"""Add embeddings to the index"""
with Timeit("-> Adding the vectors to the index", indent=2):
# Estimate memory available for adding embeddings to index
size_per_index = metadata.estimated_index_size_in_bytes() / nb_indices_to_keep
memory_available_for_adding = cast_bytes_to_memory_string(
cast_memory_to_bytes(current_memory_available) - size_per_index
)
logger.info(
f"The memory available for adding the vectors is {memory_available_for_adding}"
"(total available - used by the index)"
)
if distributed_engine is None:
return add_embeddings_to_index_local(
embedding_reader=embedding_reader,
trained_index_or_path=trained_index_or_path,
memory_available_for_adding=memory_available_for_adding,
embedding_ids_df_handler=embedding_ids_df_handler,
index_optimizer=index_optimizer,
add_embeddings_with_ids=False,
)
elif distributed_engine == "pyspark":
return distributed.add_embeddings_to_index_distributed(
trained_index_or_path=trained_index_or_path,
embedding_reader=embedding_reader,
memory_available_for_adding=memory_available_for_adding,
embedding_ids_df_handler=embedding_ids_df_handler,
temporary_indices_folder=temporary_indices_folder,
nb_indices_to_keep=nb_indices_to_keep,
index_optimizer=index_optimizer,
)
else:
raise ValueError(f'Distributed by {distributed_engine} is not supported, only "pyspark" is supported')
def create_index(
embedding_reader: EmbeddingReader,
index_key: str,
metric_type: Union[str, int],
current_memory_available: str,
embedding_ids_df_handler: Optional[Callable[[pd.DataFrame, int], Any]] = None,
use_gpu: bool = False,
make_direct_map: bool = False,
distributed_engine: Optional[str] = None,
temporary_indices_folder: str = "hdfs://root/tmp/distributed_autofaiss_indices",
nb_indices_to_keep: int = 1,
index_optimizer: Callable = None,
) -> Tuple[Optional[faiss.Index], Optional[Dict[str, str]]]:
"""
Create an index and add embeddings to the index
"""
metadata = IndexMetadata(index_key, embedding_reader.count, embedding_reader.dimension, make_direct_map)
# Create and train index
trained_index = create_and_train_new_index(
embedding_reader, index_key, metadata, metric_type, current_memory_available, use_gpu
)
# Add embeddings to index
index, metrics = add_embeddings_to_index(
embedding_reader,
trained_index,
metadata,
current_memory_available,
embedding_ids_df_handler,
distributed_engine,
temporary_indices_folder,
nb_indices_to_keep,
index_optimizer,
)
if make_direct_map:
initialize_direct_map(index)
return index, metrics
def create_partitioned_indexes(
partitions: List[str],
output_root_dir: str,
embedding_column_name: str = "embedding",
index_key: Optional[str] = None,
index_path: Optional[str] = None,
id_columns: Optional[List[str]] = None,
should_be_memory_mappable: bool = False,
max_index_query_time_ms: float = 10.0,
max_index_memory_usage: str = "16G",
min_nearest_neighbors_to_retrieve: int = 20,
current_memory_available: str = "32G",
use_gpu: bool = False,
metric_type: str = "ip",
nb_cores: Optional[int] = None,
make_direct_map: bool = False,
temp_root_dir: str = "hdfs://root/tmp/distributed_autofaiss_indices",
big_index_threshold: int = 5_000_000,
nb_splits_per_big_index: int = 1,
maximum_nb_threads: int = 256,
) -> List[Optional[Dict[str, str]]]:
"""
Create partitioned indexes from a list of parquet partitions, i.e. create one index per parquet partition
Only supported with Pyspark. An active PySpark session must exist before calling this method
"""
return distributed.create_partitioned_indexes(
partitions=partitions,
big_index_threshold=big_index_threshold,
output_root_dir=output_root_dir,
nb_cores=nb_cores,
nb_splits_per_big_index=nb_splits_per_big_index,
id_columns=id_columns,
max_index_query_time_ms=max_index_query_time_ms,
min_nearest_neighbors_to_retrieve=min_nearest_neighbors_to_retrieve,
embedding_column_name=embedding_column_name,
index_key=index_key,
index_path=index_path,
max_index_memory_usage=max_index_memory_usage,
current_memory_available=current_memory_available,
use_gpu=use_gpu,
metric_type=metric_type,
make_direct_map=make_direct_map,
should_be_memory_mappable=should_be_memory_mappable,
temp_root_dir=temp_root_dir,
maximum_nb_threads=maximum_nb_threads,
)
|
""" Functions to find optimal index parameters """
import json
import logging
import re
from functools import partial, reduce
from math import floor, log2, sqrt
from operator import mul
from typing import Callable, List, Optional, TypeVar
import faiss
import fsspec
import numpy as np
from autofaiss.external.metadata import IndexMetadata, compute_memory_necessary_for_training_wrapper
from autofaiss.external.scores import compute_fast_metrics
from autofaiss.indices.index_utils import set_search_hyperparameters, speed_test_ms_per_query
from autofaiss.utils.algorithms import discrete_binary_search
from autofaiss.utils.cast import cast_memory_to_bytes
from autofaiss.utils.decorators import Timeit
from embedding_reader import EmbeddingReader
logger = logging.getLogger("autofaiss")
def check_if_index_needs_training(index_key: str) -> bool:
"""
Function that checks if the index needs to be trained
"""
if "IVF" in index_key:
return True
elif "IMI" in index_key:
return True
else:
return False
def index_key_to_nb_cluster(index_key: str) -> int:
"""
Function that takes an index key and returns the number of clusters
"""
matching = re.findall(r"IVF\d+|IMI\d+x\d+", index_key)
if matching:
# case IVF index
if re.findall(r"IVF\d+", matching[0]):
nb_clusters = int(matching[0][3:])
# case IMI index
elif re.findall(r"IMI\d+x\d+", matching[0]):
nb_clusters = 2 ** reduce(mul, [int(num) for num in re.findall(r"\d+", matching[0])])
else:
raise ValueError("Unable to determine the number of clusters for index {}".format(index_key))
else:
raise ValueError("Unable to determine the number of clusters for index {}".format(index_key))
return nb_clusters
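# --- Illustrative sketch (not part of the original autofaiss source) ---
# Worked examples of the parsing above: "IVF4096" declares 4096 inverted lists,
# while "IMI2x10" declares 2 ** (2 * 10) = 1_048_576 cells.
def _example_index_key_to_nb_cluster():  # pragma: no cover
    assert index_key_to_nb_cluster("IVF4096") == 4096
    assert index_key_to_nb_cluster("OPQ64_128,IVF4096_HNSW32,PQ64x8") == 4096
    assert index_key_to_nb_cluster("IMI2x10") == 2 ** 20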
def get_optimal_train_size(
nb_vectors: int, index_key: str, current_memory_available: Optional[str], vec_dim: Optional[int]
) -> int:
"""
Function that determines the number of training points necessary to
train the index, based on faiss heuristics for k-means clustering.
"""
matching = re.findall(r"IVF\d+|IMI\d+x\d+", index_key)
if matching:
nb_clusters = index_key_to_nb_cluster(index_key)
points_per_cluster: int = 100
# compute best possible number of vectors to give to train the index
# given memory constraints
if current_memory_available and vec_dim:
memory_per_cluster_set = compute_memory_necessary_for_training_wrapper(
points_per_cluster, index_key, vec_dim
)
size = cast_memory_to_bytes(current_memory_available)
points_per_cluster = max(min(size / memory_per_cluster_set, points_per_cluster), 31.0)
# You will need between 30 * nb_clusters and 256 * nb_clusters to train the index
train_size = min(round(points_per_cluster * nb_clusters), nb_vectors)
else:
raise ValueError(f"Unknown index type: {index_key}")
return train_size
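# --- Illustrative sketch (not part of the original autofaiss source) ---
# Without a memory constraint the heuristic above keeps 100 points per cluster, so an
# IVF4096 index over a 10M-vector dataset is trained on 4096 * 100 = 409_600 points.
def _example_get_optimal_train_size():  # pragma: no cover
    assert get_optimal_train_size(10_000_000, "IVF4096", None, None) == 409_600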
def get_optimal_batch_size(vec_dim: int, current_memory_available: str) -> int:
"""compute optimal batch size to use the RAM at its full potential for adding vectors"""
memory = cast_memory_to_bytes(current_memory_available)
batch_size = int(min(memory, 10 ** 9) / (vec_dim * 4)) # using more than 1GB of ram is not faster here
return batch_size
def get_optimal_nb_clusters(nb_vectors: int) -> List[int]:
"""
Returns a list with the recommended number of clusters for an index containing nb_vectors vectors.
The first value is the most recommended one.
see: https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index
"""
nb_clusters_list = []
if nb_vectors < 1_000_000:
        # no need to use HNSW for a small number of clusters
x_initial = 4 * sqrt(nb_vectors) # between 4xsqrt(n) and 16xsqrt(n)
x_closest_power = 2 ** round(log2(x_initial))
nb_clusters_list.append(round(x_closest_power))
nb_clusters_list.append(2 * x_closest_power)
nb_clusters_list.append(x_closest_power)
nb_clusters_list.append(round(x_initial))
elif nb_vectors < 10_000_000:
nb_clusters_list.append(16_384)
nb_clusters_list.append(65_536)
elif nb_vectors < 300_000_000:
nb_clusters_list.append(65_536)
nb_clusters_list.append(2 ** 17)
nb_clusters_list.append(2 ** 18) # slow training !
else:
nb_clusters_list.append(2 ** 17)
nb_clusters_list.append(2 ** 18) # slow training !
nb_clusters_list.append(65_536)
nb_clusters_list.append(2 ** 20) # very slow training !
nb_clusters_list = [int(x) for x in nb_clusters_list]
if not nb_clusters_list:
return [256] # default value
return nb_clusters_list
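# --- Illustrative sketch (not part of the original autofaiss source) ---
# For 100_000 vectors, 4 * sqrt(n) is ~1265 and the closest power of two is 1024, so the
# heuristic above proposes [1024, 2048, 1024, 1265]; at 5M vectors it falls back to the
# fixed recommendations [16_384, 65_536].
def _example_get_optimal_nb_clusters():  # pragma: no cover
    assert get_optimal_nb_clusters(100_000) == [1024, 2048, 1024, 1265]
    assert get_optimal_nb_clusters(5_000_000) == [16_384, 65_536]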
def get_optimal_index_keys_v2(
nb_vectors: int,
dim_vector: int,
max_index_memory_usage: str,
flat_threshold: int = 1000,
quantization_threshold: int = 10000,
force_pq: Optional[int] = None,
make_direct_map: bool = False,
should_be_memory_mappable: bool = False,
ivf_flat_threshold: int = 1_000_000,
use_gpu: bool = False,
) -> List[str]:
"""
Gives a list of interesting indices to try, *the one at the top is the most promising*
See: https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index for
detailed explanations.
"""
# Exception cases:
if nb_vectors < flat_threshold: # When less than 1000 vectors, the flat index is usually faster
return ["IVF1,Flat" if should_be_memory_mappable else "Flat"]
if nb_vectors < quantization_threshold: # quantization is not possible (>=10_000 vectors needed)
if should_be_memory_mappable:
return get_optimal_ivf(nb_vectors)
return ["HNSW15"]
if force_pq is not None: # Forced quantization
return get_optimal_quantization(nb_vectors, dim_vector, force_quantization_value=force_pq)
# General cases:
# Get max memory usage
max_size_in_bytes = cast_memory_to_bytes(max_index_memory_usage)
if not should_be_memory_mappable:
# If we can build an HNSW with the given memory constraints, it's the best
m_hnsw = int(floor((max_size_in_bytes / (4 * nb_vectors) - dim_vector) / 2))
if m_hnsw >= 8:
return [f"HNSW{min(m_hnsw, 32)}"]
if nb_vectors < ivf_flat_threshold or use_gpu:
        # Try to build a non-quantized IVF index
index_keys = get_optimal_ivf(nb_vectors)
index_metadata = IndexMetadata(index_keys[0], nb_vectors, dim_vector, make_direct_map)
if index_metadata.estimated_index_size_in_bytes() <= max_size_in_bytes:
return index_keys
# Otherwise, there is not enough space, let's go for quantization
return get_optimal_quantization(nb_vectors, dim_vector, force_max_index_memory_usage=max_index_memory_usage)
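# --- Illustrative sketch (not part of the original autofaiss source) ---
# An HNSW index costs roughly 4 * (d + 2 * M) bytes per vector, which is where the m_hnsw
# bound above comes from: for 1M vectors of dimension 100 under a "1G" budget there is
# room for M >= 8, so the (capped at 32) HNSW key is returned first.
def _example_get_optimal_index_keys_v2():  # pragma: no cover
    assert get_optimal_index_keys_v2(1_000_000, 100, "1G") == ["HNSW32"]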
def get_optimal_ivf(nb_vectors: int) -> List[str]:
"""
    Function that returns a list of relevant index_keys to create non-quantized IVF indices.
Parameters
----------
nb_vectors: int
Number of vectors in the dataset.
"""
index_keys = []
for nb_clusters in get_optimal_nb_clusters(nb_vectors):
index_keys.append(f"IVF{nb_clusters},Flat")
return index_keys
def get_optimal_quantization(
nb_vectors: int,
dim_vector: int,
force_quantization_value: Optional[int] = None,
force_max_index_memory_usage: Optional[str] = None,
) -> List[str]:
"""
Function that returns a list of relevant index_keys to create quantized indices.
    Parameters
----------
nb_vectors: int
Number of vectors in the dataset.
dim_vector: int
Dimension of the vectors in the dataset.
force_quantization_value: Optional[int]
Force to use this value as the size of the quantized vectors (PQx).
It can be used with the force_max_index_memory_usage parameter,
but the result might be empty.
force_max_index_memory_usage: Optional[str]
Add a memory constraint on the index.
It can be used with the force_quantization_value parameter,
but the result might be empty.
    Returns
-------
index_keys: List[str]
List of index_keys that would be good choices for quantization.
The list can be empty if the given constraints are too strong.
"""
# Default values
pq_values = [256, 128, 64, 48, 32, 24, 16, 8, 4]
targeted_compression_ratio = 0.0 # 0 = no constraint
# Force compression ratio if required
if force_max_index_memory_usage is not None:
total_bytes = 4.0 * nb_vectors * dim_vector # x4 because float32
max_mem_bytes = float(cast_memory_to_bytes(force_max_index_memory_usage))
targeted_compression_ratio = total_bytes / max_mem_bytes
# Force quantization value if required
if force_quantization_value is not None:
pq_values = [force_quantization_value]
# Compute optimal number of clusters
relevant_list: List[str] = []
nb_clusters_list = get_optimal_nb_clusters(nb_vectors)
# Look for matching index keys
for pq in pq_values:
if pq < dim_vector:
for nb_clusters in nb_clusters_list:
# Compute quantized vector size
# https://github.com/facebookresearch/faiss/blob/main/faiss/invlists/InvertedLists.h#L193
embedding_id_byte = 8
vector_size_byte = pq + embedding_id_byte
# Compute compression ratio with quantization PQx
compression_ratio = (4 * dim_vector) / vector_size_byte
# Add index_key if compression ratio is high enough
if compression_ratio >= targeted_compression_ratio:
# y is a multiple of pq (required)
# y <= d, with d the dimension of the input vectors (preferable)
# y <= 6*pq (preferable)
                    # here we may choose a y slightly bigger than d to avoid losing information:
                    # for a case such as d=101 with pq=64, y=128 is better than y=64 because it
                    # avoids losing information in the linear transform
y = (min(dim_vector // pq, 6) + 1) * pq
cluster_opt = f"IVF{nb_clusters}" if nb_clusters < 1000 else f"IVF{nb_clusters}_HNSW32"
relevant_list.append(f"OPQ{pq}_{y},{cluster_opt},PQ{pq}x8")
return relevant_list
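# --- Illustrative sketch (not part of the original autofaiss source) ---
# With the 8-byte id overhead above, PQ64 codes on 512-d float32 vectors compress by
# (4 * 512) / (64 + 8) ~= 28x and the OPQ output dimension is
# y = (min(512 // 64, 6) + 1) * 64 = 448, hence the index keys below.
def _example_get_optimal_quantization():  # pragma: no cover
    assert get_optimal_quantization(2_000_000, 512, force_quantization_value=64) == [
        "OPQ64_448,IVF16384_HNSW32,PQ64x8",
        "OPQ64_448,IVF65536_HNSW32,PQ64x8",
    ]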
T = TypeVar("T", int, float)
def get_min_param_value_for_best_neighbors_coverage(
index: faiss.Index,
parameter_range: List[T],
hyperparameter_str_from_param: Callable[[T], str],
targeted_nb_neighbors_to_query: int,
*,
targeted_coverage: float = 0.99,
use_gpu: bool = False,
) -> T:
"""
This function returns the minimal value to set in the index hyperparameters so that,
on average, the index retrieves 99% of the requested k=targeted_nb_neighbors_to_query nearest neighbors.
1 ^ ------------------------
| /
nearest | /
neighbors | /
coverage | /
| /
0 +--[--------------------------]--> param_value
^ ^ ^
| | |
| min_param_value |
| |
min(parameter_range) max(parameter_range)
Parameters
----------
index: faiss.Index
Index to search on.
parameter_range: List[T]
List of possible values for the hyperparameter. This list is sorted.
hyperparameter_str_from_param: Callable[[T], str]
Function to generate a hyperparameter string from the hyperparameter value
on which we do a binary search.
targeted_nb_neighbors_to_query: int
Targeted number of neighbors to query.
targeted_coverage: float
        Targeted nearest neighbors coverage. The average ratio of neighbors actually retrieved
when asking for k=targeted_nb_neighbors_to_query nearest neighbors.
use_gpu: bool
Whether the index is on the GPU.
"""
# Initialize query vectors to run the benchmark
query_vectors = index.reconstruct_n(0, min(index.ntotal, 100))
# Function to compute the coverage of the nearest neighbors
def get_nearest_neighbors_coverage(k: int) -> float:
ind = index.search(query_vectors, k)[1]
return 1 - np.sum(ind == -1) / ind.size
# Display a warning if the targeted number of nearest neighbors is incoherent with the index size
if targeted_nb_neighbors_to_query > index.ntotal:
logger.warning(
f"The targeted number of nearest neighbors ({targeted_nb_neighbors_to_query}) "
f"is greater than the total number of vectors in the index ({index.ntotal}). "
"We set the targeted number of nearest neighbors to the total number of vectors."
)
targeted_nb_neighbors_to_query = index.ntotal
# Compute the max nearest neighbors coverage possible with the given hyperparameters
param_str = hyperparameter_str_from_param(parameter_range[-1])
set_search_hyperparameters(index, param_str, use_gpu)
max_nearest_neighbors_coverage = get_nearest_neighbors_coverage(targeted_nb_neighbors_to_query)
# If the index cannot reach the targeted coverage, we adapt it.
if max_nearest_neighbors_coverage < targeted_coverage:
logger.warning(
f"The maximum nearest neighbors coverage is {100*max_nearest_neighbors_coverage:.2f}% for this index. "
f"It means that when requesting {targeted_nb_neighbors_to_query} nearest neighbors, the average number "
f"of retrieved neighbors will be {round(targeted_nb_neighbors_to_query*max_nearest_neighbors_coverage)}. "
f"The program will try to find the best hyperparameters to reach 95% of this max coverage at least, "
"and then will optimize the search time for this target. "
"The index search speed could be higher than the requested max search speed."
)
# In that case there is a hard limit on the maximal nearest neighbors coverage.
        # We redefine the new targeted coverage to reach the beginning of the inflexion point
#
# 1 ^ <---- Initial target: 99% coverage
# |
# nearest | ------------------------- <---- New target 0.95*max_nearest_neighbors_coverage
# neighbors | /
# coverage | /
# | /
# 0 +--[--------------------------]--> param_value
# ^ ^ ^
# | | |
# | min_param_value |
# | |
# min(parameter_range) max(parameter_range)
targeted_coverage = 0.95 * max_nearest_neighbors_coverage
    # Initialize the binary search
def is_meeting_constraint(rank: int) -> bool:
parameter_value = parameter_range[rank]
param_str = hyperparameter_str_from_param(parameter_value)
set_search_hyperparameters(index, param_str, use_gpu)
nearest_neighbors_coverage = get_nearest_neighbors_coverage(targeted_nb_neighbors_to_query)
return nearest_neighbors_coverage >= targeted_coverage
# Find the min param_value that reaches the targeted coverage
best_rank = max(0, discrete_binary_search(is_meeting_constraint, len(parameter_range)) - 1)
return parameter_range[best_rank]
def binary_search_on_param(
index: faiss.Index,
parameter_range: List[T],
max_speed_ms: float, # milliseconds
hyperparameter_str_from_param: Callable[[T], str],
timeout_boost_for_precision_search: float = 6.0,
use_gpu: bool = False,
max_timeout_per_iteration_s: float = 1.0, # seconds
) -> T:
"""
Apply a binary search on a given hyperparameter to maximize the recall given
a query speed constraint in milliseconds/query.
Parameters
----------
index: faiss.Index
Index to search on.
parameter_range: List[T]
List of possible values for the hyperparameter. This list is sorted.
max_speed_ms: float
Maximum query speed in milliseconds/query.
hyperparameter_str_from_param: Callable[[T], str]
Function to generate a hyperparameter string from the hyperparameter value
on which we do a binary search.
timeout_boost_for_precision_search: float
Timeout boost for the precision search phase.
use_gpu: bool
Whether the index is on the GPU.
max_timeout_per_iteration_s: float
Maximum timeout per iteration in seconds.
"""
query_vectors = index.reconstruct_n(0, min(index.ntotal, 4000))
timout_s = 15 * max_speed_ms / 1000
get_speed = partial(
speed_test_ms_per_query, query=query_vectors, ksearch=40, timout_s=min(max_timeout_per_iteration_s, timout_s)
)
def is_not_acceptable_speed(rank: int) -> bool:
parameter_value = parameter_range[rank]
param_str = hyperparameter_str_from_param(parameter_value)
set_search_hyperparameters(index, param_str, use_gpu)
speed = get_speed(index)
return speed >= max_speed_ms
best_rank = max(0, discrete_binary_search(is_not_acceptable_speed, len(parameter_range)) - 1)
    # make sure that the query time is respected by spending timeout_boost_for_precision_search
    # times more time to evaluate the query speed
decreasing_ratio = 0.95
query_vectors = index.reconstruct_n(0, min(index.ntotal, 50000))
get_speed = partial(
speed_test_ms_per_query,
query=query_vectors,
ksearch=40,
timout_s=min(max_timeout_per_iteration_s, timeout_boost_for_precision_search * timout_s),
)
while is_not_acceptable_speed(best_rank) and best_rank > 1:
best_rank -= max(1, floor((1 - decreasing_ratio) * best_rank))
best_rank = max(0, min(best_rank, len(parameter_range) - 1))
return parameter_range[best_rank]
def get_optimal_hyperparameters(
index,
index_key: str,
max_speed_ms: float, # milliseconds
use_gpu: bool = False,
max_timeout_per_iteration_s: float = 1.0, # seconds
min_ef_search: int = 32,
min_nearest_neighbors_to_retrieve: int = 20,
) -> str:
"""Find the optimal hyperparameters to maximize the recall given a query speed in milliseconds/query"""
params = [int(x) for x in re.findall(r"\d+", index_key)]
if any(re.findall(r"OPQ\d+_\d+,IVF\d+,PQ\d+", index_key)):
ht = 2048
nb_clusters = int(params[2])
hyperparameter_str_from_param = lambda nprobe: f"nprobe={nprobe},ht={ht}"
parameter_range = list(range(1, min(6144, nb_clusters) + 1))
timeout_boost_for_precision_search = 6.0
elif any(re.findall(r"OPQ\d+_\d+,IVF\d+_HNSW\d+,PQ\d+", index_key)):
ht = 2048
nb_clusters = int(params[2])
hyperparameter_str_from_param = lambda nprobe: f"nprobe={nprobe},efSearch={2*nprobe},ht={ht}"
parameter_range = list(range(max(1, min_ef_search // 2), min(6144, nb_clusters) + 1))
timeout_boost_for_precision_search = 12.0
elif any(re.findall(r"HNSW\d+", index_key)):
hyperparameter_str_from_param = lambda ef_search: f"efSearch={ef_search}"
parameter_range = list(range(16, 2 ** 14))
timeout_boost_for_precision_search = 6.0
elif any(re.findall(r"IVF\d+,Flat", index_key)):
nb_clusters = int(params[0])
hyperparameter_str_from_param = lambda nprobe: f"nprobe={nprobe}"
parameter_range = list(range(1, nb_clusters + 1))
timeout_boost_for_precision_search = 6.0
elif index_key == "Flat":
return ""
else:
        raise NotImplementedError(f"we don't have heuristics for that kind of index ({index_key})")
min_param_value = get_min_param_value_for_best_neighbors_coverage(
index, parameter_range, hyperparameter_str_from_param, min_nearest_neighbors_to_retrieve, use_gpu=use_gpu
)
parameter_range = [param_value for param_value in parameter_range if param_value >= min_param_value]
parameter_range = parameter_range or [min_param_value]
optimal_param = binary_search_on_param(
index,
parameter_range,
max_speed_ms,
hyperparameter_str_from_param,
timeout_boost_for_precision_search,
use_gpu,
max_timeout_per_iteration_s,
)
return hyperparameter_str_from_param(optimal_param)
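# --- Illustrative usage sketch (not part of the original autofaiss source) ---
# Tune nprobe on a small random IVF index so that queries stay under ~5 ms each; the
# returned parameter string depends on the machine running the benchmark.
def _example_get_optimal_hyperparameters():  # pragma: no cover
    dim, index_key = 64, "IVF256,Flat"
    embeddings = np.random.random((20_000, dim)).astype("float32")
    index = faiss.index_factory(dim, index_key, faiss.METRIC_INNER_PRODUCT)
    index.train(embeddings)
    index.add(embeddings)
    index_param = get_optimal_hyperparameters(index, index_key, max_speed_ms=5.0)
    set_search_hyperparameters(index, index_param)  # e.g. "nprobe=32"
    return index_param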
def optimize_and_measure_index(
embedding_reader: EmbeddingReader,
index: faiss.Index,
index_infos_path: Optional[str],
index_key: str,
index_param: Optional[str],
index_path: Optional[str],
*,
max_index_query_time_ms: float,
min_nearest_neighbors_to_retrieve: int,
save_on_disk: bool,
use_gpu: bool,
):
"""Optimize one index by selecting the best hyperparameters and calculate its metrics"""
if index_param is None:
with Timeit(f"Computing best hyperparameters for index {index_path}", indent=1):
index_param = get_optimal_hyperparameters(
index,
index_key,
max_speed_ms=max_index_query_time_ms,
min_nearest_neighbors_to_retrieve=min_nearest_neighbors_to_retrieve,
use_gpu=use_gpu,
)
# Set search hyperparameters for the index
set_search_hyperparameters(index, index_param, use_gpu)
logger.info(f"The best hyperparameters are: {index_param}")
metric_infos = {"index_key": index_key, "index_param": index_param, "index_path": index_path}
with Timeit("Compute fast metrics", indent=1):
metric_infos.update(compute_fast_metrics(embedding_reader, index))
if save_on_disk:
with Timeit("Saving the index on local disk", indent=1):
with fsspec.open(index_path, "wb").open() as f:
faiss.write_index(index, faiss.PyCallbackIOWriter(f.write))
with fsspec.open(index_infos_path, "w").open() as f:
json.dump(metric_infos, f)
return metric_infos
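# --- Illustrative usage sketch (not part of the original autofaiss source) ---
# End-to-end tuning of a prebuilt index; the embeddings folder and index files below are
# hypothetical placeholders.
def _example_optimize_and_measure_index():  # pragma: no cover
    embedding_reader = EmbeddingReader("embeddings/", file_format="npy")
    index = faiss.read_index("knn.index")
    return optimize_and_measure_index(
        embedding_reader,
        index,
        index_infos_path="infos.json",
        index_key="IVF4096,Flat",
        index_param=None,  # None triggers the hyperparameter search above
        index_path="knn.index",
        max_index_query_time_ms=10.0,
        min_nearest_neighbors_to_retrieve=20,
        save_on_disk=True,
        use_gpu=False,
    )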
|
"""
Index metadata for Faiss indices.
"""
import re
from enum import Enum
from math import ceil
from autofaiss.utils.cast import cast_bytes_to_memory_string
from autofaiss.external.descriptions import (
INDEX_DESCRIPTION_BLOCKS,
IndexBlock,
TUNABLE_PARAMETERS_DESCRIPTION_BLOCKS,
TunableParam,
)
class IndexType(Enum):
FLAT = 0
HNSW = 1
OPQ_IVF_PQ = 2
OPQ_IVF_HNSW_PQ = 3
PAD_IVF_HNSW_PQ = 4
NOT_SUPPORTED = 5
IVF_FLAT = 6
class IndexMetadata:
"""
Class to compute index metadata given the index_key, the number of vectors and their dimension.
Note: We don't create classes for each index type in order to keep the code simple.
"""
def __init__(self, index_key: str, nb_vectors: int, dim_vector: int, make_direct_map: bool = False):
self.index_key = index_key
self.nb_vectors = nb_vectors
self.dim_vector = dim_vector
self.fast_description = ""
self.description_blocs = []
self.tunable_params = []
self.params = {}
self.make_direct_map = make_direct_map
params = [int(x) for x in re.findall(r"\d+", index_key)]
if any(re.findall(r"OPQ\d+_\d+,IVF\d+,PQ\d+", index_key)):
self.index_type = IndexType.OPQ_IVF_PQ
self.fast_description = "An inverted file index (IVF) with quantization and OPQ preprocessing."
self.description_blocs = [IndexBlock.IVF, IndexBlock.PQ, IndexBlock.OPQ]
self.tunable_params = [TunableParam.NPROBE, TunableParam.HT]
self.params["pq"] = params[3]
self.params["nbits"] = params[4] if len(params) == 5 else 8 # default value
self.params["ncentroids"] = params[2]
self.params["out_d"] = params[1]
self.params["M_OPQ"] = params[0]
elif any(re.findall(r"OPQ\d+_\d+,IVF\d+_HNSW\d+,PQ\d+", index_key)):
self.index_type = IndexType.OPQ_IVF_HNSW_PQ
self.fast_description = "An inverted file index (IVF) with quantization, OPQ preprocessing, and HNSW index."
self.description_blocs = [IndexBlock.IVF_HNSW, IndexBlock.HNSW, IndexBlock.PQ, IndexBlock.OPQ]
self.tunable_params = [TunableParam.NPROBE, TunableParam.EFSEARCH, TunableParam.HT]
self.params["M_HNSW"] = params[3]
self.params["pq"] = params[4]
self.params["nbits"] = params[5] if len(params) == 6 else 8 # default value
self.params["ncentroids"] = params[2]
self.params["out_d"] = params[1]
self.params["M_OPQ"] = params[0]
elif any(re.findall(r"Pad\d+,IVF\d+_HNSW\d+,PQ\d+", index_key)):
self.index_type = IndexType.PAD_IVF_HNSW_PQ
self.fast_description = (
"An inverted file index (IVF) with quantization, a padding on input vectors, and HNSW index."
)
self.description_blocs = [IndexBlock.IVF_HNSW, IndexBlock.HNSW, IndexBlock.PQ, IndexBlock.PAD]
self.tunable_params = [TunableParam.NPROBE, TunableParam.EFSEARCH, TunableParam.HT]
self.params["out_d"] = params[0]
self.params["M_HNSW"] = params[2]
self.params["pq"] = params[3]
self.params["nbits"] = params[4] if len(params) == 5 else 8 # default value
self.params["ncentroids"] = params[1]
elif any(re.findall(r"HNSW\d+", index_key)):
self.index_type = IndexType.HNSW
self.fast_description = "An HNSW index."
self.description_blocs = [IndexBlock.HNSW]
self.tunable_params = [TunableParam.EFSEARCH]
self.params["M_HNSW"] = params[0]
elif any(re.findall(r"IVF\d+,Flat", index_key)):
self.index_type = IndexType.IVF_FLAT
            self.fast_description = "An inverted file index (IVF) with no quantization."
self.description_blocs = [IndexBlock.IVF]
self.tunable_params = [TunableParam.NPROBE]
self.params["ncentroids"] = params[0]
elif index_key == "Flat":
self.index_type = IndexType.FLAT
self.fast_description = "A simple flat index."
self.description_blocs = [IndexBlock.FLAT]
self.tunable_params = []
else:
self.index_type = IndexType.NOT_SUPPORTED
self.fast_description = "No description for this index, feel free to contribute :)"
self.description_blocs = []
self.tunable_params = []
def get_index_type(self) -> IndexType:
"""
return the index type.
"""
return self.index_type
def estimated_index_size_in_bytes(self) -> int:
"""
Compute the estimated size of the index in bytes.
"""
if self.index_type == IndexType.FLAT:
return self.nb_vectors * self.dim_vector * 4
if self.index_type == IndexType.HNSW:
# M bidirectional links per vector in the HNSW graph
hnsw_graph_in_bytes = self.nb_vectors * self.params["M_HNSW"] * 2 * 4
vectors_size_in_bytes = self.nb_vectors * self.dim_vector * 4
return vectors_size_in_bytes + hnsw_graph_in_bytes
if self.index_type in [IndexType.OPQ_IVF_PQ, IndexType.OPQ_IVF_HNSW_PQ, IndexType.PAD_IVF_HNSW_PQ]:
direct_map_overhead = 8 * self.nb_vectors if self.make_direct_map else 0
            # We neglect the size of the OPQ table for the moment.
code_size = ceil(self.params["pq"] * self.params["nbits"] / 8)
# https://github.com/facebookresearch/faiss/blob/main/faiss/invlists/InvertedLists.h#L193
embedding_id_byte = 8
vector_size_byte = code_size + embedding_id_byte
vectors_size_in_bytes = self.nb_vectors * vector_size_byte
centroid_size_in_bytes = self.params["ncentroids"] * self.dim_vector * 4
total_size_in_byte = direct_map_overhead + vectors_size_in_bytes + centroid_size_in_bytes
if self.index_type in [IndexType.OPQ_IVF_HNSW_PQ, IndexType.PAD_IVF_HNSW_PQ]:
total_size_in_byte += self.params["ncentroids"] * self.params["M_HNSW"] * 2 * 4
if self.index_type in [IndexType.OPQ_IVF_PQ, IndexType.OPQ_IVF_HNSW_PQ]:
total_size_in_byte += self.params["M_OPQ"] * self.params["out_d"] * 4
return total_size_in_byte
if self.index_type == IndexType.IVF_FLAT:
direct_map_overhead = 8 * self.nb_vectors if self.make_direct_map else 0
vectors_size_in_bytes = self.nb_vectors * self.dim_vector * 4
centroid_size_in_bytes = self.params["ncentroids"] * self.dim_vector * 4
return direct_map_overhead + vectors_size_in_bytes + centroid_size_in_bytes
return -1
def get_index_description(self, tunable_parameters_infos=False) -> str:
"""
Gives a generic description of the index.
"""
description = self.fast_description
if self.index_type == IndexType.NOT_SUPPORTED:
return description
description += "\n"
index_size_string = cast_bytes_to_memory_string(self.estimated_index_size_in_bytes())
description += f"The size of the index should be around {index_size_string}.\n\n"
description += "\n".join(INDEX_DESCRIPTION_BLOCKS[desc] for desc in self.description_blocs) + "\n\n"
if tunable_parameters_infos:
if not self.tunable_params:
description += "No parameters can be tuned to find a query speed VS recall tradeoff\n\n"
else:
description += "List of parameters that can be tuned to find a query speed VS recall tradeoff:\n"
description += (
"\n".join(TUNABLE_PARAMETERS_DESCRIPTION_BLOCKS[desc] for desc in self.tunable_params) + "\n\n"
)
description += """
For all indices except the flat index, the query speed can be adjusted.
The lower the speed limit the lower the recall. With a looser constraint
on the query time, the recall can be higher, but it is limited by the index
structure (if there is quantization for instance).
"""
return description
def compute_memory_necessary_for_training(self, nb_training_vectors: int) -> float:
"""
Function that computes the memory necessary to train an index with nb_training_vectors vectors
"""
if self.index_type == IndexType.FLAT:
return 0
elif self.index_type == IndexType.IVF_FLAT:
return self.compute_memory_necessary_for_ivf_flat(nb_training_vectors)
elif self.index_type == IndexType.HNSW:
return 0
elif self.index_type == IndexType.OPQ_IVF_PQ:
return self.compute_memory_necessary_for_opq_ivf_pq(nb_training_vectors)
elif self.index_type == IndexType.OPQ_IVF_HNSW_PQ:
return self.compute_memory_necessary_for_opq_ivf_hnsw_pq(nb_training_vectors)
elif self.index_type == IndexType.PAD_IVF_HNSW_PQ:
return self.compute_memory_necessary_for_pad_ivf_hnsw_pq(nb_training_vectors)
else:
return 500 * 10 ** 6
def compute_memory_necessary_for_ivf_flat(self, nb_training_vectors: int):
"""Compute the memory estimation for index type IVF_FLAT."""
ivf_memory_in_bytes = self._get_ivf_training_memory_usage_in_bytes()
# TODO: remove x1.5 when estimation is correct
return self._get_vectors_training_memory_usage_in_bytes(nb_training_vectors) * 1.5 + ivf_memory_in_bytes
def compute_memory_necessary_for_opq_ivf_pq(self, nb_training_vectors: int) -> float:
"""Compute the memory estimation for index type OPQ_IVF_PQ."""
# TODO: remove x1.5 when estimation is correct
return (
self._get_vectors_training_memory_usage_in_bytes(nb_training_vectors) * 1.5
+ self._get_opq_training_memory_usage_in_bytes(nb_training_vectors)
+ self._get_ivf_training_memory_usage_in_bytes()
+ self._get_pq_training_memory_usage_in_bytes()
)
def compute_memory_necessary_for_opq_ivf_hnsw_pq(self, nb_training_vectors: int) -> float:
"""Compute the memory estimation for index type OPQ_IVF_HNSW_PQ."""
# TODO: remove x1.5 when estimation is correct
return (
self._get_vectors_training_memory_usage_in_bytes(nb_training_vectors) * 1.5
+ self._get_opq_training_memory_usage_in_bytes(nb_training_vectors)
+ self._get_ivf_training_memory_usage_in_bytes()
+ self._get_ivf_hnsw_training_memory_usage_in_bytes()
+ self._get_pq_training_memory_usage_in_bytes()
)
def compute_memory_necessary_for_pad_ivf_hnsw_pq(self, nb_training_vectors: int):
"""Compute the memory estimation for index type PAD_IVF_HNSW_PQ."""
# TODO: remove x1.5 when estimation is correct
return (
self._get_vectors_training_memory_usage_in_bytes(nb_training_vectors) * 1.5
+ self._get_ivf_training_memory_usage_in_bytes()
+ self._get_ivf_hnsw_training_memory_usage_in_bytes()
+ self._get_pq_training_memory_usage_in_bytes()
)
def _get_vectors_training_memory_usage_in_bytes(self, nb_training_vectors: int):
"""Get vectors memory estimation in bytes."""
return 4.0 * self.dim_vector * nb_training_vectors
def _get_ivf_training_memory_usage_in_bytes(self):
"""Get IVF memory estimation in bytes."""
return 4.0 * self.params["ncentroids"] * self.dim_vector
def _get_ivf_hnsw_training_memory_usage_in_bytes(self):
"""Get HNSW followed by IVF memory estimation in bytes."""
hnsw_graph_in_bytes = self.params["ncentroids"] * self.params["M_HNSW"] * 2 * 4
vectors_size_in_bytes = self.params["ncentroids"] * self.dim_vector * 4
return vectors_size_in_bytes + hnsw_graph_in_bytes
def _get_opq_training_memory_usage_in_bytes(self, nb_training_vectors: int):
"""Get OPQ memory estimation in bytes."""
# see OPQMatrix::train on https://github.com/facebookresearch/faiss/blob/main/faiss/VectorTransform.cpp#L987
d_in, d_out, code_size = self.dim_vector, self.params["out_d"], self.params["M_OPQ"]
n = min(256 * 256, nb_training_vectors)
d = max(d_in, d_out)
d2 = d_out
xproj = d2 * n
pq_recons = d2 * n
xxr = d * n
tmp = d * d * 4
mem_code_size = code_size * n
opq_memory_in_bytes = (xproj + pq_recons + xxr + tmp) * 4.0 + mem_code_size * 1.0
return opq_memory_in_bytes
def _get_pq_training_memory_usage_in_bytes(self):
"""Get PQ memory estimation in bytes."""
return ceil(self.params["pq"] * self.params["nbits"] / 8)
def compute_memory_necessary_for_training_wrapper(nb_training_vectors: int, index_key: str, dim_vector: int):
# nb_vectors is useless for training memory estimation, so just put -1
return IndexMetadata(index_key, -1, dim_vector).compute_memory_necessary_for_training(
nb_training_vectors=nb_training_vectors
)
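# --- Illustrative sketch (not part of the original autofaiss source) ---
# For 1M vectors of dimension 512, an "IVF4096,Flat" index stores the raw float32 vectors
# (1_000_000 * 512 * 4 bytes) plus the 4096 centroids (4096 * 512 * 4 bytes); training it
# on 409_600 points is estimated at 1.5x the training vectors plus the centroid table.
def _example_index_metadata_estimations():  # pragma: no cover
    metadata = IndexMetadata("IVF4096,Flat", nb_vectors=1_000_000, dim_vector=512)
    assert metadata.get_index_type() == IndexType.IVF_FLAT
    assert metadata.estimated_index_size_in_bytes() == 1_000_000 * 512 * 4 + 4096 * 512 * 4
    training_bytes = compute_memory_necessary_for_training_wrapper(409_600, "IVF4096,Flat", 512)
    assert training_bytes == 4.0 * 512 * 409_600 * 1.5 + 4.0 * 4096 * 512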
|