splade-ko-v1.0 is a Korean-specific SPLADE sparse encoder fine-tuned from skt/A.X-Encoder-base with the sentence-transformers library. It maps sentences and paragraphs to a 50000-dimensional sparse vector space and can be used for semantic search and sparse retrieval.
SparseEncoder(
(0): MLMTransformer({'max_seq_length': 8192, 'do_lower_case': False, 'architecture': 'ModernBertForMaskedLM'})
(1): SpladePooling({'pooling_strategy': 'max', 'activation_function': 'relu', 'word_embedding_dimension': 50000})
)
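Conceptually, SpladePooling collapses the MLM head's per-token logits into a single vocabulary-sized sparse vector. A minimal sketch of the standard SPLADE max pooling this corresponds to (illustrative, not the library's actual internals):

```python
import torch

def splade_pool(mlm_logits: torch.Tensor) -> torch.Tensor:
    """Collapse per-token MLM logits of shape (seq_len, vocab_size) into one sparse vector.

    Standard SPLADE pooling: w_j = max_i log(1 + relu(logit_{i,j})),
    so each vocabulary term j gets the weight of its strongest input position i.
    """
    return torch.log1p(torch.relu(mlm_logits)).amax(dim=0)  # shape: (vocab_size,)
```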
First install the Sentence Transformers library:
pip install -U sentence-transformers
Then you can load this model and run inference.
from sentence_transformers import SparseEncoder
# Download from the 🤗 Hub
model = SparseEncoder("yjoonjang/splade-ko-v1.0")
# Run inference
sentences = [
'양이온 최적화 방법은 산소공공을 감소시키기 때문에 전자 농도가 증가하는 문제점을 갖고있을까?',
'산화물 TFT 소자 신뢰성 열화기구\n그러나 이와 같은 양이온 최적화 방법은 산소공공을 감소시키기 때문에 전자농도 역시 감소하게 되어 전계 이동도가 감소하는 문제점을 않고 있다.\n이는 산화물 반도체의 전도기구가 Percolation Conduction에 따르기 때문이다. ',
'세포대사 기능 분석을 위한 광학센서 기반 용존산소와 pH 측정 시스템의 제작 및 특성 분석\n수소이온 농도가 증가하는 경우인 가 낮아지면 다수의 수소이온들과 충돌한 방출 광이 에너지를 잃고 짧은 검출시간을 갖는다. \n반대로 가 높아질수록 형광물질로부터 방출된 광의 수명이 길어져 긴 검출시간을 가진다.',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 50000]
# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities)
# tensor([[ 46.0239, 57.8961, 22.8014],
# [ 57.8961, 270.6235, 56.5666],
# [ 22.8014, 56.5666, 275.8828]], device='cuda:0')
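Because the embeddings live in vocabulary space, they are directly interpretable. A small sketch, assuming the decode and sparsity helpers that recent sentence-transformers releases expose on SparseEncoder:

```python
# Show the highest-weighted vocabulary tokens for the first sentence.
for token, weight in model.decode(embeddings[0], top_k=10):
    print(f"{token}\t{weight:.2f}")

# Report how many of the 50000 dimensions are actually non-zero.
print(model.sparsity(embeddings))  # e.g. {'active_dims': ..., 'sparsity_ratio': ...}
```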
I evaluated this model on all of the Korean retrieval benchmarks available in MTEB:
| Dataset | Description | Average Length (characters) |
|---|---|---|
| Ko-StrategyQA | Korean ODQA multi-hop retrieval dataset (translated from StrategyQA) | 305.15 |
| AutoRAGRetrieval | Korean document retrieval dataset constructed by parsing PDFs across 5 domains: finance, public sector, healthcare, legal, and commerce | 823.60 |
| MIRACLRetrieval | Wikipedia-based Korean document retrieval dataset | 166.63 |
| PublicHealthQA | Korean document retrieval dataset for medical and public health domains | 339.00 |
| BelebeleRetrieval | FLORES-200-based Korean document retrieval dataset | 243.11 |
| MrTidyRetrieval | Wikipedia-based Korean document retrieval dataset | 166.90 |
| MultiLongDocRetrieval | Korean long document retrieval dataset across various domains | 13,813.44 |
XPQARetrieval is not included in the table above: its query-document pairs come from product QA, and many queries cannot be matched to the right document without the surrounding product context, for example:

{
    "query": "Is it unopened?",
    "document": "No. It is a renewed product."
},
{
    "query": "Is it compatible with iPad Air 3?",
    "document": "Yes, it is possible."
}
The evaluation uses the SparseInformationRetrievalEvaluator from the sentence-transformers library:
from sentence_transformers import SparseEncoder
from datasets import load_dataset
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
import os
import pandas as pd
from tqdm import tqdm
import json
from multiprocessing import Process, current_process
import torch
from setproctitle import setproctitle
import traceback
# Mapping of which datasets each GPU evaluates
DATASET_GPU_MAPPING = {
0: [
"yjoonjang/markers_bm",
"taeminlee/Ko-StrategyQA",
"facebook/belebele",
"xhluca/publichealth-qa",
"Shitao/MLDR"
],
1: [
"miracl/mmteb-miracl",
],
2: [
"mteb/mrtidy",
]
}
model_name = "yjoonjang/splade-ko-v1.0"
def evaluate_dataset(model_name, gpu_id, eval_datasets):
"""단일 GPU에서 할당된 데이터셋들을 평가하는 함수"""
import torch
try:
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
device = torch.device(f"cuda:0")
# device = torch.device(f"cuda:{str(gpu_id)}")
torch.cuda.set_device(device)
setproctitle(f"yjoonjang splade-eval-gpu{gpu_id}")
print(f"Running datasets: {eval_datasets} on GPU {gpu_id} in process {current_process().name}")
# 모델 로드
model = SparseEncoder(model_name, trust_remote_code=True, device=device)
for eval_dataset in eval_datasets:
short_dataset_name = eval_dataset.split("/")[-1]
output_dir = f"./results/{model_name}"
os.makedirs(output_dir, exist_ok=True)
prediction_filepath = f"{output_dir}/{short_dataset_name}.json"
if os.path.exists(prediction_filepath):
print(f"Skipping evaluation for {eval_dataset} as output already exists at {prediction_filepath}")
continue
corpus = {}
queries = {}
relevant_docs = {}
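            # Each dataset branch below fills corpus / queries (id -> text) and
            # relevant_docs (query-id -> set of relevant corpus-ids).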
split = "dev"
if eval_dataset == "yjoonjang/markers_bm" or eval_dataset == "yjoonjang/squad_kor_v1":
split = "test"
if eval_dataset in ["yjoonjang/markers_bm", "taeminlee/Ko-StrategyQA"]:
dev_corpus = load_dataset(eval_dataset, "corpus", split="corpus")
dev_queries = load_dataset(eval_dataset, "queries", split="queries")
relevant_docs_data = load_dataset(eval_dataset, "default", split=split)
queries = dict(zip(dev_queries["_id"], dev_queries["text"]))
corpus = dict(zip(dev_corpus["_id"], dev_corpus["text"]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid_str = str(qid)
corpus_ids_str = str(corpus_ids)
if qid_str not in relevant_docs:
relevant_docs[qid_str] = set()
relevant_docs[qid_str].add(corpus_ids_str)
elif eval_dataset == "facebook/belebele":
split = "test"
ds = load_dataset(eval_dataset, "kor_Hang", split=split)
corpus_df = pd.DataFrame(ds)
corpus_df = corpus_df.drop_duplicates(subset=["link"])
corpus_df["cid"] = [f"C{i}" for i in range(len(corpus_df))]
corpus = dict(zip(corpus_df["cid"], corpus_df["flores_passage"]))
link_to_cid = dict(zip(corpus_df["link"], corpus_df["cid"]))
queries_df = pd.DataFrame(ds)
queries_df = queries_df.drop_duplicates(subset=["question"])
queries_df["qid"] = [f"Q{i}" for i in range(len(queries_df))]
queries = dict(zip(queries_df["qid"], queries_df["question"]))
question_to_qid = dict(zip(queries_df["question"], queries_df["qid"]))
for row in tqdm(ds, desc="Processing belebele"):
qid = question_to_qid[row["question"]]
cid = link_to_cid[row["link"]]
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(cid)
elif eval_dataset == "jinaai/xpqa":
split = "test"
ds = load_dataset(eval_dataset, "ko", split=split, trust_remote_code=True)
corpus_df = pd.DataFrame(ds)
corpus_df = corpus_df.drop_duplicates(subset=["answer"])
corpus_df["cid"] = [f"C{i}" for i in range(len(corpus_df))]
corpus = dict(zip(corpus_df["cid"], corpus_df["answer"]))
answer_to_cid = dict(zip(corpus_df["answer"], corpus_df["cid"]))
queries_df = pd.DataFrame(ds)
queries_df = queries_df.drop_duplicates(subset=["question"])
queries_df["qid"] = [f"Q{i}" for i in range(len(queries_df))]
queries = dict(zip(queries_df["qid"], queries_df["question"]))
question_to_qid = dict(zip(queries_df["question"], queries_df["qid"]))
for row in tqdm(ds, desc="Processing xpqa"):
qid = question_to_qid[row["question"]]
cid = answer_to_cid[row["answer"]]
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(cid)
elif eval_dataset == "miracl/mmteb-miracl":
split = "dev"
corpus_ds = load_dataset(eval_dataset, "corpus-ko", split="corpus")
queries_ds = load_dataset(eval_dataset, "queries-ko", split="queries")
qrels_ds = load_dataset(eval_dataset, "ko", split=split)
corpus = {row['docid']: row['text'] for row in corpus_ds}
queries = {row['query_id']: row['query'] for row in queries_ds}
for row in qrels_ds:
qid = row["query_id"]
cid = row["docid"]
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(cid)
elif eval_dataset == "mteb/mrtidy":
split = "test"
corpus_ds = load_dataset(eval_dataset, "korean-corpus", split="train", trust_remote_code=True)
queries_ds = load_dataset(eval_dataset, "korean-queries", split=split, trust_remote_code=True)
qrels_ds = load_dataset(eval_dataset, "korean-qrels", split=split, trust_remote_code=True)
corpus = {row['_id']: row['text'] for row in corpus_ds}
queries = {row['_id']: row['text'] for row in queries_ds}
for row in qrels_ds:
qid = str(row["query-id"])
cid = str(row["corpus-id"])
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(cid)
elif eval_dataset == "Shitao/MLDR":
split = "dev"
corpus_ds = load_dataset(eval_dataset, "corpus-ko", split="corpus")
lang_data = load_dataset(eval_dataset, "ko", split=split)
corpus = {row['docid']: row['text'] for row in corpus_ds}
queries = {row['query_id']: row['query'] for row in lang_data}
for row in lang_data:
qid = row["query_id"]
cid = row["positive_passages"][0]["docid"]
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(cid)
elif eval_dataset == "xhluca/publichealth-qa":
split = "test"
ds = load_dataset(eval_dataset, "korean", split=split)
ds = ds.filter(lambda x: x["question"] is not None and x["answer"] is not None)
corpus_df = pd.DataFrame(list(ds))
corpus_df = corpus_df.drop_duplicates(subset=["answer"])
corpus_df["cid"] = [f"D{i}" for i in range(len(corpus_df))]
corpus = dict(zip(corpus_df["cid"], corpus_df["answer"]))
answer_to_cid = dict(zip(corpus_df["answer"], corpus_df["cid"]))
queries_df = pd.DataFrame(list(ds))
queries_df = queries_df.drop_duplicates(subset=["question"])
queries_df["qid"] = [f"Q{i}" for i in range(len(queries_df))]
queries = dict(zip(queries_df["qid"], queries_df["question"]))
question_to_qid = dict(zip(queries_df["question"], queries_df["qid"]))
for row in tqdm(ds, desc="Processing publichealth-qa"):
qid = question_to_qid[row["question"]]
cid = answer_to_cid[row["answer"]]
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(cid)
else:
continue
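            # Score retrieval quality (Recall/NDCG/MRR@k) along with the average
            # number of active dimensions per query and document embedding.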
evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
write_csv=False,
name=f"{eval_dataset}",
show_progress_bar=True,
batch_size=32,
write_predictions=False
)
short_dataset_name = eval_dataset.split("/")[-1]
output_filepath = f"./results/{model_name}"
metrics = evaluator(model)
print(f"GPU {gpu_id} - {eval_dataset} metrics: {metrics}")
with open(f"{output_filepath}/{short_dataset_name}.json", "w", encoding="utf-8") as f:
json.dump(metrics, f, ensure_ascii=False, indent=2)
except Exception as ex:
print(f"Error on GPU {gpu_id}: {ex}")
traceback.print_exc()
if __name__ == "__main__":
torch.multiprocessing.set_start_method('spawn')
print(f"Starting evaluation for model: {model_name}")
processes = []
for gpu_id, datasets in DATASET_GPU_MAPPING.items():
p = Process(target=evaluate_dataset, args=(model_name, gpu_id, datasets))
p.start()
processes.append(p)
for p in processes:
p.join()
print(f"Completed evaluation for model: {model_name}")
Averaged results across the benchmarks above:

| Model | Parameters | Recall@10 | NDCG@10 | MRR@10 | AVG_Query_Active_Dims | AVG_Corpus_Active_Dims |
|---|---|---|---|---|---|---|
| yjoonjang/splade-ko-v1.0 | 0.1B | 0.7626 | 0.7037 | 0.7379 | 110.7664 | 778.6494 |
| telepix/PIXIE-Splade-Preview | 0.1B | 0.7382 | 0.6869 | 0.7204 | 108.3300 | 718.5110 |
| opensearch-project/opensearch-neural-sparse-encoding-multilingual-v1 | 0.1B | 0.5900 | 0.5137 | 0.5455 | 27.8722 | 177.5564 |
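The non-default hyperparameters below come from the training run. As a rough sketch of how they map onto the sentence-transformers SparseEncoder training API — the dataset path, the loss composition (SpladeLoss wrapping SparseMultipleNegativesRankingLoss with FLOPS regularization, which the citation section suggests), and the regularizer weights are assumptions, not specifics from this card:

```python
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder import (
    SparseEncoderTrainer,
    SparseEncoderTrainingArguments,
)
from sentence_transformers.sparse_encoder.losses import (
    SparseMultipleNegativesRankingLoss,
    SpladeLoss,
)

model = SparseEncoder("skt/A.X-Encoder-base")

# Hypothetical (query, positive, negative_1..negative_6) dataset;
# the card notes 6 negatives per query from the author's own data.
train_dataset = load_dataset("path/to/korean-hard-negatives", split="train")

# SpladeLoss adds FLOPS sparsity regularization on top of the ranking loss.
# The regularizer weights are illustrative, not the values used for this model.
loss = SpladeLoss(
    model=model,
    loss=SparseMultipleNegativesRankingLoss(model=model),
    query_regularizer_weight=5e-5,
    document_regularizer_weight=3e-5,
)

# The non-default values here mirror the hyperparameter list that follows.
args = SparseEncoderTrainingArguments(
    output_dir="splade-ko-v1.0",
    eval_strategy="steps",
    per_device_train_batch_size=4,
    per_device_eval_batch_size=2,
    learning_rate=2e-5,
    num_train_epochs=2,
    warmup_ratio=0.1,
    bf16=True,
)

trainer = SparseEncoderTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    loss=loss,
)
trainer.train()
```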
Training hyperparameters

Non-default hyperparameters:

- eval_strategy: steps
- per_device_train_batch_size: 4
- per_device_eval_batch_size: 2
- learning_rate: 2e-05
- num_train_epochs: 2
- warmup_ratio: 0.1
- bf16: True
- negs_per_query: 6 (from our dataset)
- gather_device: True (makes in-batch samples available across devices)

All hyperparameters:

- overwrite_output_dir: False
- do_predict: False
- eval_strategy: steps
- prediction_loss_only: True
- per_device_train_batch_size: 4
- per_device_eval_batch_size: 2
- per_gpu_train_batch_size: None
- per_gpu_eval_batch_size: None
- gradient_accumulation_steps: 1
- eval_accumulation_steps: None
- torch_empty_cache_steps: None
- learning_rate: 2e-05
- weight_decay: 0.0
- adam_beta1: 0.9
- adam_beta2: 0.999
- adam_epsilon: 1e-08
- max_grad_norm: 1.0
- num_train_epochs: 2
- max_steps: -1
- lr_scheduler_type: linear
- lr_scheduler_kwargs: {}
- warmup_ratio: 0.1
- warmup_steps: 0
- log_level: passive
- log_level_replica: warning
- log_on_each_node: True
- logging_nan_inf_filter: True
- save_safetensors: True
- save_on_each_node: False
- save_only_model: False
- restore_callback_states_from_checkpoint: False
- no_cuda: False
- use_cpu: False
- use_mps_device: False
- seed: 42
- data_seed: None
- jit_mode_eval: False
- use_ipex: False
- bf16: True
- fp16: False
- fp16_opt_level: O1
- half_precision_backend: auto
- bf16_full_eval: False
- fp16_full_eval: False
- tf32: None
- local_rank: 7
- ddp_backend: None
- tpu_num_cores: None
- tpu_metrics_debug: False
- debug: []
- dataloader_drop_last: True
- dataloader_num_workers: 0
- dataloader_prefetch_factor: None
- past_index: -1
- disable_tqdm: False
- remove_unused_columns: True
- label_names: None
- load_best_model_at_end: False
- ignore_data_skip: False
- fsdp: []
- fsdp_min_num_params: 0
- fsdp_config: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- fsdp_transformer_layer_cls_to_wrap: None
- accelerator_config: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- parallelism_config: None
- deepspeed: None
- label_smoothing_factor: 0.0
- optim: adamw_torch_fused
- optim_args: None
- adafactor: False
- group_by_length: False
- length_column_name: length
- ddp_find_unused_parameters: None
- ddp_bucket_cap_mb: None
- ddp_broadcast_buffers: False
- dataloader_pin_memory: True
- dataloader_persistent_workers: False
- skip_memory_metrics: True
- use_legacy_prediction_loop: False
- push_to_hub: False
- resume_from_checkpoint: None
- hub_model_id: None
- hub_strategy: every_save
- hub_private_repo: None
- hub_always_push: False
- hub_revision: None
- gradient_checkpointing: False
- gradient_checkpointing_kwargs: None
- include_inputs_for_metrics: False
- include_for_metrics: []
- eval_do_concat_batches: True
- fp16_backend: auto
- push_to_hub_model_id: None
- push_to_hub_organization: None
- mp_parameters:
- auto_find_batch_size: False
- full_determinism: False
- torchdynamo: None
- ray_scope: last
- ddp_timeout: 1800
- torch_compile: False
- torch_compile_backend: None
- torch_compile_mode: None
- include_tokens_per_second: False
- include_num_input_tokens_seen: False
- neftune_noise_alpha: None
- optim_target_modules: None
- batch_eval_metrics: False
- eval_on_start: False
- use_liger_kernel: False
- liger_kernel_config: None
- eval_use_gather_object: False
- average_tokens_across_devices: True
- prompts: None
- batch_sampler: batch_sampler
- multi_dataset_batch_sampler: proportional
- router_mapping: {}
- learning_rate_mapping: {}

Citation

@inproceedings{reimers-2019-sentence-bert,
title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2019",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/1908.10084",
}
@misc{formal2022distillationhardnegativesampling,
title={From Distillation to Hard Negative Sampling: Making Sparse Neural IR Models More Effective},
author={Thibault Formal and Carlos Lassance and Benjamin Piwowarski and Stéphane Clinchant},
year={2022},
eprint={2205.04733},
archivePrefix={arXiv},
primaryClass={cs.IR},
url={https://arxiv.org/abs/2205.04733},
}
@misc{henderson2017efficient,
title={Efficient Natural Language Response Suggestion for Smart Reply},
author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
year={2017},
eprint={1705.00652},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}