
How to load the model and run inference

Download all the files from this repository to a local directory, model_dir.
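For example, the files can be fetched with huggingface_hub. This is a minimal sketch; the repo id below is a placeholder, so substitute the actual id of this repository:

from huggingface_hub import snapshot_download

# "user/w2p_bart" is a placeholder repo id, not the real one
model_dir = snapshot_download(repo_id="user/w2p_bart", local_dir="w2p_bart")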

Initiate ONNX Session

import os

import numpy as np
import onnxruntime as ort
from transformers import PreTrainedTokenizerFast

# model_dir is the local directory holding the downloaded files
session = ort.InferenceSession(os.path.join(model_dir, "w2p_bart.onnx"))
tokenizer = PreTrainedTokenizerFast.from_pretrained(model_dir)

bos_token_id = tokenizer.bos_token_id
eos_token_id = tokenizer.eos_token_id
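Optionally, verify that the graph's input and output names match what the decoding loop below expects. This is just a sanity check against the exported model:

print([i.name for i in session.get_inputs()])   # expect: input_ids, attention_mask, decoder_input_ids
print([o.name for o in session.get_outputs()])  # expect: logits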

Batch Inference

def g2p_onnx_batch(text, max_len=16):
    # 1. Preprocess: split into words and space out the characters of each word
    words = text.strip().split()
    spaced_words = [" ".join(list(word)) for word in words]

    encoded = tokenizer(spaced_words, return_tensors="np", padding=True, truncation=True, max_length=32)
    input_ids = encoded["input_ids"]
    attention_mask = encoded["attention_mask"]

    # 2. Greedy decoding: every sequence starts from the BOS token
    batch_size = input_ids.shape[0]
    decoder_input_ids = np.full((batch_size, 1), bos_token_id, dtype=np.int64)

    finished = np.zeros(batch_size, dtype=bool)

    for _ in range(max_len):
        ort_inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "decoder_input_ids": decoder_input_ids
        }
        logits = session.run(["logits"], ort_inputs)[0]
        next_token_logits = logits[:, -1, :]
        next_token_ids = np.argmax(next_token_logits, axis=-1)

        # Keep emitting EOS for sequences that have already finished, so no
        # stray tokens are appended after the end of a prediction
        next_token_ids = np.where(finished, eos_token_id, next_token_ids)
        decoder_input_ids = np.concatenate([decoder_input_ids, next_token_ids[:, None]], axis=1)

        finished |= (next_token_ids == eos_token_id)
        if finished.all():
            break

    # 3. Postprocess: strip special tokens and the spaces between phoneme characters
    decoded = tokenizer.batch_decode(decoder_input_ids, skip_special_tokens=True)
    phonemes = [r.replace(" ", "") for r in decoded]
    return " ".join(phonemes)

Example:

result = g2p_onnx_batch("banana apple question")
print(result)

This should return:

bənˈænə ˈæpᵊl kwˈɛsʧᵊn
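
Since the function returns one phoneme string per input word, it is easy to build a small pronunciation lexicon on top of it. A minimal sketch, using the same illustrative word list as above:

words = ["banana", "apple", "question"]
lexicon = dict(zip(words, g2p_onnx_batch(" ".join(words)).split()))
print(lexicon)  # {'banana': 'bənˈænə', 'apple': 'ˈæpᵊl', 'question': 'kwˈɛsʧᵊn'}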