junnei committed
Commit e6cb942 · verified · 1 Parent(s): c923988

Upload finetune_speech.py

Files changed (1):
1. examples/finetune_speech.py (+0 -89)
examples/finetune_speech.py CHANGED
@@ -25,36 +25,6 @@ import soundfile as sf
 from datasets import Audio
 import random
 
-class MultipleTokenBatchStoppingCriteria(StoppingCriteria):
-    """Stopping criteria capable of receiving multiple stop-tokens and handling batched inputs."""
-
-    def __init__(self, stop_tokens: torch.LongTensor, batch_size: int = 1) -> None:
-        """Initialize the multiple token batch stopping criteria.
-
-        Args:
-            stop_tokens: Stop-tokens.
-            batch_size: Batch size.
-
-        """
-
-        self.stop_tokens = stop_tokens
-        self.max_stop_tokens = stop_tokens.shape[-1]
-        self.stop_tokens_idx = torch.zeros(batch_size, dtype=torch.long, device=stop_tokens.device)
-
-    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
-        # Only gather the maximum number of inputs compatible with stop tokens
-        # and check whether the generated inputs are equal to `stop_tokens`
-        generated_inputs = torch.eq(input_ids[:, -self.max_stop_tokens :].unsqueeze(1), self.stop_tokens)
-        equal_generated_inputs = torch.all(generated_inputs, dim=2)
-
-        # Mark the position where a stop token has been produced for each input in the batch,
-        # but only if the corresponding entry is not already set
-        sequence_idx = torch.any(equal_generated_inputs, dim=1)
-        sequence_set_mask = self.stop_tokens_idx == 0
-        self.stop_tokens_idx[sequence_idx & sequence_set_mask] = input_ids.shape[-1]
-
-        return torch.all(self.stop_tokens_idx)
-
 class BaseAudioDataset(Dataset):
     def __init__(self, processor, split, sampling_rate=16000, debug=False):
         self.processor = processor
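The removed MultipleTokenBatchStoppingCriteria compares the tail of every row in the batch against each stop sequence on every generation step, records the position at which a row first matches, and reports "stop" only once every row has matched. A minimal sketch of the tensor comparison it relies on, using made-up token ids (all values below are illustrative, not from the source):

import torch

# Illustrative values only: two stop sequences of length 2 and a batch of two
# partially generated sequences.
stop_tokens = torch.tensor([[7, 8], [9, 9]])
input_ids = torch.tensor([[1, 2, 7, 8],    # ends with the stop sequence [7, 8]
                          [3, 4, 5, 6]])   # no stop sequence yet

max_stop_tokens = stop_tokens.shape[-1]
# Compare the last max_stop_tokens ids of every row against each stop sequence.
tail = input_ids[:, -max_stop_tokens:].unsqueeze(1)      # (batch, 1, stop_len)
matches = torch.all(torch.eq(tail, stop_tokens), dim=2)  # (batch, num_stop_seqs)
hit = torch.any(matches, dim=1)                          # (batch,)
print(hit)  # tensor([ True, False]) -> row 1 has not hit a stop yet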
@@ -581,65 +551,6 @@ def create_model(model_name_or_path, revision="main", use_flash_attention=False):
 
     return model
 
-@torch.no_grad()
-def evaluate(model, processor, eval_dataset, save_path=None, disable_tqdm=False, eval_batch_size=1):
-    model.eval()
-    all_generated_texts = []
-    all_labels = []
-
-    eval_dataloader = torch.utils.data.DataLoader(
-        eval_dataset,
-        batch_size=eval_batch_size,
-        collate_fn=covost_collate_fn,
-        shuffle=False,
-        drop_last=False,
-        num_workers=8,
-        prefetch_factor=2,
-        pin_memory=True,
-    )
-    stop_tokens = [processor.tokenizer.eos_token]
-    stop_tokens_ids = processor.tokenizer(stop_tokens, add_special_tokens=False, padding="longest", return_tensors="pt")["input_ids"]
-    stop_tokens_ids = stop_tokens_ids.to('cuda')
-
-    for inputs in tqdm(
-        eval_dataloader, disable=disable_tqdm, desc='running eval'
-    ):
-        stopping_criteria = StoppingCriteriaList([MultipleTokenBatchStoppingCriteria(stop_tokens_ids, batch_size=inputs.input_ids.size(0))])
-        inputs = inputs.to('cuda').to(model.dtype)
-        generated_ids = model.generate(
-            **inputs, eos_token_id=processor.tokenizer.eos_token_id, max_new_tokens=64,
-            stopping_criteria=stopping_criteria,
-        )
-
-        stop_tokens_idx = stopping_criteria[0].stop_tokens_idx.reshape(inputs.input_ids.size(0), -1)[:, 0]
-
-        stop_tokens_idx = torch.where(
-            stop_tokens_idx > 0,
-            stop_tokens_idx - stop_tokens_ids.shape[-1],
-            generated_ids.shape[-1],
-        )
-        generated_text = [
-            processor.decode(_pred_ids[inputs["input_ids"].shape[1]:_stop_tokens_idx], skip_special_tokens=True, clean_up_tokenization_spaces=False)
-            for _pred_ids, _stop_tokens_idx in zip(generated_ids, stop_tokens_idx)
-        ]
-        all_generated_texts.extend(generated_text)
-        labels = [processor.decode(_label_ids[_label_ids != 0]).removesuffix(ANSWER_SUFFIX) for _label_ids in inputs["labels"]]
-        all_labels.extend(labels)
-
-    assert len(all_generated_texts) == len(all_labels)
-    bleu = sacrebleu.corpus_bleu(all_generated_texts, [all_labels])
-    print(bleu)
-    if save_path:
-        with open(save_path, 'w') as f:
-            save_dict = {
-                'all_generated_texts': all_generated_texts,
-                'all_labels': all_labels,
-                'score': bleu.score,
-            }
-            json.dump(save_dict, f)
-
-    return bleu.score
-
 os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
 INSTRUCTION = {
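The removed evaluate() decodes only the newly generated tokens of each sequence (everything after the prompt, truncated at the recorded stop-token position) and scores them against the decoded labels with corpus-level BLEU. Its scoring step reduces to the sacrebleu call below, shown with hypothetical strings rather than real model output:

import sacrebleu

# Hypothetical hypotheses and references; evaluate() built these lists by
# decoding generated_ids and the padded labels, respectively.
hypotheses = ["the cat sat on the mat", "hello world"]
references = ["the cat sat on the mat", "hello there world"]

# corpus_bleu takes the system outputs and a list of reference streams.
bleu = sacrebleu.corpus_bleu(hypotheses, [references])
print(bleu.score)  # corpus BLEU on a 0-100 scale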