emanuelaboros committed
Commit 8e5dcff · Parent(s): 1d52a4b
Files changed (1)
  generic_ner.py  +2 -6
generic_ner.py CHANGED
@@ -687,6 +687,7 @@ def remove_trailing_stopwords(entities):
     print(f"Remained entities: {len(new_entities)}")
     return new_entities
 
+
 class MultitaskTokenClassificationPipeline(Pipeline):
 
     def _sanitize_parameters(self, **kwargs):
@@ -717,6 +718,7 @@ class MultitaskTokenClassificationPipeline(Pipeline):
         attention_mask = torch.tensor([inputs["attention_mask"]], dtype=torch.long).to(
             self.model.device
         )
+        print(f"Let's check the model: {self.model}")
         with torch.no_grad():
             outputs = self.model(input_ids, attention_mask)
         return outputs, text_sentences, text
@@ -755,12 +757,6 @@ class MultitaskTokenClassificationPipeline(Pipeline):
 
         entities[task] = get_entities(words_list, preds_list, confidence_list, text)
 
-        # add titles to comp entities
-        # from pprint import pprint
-
-        # print("Before:")
-        # pprint(entities)
-
         all_entities = []
         coarse_entities = []
         for key in entities:
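For readers skimming the diff: the only functional change is the debug print added before the forward pass in the second hunk; the lines removed in the last hunk were commented-out pprint debugging with no effect on behavior. Below is a minimal, self-contained sketch of that inference step under torch.no_grad(), assuming a stock Hugging Face token-classification checkpoint. The checkpoint name is an illustrative stand-in, not the model this repository ships with.

import torch
from transformers import AutoModelForTokenClassification, AutoTokenizer

# Illustrative stand-in checkpoint; the actual model behind this pipeline is not named here.
model_name = "dbmdz/bert-large-cased-finetuned-conll03-english"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForTokenClassification.from_pretrained(model_name)

# Tokenize a sample sentence and move the tensors to the model's device,
# mirroring the input_ids / attention_mask handling in preprocess().
inputs = tokenizer("Paris is the capital of France.", return_tensors="pt")
input_ids = inputs["input_ids"].to(model.device)
attention_mask = inputs["attention_mask"].to(model.device)

print(f"Let's check the model: {type(model).__name__}")  # mirrors the debug print added in this commit

with torch.no_grad():  # inference only, no gradient tracking
    outputs = model(input_ids, attention_mask)

print(outputs.logits.shape)  # (batch_size, sequence_length, num_labels)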