Text Classification · Transformers · Safetensors · bert
memyprokotow committed
Commit 9820043 · verified · 1 Parent(s): 701970f

Update README.md

Files changed (1)
  1. README.md +7 -36
README.md CHANGED
@@ -38,19 +38,12 @@ E5-EG-small (E5 EverGreen - Small) is an efficient multilingual text classificat
 ## How to Get Started with the Model
 
 ```python
-from transformers import AutoTokenizer, AutoModelForSequenceClassification
+from transformers import pipeline
 import torch
-import time
 
 # Load model and tokenizer
-model_name = "s-nlp/E5-EG-small"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForSequenceClassification.from_pretrained(model_name)
-
-# For optimal performance, use GPU if available
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model = model.to(device)
-model.eval()
+model_name = "s-nlp/E5-EverGreen-Multilingual-Small"
+pipe = pipeline("text-classification", model_name)
 
 # Batch classification example
 questions = [
@@ -58,35 +51,13 @@ questions = [
     "Who won the latest World Cup?",
     "What is the speed of light?",
     "What is the current Bitcoin price?"
+    "How old is Elon Musk",
+    "How old was Leo Tolstoy when he died?"
 ]
 
-# Tokenize all questions
-inputs = tokenizer(
-    questions,
-    return_tensors="pt",
-    padding=True,
-    truncation=True,
-    max_length=64
-).to(device)
-
 # Classify
-start_time = time.time()
-with torch.no_grad():
-    outputs = model(**inputs)
-    predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
-    predicted_classes = torch.argmax(predictions, dim=-1)
-
-inference_time = (time.time() - start_time) * 1000  # ms
-
-# Display results
-class_names = ["Immutable", "Mutable"]
-for i, question in enumerate(questions):
-    print(f"Q: {question}")
-    print(f" Classification: {class_names[predicted_classes[i].item()]}")
-    print(f" Confidence: {predictions[i][predicted_classes[i]].item():.2f}")
-
-print(f"\nTotal inference time: {inference_time:.2f}ms")
-print(f"Average per question: {inference_time/len(questions):.2f}ms")
+results = pipe(questions)
+
 ```
 
 ## Training Details
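
For reference, a runnable sketch of the updated snippet follows. It assumes the model ID `s-nlp/E5-EverGreen-Multilingual-Small` written into the new README resolves on the Hub, adds the comma that the committed question list omits after the Bitcoin question, and reads label names from the pipeline output rather than hard-coding them.

```python
from transformers import pipeline

# Model ID as written in the updated README (assumed to exist on the Hub)
model_name = "s-nlp/E5-EverGreen-Multilingual-Small"
pipe = pipeline("text-classification", model=model_name)

questions = [
    "Who won the latest World Cup?",
    "What is the speed of light?",
    "What is the current Bitcoin price?",  # trailing comma added; the committed list omits it
    "How old is Elon Musk",
    "How old was Leo Tolstoy when he died?",
]

# The pipeline returns one {"label": ..., "score": ...} dict per question;
# label names come from the model's id2label config.
results = pipe(questions)
for question, result in zip(questions, results):
    print(f"Q: {question}")
    print(f"  {result['label']} ({result['score']:.2f})")
```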