tokenizer = AutoTokenizer.from_pretrained('DeepNeural/ner_classifier_v2')
model = AutoModelForTokenClassification.from_pretrained('DeepNeural/ner_classifier_v2')
```
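If you only want to spot-check the checkpoint before using the helper function below, the `pipeline` API is a quick alternative. This is a minimal sketch: the example sentence and the `aggregation_strategy` choice are illustrative, not part of the original card.

```python
from transformers import pipeline

# Sketch: token-classification pipeline around the same checkpoint.
# "simple" aggregation merges word pieces into whole-word entity spans.
ner = pipeline(
    "token-classification",
    model="DeepNeural/ner_classifier_v2",
    aggregation_strategy="simple",
)
print(ner("DeepNeural is an organization seeking to revolutionize healthcare"))
```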
### Making predictions

1. Define a helper function that tags a text and returns each token with its predicted tag:

```python
import pandas as pd
import torch

# Run inference on GPU when available, otherwise on CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def tag_text(text, tags, model, tokenizer) -> pd.DataFrame:
    # Get the tokens, including special tokens
    tokens = tokenizer(text).tokens()
    # Encode the sequence into input IDs
    input_ids = tokenizer(text, return_tensors="pt").input_ids.to(device)
    # Get the logits: a distribution over the 7 NER classes for each token
    outputs = model(input_ids)[0]
    # Take the argmax to get the most likely class per token
    predictions = torch.argmax(outputs, dim=2)
    # Map class IDs to tag names and return tokens and tags side by side
    preds = [tags.names[p] for p in predictions[0].cpu().numpy()]
    return pd.DataFrame([tokens, preds], index=["Tokens", "Tags"])
```

2. Make predictions on an example sentence:

```python
# Testing the model; ner_tags holds the dataset's label names and
# trainer.model is the fine-tuned model
dummy_text = "DeepNeural is an organization seeking to revolutionize healthcare"
tag_text(dummy_text, ner_tags, trainer.model, tokenizer)
```
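The helper returns a wide DataFrame with one column per token. To keep only the tokens the model marked as entities, a small filter over the transposed frame works; this sketch assumes the tag set follows the usual IOB convention, with "O" for tokens outside any entity.

```python
# Sketch: keep only the tokens tagged as entities.
# Assumes "O" marks tokens outside any entity (IOB scheme).
result = tag_text(dummy_text, ner_tags, trainer.model, tokenizer).T
entities = result[result["Tags"] != "O"]
print(entities)
```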
### Framework versions
- Transformers 4.56.2