alon-albalak
committed on
Commit
•
7b9dc80
1
Parent(s):
a3152aa
Update README.md
Browse files
README.md
CHANGED
@@ -30,4 +30,54 @@ Evaluated on held-out test set from XQuAD
|
|
30 |
"exact_match": 87.12546816479401,
|
31 |
"f1": 94.77703248802527,
|
32 |
"test_samples": 2307
|
33 |
-
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
30 |
"exact_match": 87.12546816479401,
|
31 |
"f1": 94.77703248802527,
|
32 |
"test_samples": 2307
|
33 |
+
```
|
34 |
+
|
35 |
+
# Usage
|
36 |
+
|
37 |
+
## In Transformers
|
38 |
+
```python
|
39 |
+
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
|
40 |
+
|
41 |
+
model_name = "alon-albalak/xlm-roberta-large-xquad"
|
42 |
+
|
43 |
+
# a) Get predictions
|
44 |
+
nlp = pipeline('question-answering', model=model_name, tokenizer=model_name)
|
45 |
+
QA_input = {
|
46 |
+
'question': 'Why is model conversion important?',
|
47 |
+
'context': 'The option to convert models between FARM and transformers gives freedom to the user and let people easily switch between frameworks.'
|
48 |
+
}
|
49 |
+
res = nlp(QA_input)
|
50 |
+
|
51 |
+
# b) Load model & tokenizer
|
52 |
+
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
|
53 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
54 |
+
```
|
55 |
+
|
56 |
+
## In FARM
|
57 |
+
```python
|
58 |
+
from farm.modeling.adaptive_model import AdaptiveModel
|
59 |
+
from farm.modeling.tokenization import Tokenizer
|
60 |
+
from farm.infer import QAInferencer
|
61 |
+
|
62 |
+
model_name = "alon-albalak/xlm-roberta-large-xquad"
|
63 |
+
|
64 |
+
# a) Get predictions
|
65 |
+
nlp = QAInferencer.load(model_name)
|
66 |
+
QA_input = [{"questions": ["Why is model conversion important?"],
|
67 |
+
"text": "The option to convert models between FARM and transformers gives freedom to the user and let people easily switch between frameworks."}]
|
68 |
+
res = nlp.inference_from_dicts(dicts=QA_input, rest_api_schema=True)
|
69 |
+
|
70 |
+
# b) Load model & tokenizer
|
71 |
+
model = AdaptiveModel.convert_from_transformers(model_name, device="cpu", task_type="question_answering")
|
72 |
+
tokenizer = Tokenizer.load(model_name)
|
73 |
+
```
|
74 |
+
|
75 |
+
## In Haystack
|
76 |
+
|
77 |
+
```python
|
78 |
+
reader = FARMReader(model_name_or_path="alon-albalak/xlm-roberta-large-xquad")
|
79 |
+
# or
|
80 |
+
reader = TransformersReader(model="alon-albalak/xlm-roberta-large-xquad",tokenizer="alon-albalak/xlm-roberta-large-xquad")
|
81 |
+
```
|
82 |
+
|
83 |
+
Usage instructions for FARM and Haystack were adapted from https://huggingface.co/deepset/xlm-roberta-large-squad2
|