model update
 - README.md +268 -0
 - analogy.bidirection.json +1 -0
 - analogy.forward.json +1 -0
 - analogy.reverse.json +1 -0
 - classification.json +1 -0
 - config.json +31 -0
 - finetuning_config.json +24 -0
 - merges.txt +0 -0
 - pytorch_model.bin +3 -0
 - relation_mapping.json +0 -0
 - special_tokens_map.json +15 -0
 - tokenizer.json +0 -0
 - tokenizer_config.json +16 -0
 - vocab.json +0 -0
 
    	
README.md ADDED
@@ -0,0 +1,268 @@
---
datasets:
- relbert/semeval2012_relational_similarity
model-index:
- name: relbert/relbert-roberta-base-nce-a-semeval2012-average
  results:
  - task:
      name: Relation Mapping
      type: sorting-task
    dataset:
      name: Relation Mapping
      args: relbert/relation_mapping
      type: relation-mapping
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.8007341269841269
  - task:
      name: Analogy Questions (SAT full)
      type: multiple-choice-qa
    dataset:
      name: SAT full
      args: relbert/analogy_questions
      type: analogy-questions
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.5909090909090909
  - task:
      name: Analogy Questions (SAT)
      type: multiple-choice-qa
    dataset:
      name: SAT
      args: relbert/analogy_questions
      type: analogy-questions
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.599406528189911
  - task:
      name: Analogy Questions (BATS)
      type: multiple-choice-qa
    dataset:
      name: BATS
      args: relbert/analogy_questions
      type: analogy-questions
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.6864924958310172
  - task:
      name: Analogy Questions (Google)
      type: multiple-choice-qa
    dataset:
      name: Google
      args: relbert/analogy_questions
      type: analogy-questions
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.88
  - task:
      name: Analogy Questions (U2)
      type: multiple-choice-qa
    dataset:
      name: U2
      args: relbert/analogy_questions
      type: analogy-questions
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.5570175438596491
  - task:
      name: Analogy Questions (U4)
      type: multiple-choice-qa
    dataset:
      name: U4
      args: relbert/analogy_questions
      type: analogy-questions
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.5625
  - task:
      name: Analogy Questions (ConceptNet Analogy)
      type: multiple-choice-qa
    dataset:
      name: ConceptNet Analogy
      args: relbert/analogy_questions
      type: analogy-questions
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.39429530201342283
  - task:
      name: Analogy Questions (TREX Analogy)
      type: multiple-choice-qa
    dataset:
      name: TREX Analogy
      args: relbert/analogy_questions
      type: analogy-questions
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.6557377049180327
  - task:
      name: Analogy Questions (NELL-ONE Analogy)
      type: multiple-choice-qa
    dataset:
      name: NELL-ONE Analogy
      args: relbert/analogy_questions
      type: analogy-questions
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.6383333333333333
  - task:
      name: Lexical Relation Classification (BLESS)
      type: classification
    dataset:
      name: BLESS
      args: relbert/lexical_relation_classification
      type: relation-classification
    metrics:
    - name: F1
      type: f1
      value: 0.8998041283712521
    - name: F1 (macro)
      type: f1_macro
      value: 0.8977934264238211
  - task:
      name: Lexical Relation Classification (CogALexV)
      type: classification
    dataset:
      name: CogALexV
      args: relbert/lexical_relation_classification
      type: relation-classification
    metrics:
    - name: F1
      type: f1
      value: 0.8272300469483568
    - name: F1 (macro)
      type: f1_macro
      value: 0.6397137935143544
  - task:
      name: Lexical Relation Classification (EVALution)
      type: classification
    dataset:
      name: EVALution
      args: relbert/lexical_relation_classification
      type: relation-classification
    metrics:
    - name: F1
      type: f1
      value: 0.6462621885157096
    - name: F1 (macro)
      type: f1_macro
      value: 0.6446245083980608
  - task:
      name: Lexical Relation Classification (K&H+N)
      type: classification
    dataset:
      name: K&H+N
      args: relbert/lexical_relation_classification
      type: relation-classification
    metrics:
    - name: F1
      type: f1
      value: 0.9412951241566391
    - name: F1 (macro)
      type: f1_macro
      value: 0.8510085443796092
  - task:
      name: Lexical Relation Classification (ROOT09)
      type: classification
    dataset:
      name: ROOT09
      args: relbert/lexical_relation_classification
      type: relation-classification
    metrics:
    - name: F1
      type: f1
      value: 0.8752742087120025
    - name: F1 (macro)
      type: f1_macro
      value: 0.8731443734393283

---
# relbert/relbert-roberta-base-nce-a-semeval2012-average

RelBERT based on [roberta-base](https://huggingface.co/roberta-base), fine-tuned on [relbert/semeval2012_relational_similarity](https://huggingface.co/datasets/relbert/semeval2012_relational_similarity) (see the [`relbert`](https://github.com/asahi417/relbert) repository for details of the fine-tuning).
This model achieves the following results on relation understanding tasks:
- Analogy Questions ([dataset](https://huggingface.co/datasets/relbert/analogy_questions), [full result](https://huggingface.co/relbert/relbert-roberta-base-nce-a-semeval2012-average/raw/main/analogy.forward.json)):
    - Accuracy on SAT (full): 0.5909090909090909
    - Accuracy on SAT: 0.599406528189911
    - Accuracy on BATS: 0.6864924958310172
    - Accuracy on U2: 0.5570175438596491
    - Accuracy on U4: 0.5625
    - Accuracy on Google: 0.88
    - Accuracy on ConceptNet Analogy: 0.39429530201342283
    - Accuracy on T-Rex Analogy: 0.6557377049180327
    - Accuracy on NELL-ONE Analogy: 0.6383333333333333
- Lexical Relation Classification ([dataset](https://huggingface.co/datasets/relbert/lexical_relation_classification), [full result](https://huggingface.co/relbert/relbert-roberta-base-nce-a-semeval2012-average/raw/main/classification.json)):
    - Micro F1 score on BLESS: 0.8998041283712521
    - Micro F1 score on CogALexV: 0.8272300469483568
    - Micro F1 score on EVALution: 0.6462621885157096
    - Micro F1 score on K&H+N: 0.9412951241566391
    - Micro F1 score on ROOT09: 0.8752742087120025
- Relation Mapping ([dataset](https://huggingface.co/datasets/relbert/relation_mapping), [full result](https://huggingface.co/relbert/relbert-roberta-base-nce-a-semeval2012-average/raw/main/relation_mapping.json)):
    - Accuracy on Relation Mapping: 0.8007341269841269

### Usage
This model can be used through the [relbert library](https://github.com/asahi417/relbert). Install the library via pip:
```shell
pip install relbert
```
and load the model as below.
```python
from relbert import RelBERT
model = RelBERT("relbert/relbert-roberta-base-nce-a-semeval2012-average")
vector = model.get_embedding(['Tokyo', 'Japan'])  # shape of (n_dim, )
```
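The resulting relation embeddings can be compared directly. The sketch below is a hedged follow-up, assuming `get_embedding` also accepts a list of word pairs and returns one vector per pair (as in the relbert documentation; verify against the library before relying on it):
```python
# Hedged sketch: compare two relation embeddings by cosine similarity.
# Assumes get_embedding accepts a batch (list of word pairs) and returns
# one vector per pair; this input format is an assumption to verify.
import numpy as np
from relbert import RelBERT

model = RelBERT("relbert/relbert-roberta-base-nce-a-semeval2012-average")
vectors = model.get_embedding([['Tokyo', 'Japan'], ['Paris', 'France']])
a, b = np.asarray(vectors[0]), np.asarray(vectors[1])
print(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))  # high for analogous pairs
```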

### Training hyperparameters

- model: roberta-base
- max_length: 64
- epoch: 10
- batch: 32
- random_seed: 0
- lr: 5e-06
- lr_warmup: 10
- aggregation_mode: average
- data: relbert/semeval2012_relational_similarity
- data_name: None
- exclude_relation: None
- split: train
- split_valid: validation
- loss_function: nce
- classification_loss: False
- loss_function_config: {'temperature': 0.05, 'num_negative': 400, 'num_positive': 10}
- augment_negative_by_positive: True

See the full configuration at [config file](https://huggingface.co/relbert/relbert-roberta-base-nce-a-semeval2012-average/raw/main/finetuning_config.json).
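The `nce` loss with this `loss_function_config` is an InfoNCE-style contrastive objective: word pairs expressing the same relation are pulled together, pairs from other relations pushed apart. Below is a minimal illustrative sketch of such an objective (not relbert's exact implementation), using the `temperature`, `num_positive`, and `num_negative` values from the config:
```python
# Minimal InfoNCE-style sketch (illustrative; not relbert's exact code).
# `anchor` is one relation embedding; `pos`/`neg` hold embeddings of word
# pairs with the same / a different relation, mirroring num_positive=10,
# num_negative=400, temperature=0.05 from loss_function_config.
import torch
import torch.nn.functional as F

def nce_loss(anchor, pos, neg, temperature=0.05):
    # cosine similarity between the anchor and every candidate pair
    sim_pos = F.cosine_similarity(anchor.unsqueeze(0), pos) / temperature
    sim_neg = F.cosine_similarity(anchor.unsqueeze(0), neg) / temperature
    # -log( sum exp(pos) / sum exp(pos and neg) )
    all_sim = torch.cat([sim_pos, sim_neg])
    return torch.logsumexp(all_sim, dim=0) - torch.logsumexp(sim_pos, dim=0)

loss = nce_loss(torch.randn(768), torch.randn(10, 768), torch.randn(400, 768))
```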

### Reference
If you use any resource from RelBERT, please consider citing our [paper](https://aclanthology.org/2021.emnlp-main.712/).

```
@inproceedings{ushio-etal-2021-distilling,
    title = "Distilling Relation Embeddings from Pretrained Language Models",
    author = "Ushio, Asahi  and
      Camacho-Collados, Jose  and
      Schockaert, Steven",
    booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2021",
    address = "Online and Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.emnlp-main.712",
    doi = "10.18653/v1/2021.emnlp-main.712",
    pages = "9044--9062",
    abstract = "Pre-trained language models have been found to capture a surprisingly rich amount of lexical knowledge, ranging from commonsense properties of everyday concepts to detailed factual knowledge about named entities. Among others, this makes it possible to distill high-quality word vectors from pre-trained language models. However, it is currently unclear to what extent it is possible to distill relation embeddings, i.e. vectors that characterize the relationship between two words. Such relation embeddings are appealing because they can, in principle, encode relational knowledge in a more fine-grained way than is possible with knowledge graphs. To obtain relation embeddings from a pre-trained language model, we encode word pairs using a (manually or automatically generated) prompt, and we fine-tune the language model such that relationally similar word pairs yield similar output vectors. We find that the resulting relation embeddings are highly competitive on analogy (unsupervised) and relation classification (supervised) benchmarks, even without any task-specific fine-tuning. Source code to reproduce our experimental results and the model checkpoints are available in the following repository: https://github.com/asahi417/relbert",
}
```
analogy.bidirection.json ADDED
@@ -0,0 +1 @@
{"scan/test": 0.28094059405940597, "sat_full/test": 0.5802139037433155, "sat/test": 0.5756676557863502, "u2/test": 0.5701754385964912, "u4/test": 0.5810185185185185, "google/test": 0.906, "bats/test": 0.6998332406892718, "t_rex_relational_similarity/test": 0.6775956284153005, "conceptnet_relational_similarity/test": 0.41023489932885904, "nell_relational_similarity/test": 0.7283333333333334, "scan/validation": 0.29213483146067415, "sat/validation": 0.6216216216216216, "u2/validation": 0.5416666666666666, "u4/validation": 0.625, "google/validation": 0.94, "bats/validation": 0.7487437185929648, "semeval2012_relational_similarity/validation": 0.7468354430379747, "t_rex_relational_similarity/validation": 0.26411290322580644, "conceptnet_relational_similarity/validation": 0.32194244604316546, "nell_relational_similarity/validation": 0.5725}
analogy.forward.json ADDED
@@ -0,0 +1 @@
{"semeval2012_relational_similarity/validation": 0.7721518987341772, "scan/test": 0.2524752475247525, "sat_full/test": 0.5909090909090909, "sat/test": 0.599406528189911, "u2/test": 0.5570175438596491, "u4/test": 0.5625, "google/test": 0.88, "bats/test": 0.6864924958310172, "t_rex_relational_similarity/test": 0.6557377049180327, "conceptnet_relational_similarity/test": 0.39429530201342283, "nell_relational_similarity/test": 0.6383333333333333, "scan/validation": 0.25842696629213485, "sat/validation": 0.5135135135135135, "u2/validation": 0.5, "u4/validation": 0.6041666666666666, "google/validation": 0.96, "bats/validation": 0.7537688442211056, "t_rex_relational_similarity/validation": 0.25, "conceptnet_relational_similarity/validation": 0.31384892086330934, "nell_relational_similarity/validation": 0.58}
analogy.reverse.json ADDED
@@ -0,0 +1 @@
{"scan/test": 0.26794554455445546, "sat_full/test": 0.5481283422459893, "sat/test": 0.5400593471810089, "u2/test": 0.5482456140350878, "u4/test": 0.5810185185185185, "google/test": 0.872, "bats/test": 0.6625903279599777, "t_rex_relational_similarity/test": 0.6229508196721312, "conceptnet_relational_similarity/test": 0.3573825503355705, "nell_relational_similarity/test": 0.7566666666666667, "scan/validation": 0.25280898876404495, "sat/validation": 0.6216216216216216, "u2/validation": 0.5416666666666666, "u4/validation": 0.6458333333333334, "google/validation": 0.88, "bats/validation": 0.7085427135678392, "semeval2012_relational_similarity/validation": 0.7088607594936709, "t_rex_relational_similarity/validation": 0.24798387096774194, "conceptnet_relational_similarity/validation": 0.26169064748201437, "nell_relational_similarity/validation": 0.5575}
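The `forward`, `reverse`, and `bidirection` files score the same multiple-choice analogy questions, differing in the direction(s) in which query and candidate pairs are embedded. A sketch of the selection rule, assuming relation embeddings are precomputed (function and variable names here are illustrative, not relbert APIs):

```python
# Illustrative analogy scoring: pick the candidate pair whose relation
# embedding is most similar to the query pair's embedding. The official
# evaluation lives in the relbert repository; this only shows the idea.
import numpy as np

def pick_answer(query_vec, choice_vecs):
    def cos(a, b):
        return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))
    return int(np.argmax([cos(np.asarray(query_vec), np.asarray(c)) for c in choice_vecs]))

# usage: pick_answer(v_query, [v_choice1, v_choice2, ...]) with vectors
# obtained from model.get_embedding
```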
    	
classification.json ADDED
@@ -0,0 +1 @@
{"lexical_relation_classification/BLESS": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.8998041283712521, "test/f1_macro": 0.8977934264238211, "test/f1_micro": 0.8998041283712521, "test/p_macro": 0.8917486357523923, "test/p_micro": 0.8998041283712521, "test/r_macro": 0.9051892726118357, "test/r_micro": 0.8998041283712521}, "lexical_relation_classification/CogALexV": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.8272300469483568, "test/f1_macro": 0.6397137935143544, "test/f1_micro": 0.8272300469483568, "test/p_macro": 0.6619784132799931, "test/p_micro": 0.8272300469483568, "test/r_macro": 0.6213250025985675, "test/r_micro": 0.8272300469483568}, "lexical_relation_classification/EVALution": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.6462621885157096, "test/f1_macro": 0.6446245083980608, "test/f1_micro": 0.6462621885157096, "test/p_macro": 0.6537160185135298, "test/p_micro": 0.6462621885157096, "test/r_macro": 0.6384690628989079, "test/r_micro": 0.6462621885157096}, "lexical_relation_classification/K&H+N": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.9412951241566391, "test/f1_macro": 0.8510085443796092, "test/f1_micro": 0.9412951241566391, "test/p_macro": 0.867195617415925, "test/p_micro": 0.9412951241566391, "test/r_macro": 0.8368361305440628, "test/r_micro": 0.9412951241566391}, "lexical_relation_classification/ROOT09": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.8752742087120025, "test/f1_macro": 0.8731443734393283, "test/f1_micro": 0.8752742087120025, "test/p_macro": 0.8682973841822011, "test/p_micro": 0.8752742087120025, "test/r_macro": 0.8787091659586669, "test/r_micro": 0.8752742087120025}}
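Each `classifier_config` block above records scikit-learn `MLPClassifier` settings (default hidden layer of 100 units, adam solver, `random_state: 0`), suggesting the downstream evaluation trains such a classifier on the relation embeddings and reports micro/macro F1. A minimal sketch of that setup, with hypothetical placeholder arrays standing in for the real embeddings and labels:

```python
# Minimal sketch of the evaluation implied by classifier_config: a default
# scikit-learn MLPClassifier trained on relation embeddings, scored with
# micro/macro F1. The random arrays below are hypothetical placeholders,
# not data from this repository.
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import f1_score

rng = np.random.default_rng(0)
x_train, y_train = rng.normal(size=(64, 768)), rng.integers(0, 3, 64)
x_test, y_test = rng.normal(size=(16, 768)), rng.integers(0, 3, 16)

clf = MLPClassifier(hidden_layer_sizes=(100,), solver="adam", random_state=0)
clf.fit(x_train, y_train)
pred = clf.predict(x_test)
print(f1_score(y_test, pred, average="micro"), f1_score(y_test, pred, average="macro"))
```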
    	
config.json ADDED
@@ -0,0 +1,31 @@
{
  "_name_or_path": "roberta-base",
  "architectures": [
    "RobertaModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "relbert_config": {
    "aggregation_mode": "average",
    "template": "Today, I finally discovered the relation between <subj> and <obj> : <subj> is the <mask> of <obj>"
  },
  "torch_dtype": "float32",
  "transformers_version": "4.26.1",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 50265
}
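The `relbert_config.template` is the prompt used to encode a word pair: `<subj>` and `<obj>` are replaced by the two words, `<mask>` is kept for the tokenizer's mask token, and hidden states are pooled according to `aggregation_mode: average`. A small illustration of the substitution step only (`fill_template` is a hypothetical helper, not a relbert API):

```python
# Illustration of filling the prompt template from relbert_config for one
# word pair; fill_template is a hypothetical helper, not part of relbert.
TEMPLATE = ("Today, I finally discovered the relation between <subj> and <obj> : "
            "<subj> is the <mask> of <obj>")

def fill_template(subj: str, obj: str, template: str = TEMPLATE) -> str:
    # <mask> is left intact so the tokenizer can map it to its mask token
    return template.replace("<subj>", subj).replace("<obj>", obj)

print(fill_template("Tokyo", "Japan"))
# Today, I finally discovered the relation between Tokyo and Japan : Tokyo is the <mask> of Japan
```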
    	
finetuning_config.json ADDED
@@ -0,0 +1,24 @@
{
  "template": "Today, I finally discovered the relation between <subj> and <obj> : <subj> is the <mask> of <obj>",
  "model": "roberta-base",
  "max_length": 64,
  "epoch": 10,
  "batch": 32,
  "random_seed": 0,
  "lr": 5e-06,
  "lr_warmup": 10,
  "aggregation_mode": "average",
  "data": "relbert/semeval2012_relational_similarity",
  "data_name": null,
  "exclude_relation": null,
  "split": "train",
  "split_valid": "validation",
  "loss_function": "nce",
  "classification_loss": false,
  "loss_function_config": {
    "temperature": 0.05,
    "num_negative": 400,
    "num_positive": 10
  },
  "augment_negative_by_positive": true
}
merges.txt ADDED
The diff for this file is too large to render. See raw diff.
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7efa8beef13f77d5eb16cd7e31d802db97eab142ecbed16d149ca6d9729ead60
size 498653741
relation_mapping.json ADDED
The diff for this file is too large to render. See raw diff.
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
{
  "bos_token": "<s>",
  "cls_token": "<s>",
  "eos_token": "</s>",
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "unk_token": "<unk>"
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
{
  "add_prefix_space": false,
  "bos_token": "<s>",
  "cls_token": "<s>",
  "eos_token": "</s>",
  "errors": "replace",
  "mask_token": "<mask>",
  "model_max_length": 512,
  "name_or_path": "roberta-base",
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "special_tokens_map_file": null,
  "tokenizer_class": "RobertaTokenizer",
  "trim_offsets": true,
  "unk_token": "<unk>"
}
vocab.json ADDED
The diff for this file is too large to render. See raw diff.