revert back to nl2bash
nl2bash_m.py  CHANGED  (+24 -30)

@@ -107,45 +107,39 @@ class nl2bash_m(evaluate.Metric):
         predictions = np.char.translate(predictions, table=repl_table)
         references = np.char.translate(references, table=repl_table)
 
-
+
         final_score = 0
-        for pred, refs in zip(predictions, references):
+        for pred, refs in zip(predictions, references):
             best_score = 0
-            for ref in refs:
-
-
-
-
-
-
-            # elif len(pred) == 0 or min([len(ref) for ref in refs]) == 0:
-            #     score = 0
-            # else:
-            #     best_score = 0
-            #     for ref in refs:
-            #         pred_words, ref_words = pred.split(), ref.split()
+            if len(pred) == 0 and min([len(ref) for ref in refs]) == 0:
+                best_score = 1
+            elif len(pred) == 0 or min([len(ref) for ref in refs]) == 0:
+                best_score = 0
+            else:
+                for ref in refs:
+                    pred_words, ref_words = pred.split(), ref.split()
 
 
-
-
+                    # Get the cmd of predicted and ref
+                    cmd_corr = 1 if pred_words.pop(0)==ref_words.pop(0) else 0
 
-
-
-
+                    # Get the option of predicted and ref
+                    pred_option = [ x for x in pred_words if x[0] == '-']
+                    ref_option = [ x for x in ref_words if x[0] == '-']
 
-
-
-
+                    # Get the arguments of predicted and ref
+                    pred_args = [ x for x in pred_words if x[0] != '-']
+                    ref_args = [ x for x in ref_words if x[0] != '-']
 
-
-
-
-
+                    # Calculate scores
+                    cmd_score = cmd_weight * cmd_corr
+                    opt_score = opt_weight * self.get_score(pred_option, ref_option)
+                    arg_score = arg_weight * self.get_score(pred_args, ref_args)
 
-
-
+                    score = cmd_score + opt_score + arg_score
+                    best_score = max(best_score, score)
 
-
+            final_score += best_score
 
         final_score = final_score/len(predictions)
 
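For context, the restored per-example scoring can be read as the standalone sketch below. The helper name score_pair, the 0.4/0.3/0.3 weights, and the token-overlap get_score are illustrative assumptions only; the actual cmd_weight/opt_weight/arg_weight values and the metric's real get_score helper live elsewhere in nl2bash_m.py and are not part of this diff.

# Minimal sketch of the restored scoring logic (assumed weights and get_score).
def get_score(pred_tokens, ref_tokens):
    # Assumed helper: fraction of reference tokens that also appear in the prediction.
    if not ref_tokens:
        return 1.0 if not pred_tokens else 0.0
    return sum(1 for tok in set(ref_tokens) if tok in pred_tokens) / len(set(ref_tokens))

def score_pair(pred, refs, cmd_weight=0.4, opt_weight=0.3, arg_weight=0.3):
    # Mirrors the per-example logic added by this commit:
    # empty prediction vs. empty reference scores 1, empty vs. non-empty scores 0,
    # otherwise keep the best weighted command/option/argument score over all references.
    if len(pred) == 0 and min(len(ref) for ref in refs) == 0:
        return 1.0
    if len(pred) == 0 or min(len(ref) for ref in refs) == 0:
        return 0.0
    best = 0.0
    for ref in refs:
        pred_words, ref_words = pred.split(), ref.split()
        cmd_corr = 1 if pred_words.pop(0) == ref_words.pop(0) else 0   # first token is the command
        pred_opts = [w for w in pred_words if w[0] == '-']             # options start with '-'
        ref_opts = [w for w in ref_words if w[0] == '-']
        pred_args = [w for w in pred_words if w[0] != '-']             # everything else is an argument
        ref_args = [w for w in ref_words if w[0] != '-']
        score = (cmd_weight * cmd_corr
                 + opt_weight * get_score(pred_opts, ref_opts)
                 + arg_weight * get_score(pred_args, ref_args))
        best = max(best, score)
    return best

# Example: command and argument match while the option differs, so the score is
# roughly 0.7 under the assumed weights.
print(score_pair("ls -la /tmp", ["ls -l /tmp"]))

The sketch only illustrates the control flow restored by the revert; how the metric's own get_score handles duplicates or token order may differ.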