fix dataset
- data/tweet_ner7/test.jsonl +2 -2
- data/tweet_ner7/train.jsonl +2 -2
- data/tweet_ner7/validation.jsonl +2 -2
- data/tweet_topic/test.jsonl +2 -2
- data/tweet_topic/train.jsonl +2 -2
- data/tweet_topic/validation.jsonl +2 -2
- process/unify_sp_symbol.py +2 -1
- super_tweeteval.py +3 -4
data/tweet_ner7/test.jsonl  CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:c81a0636e99f9389e2df5e5eb731965e714e2ace6f3b734ce7944f46a161d619
+size 2272370
data/tweet_ner7/train.jsonl  CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:1319759f9b33741bd24b0c2ec990ca8798ed5592a156f76ee4a9ee5f0d94671d
+size 3751975
data/tweet_ner7/validation.jsonl  CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:391d7aa9abec70be6693b4c9c0e821e5b1e1b9dc318aec03889fde9ac5c1e89d
+size 464937
data/tweet_topic/test.jsonl  CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:fc9608ff97c872ed5770b6651cc9d1e7718575dafe8b486e7bb8468352209935
+size 464731
data/tweet_topic/train.jsonl  CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:1ba297de0531647cfa2e12634e6402a39bf20749539036cc435ea1713dd3c303
+size 1286581
data/tweet_topic/validation.jsonl  CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6b3d0ea649e19a43970a8a6fe9841602bd924538f998ac3e9b87488582334d0b
+size 160549
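Each of the .jsonl files above is stored through Git LFS, so the diffs only touch the three-line pointer (spec version, sha256 oid, byte size); the actual data is fetched separately. A minimal sketch of how a pulled file can be checked against its pointer, using the new tweet_ner7 test-split values from this commit (the helper name is illustrative):

import hashlib
from pathlib import Path

def matches_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against the oid/size recorded in its LFS pointer."""
    blob = Path(path).read_bytes()
    return len(blob) == expected_size and hashlib.sha256(blob).hexdigest() == expected_oid

print(matches_lfs_pointer(
    "data/tweet_ner7/test.jsonl",
    "c81a0636e99f9389e2df5e5eb731965e714e2ace6f3b734ce7944f46a161d619",
    2272370,
))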
process/unify_sp_symbol.py  CHANGED
@@ -27,7 +27,8 @@ for i in glob("data/tweet_ner7/*.jsonl"):
             d['text'] = d['text'].replace(t, t_new)
             d['text_tokenized'] = [y if y != t else t_new for y in d['text_tokenized']]
             for e in d['entities']:
-                e['']
+                e['entity'] = e['entity'].replace(t, t_new)
+                e['entity'] = e['entity'].replace("{{USERNAME}}", "@user").replace("{{URL}}", "{URL}")

    with open(i, "w") as f:
        f.write("\n".join([json.dumps(j) for j in data]))
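The script change applies to the gold entity strings the same special-token substitution already applied to text and text_tokenized, mapping {{USERNAME}} to @user and {{URL}} to {URL}. A minimal standalone sketch of that normalization step (the sample record and the replacements list are illustrative; only the two token pairs come from this commit):

import json

replacements = [("{{USERNAME}}", "@user"), ("{{URL}}", "{URL}")]

def normalize_record(d: dict) -> dict:
    """Apply the special-token substitution to the text, the tokens, and each entity span."""
    for t, t_new in replacements:
        d["text"] = d["text"].replace(t, t_new)
        d["text_tokenized"] = [y if y != t else t_new for y in d["text_tokenized"]]
        for e in d["entities"]:
            e["entity"] = e["entity"].replace(t, t_new)
    return d

record = {
    "text": "{{USERNAME}} shared {{URL}}",
    "text_tokenized": ["{{USERNAME}}", "shared", "{{URL}}"],
    "entities": [{"entity": "{{USERNAME}}", "type": "user"}],
}
print(json.dumps(normalize_record(record)))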
super_tweeteval.py  CHANGED
@@ -2,7 +2,7 @@
 import json
 import datasets

-_VERSION = "0.1.
+_VERSION = "0.1.5"
 _SUPER_TWEETEVAL_CITATION = """TBA"""
 _SUPER_TWEETEVAL_DESCRIPTION = """TBA"""
 _TWEET_TOPIC_DESCRIPTION = """
@@ -309,6 +309,7 @@ class SuperTweetEval(datasets.GeneratorBasedBuilder):
                 datasets.features.ClassLabel(names=names))
             features["text_tokenized"] = datasets.Sequence(
                 datasets.Value("string"))
+            features["entities"] = datasets.Sequence({"entity": datasets.Value("string"), "type": datasets.Value("string")})
         if self.config.name in ["tweet_intimacy", "tweet_similarity"]:
             features["gold_score"] = datasets.Value("float32")
         if self.config.name == "tempo_wic":
@@ -348,11 +349,9 @@ class SuperTweetEval(datasets.GeneratorBasedBuilder):
                 names=label_classes)
             features["text"] = datasets.Value("string")
         if self.config.name == "tweet_sentiment":
-            label_classes = ["strongly negative", "negative",
-                             "negative or neutral", "positive", "strongly positive"]
             features["text"] = datasets.Value("string")
             names = ['anger', 'anticipation', 'disgust', 'fear', 'joy',
-
+                     'love', 'optimism', 'pessimism', 'sadness', 'surprise', 'trust']
             features["gold_label_list"] = datasets.Sequence(
                 datasets.features.ClassLabel(names=names))

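With the version bump to 0.1.5 the tweet_ner7 config also exposes the entities column as a sequence of {entity, type} pairs alongside text and text_tokenized. A minimal sketch of loading it through the local dataset script, assuming this repository is checked out so that super_tweeteval.py and the data/ directory sit next to each other (recent versions of the datasets library may additionally require trust_remote_code=True):

from datasets import load_dataset

dataset = load_dataset("super_tweeteval.py", "tweet_ner7")

example = dataset["test"][0]
print(example["text_tokenized"])
# datasets.Sequence over a dict yields parallel lists, e.g.
# {"entity": ["@user", ...], "type": ["person", ...]}
print(example["entities"])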