KoichiYasuoka committed on
Commit
6d03916
1 Parent(s): e2677ee

initial release

Files changed (9)
  1. README.md +29 -0
  2. config.json +0 -0
  3. maker.py +117 -0
  4. pytorch_model.bin +3 -0
  5. special_tokens_map.json +37 -0
  6. tokenizer.json +0 -0
  7. tokenizer_config.json +63 -0
  8. ud.py +150 -0
  9. vocab.txt +0 -0
README.md ADDED
@@ -0,0 +1,29 @@
+ ---
+ language:
+ - "tr"
+ tags:
+ - "turkish"
+ - "token-classification"
+ - "pos"
+ - "dependency-parsing"
+ base_model: 99eren99/ModernBERT-base-Turkish-uncased-mlm
+ datasets:
+ - "universal_dependencies"
+ license: "apache-2.0"
+ pipeline_tag: "token-classification"
+ ---
+
+ # modernbert-base-turkish-ud-embeds
+
+ ## Model Description
+
+ This is a ModernBERT model for POS-tagging and dependency-parsing in Turkish, derived from [ModernBERT-base-Turkish-uncased-mlm](https://huggingface.co/99eren99/ModernBERT-base-Turkish-uncased-mlm).
+
+ ## How to Use
+
+ ```py
+ from transformers import pipeline
+ nlp=pipeline("universal-dependencies","KoichiYasuoka/modernbert-base-turkish-ud-embeds",trust_remote_code=True)
+ print(nlp("Ay dağın diğer tarafında yükseldi"))
+ ```
+
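The pipeline returns its analysis as a CoNLL-U formatted string (built by `UniversalDependenciesPipeline` in `ud.py` below), so the result can be written straight to a `.conllu` file. A minimal sketch under that assumption; the output filename is only illustrative:

```py
from transformers import pipeline

nlp = pipeline("universal-dependencies",
               "KoichiYasuoka/modernbert-base-turkish-ud-embeds",
               trust_remote_code=True)
conllu = nlp("Ay dağın diğer tarafında yükseldi")  # CoNLL-U string
with open("parsed.conllu", "w", encoding="utf-8") as f:  # hypothetical output path
    f.write(conllu)
```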
config.json ADDED
The diff for this file is too large to render. See raw diff
 
maker.py ADDED
@@ -0,0 +1,117 @@
+ #! /usr/bin/python3
+ import os,json
+ src="99eren99/ModernBERT-base-Turkish-uncased-mlm"
+ tgt="KoichiYasuoka/modernbert-base-turkish-ud-embeds"
+ url="https://github.com/UniversalDependencies/UD_Turkish-"
+ for e in ["Kenet","Penn","BOUN","Tourism","IMST","Atis","FrameNet"]:
+  u=url+e
+  d=os.path.basename(u)
+  os.system("test -d "+d+" || git clone --depth=1 "+u)
+ os.system("for F in train dev test ; do cat UD_Turkish-*/*-$F.conllu > $F.conllu ; done")
+ class UDEmbedsDataset(object):
+  def __init__(self,conllu,tokenizer,embeddings=None):
+   self.conllu=open(conllu,"r",encoding="utf-8")
+   self.tokenizer=tokenizer
+   self.embeddings=embeddings
+   self.seeks=[0]
+   label=set(["SYM","SYM.","SYM|_"])
+   dep=set()
+   s=self.conllu.readline()
+   while s!="":
+    if s=="\n":
+     self.seeks.append(self.conllu.tell())
+    else:
+     w=s.split("\t")
+     if len(w)==10:
+      if w[0].isdecimal():
+       p=w[3]
+       q="" if w[5]=="_" else "|"+w[5]
+       d=("|" if w[6]=="0" else "|l-" if int(w[0])<int(w[6]) else "|r-")+w[7]
+       for k in [p,p+".","B-"+p,"B-"+p+".","I-"+p,"I-"+p+".",p+q+"|_",p+q+d]:
+        label.add(k)
+    s=self.conllu.readline()
+   self.label2id={l:i for i,l in enumerate(sorted(label))}
+  def __call__(*args):
+   lid={l:i for i,l in enumerate(sorted(set(sum([list(t.label2id) for t in args],[]))))}
+   for t in args:
+    t.label2id=lid
+   return lid
+  def __del__(self):
+   self.conllu.close()
+  __len__=lambda self:(len(self.seeks)-1)*2
+  def __getitem__(self,i):
+   self.conllu.seek(self.seeks[int(i/2)])
+   z,c,t,s=i%2,[],[""],False
+   while t[0]!="\n":
+    t=self.conllu.readline().split("\t")
+    if len(t)==10 and t[0].isdecimal():
+     if s:
+      t[1]=" "+t[1]
+     c.append(t)
+     s=t[9].find("SpaceAfter=No")<0
+   x=[True if t[6]=="0" or int(t[6])>j or sum([1 if int(c[i][6])==j+1 else 0 for i in range(j+1,len(c))])>0 else False for j,t in enumerate(c)]
+   v=self.tokenizer([t[1] for t in c],add_special_tokens=False)["input_ids"]
+   if z==0:
+    ids,upos=[self.tokenizer.cls_token_id],["SYM."]
+    for i,(j,k) in enumerate(zip(v,c)):
+     if j==[]:
+      j=[self.tokenizer.unk_token_id]
+     p=k[3] if x[i] else k[3]+"."
+     ids+=j
+     upos+=[p] if len(j)==1 else ["B-"+p]+["I-"+p]*(len(j)-1)
+    ids.append(self.tokenizer.sep_token_id)
+    upos.append("SYM.")
+    emb=self.embeddings
+   else:
+    import torch
+    if len(x)<127:
+     x=[True]*len(x)
+     w=(len(x)+2)*(len(x)+1)/2
+    else:
+     w=sum([len(x)-i+1 if b else 0 for i,b in enumerate(x)])+1
+     for i in range(len(x)):
+      if x[i]==False and w+len(x)-i<8192:
+       x[i]=True
+       w+=len(x)-i+1
+    p=[t[3] if t[5]=="_" else t[3]+"|"+t[5] for i,t in enumerate(c)]
+    d=[t[7] if t[6]=="0" else "l-"+t[7] if int(t[0])<int(t[6]) else "r-"+t[7] for t in c]
+    ids,upos=[-1],["SYM|_"]
+    for i in range(len(x)):
+     if x[i]:
+      ids.append(i)
+      upos.append(p[i]+"|"+d[i] if c[i][6]=="0" else p[i]+"|_")
+      for j in range(i+1,len(x)):
+       ids.append(j)
+       upos.append(p[j]+"|"+d[j] if int(c[j][6])==i+1 else p[i]+"|"+d[i] if int(c[i][6])==j+1 else p[j]+"|_")
+      if w>8192 and i>0:
+       while w>8192 and upos[-1].endswith("|_"):
+        upos.pop(-1)
+        ids.pop(-1)
+        w-=1
+      ids.append(-1)
+      upos.append("SYM|_")
+    with torch.no_grad():
+     m=[]
+     for j in v:
+      if j==[]:
+       j=[self.tokenizer.unk_token_id]
+      m.append(self.embeddings[j,:].sum(axis=0))
+     m.append(self.embeddings[self.tokenizer.sep_token_id,:])
+     emb=torch.stack(m)
+   return{"inputs_embeds":emb[ids[:8192],:],"labels":[self.label2id[p] for p in upos[:8192]]}
+ from transformers import AutoTokenizer,AutoConfig,AutoModelForTokenClassification,DefaultDataCollator,TrainingArguments,Trainer
+ from tokenizers.normalizers import Sequence,Replace,BertNormalizer
+ tkz=AutoTokenizer.from_pretrained(src)
+ tkz.backend_tokenizer.normalizer=Sequence([Replace("İ","i"),Replace("I","ı"),BertNormalizer(lowercase=True,strip_accents=False)])
+ trainDS=UDEmbedsDataset("train.conllu",tkz)
+ devDS=UDEmbedsDataset("dev.conllu",tkz)
+ testDS=UDEmbedsDataset("test.conllu",tkz)
+ lid=trainDS(devDS,testDS)
+ cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True,trust_remote_code=True)
+ mdl=AutoModelForTokenClassification.from_pretrained(src,config=cfg,ignore_mismatched_sizes=True,trust_remote_code=True)
+ trainDS.embeddings=mdl.get_input_embeddings().weight
+ arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=1,dataloader_pin_memory=False,output_dir=tgt,overwrite_output_dir=True,save_total_limit=2,learning_rate=5e-05,warmup_ratio=0.1,save_safetensors=False)
+ trn=Trainer(args=arg,data_collator=DefaultDataCollator(),model=mdl,train_dataset=trainDS)
+ trn.train()
+ trn.save_model(tgt)
+ tkz.save_pretrained(tgt)
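A note on the normalizer override in `maker.py` above: plain BERT-style lowercasing maps the Turkish dotted capital İ to "i" plus a combining dot and maps I to a dotted "i", so explicit `Replace` rules are prepended before `BertNormalizer`. A minimal standalone sketch of that normalizer in isolation (the sample words are only illustrative):

```py
from tokenizers.normalizers import BertNormalizer, Replace, Sequence

# Turkish-aware lowercasing: map İ -> i and I -> ı before generic BERT normalization
norm = Sequence([Replace("İ", "i"), Replace("I", "ı"),
                 BertNormalizer(lowercase=True, strip_accents=False)])
print(norm.normalize_str("İstanbul IRMAK"))  # expected: "istanbul ırmak"
```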
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1870ef3ec1d236b23d48f21a0cdecc78dc6cae298e961bf10ee12e1dcbfabd48
+ size 592177074
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,63 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "max_len": 999999999,
+   "model_max_length": 999999999,
+   "model_input_names": [
+     "input_ids",
+     "attention_mask"
+   ],
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizerFast",
+   "unk_token": "[UNK]"
+ }
ud.py ADDED
@@ -0,0 +1,150 @@
+ import numpy
+ from transformers import TokenClassificationPipeline
+
+ class BellmanFordTokenClassificationPipeline(TokenClassificationPipeline):
+  def __init__(self,**kwargs):
+   super().__init__(**kwargs)
+   x=self.model.config.label2id
+   y=[k for k in x if k.find("|")<0 and not k.startswith("I-")]
+   self.transition=numpy.full((len(x),len(x)),-numpy.inf)
+   for k,v in x.items():
+    if k.find("|")<0:
+     for j in ["I-"+k[2:]] if k.startswith("B-") else [k]+y if k.startswith("I-") else y:
+      self.transition[v,x[j]]=0
+  def check_model_type(self,supported_models):
+   pass
+  def postprocess(self,model_outputs,**kwargs):
+   if "logits" not in model_outputs:
+    return self.postprocess(model_outputs[0],**kwargs)
+   return self.bellman_ford_token_classification(model_outputs,**kwargs)
+  def bellman_ford_token_classification(self,model_outputs,**kwargs):
+   m=model_outputs["logits"][0].numpy()
+   e=numpy.exp(m-numpy.max(m,axis=-1,keepdims=True))
+   z=e/e.sum(axis=-1,keepdims=True)
+   for i in range(m.shape[0]-1,0,-1):
+    m[i-1]+=numpy.max(m[i]+self.transition,axis=1)
+   k=[numpy.argmax(m[0]+self.transition[0])]
+   for i in range(1,m.shape[0]):
+    k.append(numpy.argmax(m[i]+self.transition[k[-1]]))
+   w=[{"entity":self.model.config.id2label[j],"start":s,"end":e,"score":z[i,j]} for i,((s,e),j) in enumerate(zip(model_outputs["offset_mapping"][0].tolist(),k)) if s<e]
+   if "aggregation_strategy" in kwargs and kwargs["aggregation_strategy"]!="none":
+    for i,t in reversed(list(enumerate(w))):
+     p=t.pop("entity")
+     if p.startswith("I-"):
+      w[i-1]["score"]=min(w[i-1]["score"],t["score"])
+      w[i-1]["end"]=w.pop(i)["end"]
+     elif p.startswith("B-"):
+      t["entity_group"]=p[2:]
+     else:
+      t["entity_group"]=p
+   for t in w:
+    t["text"]=model_outputs["sentence"][t["start"]:t["end"]]
+   return w
+
+ class UniversalDependenciesPipeline(BellmanFordTokenClassificationPipeline):
+  def __init__(self,**kwargs):
+   kwargs["aggregation_strategy"]="simple"
+   super().__init__(**kwargs)
+   x=self.model.config.label2id
+   self.root=numpy.full((len(x)),-numpy.inf)
+   self.left_arc=numpy.full((len(x)),-numpy.inf)
+   self.right_arc=numpy.full((len(x)),-numpy.inf)
+   for k,v in x.items():
+    if k.endswith("|root"):
+     self.root[v]=0
+    elif k.find("|l-")>0:
+     self.left_arc[v]=0
+    elif k.find("|r-")>0:
+     self.right_arc[v]=0
+  def postprocess(self,model_outputs,**kwargs):
+   import torch
+   kwargs["aggregation_strategy"]="simple"
+   if "logits" not in model_outputs:
+    return self.postprocess(model_outputs[0],**kwargs)
+   w=self.bellman_ford_token_classification(model_outputs,**kwargs)
+   off=[(t["start"],t["end"]) for t in w]
+   for i,(s,e) in reversed(list(enumerate(off))):
+    if s<e:
+     d=w[i]["text"]
+     j=len(d)-len(d.lstrip())
+     if j>0:
+      d=d.lstrip()
+      off[i]=(off[i][0]+j,off[i][1])
+     j=len(d)-len(d.rstrip())
+     if j>0:
+      d=d.rstrip()
+      off[i]=(off[i][0],off[i][1]-j)
+     if d.strip()=="":
+      off.pop(i)
+      w.pop(i)
+   v=self.tokenizer([t["text"] for t in w],add_special_tokens=False)
+   x=[not t["entity_group"].endswith(".") for t in w]
+   if len(x)<127:
+    x=[True]*len(x)
+   else:
+    k=sum([len(x)-i+1 if b else 0 for i,b in enumerate(x)])+1
+    for i in numpy.argsort(numpy.array([t["score"] for t in w])):
+     if x[i]==False and k+len(x)-i<8192:
+      x[i]=True
+      k+=len(x)-i+1
+   ids=[-1]
+   for i in range(len(x)):
+    if x[i]:
+     ids.append(i)
+     for j in range(i+1,len(x)):
+      ids.append(j)
+     ids.append(-1)
+   with torch.no_grad():
+    e=self.model.get_input_embeddings().weight
+    m=[]
+    for j in v["input_ids"]:
+     if j==[]:
+      j=[self.tokenizer.unk_token_id]
+     m.append(e[j,:].sum(axis=0))
+    m.append(e[self.tokenizer.sep_token_id,:])
+    m=torch.stack(m).to(self.device)
+    e=self.model(inputs_embeds=torch.unsqueeze(m[ids,:],0))
+   m=e.logits[0].cpu().numpy()
+   e=numpy.full((len(x),len(x),m.shape[-1]),m.min())
+   k=1
+   for i in range(len(x)):
+    if x[i]:
+     e[i,i]=m[k]+self.root
+     k+=1
+     for j in range(1,len(x)-i):
+      e[i+j,i]=m[k]+self.left_arc
+      e[i,i+j]=m[k]+self.right_arc
+      k+=1
+     k+=1
+   m,p=numpy.max(e,axis=2),numpy.argmax(e,axis=2)
+   h=self.chu_liu_edmonds(m)
+   z=[i for i,j in enumerate(h) if i==j]
+   if len(z)>1:
+    k,h=z[numpy.argmax(m[z,z])],numpy.min(m)-numpy.max(m)
+    m[:,z]+=[[0 if j in z and (i!=j or i==k) else h for i in z] for j in range(m.shape[0])]
+    h=self.chu_liu_edmonds(m)
+   q=[self.model.config.id2label[p[j,i]].split("|") for i,j in enumerate(h)]
+   t=model_outputs["sentence"].replace("\n"," ")
+   u="# text = "+t+"\n"
+   for i,(s,e) in enumerate(off):
+    u+="\t".join([str(i+1),t[s:e],"_",q[i][0],"_","_" if len(q[i])<3 else "|".join(q[i][1:-1]),str(0 if h[i]==i else h[i]+1),"root" if q[i][-1]=="root" else q[i][-1][2:],"_","_" if i+1<len(off) and e<off[i+1][0] else "SpaceAfter=No"])+"\n"
+   return u+"\n"
+  def chu_liu_edmonds(self,matrix):
+   h=numpy.argmax(matrix,axis=0)
+   x=[-1 if i==j else j for i,j in enumerate(h)]
+   for b in [lambda x,i,j:-1 if i not in x else x[i],lambda x,i,j:-1 if j<0 else x[j]]:
+    y=[]
+    while x!=y:
+     y=list(x)
+     for i,j in enumerate(x):
+      x[i]=b(x,i,j)
+    if max(x)<0:
+     return h
+   y,x=[i for i,j in enumerate(x) if j==max(x)],[i for i,j in enumerate(x) if j<max(x)]
+   z=matrix-numpy.max(matrix,axis=0)
+   m=numpy.block([[z[x,:][:,x],numpy.max(z[x,:][:,y],axis=1).reshape(len(x),1)],[numpy.max(z[y,:][:,x],axis=0),numpy.max(z[y,y])]])
+   k=[j if i==len(x) else x[j] if j<len(x) else y[numpy.argmax(z[y,x[i]])] for i,j in enumerate(self.chu_liu_edmonds(m))]
+   h=[j if i in y else k[x.index(i)] for i,j in enumerate(h)]
+   i=y[numpy.argmax(z[x[k[-1]],y] if k[-1]<len(x) else z[y,y])]
+   h[i]=x[k[-1]] if k[-1]<len(x) else i
+   return h
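For reference, the label decoding in `BellmanFordTokenClassificationPipeline` above is a max-sum dynamic program: a backward pass folds the best reachable continuation into each earlier position's logits, then a greedy forward trace picks labels under the B-/I- transition constraints. A minimal standalone sketch of that decoder on toy inputs (the function name and the toy shapes are illustrative only):

```py
import numpy

def max_sum_decode(logits, transition):
    # Backward pass: add the best continuation score to each earlier position,
    # then trace forward greedily, mirroring bellman_ford_token_classification above.
    m = logits.copy()
    for i in range(m.shape[0] - 1, 0, -1):
        m[i - 1] += numpy.max(m[i] + transition, axis=1)
    path = [int(numpy.argmax(m[0] + transition[0]))]
    for i in range(1, m.shape[0]):
        path.append(int(numpy.argmax(m[i] + transition[path[-1]])))
    return path

# Toy check: 3 positions, 2 labels, all transitions allowed (zero cost)
print(max_sum_decode(numpy.zeros((3, 2)), numpy.zeros((2, 2))))  # [0, 0, 0]
```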
vocab.txt ADDED
The diff for this file is too large to render. See raw diff