Do the maximum token length and maximum vocabulary size of a Unigram tokenizer affect dependency parsing?
I investigated how the maximum token length M and the maximum vocabulary size V of a Unigram tokenizer affect UPOS/LAS/MLAS. Each DeBERTa(V2) model was built from the sentences of ja_gsdluw-ud-train.conllu only. For reference, the tables also show UPOS/LAS/MLAS with the tokenizer replaced by BertJapaneseTokenizer (the BJT rows).
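In terms of the tokenizers API, M and V are simply the max_piece_length and vocab_size arguments of UnigramTrainer. A minimal sketch, distilled from the full script below (assuming tokenizers 0.12.1 and the whitespace-joined token.txt built there):
from tokenizers import Tokenizer,models,trainers
# sketch only: M bounds the length of each token, V bounds the vocabulary size
M,V=4,16000
spt=Tokenizer(models.Unigram())
spt.train(files=["token.txt"],trainer=trainers.UnigramTrainer(vocab_size=V,
  max_piece_length=M,special_tokens=["[CLS]","[PAD]","[SEP]","[UNK]","[MASK]"],
  unk_token="[UNK]"))
spt.save("tokenizer.json")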
Evaluation on ja_gsdluw-ud-dev.conllu (UPOS/LAS/MLAS)
|      | V=4000            | V=8000            | V=16000           | V=32000           |
|------|-------------------|-------------------|-------------------|-------------------|
| M=1  | 64.60/47.09/24.94 | 66.86/51.64/28.21 | 67.46/52.68/29.63 | 65.95/48.89/26.52 |
| M=2  | 75.54/63.22/39.99 | 75.15/60.16/38.74 | 76.91/63.87/41.98 | 74.69/59.19/37.71 |
| M=4  | 75.95/62.72/39.94 | 79.04/67.59/45.80 | 79.12/67.26/45.22 | 80.26/69.70/47.97 |
| M=8  | 74.29/59.53/37.07 | 76.51/62.12/40.78 | 79.38/67.28/46.46 | 79.95/68.93/47.26 |
| M=16 | 75.31/60.52/38.62 | 80.05/69.61/47.69 | 79.59/66.51/45.93 | 79.99/68.24/47.24 |
| BJT  | 70.57/62.26/35.49 | 80.04/70.76/48.48 | 82.30/73.29/52.32 | 83.76/76.24/55.40 |
Test on ja_gsdluw-ud-test.conllu (UPOS/LAS/MLAS)
|      | V=4000            | V=8000            | V=16000           | V=32000           |
|------|-------------------|-------------------|-------------------|-------------------|
| M=1  | 61.54/44.53/21.50 | 64.19/49.14/25.45 | 64.58/49.33/26.11 | 62.78/45.49/23.61 |
| M=2  | 73.38/60.28/36.52 | 72.73/56.82/35.41 | 74.67/59.85/38.16 | 72.54/55.72/34.60 |
| M=4  | 73.30/58.80/36.03 | 77.20/64.98/43.16 | 77.54/65.10/43.57 | 78.20/66.39/44.65 |
| M=8  | 71.84/56.64/33.74 | 74.44/59.68/38.05 | 76.93/63.44/41.45 | 77.57/65.03/43.62 |
| M=16 | 72.65/58.09/34.99 | 77.39/65.72/43.17 | 77.25/63.88/42.63 | 77.75/65.31/44.43 |
| BJT  | 69.21/62.14/33.72 | 78.83/68.84/46.72 | 81.17/72.00/49.85 | 82.73/74.21/53.16 |
Working environment
mdx 1GPU (NVIDIA A100-SXM4-40GB)
- tokenizers 0.12.1
- transformers 4.19.1
- esupar 1.2.7
- torch 1.11.0+cu113
- Universal Dependencies 2.10
/bin/sh script
#! /bin/sh
URL=https://github.com/UniversalDependencies/UD_Japanese-GSDLUW
D=`basename $URL`
test -d $D || git clone --depth=1 $URL
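# for each split: blank out the FEATS column (field 6) of the CoNLL-U file and extract its raw sentences into $F.txt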
for F in train dev test
do nawk -F'\t' '{OFS=FS;if(NF==10)$6="_";print}' $D/*-$F*.conllu > $F.conllu
sed -n 's/^# text = //p' $F.conllu > $F.txt
done
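# build token.txt: each training sentence as one line of space-separated word tokens (tokenizer training data)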
S='{if(NF==10&&$1~/^[1-9][0-9]*$/)printf($1>1?" %s":"%s",$2);if(NF==0)print}'
nawk -F'\t' "$S" $D/*-train.conllu > token.txt
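# fetch the CoNLL 2018 shared task evaluation script (reports UPOS/LAS/MLAS among other metrics)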
U=http://universaldependencies.org/conll18/conll18_ud_eval.py
C=`basename $U`
test -f $C || curl -LO $U
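# for every combination of maximum token length M and vocabulary size V: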
for M in 1 2 4 8 16
do for V in 4000 8000 16000 32000
do test -d deberta$M-$V || python3 -c m,v=$M,$V'
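# pretrain a DeBERTa(V2) masked language model whose Unigram tokenizer uses max token length m and vocabulary size v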
from transformers import (DataCollatorForLanguageModeling,TrainingArguments,
DebertaV2TokenizerFast,DebertaV2Config,DebertaV2ForMaskedLM,Trainer)
from tokenizers import (Tokenizer,models,pre_tokenizers,normalizers,processors,
decoders,trainers)
s=["[CLS]","[PAD]","[SEP]","[UNK]","[MASK]"]
spt=Tokenizer(models.Unigram())
spt.pre_tokenizer=pre_tokenizers.Sequence([pre_tokenizers.Whitespace(),
pre_tokenizers.Punctuation()])
spt.normalizer=normalizers.Sequence([normalizers.Nmt(),normalizers.NFKC()])
spt.post_processor=processors.TemplateProcessing(single="[CLS] $A [SEP]",
pair="[CLS] $A [SEP] $B:1 [SEP]:1",special_tokens=[("[CLS]",0),("[SEP]",2)])
spt.decoder=decoders.WordPiece(prefix="",cleanup=True)
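# train the tokenizer on token.txt; max_piece_length is the maximum token length, vocab_size the maximum vocabulary size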
spt.train(trainer=trainers.UnigramTrainer(vocab_size=v,max_piece_length=m,
special_tokens=s,unk_token="[UNK]",n_sub_iterations=2),files=["token.txt"])
spt.save("tokenizer.json")
tkz=DebertaV2TokenizerFast(tokenizer_file="tokenizer.json",
do_lower_case=False,keep_accents=True,bos_token="[CLS]",cls_token="[CLS]",
pad_token="[PAD]",sep_token="[SEP]",unk_token="[UNK]",mask_token="[MASK]",
vocab_file="/dev/null",model_max_length=512,split_by_punct=True)
t=tkz.convert_tokens_to_ids(s)
cfg=DebertaV2Config(hidden_size=768,num_hidden_layers=12,num_attention_heads=12,
intermediate_size=3072,max_position_embeddings=tkz.model_max_length,
vocab_size=len(tkz),tokenizer_class=type(tkz).__name__,
bos_token_id=t[0],pad_token_id=t[1],eos_token_id=t[2])
arg=TrainingArguments(num_train_epochs=8,per_device_train_batch_size=64,
output_dir="/tmp",overwrite_output_dir=True,save_total_limit=2)
class ReadLineDS(object):
def __init__(self,file,tokenizer):
self.tokenizer=tokenizer
with open(file,"r",encoding="utf-8") as r:
self.lines=[s.strip() for s in r if s.strip()!=""]
__len__=lambda self:len(self.lines)
__getitem__=lambda self,i:self.tokenizer(self.lines[i],truncation=True,
add_special_tokens=True,max_length=self.tokenizer.model_max_length-2)
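# run masked language modeling for 8 epochs, then save model and tokenizer under deberta{m}-{v}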
trn=Trainer(args=arg,data_collator=DataCollatorForLanguageModeling(tkz),
model=DebertaV2ForMaskedLM(cfg),train_dataset=ReadLineDS("train.txt",tkz))
trn.train()
trn.save_model("deberta{}-{}".format(m,v))
tkz.save_pretrained("deberta{}-{}".format(m,v))'
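# build the POS-tagging / dependency-parsing model on top of the language model with esupar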
test -d upos$M-$V || python3 -m esupar.train deberta$M-$V upos$M-$V .
test -f result$M-$V/result && continue
mkdir -p result$M-$V
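# parse the raw dev/test sentences with the trained model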
for F in dev test
do cat $F.txt | python3 -c 'mdl,f="upos'$M-$V'","result'$M-$V/$F'.conllu"
import esupar
nlp=esupar.load(mdl)
with open(f,"w",encoding="utf-8") as w:
while True:
try:
doc=nlp(input().strip())
except:
quit()
print(doc,file=w)'
done
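# score the output against the FEATS-blanked gold files with conll18_ud_eval.py and keep the report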
( echo '***' upos$M-$V dev
python3 $C -v dev.conllu result$M-$V/dev.conllu
echo '***' upos$M-$V test
python3 $C -v test.conllu result$M-$V/test.conllu
) | tee result$M-$V/result
done
done