Do the maximum token length and maximum vocabulary size of a Unigram tokenizer affect dependency parsing?

DeBERTa models based on UD_Chinese-GSDSimp (Simplified Chinese)

I investigated how the maximum token length M and the maximum vocabulary size V of a Unigram tokenizer affect UPOS/LAS/MLAS. Each DeBERTa model is built using only the sentences of zh_gsdsimp-ud-train.conllu.
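
In the tokenizers library, these two hyperparameters correspond to the max_piece_length and vocab_size arguments of UnigramTrainer, exactly as in the /bin/sh script at the end of this article. As a minimal sketch, with M=4 and V=16000 chosen purely as example values:

from tokenizers import Tokenizer,models,trainers
# token.txt is built by the script below: one sentence of space-separated gold word forms per line
spt=Tokenizer(models.Unigram())
spt.train(trainer=trainers.UnigramTrainer(vocab_size=16000,max_piece_length=4,
  special_tokens=["[CLS]","[PAD]","[SEP]","[UNK]","[MASK]"],unk_token="[UNK]"),
  files=["token.txt"])
spt.save("tokenizer.json")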

Evaluated on zh_gsdsimp-ud-dev.conllu (each cell: UPOS/LAS/MLAS)

     V=4000            V=8000            V=16000           V=32000
M=1  52.87/27.65/17.36 52.97/27.78/17.02 53.18/28.00/17.33 52.89/28.16/17.24
M=2  53.55/27.82/17.36 53.57/28.24/17.44 53.23/28.06/17.30 53.54/27.91/17.52
M=4  53.41/27.40/17.33 53.50/28.37/17.36 53.69/27.87/17.38 53.36/28.17/17.76
M=8  53.53/27.64/17.53 53.18/27.62/17.15 53.47/28.03/17.72 53.46/27.81/17.64
M=16 53.50/28.51/17.68 53.69/27.46/17.44 53.36/27.97/17.63 53.24/28.43/17.76

Tested on zh_gsdsimp-ud-test.conllu (each cell: UPOS/LAS/MLAS)

     V=4000            V=8000            V=16000           V=32000
M=1  54.19/29.97/19.08 53.93/30.26/19.01 53.98/30.13/19.01 54.33/30.21/19.18
M=2  54.20/30.52/19.41 54.79/30.33/19.54 54.94/31.60/20.59 54.80/30.76/19.77
M=4  54.64/30.86/19.51 54.89/30.58/19.87 54.66/30.49/19.51 54.83/31.23/19.90
M=8  54.76/30.82/19.99 54.67/30.89/19.64 55.12/32.17/20.45 54.94/31.16/19.96
M=16 54.56/30.53/19.42 54.52/31.62/20.10 54.91/31.09/19.95 54.58/31.25/20.25
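
The two tables above can be assembled by picking the UPOS/LAS/MLAS F1 scores out of each result$M-$V/result file that the script below writes. A sketch, assuming the verbose output format of conll18_ud_eval.py (columns Precision | Recall | F1 Score | AligndAcc):

for m in [1,2,4,8,16]:
  for v in [4000,8000,16000,32000]:
    section,scores="",{}
    with open("result{}-{}/result".format(m,v),"r",encoding="utf-8") as r:
      for line in r:
        if line.startswith("***"):
          section=line.split()[-1]  # "dev" or "test"
        elif "|" in line:
          f=[x.strip() for x in line.split("|")]
          if f[0] in ("UPOS","LAS","MLAS"):
            scores[section,f[0]]=f[3]  # F1 Score column
    print(m,v,scores)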

Environment

mdx 1GPU (NVIDIA A100-SXM4-40GB)

/bin/sh script

#! /bin/sh
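# fetch UD_Chinese-GSDSimp, blank out the FEATS column, and extract the raw sentences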
URL=https://github.com/UniversalDependencies/UD_Chinese-GSDSimp
D=`basename $URL`
test -d $D || git clone --depth=1 $URL
for F in train dev test
do nawk -F'\t' '{OFS=FS;if(NF==10)$6="_";print}' $D/*-$F*.conllu > $F.conllu
   sed -n 's/^# text = //p' $F.conllu > $F.txt
done
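# token.txt: one space-separated line of gold word forms per training sentence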
S='{if(NF==10&&$1~/^[1-9][0-9]*$/)printf($1>1?" %s":"%s",$2);if(NF==0)print}'
nawk -F'\t' "$S" $D/*-train.conllu > token.txt
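# official evaluation script of the CoNLL 2018 shared task (reports UPOS, LAS, MLAS, etc.)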
U=http://universaldependencies.org/conll18/conll18_ud_eval.py
C=`basename $U`
test -f $C || curl -LO $U
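# pretrain one DeBERTa model for every combination of max token length M and vocabulary size V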
for M in 1 2 4 8 16
do for V in 4000 8000 16000 32000
   do test -d deberta$M-$V || python3 -c 'm,v='$M,$V'
from transformers import (DataCollatorForLanguageModeling,TrainingArguments,
  DebertaV2TokenizerFast,DebertaV2Config,DebertaV2ForMaskedLM,Trainer)
from tokenizers import (Tokenizer,models,pre_tokenizers,normalizers,processors,
  decoders,trainers)
s=["[CLS]","[PAD]","[SEP]","[UNK]","[MASK]"]
spt=Tokenizer(models.Unigram())
spt.pre_tokenizer=pre_tokenizers.Sequence([pre_tokenizers.Whitespace(),
  pre_tokenizers.Punctuation()])
spt.normalizer=normalizers.Sequence([normalizers.Nmt(),normalizers.NFKC()])
spt.post_processor=processors.TemplateProcessing(single="[CLS] $A [SEP]",
  pair="[CLS] $A [SEP] $B:1 [SEP]:1",special_tokens=[("[CLS]",0),("[SEP]",2)])
spt.decoder=decoders.WordPiece(prefix="",cleanup=True)
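# vocab_size=v and max_piece_length=m are the two hyperparameters compared in this article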
spt.train(trainer=trainers.UnigramTrainer(vocab_size=v,max_piece_length=m,
  special_tokens=s,unk_token="[UNK]",n_sub_iterations=2),files=["token.txt"])
spt.save("tokenizer.json")
tkz=DebertaV2TokenizerFast(tokenizer_file="tokenizer.json",
  do_lower_case=False,keep_accents=True,bos_token="[CLS]",cls_token="[CLS]",
  pad_token="[PAD]",sep_token="[SEP]",unk_token="[UNK]",mask_token="[MASK]",
  vocab_file="/dev/null",model_max_length=512,split_by_punct=True)
t=tkz.convert_tokens_to_ids(s)
cfg=DebertaV2Config(hidden_size=768,num_hidden_layers=12,num_attention_heads=12,
  intermediate_size=3072,max_position_embeddings=tkz.model_max_length,
  vocab_size=len(tkz),tokenizer_class=type(tkz).__name__,
  bos_token_id=t[0],pad_token_id=t[1],eos_token_id=t[2])
arg=TrainingArguments(num_train_epochs=8,per_device_train_batch_size=64,
  output_dir="/tmp",overwrite_output_dir=True,save_total_limit=2)
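# minimal dataset: one raw training sentence per line, tokenized on the fly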
class ReadLineDS(object):
  def __init__(self,file,tokenizer):
    self.tokenizer=tokenizer
    with open(file,"r",encoding="utf-8") as r:
      self.lines=[s.strip() for s in r if s.strip()!=""]
  __len__=lambda self:len(self.lines)
  __getitem__=lambda self,i:self.tokenizer(self.lines[i],truncation=True,
    add_special_tokens=True,max_length=self.tokenizer.model_max_length-2)
trn=Trainer(args=arg,data_collator=DataCollatorForLanguageModeling(tkz),
  model=DebertaV2ForMaskedLM(cfg),train_dataset=ReadLineDS("train.txt",tkz))
trn.train()
trn.save_model("deberta{}-{}".format(m,v))
tkz.save_pretrained("deberta{}-{}".format(m,v))'
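      # fine-tune a POS-tagging and dependency-parsing model on each DeBERTa model with esupar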
      test -d upos$M-$V || python3 -m esupar.train deberta$M-$V upos$M-$V .
      test -f result$M-$V/result && continue
      mkdir -p result$M-$V
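      # parse the raw dev/test sentences with the fine-tuned model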
      for F in dev test
      do cat $F.txt | python3 -c 'mdl,f="upos'$M-$V'","result'$M-$V/$F'.conllu"
import esupar
nlp=esupar.load(mdl)
with open(f,"w",encoding="utf-8") as w:
  while True:
    try:
      doc=nlp(input().strip())
    except:  # EOF on stdin (or a parsing error) ends the loop
      quit()
    print(doc,file=w)'
      done
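      # score the parses against the gold .conllu files and keep the report in result$M-$V/result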
      ( echo '***' upos$M-$V dev
        python3 $C -v dev.conllu result$M-$V/dev.conllu
        echo '***' upos$M-$V test
        python3 $C -v test.conllu result$M-$V/test.conllu
      ) | tee result$M-$V/result
   done
done
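
Each resulting upos$M-$V directory is a self-contained tagging and parsing model that esupar can load directly. A usage sketch (upos8-16000 is just one of the twenty models built above, and the sentence is an arbitrary example):

import esupar
nlp=esupar.load("upos8-16000")
doc=nlp("我把这本书看完了")
print(doc)  # CoNLL-U output with UPOS tags and dependency relations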