Do the maximum token length and maximum vocabulary size of a Unigram tokenizer affect dependency parsing?

RoBERTa models built from Chinese (Traditional) UD_Chinese-GSD

I investigated how the maximum token length M and the maximum vocabulary size V of a Unigram tokenizer affect UPOS/LAS/MLAS in dependency parsing. Each RoBERTa model is built using only the sentences of zh_gsd-ud-train.conllu.
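
Here M and V correspond to the max_piece_length and vocab_size arguments of UnigramTrainer in the tokenizers library. A minimal sketch of that step, mirroring the full script below (token.txt holds one space-separated, gold-segmented sentence per line):

from tokenizers import Tokenizer,models,trainers
spt=Tokenizer(models.Unigram())
# M bounds the character length of each token, V the vocabulary size
spt.train(trainer=trainers.UnigramTrainer(vocab_size=8000,max_piece_length=2,
  unk_token="[UNK]"),files=["token.txt"])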

Evaluation on zh_gsd-ud-dev.conllu (each cell: UPOS/LAS/MLAS)

      V=4000             V=8000             V=16000            V=32000
M=1   52.60/27.99/16.81  52.57/27.24/16.59  52.75/27.18/16.44  52.55/27.48/16.84
M=2   53.06/28.16/17.43  52.91/27.23/17.52  53.34/28.67/17.47  53.28/27.67/17.23
M=4   53.16/27.11/16.85  53.08/27.60/17.10  52.81/27.72/17.25  53.08/27.15/16.84
M=8   53.05/27.45/16.91  52.90/27.12/16.94  52.96/27.92/17.04  52.86/27.85/16.98
M=16  53.27/27.66/17.15  53.30/28.32/17.53  52.92/27.89/17.18  52.73/28.16/17.49

Test on zh_gsd-ud-test.conllu (each cell: UPOS/LAS/MLAS)

      V=4000             V=8000             V=16000            V=32000
M=1   53.93/30.89/19.64  53.59/29.56/18.85  53.71/30.55/19.18  53.45/29.77/18.74
M=2   54.62/31.43/20.08  54.13/30.40/19.42  53.74/30.13/19.11  54.43/30.95/19.84
M=4   54.44/30.76/19.36  54.05/30.06/19.29  54.10/29.83/18.54  54.03/29.64/18.87
M=8   53.96/29.73/18.95  54.27/30.69/19.51  53.90/30.33/19.41  54.13/30.36/19.38
M=16  53.97/29.71/19.19  54.35/30.62/19.17  53.73/30.09/18.94  53.76/29.66/18.66

Working environment

mdx 1GPU (NVIDIA A100-SXM4-40GB)

/bin/sh script

#! /bin/sh
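# fetch UD_Chinese-GSD unless already cloned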
URL=https://github.com/UniversalDependencies/UD_Chinese-GSD
D=`basename $URL`
test -d $D || git clone --depth=1 $URL
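# blank the FEATS column (field 6) of each split and extract the raw text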
for F in train dev test
do nawk -F'\t' '{OFS=FS;if(NF==10)$6="_";print}' $D/*-$F*.conllu > $F.conllu
   sed -n 's/^# text = //p' $F.conllu > $F.txt
done
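# one line of space-separated gold tokens (FORM column) per training sentence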
S='{if(NF==10&&$1~/^[1-9][0-9]*$/)printf($1>1?" %s":"%s",$2);if(NF==0)print}'
nawk -F'\t' "$S" $D/*-train.conllu > token.txt
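# official evaluation script of the CoNLL 2018 shared task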
U=http://universaldependencies.org/conll18/conll18_ud_eval.py
C=`basename $U`
test -f $C || curl -LO $U
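# sweep max token length M and vocabulary size V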
for M in 1 2 4 8 16
do for V in 4000 8000 16000 32000
   do test -d roberta$M-$V || python3 -c 'm,v='$M,$V'
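# train a Unigram tokenizer and pretrain a RoBERTa masked language model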
from transformers import (RemBertTokenizerFast,RobertaConfig,RobertaForMaskedLM,
  DataCollatorForLanguageModeling,TrainingArguments,Trainer)
from tokenizers import (Tokenizer,models,pre_tokenizers,normalizers,processors,
  decoders,trainers)
s=["[CLS]","[PAD]","[SEP]","[UNK]","[MASK]"]
spt=Tokenizer(models.Unigram())
spt.pre_tokenizer=pre_tokenizers.Whitespace()
spt.normalizer=normalizers.Sequence([normalizers.Nmt(),normalizers.NFKC()])
spt.post_processor=processors.TemplateProcessing(single="[CLS] $A [SEP]",
  pair="[CLS] $A [SEP] $B:1 [SEP]:1",special_tokens=[("[CLS]",0),("[SEP]",2)])
spt.decoder=decoders.WordPiece(prefix="",cleanup=True)
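# vocab_size and max_piece_length are the swept V and M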
spt.train(trainer=trainers.UnigramTrainer(vocab_size=v,max_piece_length=m,
  special_tokens=s,unk_token="[UNK]",n_sub_iterations=2),files=["token.txt"])
spt.save("tokenizer.json")
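# wrap tokenizer.json as a fast tokenizer; vocab_file is a dummy placeholder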
tkz=RemBertTokenizerFast(tokenizer_file="tokenizer.json",vocab_file="/dev/null",
  do_lower_case=False,keep_accents=True,bos_token="[CLS]",cls_token="[CLS]",
  pad_token="[PAD]",sep_token="[SEP]",unk_token="[UNK]",mask_token="[MASK]",
  model_max_length=512)
t=tkz.convert_tokens_to_ids(s)
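# RoBERTa-base configuration (12 layers, hidden size 768)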
cfg=RobertaConfig(hidden_size=768,num_hidden_layers=12,num_attention_heads=12,
  intermediate_size=3072,max_position_embeddings=tkz.model_max_length,
  vocab_size=len(tkz),tokenizer_class=type(tkz).__name__,
  bos_token_id=t[0],pad_token_id=t[1],eos_token_id=t[2])
arg=TrainingArguments(num_train_epochs=8,per_device_train_batch_size=64,
  output_dir="/tmp",overwrite_output_dir=True,save_total_limit=2)
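# minimal dataset: one line of train.txt per example, tokenized on the fly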
class ReadLineDataset(object):
  def __init__(self,file,tokenizer):
    self.tokenizer=tokenizer
    with open(file,"r",encoding="utf-8") as r:
      self.lines=[s.strip() for s in r if s.strip()!=""]
  __len__=lambda self:len(self.lines)
  __getitem__=lambda self,i:self.tokenizer(self.lines[i],truncation=True,
    add_special_tokens=True,max_length=self.tokenizer.model_max_length-2)
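# masked language modeling over train.txt for 8 epochs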
trn=Trainer(args=arg,data_collator=DataCollatorForLanguageModeling(tkz),
  model=RobertaForMaskedLM(cfg),train_dataset=ReadLineDataset("train.txt",tkz))
trn.train()
trn.save_model("roberta{}-{}".format(m,v))
tkz.save_pretrained("roberta{}-{}".format(m,v))'
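      # fine-tune as a UPOS tagger and dependency parser with esupar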
      test -d upos$M-$V || python3 -m esupar.train roberta$M-$V upos$M-$V .
      test -f result$M-$V/result && continue
      mkdir -p result$M-$V
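      # parse the raw dev and test sentences with the fine-tuned model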
      for F in dev test
      do cat $F.txt | python3 -c 'mdl,f="upos'$M-$V'","result'$M-$V/$F'.conllu"
import esupar
nlp=esupar.load(mdl)
with open(f,"w",encoding="utf-8") as w:
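  # parse each line from stdin; the bare except ends the loop at EOF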
  while True:
    try:
      doc=nlp(input().strip())
    except:
      quit()
    print(doc,file=w)'
      done
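      # score with the CoNLL 2018 evaluation script and keep a copy of the log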
      ( echo '***' upos$M-$V dev
        python3 $C -v dev.conllu result$M-$V/dev.conllu
        echo '***' upos$M-$V test
        python3 $C -v test.conllu result$M-$V/test.conllu
      ) | tee result$M-$V/result
   done
done
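
Once a sweep finishes, any of the fine-tuned parsers can be tried interactively; a minimal sketch, assuming the upos2-4000 model directory from the run above and an arbitrary example sentence:

import esupar
nlp=esupar.load("upos2-4000")  # directory produced by esupar.train above
doc=nlp("我喜歡藍色的花")
print(doc)                     # prints the parse in CoNLL-U format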