Do the maximum token length and maximum vocabulary size of a Unigram tokenizer affect dependency parsing?

RoBERTa models built from the Classical Chinese treebank UD_Classical_Chinese-Kyoto

I investigated how the maximum token length M and the maximum vocabulary size V of a Unigram tokenizer affect UPOS/LAS/MLAS in dependency parsing. Each RoBERTa model was pre-trained only on the sentences of lzh_kyoto-ud-train.conllu; the tables below report UPOS/LAS/MLAS for every combination of M and V.
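The two parameters under study correspond to the vocab_size and max_piece_length arguments of trainers.UnigramTrainer in the tokenizers library, as used in the script below. A minimal sketch, assuming token.txt has already been produced by that script; V=8000 and M=2 are just one point of the grid, and the sentence is an arbitrary example:

# train a Unigram tokenizer on token.txt and check the effect of max_piece_length
from tokenizers import Tokenizer, models, pre_tokenizers, trainers

tok = Tokenizer(models.Unigram())
tok.pre_tokenizer = pre_tokenizers.Whitespace()
tok.train(files=["token.txt"],
          trainer=trainers.UnigramTrainer(vocab_size=8000,      # V
                                          max_piece_length=2,   # M
                                          special_tokens=["[UNK]"],
                                          unk_token="[UNK]"))
print(tok.encode("天下皆知美之為美").tokens)  # no piece longer than 2 characters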

Evaluation on lzh_kyoto-ud-dev.conllu

      V=8000             V=16000            V=32000            V=64000
M=1   85.72/72.02/66.48  85.61/72.20/66.54  85.23/72.00/66.15  85.38/71.63/65.87
M=2   85.42/71.81/66.11  85.75/72.29/66.75  85.54/71.96/66.29  84.70/71.37/65.33
M=4   85.88/72.03/66.58  85.69/72.21/66.63  85.08/72.01/65.91  85.75/72.23/66.53
M=8   85.58/72.36/66.76  85.26/72.16/66.36  85.60/72.16/66.51  84.54/71.08/65.04
M=16  85.12/71.74/65.86  84.67/71.24/65.27  85.53/71.65/66.03  85.35/72.05/66.35

Test on lzh_kyoto-ud-test.conllu

      V=8000             V=16000            V=32000            V=64000
M=1   87.13/74.41/68.31  87.18/74.31/68.19  87.18/74.06/68.08  87.31/74.61/68.61
M=2   87.44/74.22/68.37  87.45/74.16/68.29  87.24/74.26/68.25  86.79/74.18/67.81
M=4   87.46/74.40/68.51  87.36/74.34/68.32  87.00/74.26/68.00  87.23/74.71/68.59
M=8   87.27/74.37/68.28  87.16/74.46/68.24  87.42/74.44/68.41  86.40/73.94/67.21
M=16  86.90/74.15/67.81  86.41/74.28/67.78  86.98/74.10/67.86  87.33/74.28/68.23
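Each cell above is the value reported by conll18_ud_eval.py (fetched by the script below). As a small convenience, a sketch for pulling those three rows back out of one result file; the path result4-8000/result is one grid point and exists only after the script has run:

# print the UPOS, LAS and MLAS rows (dev and test) of one result file
with open("result4-8000/result", encoding="utf-8") as r:
    for line in r:
        if line.split("|")[0].strip() in ("UPOS", "LAS", "MLAS"):
            print(line.rstrip())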

Working environment

mdx 1GPU (NVIDIA A100-SXM4-40GB)

/bin/sh script

#! /bin/sh
# download the UD_Classical_Chinese-Kyoto treebank (shallow clone)
URL=https://github.com/UniversalDependencies/UD_Classical_Chinese-Kyoto
D=`basename $URL`
test -d $D || git clone --depth=1 $URL
# blank out the FEATS column of each split and extract its raw sentences
for F in train dev test
do nawk -F'\t' '{OFS=FS;if(NF==10)$6="_";print}' $D/*-$F*.conllu > $F.conllu
   sed -n 's/^# text = //p' $F.conllu > $F.txt
done
# one-sentence-per-line token corpus from the train split (tokens separated by spaces)
S='{if(NF==10&&$1~/^[1-9][0-9]*$/)printf($1>1?" %s":"%s",$2);if(NF==0)print}'
nawk -F'\t' "$S" $D/*-train.conllu > token.txt
# fetch the evaluation script of the CoNLL 2018 shared task
U=http://universaldependencies.org/conll18/conll18_ud_eval.py
C=`basename $U`
test -f $C || curl -LO $U
# grid over maximum token length M and maximum vocabulary size V
for M in 1 2 4 8 16
do for V in 8000 16000 32000 64000
   do # build tokenizer.json and a RoBERTa masked LM unless roberta$M-$V exists;
      # the shell prepends m,v=$M,$V as the first line of the quoted Python code
      test -d roberta$M-$V || python3 -c m,v=$M,$V'
from transformers import (RemBertTokenizerFast,RobertaConfig,RobertaForMaskedLM,
  DataCollatorForLanguageModeling,TrainingArguments,Trainer)
from tokenizers import (Tokenizer,models,pre_tokenizers,normalizers,processors,
  decoders,trainers)
s=["[CLS]","[PAD]","[SEP]","[UNK]","[MASK]"]
spt=Tokenizer(models.Unigram())
spt.pre_tokenizer=pre_tokenizers.Whitespace()
spt.normalizer=normalizers.Sequence([normalizers.Nmt(),normalizers.NFKC()])
spt.post_processor=processors.TemplateProcessing(single="[CLS] $A [SEP]",
  pair="[CLS] $A [SEP] $B:1 [SEP]:1",special_tokens=[("[CLS]",0),("[SEP]",2)])
spt.decoder=decoders.WordPiece(prefix="",cleanup=True)
# train the Unigram model: vocabulary size v, pieces of at most m characters
spt.train(trainer=trainers.UnigramTrainer(vocab_size=v,max_piece_length=m,
  special_tokens=s,unk_token="[UNK]",n_sub_iterations=2),files=["token.txt"])
spt.save("tokenizer.json")
# wrap tokenizer.json as a transformers fast tokenizer (vocab_file is a dummy)
tkz=RemBertTokenizerFast(tokenizer_file="tokenizer.json",vocab_file="/dev/null",
  do_lower_case=False,keep_accents=True,bos_token="[CLS]",cls_token="[CLS]",
  pad_token="[PAD]",sep_token="[SEP]",unk_token="[UNK]",mask_token="[MASK]",
  model_max_length=512)
t=tkz.convert_tokens_to_ids(s)
cfg=RobertaConfig(hidden_size=768,num_hidden_layers=12,num_attention_heads=12,
  intermediate_size=3072,max_position_embeddings=tkz.model_max_length,
  vocab_size=len(tkz),tokenizer_class=type(tkz).__name__,
  bos_token_id=t[0],pad_token_id=t[1],eos_token_id=t[2])
arg=TrainingArguments(num_train_epochs=8,per_device_train_batch_size=64,
  output_dir="/tmp",overwrite_output_dir=True,save_total_limit=2)
class ReadLineDataset(object):
  def __init__(self,file,tokenizer):
    self.tokenizer=tokenizer
    with open(file,"r",encoding="utf-8") as r:
      self.lines=[s.strip() for s in r if s.strip()!=""]
  __len__=lambda self:len(self.lines)
  __getitem__=lambda self,i:self.tokenizer(self.lines[i],truncation=True,
    add_special_tokens=True,max_length=self.tokenizer.model_max_length-2)
trn=Trainer(args=arg,data_collator=DataCollatorForLanguageModeling(tkz),
  model=RobertaForMaskedLM(cfg),train_dataset=ReadLineDataset("train.txt",tkz))
trn.train()
trn.save_model("roberta{}-{}".format(m,v))
tkz.save_pretrained("roberta{}-{}".format(m,v))'
      # fine-tune for UPOS tagging and dependency parsing with esupar
      test -d upos$M-$V || python3 -m esupar.train roberta$M-$V upos$M-$V .
      test -f result$M-$V/result && continue
      mkdir -p result$M-$V
      # parse the raw dev and test sentences with the fine-tuned parser
      for F in dev test
      do cat $F.txt | python3 -c 'mdl,f="upos'$M-$V'","result'$M-$V/$F'.conllu"
import esupar
nlp=esupar.load(mdl)
with open(f,"w",encoding="utf-8") as w:
  while True:
    try:
      doc=nlp(input().strip())
    except:
      quit()
    print(doc,file=w)'
      done
      # score the system output against the gold treebank
      ( echo '***' upos$M-$V dev
        python3 $C -v dev.conllu result$M-$V/dev.conllu
        echo '***' upos$M-$V test
        python3 $C -v test.conllu result$M-$V/test.conllu
      ) | tee result$M-$V/result
   done
done
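Once the script has finished, each fine-tuned parser can be loaded with esupar directly. A minimal usage sketch; upos4-8000 is just one of the twenty resulting directories, and the sentence is an arbitrary example:

# parse one Classical Chinese sentence and print the CoNLL-U result
import esupar
nlp = esupar.load("upos4-8000")
doc = nlp("不入虎穴不得虎子")
print(doc)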