from __future__ import annotations
from typing import NamedTuple
import MeCab
from transformers import PreTrainedTokenizer


class MeCabResult(NamedTuple):
    """Typed container for a single MeCab analysis result."""

    hyosokei: str            # surface form
    hinshi: str              # part of speech
    hinshi_saibunrui_1: str  # POS subcategory 1
    hinshi_saibunrui_2: str  # POS subcategory 2
    hinshi_saibunrui_3: str  # POS subcategory 3
    katsuyokei_1: str        # conjugation type
    katsuyokei_2: str        # conjugation form
    genkei: str              # base (dictionary) form
    yomi: str                # reading
    hatsuon: str             # pronunciation
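
# As an illustration (assuming the default IPAdic dictionary; other
# dictionaries emit different feature columns), a node for "走る" yields:
#   surface: 走る
#   feature: 動詞,自立,*,*,五段・ラ行,基本形,走る,ハシル,ハシル
# which maps onto:
#   MeCabResult("走る", "動詞", "自立", "*", "*",
#               "五段・ラ行", "基本形", "走る", "ハシル", "ハシル")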


class MeCabTokenizer(PreTrainedTokenizer):

    def __init__(self,
                 hinshi: list[str] | None = None,
                 mecab_dicdir: str | None = None,
                 **kwargs):
        """Initialize the tokenizer.

        Args:
            hinshi (list[str] | None): Parts of speech to extract.
                If None, all words are kept.
            mecab_dicdir (str | None, optional): Directory containing dicrc.
        """
        self.target_hinshi = hinshi
        if mecab_dicdir is not None:
            self.mecab = MeCab.Tagger(f"-d {mecab_dicdir}")
        else:
            self.mecab = MeCab.Tagger()
        super().__init__(**kwargs)
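
    # To use a custom dictionary, pass the directory that holds its dicrc,
    # e.g. (the path below is purely illustrative):
    #   tokenizer = MeCabTokenizer(mecab_dicdir="/usr/lib/mecab/dic/ipadic")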
    def _tokenize(self, text: str) -> list[str]:
        """Return the words in a sentence, optionally filtered by part of speech.

        Args:
            text (str): Sentence to tokenize.

        Returns:
            list[str]: Surface forms of the matching words.
        """
        out = []
        # Analyze the sentence with MeCab.
        result_words = self.mecab_analyze(text)
        for result_word in result_words:
            # The first and last nodes (BOS/EOS) have an empty surface form.
            if result_word.hyosokei == "":
                continue
            # If target parts of speech are set, keep only matching words.
            if self.target_hinshi is not None:
                if result_word.hinshi in self.target_hinshi:
                    out.append(result_word.hyosokei)
            else:
                out.append(result_word.hyosokei)
        return out
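
    # Sketch of the expected POS filtering (assuming the default IPAdic
    # dictionary; the sentence is illustrative):
    #   MeCabTokenizer(hinshi=["名詞"])._tokenize("今日は良い天気です。")
    #   -> ["今日", "天気"]   # only the nouns survive the filter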
    def mecab_analyze(self, text: str) -> list[MeCabResult]:
        """Analyze a sentence with MeCab.

        Args:
            text (str): Sentence to analyze.

        Returns:
            list[MeCabResult]: One result per morpheme, including the
                BOS/EOS nodes, whose surface form is empty.
        """
        node = self.mecab.parseToNode(text)
        out = []
        # Process the morphemes one by one.
        while node:
            args = [node.surface]
            feature = node.feature.split(",")
            args.extend(feature)
            # Unknown words can carry fewer feature columns (e.g. no reading
            # or pronunciation), so pad to the 10 fields MeCabResult expects.
            args.extend(["*"] * (10 - len(args)))
            mecab_result = MeCabResult(*args[:10])
            out.append(mecab_result)
            node = node.next
        return out
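

# A minimal usage sketch, assuming a transformers version that allows
# constructing a PreTrainedTokenizer subclass without a vocabulary file
# (newer releases may additionally require get_vocab() to be implemented).
if __name__ == "__main__":
    tokenizer = MeCabTokenizer()
    # The classic MeCab demo sentence; with the default IPAdic dictionary
    # this should print ['すもも', 'も', 'もも', 'も', 'もも', 'の', 'うち'].
    print(tokenizer._tokenize("すもももももももものうち"))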