from opencc import OpenCC
from h_corpus import Fileset
import unicodedata
import json
import re

# Traditional-to-Simplified Chinese converter
cc = OpenCC('t2s')


def fullwidth_to_halfwidth(input_str):
    # NFKC normalization folds full-width characters into their half-width forms
    return ''.join(unicodedata.normalize('NFKC', char) for char in input_str)


def clearT(s):
    # Normalize width, convert Traditional Chinese to Simplified, then strip leftover
    # newline-escape characters at the edges and escape real newlines so each entry
    # stays on a single output line
    s = cc.convert(fullwidth_to_halfwidth(s))
    return s.lstrip('n').strip().strip(r'\n').replace('\n', '\\n')

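# Illustrative examples (assumed inputs, not taken from the game data):
#   clearT('「Ｈｅｌｌｏ」\n') -> '「Hello」'   (width normalized, trailing newline stripped)
#   clearT('繁體字')           -> '繁体字'       (Traditional converted to Simplified)
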

def custom_sort_key(s):
    # Natural sort key: split into digit and non-digit chunks so numeric parts
    # compare as numbers ("2" before "10") instead of as text
    return [int(x) if x.isdigit() else x for x in re.split('([0-9]+)', s)]

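# Illustrative example (assumed file names):
#   sorted(['book10.json', 'book2.json'], key=custom_sort_key)
#   -> ['book2.json', 'book10.json']   (numeric rather than lexicographic order)
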

# Voice-name prefix -> speaker display name
_n = {
    "mob": "mob",
    "emi": "绘美",
    "kanna": "栞奈",
    "jin": "萧然",
    "sakura": "佐仓",
    "kou": "小墨",
    "mama": "妈妈",
    "ryopa": "景怡",
    "ryoma": "思秋",
    "sen": "文茜",
    "chimob": "chimob",
    "ryou": "和平",
    "sanae": "早苗",
    "higuchi": "樋口",
    "emimama": "emimama"
}

# Collect the exported MonoBehaviour JSON files and sort them in natural order
a = Fileset(r'E:\tmp\ChristmasTina\chapter\MonoBehaviour', ext='.json')
sorted_indices = sorted(range(len(a)), key=lambda i: custom_sort_key(a[i]))
a = [a[i] for i in sorted_indices]
b = r'D:\datasets\tmp'  # output directory
sc = {}  # chapter name -> list of "speaker:line" strings

for i in range(len(a)):
    path = a[i]
    # Base file name without directory or extension
    name = path[path.rindex('\\') + 1:path.rindex('.')]
    print(name)
    # Collapse per-book file names into a single chapter key
    if 'DLC01' in name or 'DLC04' in name:
        name = name.rstrip('.book')
    else:
        name = name.rstrip('.book0123456789')

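    # Illustrative only: an assumed name like 'tina01.book3' collapses to 'tina',
    # because rstrip removes every trailing character drawn from '.book0123456789',
    # including digits that belong to the chapter id itself.
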
    if name not in sc:
        sc[name] = []
    with open(path, encoding='utf-8') as f:
        tmp = json.load(f)
    tmp = tmp['importGridList']
    for rows in tmp:
        rows = rows['rows']
        # The first row holds the column headers; the remaining rows are script lines
        args = rows[0]['strings']
        rows = rows[1:]
        idx_t = args.index('Text')
        idx_v = args.index('Voice')

        for row in rows:
            row = row['strings']
            # Skip rows that have no Text column
            if len(row) < idx_t + 1:
                continue
            d = clearT(row[idx_t])
            if len(row) < idx_v + 1 or not row[idx_v]:
                # No voice file: treat the line as narration
                n = '旁白'
            else:
                n = row[idx_v]
                if 'emimama' in n:
                    n = 'emimama'
                else:
                    if n.startswith('DLC'):
                        # Drop the "DLCxx/" prefix
                        n = n[6:]
                    # Keep only the character prefix of the voice path
                    n = n[:n.rindex('/')].rstrip('0123456789')
                # Map the prefix to its display name
                n = _n[n]
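                # Illustrative only: with an assumed Voice value such as 'emi001/emi001_012',
                # the slicing above reduces it to the prefix 'emi', which _n maps to '绘美'.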
            if d:
                sc[name].append(n + ':' + d)

# Write one text file per chapter, one "speaker:line" entry per line
for k, v in sc.items():
    with open(b + f'\\{k}.txt', 'w', encoding='utf-8') as f:
        f.write('\n'.join(v))

# Name table serialized for reference (not written to disk here)
tmp = json.dumps(_n, ensure_ascii=False, indent=2)