Datasets:
Languages: Chinese
Tags: Not-For-All-Audiences
License:

Limour committed · Commit e132358 (verified) · 1 Parent(s): de487bb

恋狱~月狂病@不化之雪汉化组 (Chinese localization by the 不化之雪 group)

v-corpus-zh/InnocentGrey/恋狱~月狂病/0.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c535e4e3280e427b9ccfe66f75e29dbae022dd1598688c1ed59e2b9f53fff7c5
+ size 456238
v-corpus-zh/InnocentGrey/恋狱~月狂病/a.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6edd359422b747e3872ef917354e9f0af08f33187deb3d9148ebb6b06970daad
+ size 63048
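
The two .txt.gz entries above are Git LFS pointer files: the diff records only the spec version, the SHA-256 oid, and the byte size, while the gzip-compressed corpus text itself lives in LFS storage. A minimal sketch of reading one route file, assuming the repository has been cloned locally and the real files fetched with git lfs pull; the "speaker:text" line layout is taken from the extraction script below:

import gzip

# Illustrative local path to one of the LFS-tracked corpus files (after git lfs pull).
path = 'v-corpus-zh/InnocentGrey/恋狱~月狂病/0.txt.gz'

with gzip.open(path, 'rt', encoding='utf-8') as f:
    for raw in f:
        # Each line is expected to be "speaker:text", e.g. "旁白:..." (see the script below).
        speaker, _, text = raw.rstrip('\n').partition(':')
        print(speaker, text)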
v-corpus-zh/InnocentGrey/恋狱~月狂病/恋狱~月狂病.py ADDED
@@ -0,0 +1,150 @@
+ def get_all_files_in_directory(directory, ext=''):
+     import os
+     import re
+     custom_sort_key_re = re.compile('([0-9]+)')
+
+     def custom_sort_key(s):
+         # Convert the numeric parts of the path to integers so files sort in natural order
+         return [int(x) if x.isdigit() else x for x in custom_sort_key_re.split(s)]
+
+     all_files = []
+     for root, dirs, files in os.walk(directory):
+         for file in files:
+             if file.endswith(ext):
+                 file_path = os.path.join(root, file)
+                 all_files.append(file_path)
+     return sorted(all_files, key=custom_sort_key)
+
+
+ def clearT():
+     import unicodedata
+     from opencc import OpenCC
+
+     def full2half(input_str):
+         # Normalize full-width characters to their half-width equivalents
+         return ''.join([unicodedata.normalize('NFKC', char) for char in input_str])
+
+     cc = OpenCC('t2s')  # 't2s' converts Traditional Chinese to Simplified Chinese
+
+     def _clearT(s):
+         s = cc.convert(full2half(s))
+         return s.strip().replace('\n', '\\n')
+
+     return _clearT
+
+
+ clearT = clearT()
+
+
+ def simpleSplit(_s: str, _sp, _st=0, _shift=True):
+     # Split _s at the first occurrence of _sp at or after index _st
+     _idx = _s.index(_sp, _st)
+     if _shift:
+         return _s[:_idx], _s[_idx + len(_sp):]
+     else:
+         return _s[:_idx], _s[_idx:]
+
+
+ # =================
+
+ a = get_all_files_in_directory(r'E:\tmp\恋狱~月狂病\text', ext='')
+ b = r'E:\tmp\恋狱~月狂病\c'
+
+ # =================
+
+ sc = {}
+
+ # Mapping from raw speaker tags in the game script to cleaned speaker names
+ _n = {
+     "@秋五": "秋五",
+     "@有岛": "有岛",
+     "@???": "?",
+     "@和菜": "和菜",
+     "@庆一郎": "庆一郎",
+     "@由良": "由良",
+     "@老爹": "老爹",
+     "@凛": "凛",
+     "@初音": "初音",
+     "@乙羽": "乙羽",
+     "@七七": "七七",
+     "@小雪": "小雪",
+     "@冬史": "冬史",
+     "@良子": "良子",
+     "@男": "男",
+     "@大妈": "大妈",
+     "@小孩": "小孩",
+     "@板长": "板长",
+     "@修女": "修女",
+     "@警察": "警察",
+     "@围观者": "围观者",
+     "@八木沼": "八木沼",
+     "@雨雀": "雨雀",
+     "@厨师长": "厨师长",
+     "@领班": "领班",
+     "@楼子": "楼子",
+     "@赤尾": "赤尾",
+     "@信徒": "信徒",
+     "@时子": "时子",
+     "@信者": "信者",
+     "@芹": "芹",
+     "@樱": "樱",
+     "@警察官": "警察官",
+     "@刑事": "刑事",
+     "@护士": "护士",
+     "@护士小姐": "护士小姐",
+     "@薇奥莉塔": "薇奥莉塔",
+     "@阿弗列德": "阿弗列德",
+     "@乔治": "乔治",
+     "@男爵": "男爵",
+     "@侍女": "侍女",
+     "@少女": "少女"
+ }
+
+ for path in a:
+     # Group files by the first character of the file name (the route key, e.g. '0' or 'a')
+     name = path[path.rindex('\\') + 1:]
+     name = name[:1]
+
+     if name not in sc:
+         sc[name] = []
+         print(name)
+     # =================
+
+     with open(path, 'r', encoding='utf-8') as f:
+         data = list(filter(lambda x: x,
+                            (x.rstrip() for x in f.readlines())))
+
+     # =================
+     n = '旁白'  # default speaker: narrator
+     w_i = -1
+     prefix = ''
+     while w_i < len(data) - 1:
+         w_i += 1
+         line: str = data[w_i]
+         # =================
+         if '|' not in line:
+             continue
+         tmp, line = simpleSplit(line, '|')
+         if tmp != prefix:
+             # The first line with a new prefix is skipped; the following line with the same prefix is kept
+             prefix = tmp
+             continue
+         if line.startswith('@'):
+             n, line = simpleSplit(line, '@', 1)
+             if n in _n:
+                 n = _n[n]
+             else:
+                 # Register an unseen speaker tag and print the line for manual review
+                 _n[n] = clearT(n.strip('@')).replace('/', '&')
+                 print(line)
+         else:
+             n = '旁白'
+
+         d = clearT(line)
+         if d:
+             sc[name].append(n + ':' + d)
+
+ # =================
+ for k, v in sc.items():
+     if v:
+         with open(b + f'\\{k}.txt', 'w', encoding='utf-8') as f:
+             f.write('\n'.join(v))
+
+ # =================
+ import json
+
+ # Serialize the (possibly extended) speaker-name map as JSON for inspection
+ tmp = json.dumps(_n, ensure_ascii=False, indent=4)
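
The script above writes each route to a plain .txt file under b and only builds the JSON name map in memory; the .txt.gz files added in this commit would then come from compressing those outputs. A minimal, hypothetical sketch of that final step (the paths, the route keys '0' and 'a', and the names.json file name are assumptions, not part of the script):

import gzip
import shutil

b = r'E:\tmp\恋狱~月狂病\c'  # output directory used by the script above
for k in ('0', 'a'):  # route keys matching the files added in this commit
    # gzip-compress the plain-text corpus file produced by the script
    with open(b + f'\\{k}.txt', 'rb') as src, gzip.open(b + f'\\{k}.txt.gz', 'wb') as dst:
        shutil.copyfileobj(src, dst)

# The speaker-name map is only serialized into tmp; writing it out could look like this:
# with open(b + '\\names.json', 'w', encoding='utf-8') as f:
#     f.write(tmp)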