Languages: Chinese
Tags: Not-For-All-Audiences

Limour committed · commit 95c569c (verified) · 1 parent: d82f74c

Upload 2 files

v-corpus-zh/IdleFancy/幼性反应/1.txt.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ad66891e39beaa8769f86815d3e12926306e8879eaa6fbf0360a5b9304b7a15
+ size 18352
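
The file above is only a Git LFS pointer: the stored object is an 18,352-byte gzip archive of a UTF-8 text transcript, materialized locally by running git lfs pull. A minimal reading sketch follows; the local path and the print loop are illustrative assumptions, not part of the commit.

import gzip

# Assumed local path after cloning the dataset and running `git lfs pull`
path = 'v-corpus-zh/IdleFancy/幼性反应/1.txt.txt.gz'

# The corpus file is plain UTF-8 text compressed with gzip,
# one speaker-prefixed line per utterance as produced by the script below.
with gzip.open(path, 'rt', encoding='utf-8') as f:
    for line in f:
        print(line.rstrip('\n'))
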
v-corpus-zh/IdleFancy/幼性反应/幼性反应_ks.py ADDED
@@ -0,0 +1,151 @@
+ def get_all_files_in_directory(directory, ext=''):
+     import os
+     import re
+     custom_sort_key_re = re.compile('([0-9]+)')
+ 
+     def custom_sort_key(s):
+         # Natural sort: convert the numeric parts of the string to integers, then sort
+         return [int(x) if x.isdigit() else x for x in custom_sort_key_re.split(s)]
+ 
+     all_files = []
+     for root, dirs, files in os.walk(directory):
+         for file in files:
+             if file.endswith(ext):
+                 file_path = os.path.join(root, file)
+                 all_files.append(file_path)
+     return sorted(all_files, key=custom_sort_key)
+ 
+ 
+ def clearT():
+     import unicodedata
+     from opencc import OpenCC
+ 
+     def full2half(input_str):
+         # Normalize full-width characters to half-width
+         return ''.join([unicodedata.normalize('NFKC', char) for char in input_str])
+ 
+     cc = OpenCC('t2s')  # 't2s' converts Traditional Chinese to Simplified
+ 
+     def _clearT(s):
+         # Normalize width and script, trim leading 'n' runs and literal '\n' markers
+         # at the ends, then escape real newlines as the two-character sequence '\n'
+         s = cc.convert(full2half(s))
+         return s.lstrip('n').strip().strip(r'\n').replace('\n', '\\n')
+ 
+     return _clearT
+ 
+ 
+ clearT = clearT()
+ 
+ 
+ def startsWithAny(s: str, keys):
+     # Return the first key that s starts with; False if none match (for-else on the loop)
+     for x in keys:
+         if s.startswith(x):
+             return x
+     else:
+         return False
+ 
+ 
+ def startsWithAlnum(s: str, _chrs):
+     # Collect the leading run of alphanumeric characters (plus any in _chrs)
+     retn = ''
+     for char in s:
+         if (char.isalnum()) or (char in _chrs):
+             retn += char
+         else:
+             break
+     return retn
+ 
+ 
+ def listRfind_idx(_i, _data, _condition):
+     # Scan backwards from _i - 1 for the nearest index whose line matches _condition
+     for j in range(_i - 1, -1, -1):
+         if _condition(_data[j]):
+             return j
+     return -1
+ 
+ 
+ def listRfind(_i, _data, _condition, _filter, delta=1):
+     # Slice from the last matching line up to _i (inclusive), keeping lines that pass _filter
+     return list(filter(_filter, _data[max(listRfind_idx(_i, _data, _condition) + delta, 0):_i + 1]))
+ 
+ 
+ def startsWithCmd(s: str, _chrs=None):
+     # True if the line begins with a command token followed by a space
+     if _chrs is None:
+         _chrs = {'_', '@'}
+     cmd = startsWithAlnum(s, _chrs)
+     if cmd:
+         return s.startswith(cmd + ' ')
+     else:
+         return False
+ 
+ 
+ # =================
+ 
+ a = get_all_files_in_directory(r'E:\tmp\幼性反应', ext='.txt')
+ b = r'D:\datasets\tmp'
+ 
+ # =================
+ 
+ sc = {}
+ 
+ _n = {}
+ 
+ # =================
+ for path in a:
+     name = path[path.rindex('\\'):]
+     if name not in sc:
+         sc[name] = []
+     print(name)
+     # =================
+ 
+     with open(path, 'r', encoding='utf-8') as f:
+         # Keep every non-comment line (';' starts a comment), right-stripped
+         data = list(filter(lambda x: not x.startswith(';'), (x.rstrip() for x in f.readlines())))
+ 
+ 
+     csel = {}
+     w_i = -1
+     while w_i < len(data) - 1:
+         w_i += 1
+         line = data[w_i]
+         if line.startswith('select'):
+             # Option lines look like '"<text>",*<label>'; map jump label -> choice text
+             while w_i < len(data) - 1:
+                 w_i += 1
+                 line = data[w_i]
+                 if not line:
+                     break
+                 line = line.split(',*')
+                 assert len(line) == 2
+                 csel[line[1].rstrip(',')] = '选择:' + clearT(line[0].strip('"')[1:])
+             # print(csel)
+             continue
+         if line.startswith('*') and line.isascii():
+             # Jump label: emit the recorded choice text as narration
+             line = line[1:]
+             if line in csel:
+                 sc[name].append('旁白' + ':' + csel[line])
+             continue
+         if not line.endswith('\\'):
+             # Text blocks end with a trailing backslash; skip everything else
+             continue
+         tmp = listRfind(w_i, data,
+                         lambda x: x.startswith('dwave ') or x.endswith('\\') or not x,
+                         lambda x: (x.startswith('dwave ') or not startsWithCmd(x)) and not(x.startswith('bgm')),
+                         delta=0)
+         if (len(tmp) > 1 and tmp[0].endswith('\\')) or not tmp[0]:
+             tmp = tmp[1:]
+         if not tmp:
+             continue
+         if tmp[0].startswith('dwave '):
+             # A leading dwave (voice) command marks a spoken line
+             n = '由佳'
+             tmp = tmp[1:]
+             assert tmp
+         else:
+             n = '旁白'
+         d = '\\n'.join(tmp)
+         d = clearT(d)
+         if d:
+             sc[name].append(n + ':' + d)
+ 
+ 
+ # =================
+ 
+ for k, v in sc.items():
+     # k keeps its leading '\' and '.txt', so outputs are named like '\1.txt.txt'
+     with open(b + f'\\{k}.txt', 'w', encoding='utf-8') as f:
+         f.write('\n'.join(v))
+ 
+ # =================
+ import json
+ 
+ # _n is never filled in this script, so tmp is just '{}'
+ tmp = json.dumps(_n, ensure_ascii=False, indent=2)
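
The script writes one plain-text transcript per source file; since the keys keep their '.txt' suffix, the outputs end in '.txt.txt', matching the '1.txt.txt.gz' uploaded in this commit. A possible compression step to produce those .gz files is sketched below; the glob pattern and the out_dir variable are assumptions, not part of the committed script.

import glob
import gzip
import shutil

out_dir = r'D:\datasets\tmp'  # same directory the script writes to (variable b above)

# Hypothetical post-processing: gzip each extracted transcript so it can be
# uploaded as <name>.txt.txt.gz, like the corpus file in this commit.
for src in glob.glob(out_dir + r'\*.txt.txt'):
    with open(src, 'rb') as fin, gzip.open(src + '.gz', 'wb') as fout:
        shutil.copyfileobj(fin, fout)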