Languages: Chinese
Tags: Not-For-All-Audiences
License:
Limour committed
Commit 7f4f0fd · verified · 1 Parent(s): 0dccccc

Upload 7 files
v-corpus-zh/FlyingShine/C&C/CROSS_CHANNEL/CCA.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8205972a0731dfb680f76d2c75d16a029e94c2511bd3770550074cf6efe96723
+ size 108458
v-corpus-zh/FlyingShine/C&C/CROSS_CHANNEL/CCB.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f8da3a449eee840f7388480a1fc69db5c216a93126575d74716e7675754e997
+ size 153805
v-corpus-zh/FlyingShine/C&C/CROSS_CHANNEL/CCC.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e5ba0fc8fc0fab0124888c7f0f739c46dd09106d50cbcea8dac092c3cc1df88
+ size 223157
v-corpus-zh/FlyingShine/C&C/CROSS_CHANNEL/CCD.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8909ec9caec4a5cc57645ce0ec42672dbe868c11c0408e2fb1a4a9ff0a279c41
+ size 184053
v-corpus-zh/FlyingShine/C&C/CROSS_CHANNEL/CCE.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf370f4e5a76066d44159b7fbf6ba027e6dbfee8064e06893975048cf42437ab
+ size 10654
v-corpus-zh/FlyingShine/C&C/CROSS_CHANNEL/CCX.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6f8542d225777d07897c73141396301a71df8aefb3fe21f5646f0aaf2ef3c72
+ size 19504
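
The six .txt.gz entries above are Git LFS pointers: the diff records only a sha256 oid and a byte size, and the gzipped text itself has to be fetched (for example with git lfs pull) before it can be used. A minimal sketch, assuming the CCA object has already been fetched into a local checkout at the path shown in the diff, that checks a downloaded file against its pointer:

import hashlib
import os

# Values copied from the CCA pointer above; swap in another route's oid/size as needed.
path = 'v-corpus-zh/FlyingShine/C&C/CROSS_CHANNEL/CCA.txt.gz'
expected_oid = '8205972a0731dfb680f76d2c75d16a029e94c2511bd3770550074cf6efe96723'
expected_size = 108458

h = hashlib.sha256()
with open(path, 'rb') as f:
    for chunk in iter(lambda: f.read(1 << 20), b''):
        h.update(chunk)

assert os.path.getsize(path) == expected_size, 'size mismatch'
assert h.hexdigest() == expected_oid, 'sha256 mismatch'
print('CCA.txt.gz matches its LFS pointer')
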
v-corpus-zh/FlyingShine/C&C/CROSS_CHANNEL/c_c.py ADDED
@@ -0,0 +1,110 @@
+ import json
+ import os
+ import re
+ import unicodedata
+ from opencc import OpenCC
+
+
+ def get_all_files_in_directory(directory, ext=''):
+     custom_sort_key_re = re.compile('([0-9]+)')
+
+     def custom_sort_key(s):
+         # Convert the numeric substrings to integers so file names sort in natural (numeric) order
+         return [int(x) if x.isdigit() else x for x in custom_sort_key_re.split(s)]
+
+     all_files = []
+     for root, dirs, files in os.walk(directory):
+         for file in files:
+             if file.endswith(ext):
+                 file_path = os.path.join(root, file)
+                 all_files.append(file_path)
+     return sorted(all_files, key=custom_sort_key)
+
+
+ def clearT():
+     def full2half(input_str):
+         return ''.join([unicodedata.normalize('NFKC', char) for char in input_str])
+
+     cc = OpenCC('t2s')  # 't2s' converts Traditional Chinese to Simplified Chinese
+
+     def _clearT(s):
+         # Normalize width, convert to Simplified, strip stray newline artifacts,
+         # then escape remaining real newlines as the literal two characters '\n'
+         s = cc.convert(full2half(s))
+         return s.lstrip('n').strip().strip(r'\n').replace('\n', '\\n')
+
+     return _clearT
+
+
+ clearT = clearT()  # replace the factory with the actual cleaning function
+
+ a = get_all_files_in_directory(r'E:\tmp\cross-channel\TXT', ext='.TXT')
+
+ _n = {  # speaker-name normalization map (raw label in the TXT -> label used in the corpus)
+     "少女": "少女",
+     "太一": "太一",
+     "冬子": "冬子",
+     "社长": "社长",
+     "七香": "七香",
+     "见里": "见里",
+     "友贵": "友贵",
+     "美幸": "美幸",
+     "重藏": "重藏",
+     "美希": "美希",
+     "少年": "少年",
+     "我": "我",
+     "樱庭": "樱庭",
+     "游纱": "游纱",
+     "雾": "雾",
+     "雾·太一": "雾&太一",
+     "新川": "新川",
+     "一个声音": "声音",
+     "阿姨": "阿姨",
+     "岳母": "岳母",
+     "二人": "二人",
+     "女声": "女声",
+     "老控手道大师": "老控手道大师",
+     "冬子·见里·美希·友贵": "冬子&见里&美希&友贵",
+     "曜子": "曜子",
+     "三人": "三人",
+     "*": "?",
+     "**": "?",
+     "声音": "声音",
+     "曜子老师": "曜子老师",
+     "丰": "丰",
+     "两人": "两人",
+     "友贵的尸体": "友贵的尸体",
+     "肚子里的虫": "肚子里的虫",
+     "政宗": "政宗"
+ }
+
+ b = r'D:\datasets\tmp'
+ sc = {}
+
+ re_n = re.compile(r'^\[(.+?)]([“‘])')  # leading [speaker] tag followed by an opening quote
+
+ for path in a:
+     idx = path.index('CC')
+     name = path[idx:idx+3]  # route id, e.g. 'CCA'
+     if name not in sc:
+         sc[name] = []
+         print(name)
+     with open(path, 'r') as f:  # relies on the platform default encoding of the source TXT
+         for line in f:
+             line = line.strip()
+             n = re_n.findall(line)
+             if len(n) < 1:
+                 n = '旁白'  # narration: no speaker tag on this line
+             else:
+                 n = n[0]
+                 idx = line.index(n[1])  # position of the opening quote
+                 n = n[0]
+                 # if n not in _n:
+                 #     _n[n] = n
+                 n = _n[n]
+                 line = line[idx:]  # drop the [speaker] prefix
+             sc[name].append(n + ':' + clearT(line))
+
+ for k, v in sc.items():
+     with open(b + f'\\{k}.txt', 'w', encoding='utf-8') as f:
+         f.write('\n'.join(v))
+
+ tmp = json.dumps(_n, ensure_ascii=False, indent=2)
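
c_c.py walks the TXT scripts under E:\tmp\cross-channel\TXT, maps each [speaker] tag through the _n table (lines with no tag become 旁白, the narrator), cleans the text with clearT, and writes one route per file as speaker:text lines with embedded newlines escaped as the two characters \n. The CC*.txt.gz files added above appear to be the gzipped results of this step. A minimal sketch, assuming those files are UTF-8 like the script's output and use the same plain-colon separator, for loading one route back into (speaker, utterance) pairs:

import gzip

def load_route(path):
    pairs = []
    with gzip.open(path, 'rt', encoding='utf-8') as f:
        for line in f:
            # Split at the first colon (the separator written by c_c.py)
            # and undo the '\n' escaping applied by clearT().
            speaker, _, text = line.rstrip('\n').partition(':')
            pairs.append((speaker, text.replace('\\n', '\n')))
    return pairs

route_a = load_route('v-corpus-zh/FlyingShine/C&C/CROSS_CHANNEL/CCA.txt.gz')
print(len(route_a), route_a[0])
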