Languages: Chinese
Tags: Not-For-All-Audiences
Commit fb46a6a · verified · 1 Parent(s): 79c3bc4
Limour committed: Upload 4 files
视觉小说/format/ClassTangerine/TinySnow/END1.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31db2e090eba6d141c9cde226e24ca79d1805fd281e1ec2822050232459d405c
+ size 45281
视觉小说/format/ClassTangerine/TinySnow/END2.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c81679eb156d51e102b3b2bb30f83ed383014f41f1798e5a036835d7fbcdd16
+ size 33701
视觉小说/format/ClassTangerine/TinySnow/END3.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b0f21cee09f300ae52b131e1c0b8a36db7a818b65a971a3a5773841851e2650
+ size 43237
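
All three `.txt.gz` files are committed as Git LFS pointers, so the repository stores only the `version`/`oid`/`size` metadata shown above; the actual archives live in LFS storage and are fetched on checkout. A minimal sketch of checking a downloaded blob against its pointer (the `verify_lfs_pointer` helper and both example paths are illustrative assumptions, not part of this commit):

import hashlib
import os

def verify_lfs_pointer(pointer_path, blob_path):
    # Parse the three 'key value' lines of a Git LFS pointer file
    fields = {}
    with open(pointer_path, encoding='utf-8') as f:
        for line in f:
            key, _, value = line.strip().partition(' ')
            fields[key] = value
    # Compare the blob's sha256 digest and byte size against the pointer
    with open(blob_path, 'rb') as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    return (fields.get('oid') == 'sha256:' + digest
            and int(fields.get('size', -1)) == os.path.getsize(blob_path))

# e.g. verify_lfs_pointer('END1.txt.gz.pointer', 'END1.txt.gz')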
视觉小说/format/ClassTangerine/TinySnow/tmp_tinysnow.py ADDED
@@ -0,0 +1,87 @@
+ from opencc import OpenCC
+ cc = OpenCC('t2s')  # 't2s' converts Traditional Chinese to Simplified
+ from h_corpus import Fileset
+ import unicodedata
+ import json, re
+
+ def fullwidth_to_halfwidth(input_str):
+     # NFKC normalization folds full-width characters into half-width ones
+     return ''.join([unicodedata.normalize('NFKC', char) for char in input_str])
+
+ def clearT(s):
+     # Normalize width, convert to Simplified Chinese, then strip stray
+     # newline markers and escape remaining newlines as literal '\n'
+     s = cc.convert(fullwidth_to_halfwidth(s))
+     return s.lstrip('n').strip().strip(r'\n').replace('\n', '\\n')
+
+ def custom_sort_key(s):
+     # Turn digit runs into integers so file names sort in natural order
+     return [int(x) if x.isdigit() else x for x in re.split('([0-9]+)', s)]
+
+ # Speaker-name normalization map; also folds script typos
+ # ("记着", "记录") into the canonical "记者"
+ _n = {
+     "我": "我",
+     "??": "?",
+     "小茸": "小茸",
+     "妹妹": "妹妹",
+     "教授": "教授",
+     "记者": "记者",
+     "记着": "记者",
+     "记录": "记者",
+     "记者A": "记者A",
+     "记者B": "记者B",
+     "记者C": "记者C",
+     "男研究员A": "男研究员A",
+     "女研究员A": "女研究员A",
+     "男研究员B": "男研究员B",
+     "男研究员C": "男研究员C",
+     "女研究员B": "女研究员B",
+     "女研究员C": "女研究员C",
+     "男研究员D": "男研究员D",
+     "记者D": "记者D",
+     "警察": "警察",
+     "狱警": "狱警",
+     "小雪": "小雪",
+     "猪": "猪"
+ }
+
+ a = Fileset(r'E:\tmp\TinySnow', ext='.ks')
+ # Natural sort so 'SECTION 2' precedes 'SECTION 10'
+ sorted_indices = sorted(range(len(a)), key=lambda i: custom_sort_key(a[i]))
+ a = [a[i] for i in sorted_indices]
+ b = r'D:\datasets\tmp'
+ sc = {}
+ reg = re.compile(r'\[[^]]+?]')  # matches engine tags like [...] for removal
+ for i in range(len(a)):
+     path = a[i]
+     tmp = path[path.rindex('\\')+1:path.rindex('.')]
+     print(tmp)
+     if tmp == 'SECTION 01':
+         name = 'END1'
+     elif tmp == 'SECTION 08':
+         name = 'END2'
+     elif tmp == 'SECTION 15':
+         name = 'END3'
+     else:
+         pass  # intermediate sections stay with the current ending
+     print(name)
+     if name not in sc:
+         sc[name] = []
+     with open(path, encoding='utf-16le') as f:
+         for line in f:
+             if '[' not in line and ']' not in line:
+                 continue  # only tagged lines carry dialogue
+             if '【' in line and '】' in line:
+                 # 【...】 marks the speaker; the rest of the line is the text
+                 tmp = line.index('】')
+                 n = line[line.index('【')+1:tmp]
+                 line = line[tmp+1:]
+                 # _n[n] = clearT(n)
+                 n = _n[n]
+             else:
+                 n = '旁白'  # narration
+             # print(line)
+             d = clearT(reg.sub('', line))
+             # print(d)
+             if d:
+                 sc[name].append(n + ':' + d)
+
+ for k, v in sc.items():
+     with open(b + f'\\{k}.txt', 'w', encoding='utf-8') as f:
+         f.write('\n'.join(v))
+
+ # Leftover debug helper: dump the name map built while inspecting the scripts
+ tmp = json.dumps(_n, ensure_ascii=False, indent=2)
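
The script writes one `speaker:text` line per utterance into `END1.txt`, `END2.txt`, and `END3.txt`, which are the files committed above (gzipped). A minimal sketch of reading one back, assuming the output directory `b` from the script above:

# Read back one generated transcript; '旁白' marks narration lines.
with open(r'D:\datasets\tmp\END1.txt', encoding='utf-8') as f:
    for line in f:
        speaker, _, text = line.rstrip('\n').partition(':')
        print(speaker, '->', text)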