Languages: Chinese
Tags: Not-For-All-Audiences

Limour committed ba6ef87 (verified) · parent 0a7d63e

可塑性记忆 @ chenjunan02

v-corpus-zh/5pb/可塑性记忆/0.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:943efcbc47161d6c3e8ac2848b26cb4adb355ae789081259fcd925ad99e50f20
+ size 421864
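
The added 0.txt.gz is only a Git LFS pointer; the gzipped corpus itself lives in LFS storage. Below is a minimal sketch of reading it back after a git lfs pull, assuming each line follows the "speaker:dialogue" format that the extraction script in this commit writes out:

import gzip

# Minimal sketch, assuming the real file has been fetched via `git lfs pull`
# and that each line is "speaker:dialogue" as written by 可塑性记忆_scn.py below.
with gzip.open('v-corpus-zh/5pb/可塑性记忆/0.txt.gz', 'rt', encoding='utf-8') as f:
    for i, line in enumerate(f):
        speaker, _, dialogue = line.rstrip('\n').partition(':')
        print(speaker, dialogue)
        if i >= 4:  # preview the first few lines only
            break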
v-corpus-zh/5pb/可塑性记忆/可塑性记忆_scn.py ADDED
@@ -0,0 +1,155 @@
+ # https://github.com/UlyssesWu/FreeMote/issues/142
+ # https://www.cnblogs.com/woqux02/p/13615513.html
+
+ import sys
+ sys.path.append('..')
+
+ from common import clearT, get_all_files_in_directory
+ # https://huggingface.co/datasets/Limour/archvie/blob/main/v-corpus-zh/common.py
+
+ # =================
+
+ a = get_all_files_in_directory(r'F:\galgame\可塑性记忆\test\scenario', ext='.txt.scn.m.json')
+ b = r'F:\galgame\可塑性记忆\txt'
+
+ # =================
+
+ import json
+ with open(r'F:\galgame\可塑性记忆\test\scenario\scenelist.scn.m.json', 'r', encoding='utf-8') as f:
+     tmp = json.load(f)
+ tmp = [(k,v) for k,v in tmp['map'].items()]
+ tmp.sort(key=lambda x:x[1])
+ s_a = []
+ for _path in tmp:
+     for i,path in enumerate(a):
+         if path.find(_path[0]) > 0:
+             s_a.append(a.pop(i))
+             continue
+ a = s_a
+ # =================
+
+ sc = {}
+ _n = {
+     "アイラ" : "艾拉",
+     "水柿司" : "水柿司",
+     "受付嬢" : "收营员",
+     "水柿司" : "水柿司",
+     "山野边" : "山野边",
+     "香月" : "香月",
+     "康斯坦斯" : "康斯坦斯",
+     "夏莉" : "夏莉",
+     "扎克" : "扎克",
+     "连也" : "连也",
+     "满" : "满",
+     "艾拉" : "艾拉",
+     "岩井女士" : "岩井女士",
+     "岩井先生" : "岩井先生",
+     "爱德华" : "爱德华",
+     "千津" : "千津",
+     "米兹克" : "米兹克",
+     "妮娜" : "妮娜",
+     "莲也" : "莲也",
+     "康隆" : "康隆",
+     "雪莉" : "雪莉",
+     "柳裕" : "柳裕",
+     "艾露" : "艾露",
+     "莲" : "莲",
+     "店員" : "店员",
+     "香草店的店员" : "香草店的店员",
+     " " : "?",
+     "玛莎" : "玛莎",
+     "桑太" : "桑太",
+     " " : "?",
+     "警備員A" : "警察A",
+     "東雲" : "东云",
+     "美娜可" : "美娜可",
+     "安迪" : "安迪",
+     "莉合" : "莉合",
+     "null" : "旁白",
+     "莎拉" : "莎拉",
+     "安东尼奥" : "安东尼奥",
+     "黑手党男" : "黑手党男",
+     "少女" : "少女",
+     "少年" : "少年",
+     "女仆" : "女仆",
+     "短发少年" : "短发少年",
+     "高个子少年" : "高个子少年",
+     "伍堂" : "伍堂",
+     "萌葱博士" : "萌葱博士",
+     "男性社員" : "男社员",
+     "馆内放送" : "馆内广播",
+     "服务员" : "服务员",
+     "妇人A" : "妇人A",
+     "妇人B" : "妇人B",
+     "妇人C" : "妇人C",
+     "兔子先生" : "兔子先生",
+     "企鹅饲养员" : "企鹅饲养员",
+     "海象饲养员" : "海象饲养员",
+     "咖啡店员" : "咖啡店员",
+     "幸子" : "幸子",
+     "辛子" : "幸子",
+     "蒂姆" : "蒂姆",
+     "女主" : "女主",
+     "主人公" : "主人公",
+     "须美" : "须美",
+     "タツル" : "水柿苏",
+     "切尔西" : "切尔西",
+     "广播" : "广播",
+     "司" : "司",
+     "有雀斑的女孩" : "有雀斑的女孩",
+     "佐堂" : "佐堂"
+ }
+
+ # =================
+ import json
+
+ for path in a:
+     name = path[path.rindex('\\') + 1:]
+     name = '0'
+     if name not in sc:
+         sc[name] = []
+     print(name)
+     # =================
+
+     with open(path, 'r', encoding='utf-8') as json_file:
+         data = json.load(json_file)
+
+     # =================
+     for texts in data['scenes']:
+         try:
+             for texts in texts['texts']:
+                 # print(texts)
+                 # continue
+                 # texts = texts[2][1]
+                 # ['雪莉', None, '听说恩比对康隆厌烦了,提出了调动申请。',
+                 n = texts[0]
+                 if not n:
+                     n = '旁白'
+                 else:
+                     if n in _n:
+                         n = _n[n]
+                     else:
+                         _n[n] = clearT(n).replace('・', '&')
+                         n = _n[n]
+                         print(texts, n)
+
+                 # =================
+                 d = clearT(texts[2])
+                 if d:
+                     sc[name].append(n + ':' + d)
+
+         except KeyError:
+             if type(texts) is not dict:
+                 print(texts)
+
+
+ # =================
+
+ for k, v in sc.items():
+     if v:
+         with open(b + f'\\{k}.txt', 'w', encoding='utf-8') as f:
+             f.write('\n'.join(v))
+
+ # =================
+ import json
+ tmp = json.dumps(_n, ensure_ascii=False, indent=4)
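
The script ends by serializing the speaker-name map _n into the string tmp but never writes it out; presumably it is inspected interactively so that any newly printed, unmapped names can be added to the table. A hedged continuation sketch of persisting it, reusing the variables defined above (the output filename _n.json is an assumption, not a file in this commit):

# Continuation sketch (not part of the commit): write the accumulated name map
# to disk so unmapped speaker names can be reviewed and translated later.
# The output path below is hypothetical.
with open(b + r'\_n.json', 'w', encoding='utf-8') as f:
    f.write(tmp)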