Datasets:
Languages: Chinese
Tags: Not-For-All-Audiences
License:

Commit 60998f4 · Limour committed (verified) · 1 Parent(s): 5251dfd

Upload 3 files

v-corpus-zh/橘子班/高考恋爱100天/0.txt.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f82b72f1ec279163ac40c35b55a1f880edfff6c129b318ec626a2e26db0f143
+ size 430060
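
The hunk above is a Git LFS pointer; the object it resolves to is a gzip-compressed UTF-8 text file containing the extracted corpus. A minimal sketch for previewing it, assuming the LFS object has already been pulled locally:

```python
import gzip

# Assumes the LFS object has been fetched and is plain gzip'd UTF-8 text,
# one extracted line per row.
path = 'v-corpus-zh/橘子班/高考恋爱100天/0.txt.gz'
with gzip.open(path, 'rt', encoding='utf-8') as f:
    for i, line in enumerate(f):
        print(line.rstrip())
        if i >= 4:  # preview only the first few lines
            break
```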
v-corpus-zh/橘子班/高考恋爱100天/extractdata.py ADDED
@@ -0,0 +1,250 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+
+ # https://github.com/gumblex/extract-gk100d
+
+ # conda create -n gk100d conda-forge::python
+ # conda activate gk100d
+ # pip install python-magic-bin
+ # pip install python-magic
+ # pip show python-magic
+
+ # envs\gk100d\Lib\site-packages\magic
+ # Edit `loader.py`: inside `def _lib_candidates():`, add
+ # ```python
+ # def _lib_candidates():
+ #     yield r'C:\Users\limou\miniconda3\envs\gk100d\Lib\site-packages\magic\libmagic\libmagic.dll'
+ # ```
+
+ # python extractdata.py -e data.xp3 output/
+ # conda remove -n gk100d --all
+
+ import os
+ import io
+ import sys
+ import zlib
+ import struct
+ import shutil
+ import itertools
+ import mimetypes
+ import collections
+
+ xor = lambda t, k: bytes(x^y for x,y in zip(t, itertools.cycle(k)))
+
+ def assert_string(infile, value):
+     assert infile.read(len(value)) == value
+
+
+ class AttrDict(dict):
+     def __init__(self, *args, **kwargs):
+         super(AttrDict, self).__init__(*args, **kwargs)
+         self.__dict__ = self
+
+
+ SegmChunk = collections.namedtuple('SegmChunk', 'flags offset size size_comp')
+
+ MAGICS = (
+     ('ogg', b'\x4f\x67\x67\x53', b'\x00\x02\x00\x00\x00\x00\x00\x00'),
+     ('wmv', b'\x30\x26\xB2\x75', b'\x8E\x66\xCF\x11\xA6\xD9\x00\xAA'),
+     ('tjs', b'\xFF\xFE\x76\x00', b'\x61\x00\x72\x00\x20\x00\x61\x00'),
+ )
+
+ class Xp3File:
+     MAGIC = b'XP3\r\n\x20\x0A\x1A\x8B\x67\x01'
+     KEYHEAD = b'\x0c\xf0\x04a\x00JB\x00'
+
+     def __init__(self, filename):
+         self.filename = filename
+         self.fp = open(self.filename, 'rb')
+         self.table = []
+         assert_string(self.fp, self.MAGIC)
+         self.fp.seek(19)
+         self.version = 2 if self._read_int(4) == 1 else 1
+         self.fp.seek(len(self.MAGIC))
+         if self.version == 2:
+             self.additional_header_offset = self._read_int(8)
+             self.minor_version = self._read_int(4)
+             assert self.minor_version == 1
+             self.fp.seek(self.additional_header_offset)
+             # 80
+             self.flags = self._read_int(1)
+             # 00 00 00 00 00 00 00 00
+             self.fp.read(8)
+             self.table_offset = self._read_int(8)
+         else:
+             self.table_offset = self._read_int(8)
+         self.read_table()
+
+     def read_table(self):
+         self.fp.seek(self.table_offset)
+         self.table_compressed = self._read_int(1)
+         self.table_size_comp = self._read_int(8)
+         if self.table_compressed:
+             self.table_size_orig = self._read_int(8)
+         else:
+             self.table_size_orig = self.table_size_comp
+         table_data = self.fp.read(self.table_size_comp)
+         if self.table_compressed:
+             table_data = zlib.decompress(table_data)
+         self.table_raw = table_data
+         stream = io.BytesIO(table_data)
+         while 1:
+             magic = stream.read(4)
+             if not magic:
+                 break
+             assert magic == b'File'
+             d = AttrDict()
+             d.length = self._read_int(8, stream)
+             assert_string(stream, b'info')
+             d.info_length = self._read_int(8, stream)
+             d.flags = self._read_int(4, stream)
+             d.size = self._read_int(8, stream)
+             d.size_comp = self._read_int(8, stream)
+             offset = stream.tell()
+             # should be name
+             name_length = self._read_int(2, stream)
+             if d.info_length == 22 + name_length*2:
+                 d.name = stream.read(name_length*2).decode('utf-16-le', 'ignore')
+                 d.name_good = True
+             else:
+                 stream.seek(offset)
+                 d.name = stream.read(12)
+                 d.name_good = False
+             assert_string(stream, b'segm')
+             d.segm_number = self._read_int(8, stream) // 28
+             d.segm = []
+             for i in range(d.segm_number):
+                 d.segm.append(SegmChunk(
+                     self._read_int(4, stream),
+                     self._read_int(8, stream),
+                     self._read_int(8, stream),
+                     self._read_int(8, stream)
+                 ))
+             assert_string(stream, b'adlr')
+             # length
+             self._read_int(8, stream)
+             d.adlr = stream.read(4)
+             self.table.append(d)
+
+     def get(self, num, decrypt=True):
+         fileinfo = self.table[num]
+         outbuffer = io.BytesIO()
+         for segm in fileinfo.segm:
+             self.fp.seek(segm.offset)
+             data = self.fp.read(segm.size_comp)
+             if segm.flags & 7: # compressed
+                 data = zlib.decompress(data)
+             if decrypt:
+                 data = self.decrypt(fileinfo, data)
+             assert len(data) == segm.size
+             outbuffer.write(data)
+         outbuffer.seek(0)
+         return outbuffer
+
+     def extract(self, out):
+         for k, fileinfo in enumerate(self.table, 1):
+             outbuffer = self.get(k-1)
+             if fileinfo.name_good:
+                 filename = fileinfo.name
+                 if len(filename.encode('utf-8')) > 128:
+                     fn, ext = os.path.splitext(filename)
+                     filename = fn[:64] + ext
+             else:
+                 ext = self.detect_ext(outbuffer.read(1024))
+                 filename = ('%04d' % k) + (ext or '.txt')
+                 outbuffer.seek(0)
+             dirname = os.path.dirname(filename)
+             os.makedirs(os.path.join(out, dirname), exist_ok=True)
+             with open(os.path.join(out, filename), 'wb') as f:
+                 shutil.copyfileobj(outbuffer, f)
+             print('Extracted %s' % filename)
+
+     def detect_ext(self, data):
+         import magic
+         try:
+             fmagic = magic.detect_from_content(data)
+         except AttributeError as e:
+             print(e)
+             return '.ukn'
+         if fmagic.mime_type.startswith('text/'):
+             if fmagic.encoding == 'unknown-8bit':
+                 ext = '.bin'
+             else:
+                 text = data.decode(fmagic.encoding)
+                 if '@return' in text or '*start' in text or '.ks' in text or '[w]' in text:
+                     ext = '.ks'
+                 elif '.tjs' in text or '%[' in text or '];' in text:
+                     ext = '.tjs'
+                 else:
+                     ext = '.txt'
+         else:
+             ext = mimetypes.guess_extension(fmagic.mime_type)
+             if ext == '.jpeg':
+                 ext = '.jpg'
+             elif ext == '.oga':
+                 ext = '.ogg'
+             elif ext == '.asf':
+                 ext = '.wmv'
+         return ext
+
+     def _read_int(self, size, infile=None, endian='<', signed=False):
+         inttypes = {1: 'B', 2: 'H', 4: 'I', 8: 'Q'}
+         d = (infile or self.fp).read(size)
+         if signed:
+             return struct.unpack(endian + inttypes[size].lower(), d)[0]
+         else:
+             return struct.unpack(endian + inttypes[size], d)[0]
+
+     def decrypt(self, fileinfo, data):
+         return data
+
+     def close(self):
+         self.fp.close()
+
+     def __del__(self):
+         self.fp.close()
+
+ class EncryptedXp3File(Xp3File):
+
+     def load_key(self):
+         if hasattr(self, 'keyhead'):
+             return
+         # Gaokao Love 100 Days Disc Ver.
+         elif self.table_offset == 1631288384:
+             self.keyhead = b'\x0c\xf0\x04a\x00JB\x00'
+         else:
+             self.keyhead = b'\x1d\xef[\xa3\x00\xcaA\x00'
+
+     def detect_key(self):
+         for k, fileinfo in enumerate(self.table):
+             segm = fileinfo.segm[0]
+             self.fp.seek(segm.offset)
+             if segm.flags & 7: # compressed
+                 data = self.fp.read(segm.size_comp)
+                 data = zlib.decompress(data)[:12]
+             else:
+                 data = self.fp.read(12)
+             for ftype, head1, head2 in MAGICS:
+                 if xor(head1, fileinfo.adlr) == data[:4]:
+                     self.keyhead = xor(head2, data[4:])
+                     return self.keyhead
+
+     def decrypt(self, fileinfo, data):
+         self.load_key()
+         return xor(data, fileinfo.adlr + self.keyhead)
+
+
+ def main():
+     args = sys.argv[1:]
+     encrypted = False
+     if args[0] == '-e':
+         encrypted = True
+         args.pop(0)
+         xp3file = EncryptedXp3File(args[0])
+     else:
+         xp3file = Xp3File(args[0])
+     print('File loaded.')
+     xp3file.extract(args[1])
+
+ if __name__ == '__main__':
+     main()
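
A side note on checking the output of `extractdata.py` (not part of the upload): each XP3 entry carries a 4-byte `adlr` chunk, which in typical XP3 archives is the adler-32 checksum of the decrypted file. A minimal sketch under that assumption, with `extractdata.py` importable from the working directory:

```python
import sys
import zlib

from extractdata import EncryptedXp3File  # the script above

# Hypothetical sanity check: compare each entry's decrypted bytes against the
# 4-byte 'adlr' value, assumed here to be a little-endian adler-32 checksum.
xp3 = EncryptedXp3File(sys.argv[1])
for i, info in enumerate(xp3.table):
    data = xp3.get(i).read()
    ok = zlib.adler32(data) == int.from_bytes(info.adlr, 'little')
    print('%04d %r adler32 match: %s' % (i, info.name, ok))
xp3.close()
```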
v-corpus-zh/橘子班/高考恋爱100天/gk100d_ks.py ADDED
@@ -0,0 +1,203 @@
+ import sys
+ sys.path.append('..')
+
+ from common import \
+     get_all_files_in_directory, \
+     startsWithAny, getCmdArgsMap, \
+     clearT, removeWait, log_process
+ # https://huggingface.co/datasets/Limour/archvie/blob/main/v-corpus-zh/common.py
+
+ # =================
+
+ a = get_all_files_in_directory(r'E:\galgame\gk100d\ks', ext='.ks')
+ b = r'E:\galgame\gk100d\text'
+
+ # =================
+
+ sc = {}
+ _n = {
+     "【木 馨" : "木馨",
+     "【? ?" : "?",
+     "【我" : "我",
+     "【曲 琪" : "曲琪",
+     "【乔 伊" : "乔伊",
+     "【魏 通" : "魏通",
+     "【梅老师" : "梅老师",
+     "【母 亲" : "母亲",
+     "【罗小涵" : "罗小涵",
+     "【父 亲" : "父亲",
+     "【西安老师" : "西安老师",
+     "【男生 A" : "男生A",
+     "【男生 B" : "男生B",
+     "【女生 A" : "女生A",
+     "【女生A" : "女生A",
+     "【老师A" : "老师A",
+     "【女生 B" : "女生B",
+     "【老 师" : "老师",
+     "【店老板" : "店老板",
+     "【副团长" : "副团长",
+     "【女生 C" : "女生C",
+     "【老 鼠" : "老鼠",
+     "【团 长" : "团长",
+     "【工作人员" : "工作人员",
+     "【木馨的母亲" : "木馨的母亲",
+     "【校 医" : "校医",
+     "【学员 A" : "学员A",
+     "【学员 B" : "学员B",
+     "【众学员" : "众学员",
+     "【教 官" : "教官",
+     "【学员 C" : "学员C",
+     "【学员 D" : "学员D",
+     "【保洁员" : "保洁员",
+     "【女 生" : "女生",
+     "【小商贩" : "小商贩",
+     "【男生A" : "男生A",
+     "【路过的学生" : "路过的学生",
+     "【演讲者" : "演讲者",
+     "【韩状元" : "韩状元",
+     "【外校学生" : "外校学生",
+     "【高一学生" : "高一学生",
+     "【小卖部大叔" : "小卖部大叔",
+     "【外校生A" : "外校生A",
+     "【外校生B" : "外校生B",
+     "【外校生C" : "外校生C",
+     "【外校生D" : "外校生D",
+     "【服务员" : "服务员",
+     "【皇阿玛" : "皇阿玛",
+     "【女同学A " : "女同学A",
+     "【众 人" : "众人",
+     "【男生B" : "男生B",
+     "【女生B" : "女生B",
+     "【众同学" : "众同学",
+     "【同学A" : "同学A",
+     "【女生" : "女生",
+     "【中年女性" : "中年女性",
+     "【老师B " : "老师B",
+     "【老师A " : "老师A",
+     "【女生A " : "女生A",
+     "【女生B " : "女生B",
+     "【小涵的母亲" : "小涵的母亲",
+     "【旁边的小孩" : "旁边的小孩",
+     "【罗小涵\"" : "罗小涵",
+     "【男生C" : "男生C",
+     "【女生C" : "女生C",
+     "【??" : "?",
+     "【西安老师\"" : "西安老师",
+     "【女生C " : "女生C",
+     "【男生A " : "男生A",
+     "【男生B " : "男生B",
+     "【男生C " : "男生C",
+     "【同学B" : "同学B",
+     "【流浪汉" : "流浪汉",
+     "【清洁工" : "清洁工",
+     "【服务生" : "服务生",
+     "【木馨" : "木馨",
+     "【医 生" : "医生",
+     "【护 士" : "护士",
+     "【网吧青年" : "网吧青年",
+     "【老 板" : "老板",
+     "【网吧老板" : "网吧老板",
+     "【警 察" : "警察",
+     "【书店老板" : "书店老板",
+     "【同学 A" : "同学A",
+     "【店 员" : "店员",
+     "【记 者" : "记者",
+     "【警 卫" : "警卫",
+     "【父 亲" : "父亲",
+     "【工作人员A" : "工作人员A",
+     "【工作人员B" : "工作人员B",
+     "【工作人员\"" : "工作人员",
+     "【肌肉男" : "肌肉男",
+     "【推销人员" : "推销人员",
+     "【矮个子" : "矮个子",
+     "【神秘人" : "神秘人",
+     "【志愿者" : "志愿者",
+     "【白发老人" : "白发老人",
+     "【店 长" : "店长",
+     "【队 友" : "队友",
+     "【店员A" : "店员A",
+     "【店员B" : "店员B",
+     "【店长" : "店长",
+     "【顾客" : "顾客",
+     "【众店员" : "众店员",
+     "【木馨的父亲" : "木馨的父亲",
+     "【室友A" : "室友A",
+     "【室友B" : "室友B",
+     "【室友C" : "室友C",
+     "【学 长" : "学长"
+ }
+
+ # =================
+
+ for path in a:
+     name: str = path[path.rindex('\\') + 1:]
+     name = '0'
+     if name not in sc:
+         sc[name] = []
+     print(name)
+
+     # =================
+     try:
+         f = open(path, 'r', encoding='utf-16-le')
+         data = list(x.rstrip() for x in f.readlines())
+     except UnicodeDecodeError:
+         f = open(path, 'r', encoding='gbk')
+         data = list(x.rstrip() for x in f.readlines())
+     finally:
+         f.close()
+     # =================
+     if any(('{' in line) for line in data):
+         continue
+     # =================
+     data[0] = data[0].lstrip('\ufeff')
+     # print(data[0][0])
+     def _filter(x: str):
+         if not x:
+             return False
+         if startsWithAny(x, {'*', ';', '[', '//'}):
+             return False
+         return True
+     data = list(filter(_filter, data))
+
+     # =================
+     n = ''
+     w_i = -1
+     while w_i < len(data) - 1:
+         w_i += 1
+         line: str = data[w_i]
+         # =================
+         if line.startswith('@dia'):
+             n = ''
+             continue
+         elif line.startswith('@主角'):
+             n = '我'
+             continue
+         elif line.startswith('@npc'):
+             tmp = getCmdArgsMap(line)
+             if 'id' in tmp:
+                 n = tmp['id']
+             else:
+                 print(line)
+             continue
+         elif line.startswith('@'):
+             n = ''
+             continue
+         if line.isascii():
+             continue
+         if n:
+             d = log_process(f'【{n}】{removeWait(line)}', line, _n)
+         else:
+             d = log_process(f'{removeWait(line)}', line, _n)
+         if d:
+             sc[name].append(d)
+
+ # =================
+
+ for k, v in sc.items():
+     if v:
+         with open(b + f'\\{k}.txt', 'w', encoding='utf-8') as f:
+             f.write('\n'.join(v))
+
+ # =================
+ import json
+ tmp = json.dumps(_n, ensure_ascii=False, indent=4)
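
gk100d_ks.py writes a plain `0.txt` under `E:\galgame\gk100d\text`, while this commit ships `0.txt.gz`. The exact packaging command is not recorded here; a minimal sketch of the likely final step, assuming a straight gzip of the UTF-8 output:

```python
import gzip
import shutil

# Hypothetical packaging step: gzip the merged corpus file for upload.
# Paths mirror the ones hard-coded in gk100d_ks.py above.
src = r'E:\galgame\gk100d\text\0.txt'
dst = r'E:\galgame\gk100d\text\0.txt.gz'
with open(src, 'rb') as fin, gzip.open(dst, 'wb') as fout:
    shutil.copyfileobj(fin, fout)
```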