seablue committed on
Commit 6920232
1 Parent(s): 28c6e02

Upload zero_to_fp32.py with huggingface_hub

Files changed (1)
  1. zero_to_fp32.py +592 -0
zero_to_fp32.py ADDED
@@ -0,0 +1,592 @@
+ #!/usr/bin/env python
+
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ # This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
+ # copied into the top-level checkpoint dir, so the user can easily do the conversion at any point in
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+ # application.
+ #
+ # example: python zero_to_fp32.py . pytorch_model.bin
+
+ import argparse
+ import torch
+ import glob
+ import math
+ import os
+ import re
+ from collections import OrderedDict
+ from dataclasses import dataclass
+
+ # While this script doesn't use deepspeed to recover data, the checkpoints are pickled with
+ # DeepSpeed data structures, so deepspeed has to be available in the current python environment.
+ from deepspeed.utils import logger
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+                                             FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+                                             FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+ # container for the pieces parsed out of a single rank's model-states file
+ @dataclass
+ class zero_model_state:
+     buffers: dict
+     param_shapes: dict
+     shared_params: list
+     ds_version: int
+     frozen_param_shapes: dict
+     frozen_param_fragments: dict
+
+
+ debug = 0
+
+ # load to cpu
+ device = torch.device('cpu')
+
+
+ def atoi(text):
+     return int(text) if text.isdigit() else text
+
+
+ def natural_keys(text):
+     '''
+     alist.sort(key=natural_keys) sorts in human order
+     http://nedbatchelder.com/blog/200712/human_sorting.html
+     (See Toothy's implementation in the comments)
+     '''
+     return [atoi(c) for c in re.split(r'(\d+)', text)]
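+
+ # Illustrative ordering (hypothetical names): natural_keys sorts 'rank_2'
+ # before 'rank_10', whereas plain lexicographic sort would not:
+ #   sorted(['rank_10_states.pt', 'rank_2_states.pt'], key=natural_keys)
+ #   -> ['rank_2_states.pt', 'rank_10_states.pt']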
+
+
+ def get_model_state_file(checkpoint_dir, zero_stage):
+     if not os.path.isdir(checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+     # there should be only one file
+     if zero_stage <= 2:
+         file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+     elif zero_stage == 3:
+         file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+     if not os.path.exists(file):
+         raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+     return file
+
+
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
+     # XXX: need to test that this simple glob rule works for multi-node setup too
+     ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
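+     # Illustrative result for a hypothetical 2-rank ZeRO-3 run (file names
+     # assumed from the patterns used below), already in rank order thanks to
+     # natural_keys:
+     #   ['zero_pp_rank_0_mp_rank_00_optim_states.pt',
+     #    'zero_pp_rank_1_mp_rank_00_optim_states.pt']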
+
+     if len(ckpt_files) == 0:
+         raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+     return ckpt_files
+
+
+ def get_optim_files(checkpoint_dir):
+     return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+ def get_model_state_files(checkpoint_dir):
+     return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+ def parse_model_states(files):
+     zero_model_states = []
+     for file in files:
+         state_dict = torch.load(file, map_location=device)
+
+         if BUFFER_NAMES not in state_dict:
+             raise ValueError(f"{file} is not a model state checkpoint")
+         buffer_names = state_dict[BUFFER_NAMES]
+         if debug:
+             print("Found buffers:", buffer_names)
+
+         # recover just the buffers while restoring them to fp32 if they were saved in fp16
+         buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+         param_shapes = state_dict[PARAM_SHAPES]
+
+         # collect parameters that are included in param_shapes
+         param_names = []
+         for s in param_shapes:
+             for name in s.keys():
+                 param_names.append(name)
+
+         # update with frozen parameters
+         frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+         if frozen_param_shapes is not None:
+             if debug:
+                 print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+             param_names += list(frozen_param_shapes.keys())
+
+         # handle shared params
+         shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
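+         # shared_params is a list of [duplicate, source] name pairs; during
+         # reconstruction the tensor is copied from source to duplicate, e.g. a
+         # hypothetical tied-embeddings entry: ['lm_head.weight', 'wte.weight']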
+
+         ds_version = state_dict.get(DS_VERSION, None)
+
+         frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+         z_model_state = zero_model_state(buffers=buffers,
+                                          param_shapes=param_shapes,
+                                          shared_params=shared_params,
+                                          ds_version=ds_version,
+                                          frozen_param_shapes=frozen_param_shapes,
+                                          frozen_param_fragments=frozen_param_fragments)
+         zero_model_states.append(z_model_state)
+
+     return zero_model_states
+
+
+ def parse_optim_states(files, ds_checkpoint_dir):
+
+     total_files = len(files)
+     state_dicts = []
+     for f in files:
+         state_dict = torch.load(f, map_location=device)
+         # immediately discard the two potentially huge optimizer states, as we only care about the
+         # fp32 master weights; also handle the case where they were already removed by another helper script
+         state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+         state_dicts.append(state_dict)
+
+     if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+         raise ValueError(f"{files[0]} is not a zero checkpoint")
+     zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+     world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+     # For ZeRO-2 each param group can have a different partition_count, as data parallelism for expert
+     # parameters can be different from data parallelism for non-expert parameters. So we can just
+     # use the max of the partition_count to get the dp world_size.
+
+     if type(world_size) is list:
+         world_size = max(world_size)
+
+     if world_size != total_files:
+         raise ValueError(
+             f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+             "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+         )
+
+     # the groups are named differently in each stage
+     if zero_stage <= 2:
+         fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+     elif zero_stage == 3:
+         fp32_groups_key = FP32_FLAT_GROUPS
+     else:
+         raise ValueError(f"unknown zero stage {zero_stage}")
+
+     if zero_stage <= 2:
+         fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+     elif zero_stage == 3:
+         # if there is more than one param group, there will be multiple flattened tensors - one
+         # flattened tensor per group - for simplicity merge them into a single tensor
+         #
+         # XXX: could make the script more memory efficient for when there are multiple groups - it
+         # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+         fp32_flat_groups = [
+             torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+         ]
+
+     return zero_stage, world_size, fp32_flat_groups
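+
+ # Shape sketch of the return value (illustrative): for zero1/2 each
+ # fp32_flat_groups[rank] is a list of 1-D partitions, one per param group; for
+ # zero3 it is a single flat 1-D tensor per rank (groups were merged above).
+ # Index order 0..world_size-1 matches the natural-sorted optim files.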
+
+
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+     """
+     Returns fp32 state_dict reconstructed from ds checkpoint
+
+     Args:
+         - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+     """
+     print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+     optim_files = get_optim_files(ds_checkpoint_dir)
+     zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+     print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+     model_files = get_model_state_files(ds_checkpoint_dir)
+
+     zero_model_states = parse_model_states(model_files)
+     print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+     if zero_stage <= 2:
+         return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+     elif zero_stage == 3:
+         return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+
+
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
+     if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+         return
+
+     frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+     frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+     if debug:
+         num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+         print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+     wanted_params = len(frozen_param_shapes)
+     wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+     avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+     print(f'Frozen params: Have {avail_numel} numels to process.')
+     print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+     total_params = 0
+     total_numel = 0
+     for name, shape in frozen_param_shapes.items():
+         total_params += 1
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+
+         state_dict[name] = frozen_param_fragments[name]
+
+         if debug:
+             print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+     print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _has_callable(obj, fn):
+     attr = getattr(obj, fn, None)
+     return callable(attr)
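+
+ # _has_callable lets the reconstruction accept shapes stored either as
+ # torch.Size (which has .numel()) or as plain tuples/lists (handled via
+ # math.prod below), e.g.:
+ #   _has_callable(torch.Size([2, 3]), 'numel')  -> True
+ #   _has_callable((2, 3), 'numel')              -> False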
+
+
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+     param_shapes = zero_model_states[0].param_shapes
+
+     # Reconstruction protocol:
+     #
+     # XXX: document this
+
+     if debug:
+         for i in range(world_size):
+             for j in range(len(fp32_flat_groups[0])):
+                 print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+     # XXX: memory usage doubles here (zero2)
+     num_param_groups = len(fp32_flat_groups[0])
+     merged_single_partition_of_fp32_groups = []
+     for i in range(num_param_groups):
+         merged_partitions = [sd[i] for sd in fp32_flat_groups]
+         full_single_fp32_vector = torch.cat(merged_partitions, 0)
+         merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+     avail_numel = sum(
+         [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+     if debug:
+         wanted_params = sum([len(shapes) for shapes in param_shapes])
+         wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+         # not asserting if there is a mismatch due to possible padding
+         print(f"Have {avail_numel} numels to process.")
+         print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     total_numel = 0
+     total_params = 0
+     for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+         offset = 0
+         avail_numel = full_single_fp32_vector.numel()
+         for name, shape in shapes.items():
+
+             unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+             total_numel += unpartitioned_numel
+             total_params += 1
+
+             if debug:
+                 print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+             state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+             offset += unpartitioned_numel
+
+         # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+         # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+         # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+         # live optimizer object, so we are checking that the numbers are within the right range
+         align_to = 2 * world_size
+
+         def zero2_align(x):
+             return align_to * math.ceil(x / align_to)
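+         # Illustrative numbers: with world_size=4, align_to=8, so an offset of
+         # 13 rounds up to zero2_align(13) = 8 * ceil(13 / 8) = 16.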
+
+         if debug:
+             print(f"original offset={offset}, avail_numel={avail_numel}")
+
+         offset = zero2_align(offset)
+         avail_numel = zero2_align(avail_numel)
+
+         if debug:
+             print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+         # Sanity check
+         if offset != avail_numel:
+             raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+     state_dict = OrderedDict()
+
+     # buffers
+     buffers = zero_model_states[0].buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+     _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+     # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+     remainder = unpartitioned_numel % world_size
+     padding_numel = (world_size - remainder) if remainder else 0
+     partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+     return partitioned_numel, padding_numel
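+
+ # Worked example: unpartitioned_numel=10 with world_size=4 gives remainder=2,
+ # partitioned_numel=ceil(10/4)=3 and padding_numel=4-2=2: four ranks store
+ # 4 * 3 = 12 numels to cover the 10 real ones.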
+
+
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+     if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+         return
+
+     if debug:
+         for i in range(world_size):
+             num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+             print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+     frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+     wanted_params = len(frozen_param_shapes)
+     wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+     avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+     print(f'Frozen params: Have {avail_numel} numels to process.')
+     print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+     total_params = 0
+     total_numel = 0
+     for name, shape in zero_model_states[0].frozen_param_shapes.items():
+         total_params += 1
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+
+         param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+         state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+         if debug:
+             print(
+                 f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+             )
+
+     print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+     param_shapes = zero_model_states[0].param_shapes
+     avail_numel = fp32_flat_groups[0].numel() * world_size
+     # Reconstruction protocol: For zero3 we need to zip the partitions together at the boundary of
+     # each param, re-consolidating each param, while dealing with padding if any
+
+     # merge list of dicts, preserving order
+     param_shapes = {k: v for d in param_shapes for k, v in d.items()}
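+     # e.g. [{'a': s0}, {'b': s1}] -> {'a': s0, 'b': s1} (dicts preserve insertion order)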
+
+     if debug:
+         for i in range(world_size):
+             print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+     wanted_params = len(param_shapes)
+     wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+     # not asserting if there is a mismatch due to possible padding
+     avail_numel = fp32_flat_groups[0].numel() * world_size
+     print(f"Trainable params: Have {avail_numel} numels to process.")
+     print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     offset = 0
+     total_numel = 0
+     total_params = 0
+     for name, shape in param_shapes.items():
+
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+         total_params += 1
+
+         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+         if debug:
+             print(
+                 f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+             )
+
+         # XXX: memory usage doubles here
+         state_dict[name] = torch.cat(
+             tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+             0).narrow(0, 0, unpartitioned_numel).view(shape)
+         offset += partitioned_numel
+
+     # offset counted numels per rank; all ranks hold the same count, so the
+     # total consumed across all ranks is offset * world_size
+     offset *= world_size
+
+     # Sanity check
+     if offset != avail_numel:
+         raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
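+ # Gather sketch (illustrative 10-numel param, world_size=4): each rank holds a
+ # 3-numel slice (the last one padded); torch.cat joins the four slices taken at
+ # the same per-rank offset, then narrow(0, 0, 10) drops the trailing padding
+ # before view() restores the original shape.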
+
+
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+     state_dict = OrderedDict()
+
+     # buffers
+     buffers = zero_model_states[0].buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+     _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+     # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+     ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+     via a model hub.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+     Returns:
+         - pytorch ``state_dict``
+
+     Note: this approach may not work if your application doesn't have sufficient free CPU memory, in
+     which case you may need to use the offline approach via the ``zero_to_fp32.py`` script that is
+     saved with the checkpoint.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         # do the training and checkpoint saving
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+         model = model.cpu() # move to cpu
+         model.load_state_dict(state_dict)
+         # submit to model hub or save the model to share with others
+
+     In this example the ``model`` will no longer be usable in the deepspeed context of the same
+     application, i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+     """
+     if tag is None:
+         latest_path = os.path.join(checkpoint_dir, 'latest')
+         if os.path.isfile(latest_path):
+             with open(latest_path, 'r') as fd:
+                 tag = fd.read().strip()
+         else:
+             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+     if not os.path.isdir(ds_checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+     return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+     """
+
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+     print(f"Saving fp32 state dict to {output_file}")
+     torch.save(state_dict, output_file)
+
+
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+     """
+     1. Put the provided model on cpu
+     2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+     3. Load it into the provided model
+
+     Args:
+         - ``model``: the model object to update
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+     Returns:
+         - ``model``: modified model
+
+     Make sure you have plenty of CPU memory available before you call this function. If you don't
+     have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+     conveniently placed for you in the checkpoint folder.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+         # submit to model hub or save the model to share with others
+
+     Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
+     of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     """
+     logger.info("Extracting fp32 weights")
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+     logger.info("Overwriting model with fp32 weights")
+     model = model.cpu()
+     model.load_state_dict(state_dict, strict=False)
+
+     return model
+
+
+ if __name__ == "__main__":
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument("checkpoint_dir",
+                         type=str,
+                         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+     parser.add_argument(
+         "output_file",
+         type=str,
+         help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+     parser.add_argument("-t",
+                         "--tag",
+                         type=str,
+                         default=None,
+                         help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+     args = parser.parse_args()
+
+     debug = args.debug
+
+     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)
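+
+ # Example invocations (paths illustrative):
+ #   python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12/pytorch_model.bin
+ #   python zero_to_fp32.py path/checkpoint-12 pytorch_model.bin -t global_step1 -d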