diff --git "a/huggingface_accelerate.txt" "b/huggingface_accelerate.txt"
new file mode 100644
--- /dev/null
+++ "b/huggingface_accelerate.txt"
@@ -0,0 +1,7101 @@
+# File: accelerate-main/manim_animations/big_model_inference/stage_1.py
+from manim import *
+
+class Stage1(Scene):
+
+ def construct(self):
+ mem = Rectangle(height=0.5, width=0.5)
+ fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
+ cpu_left_col_base = [mem.copy() for i in range(6)]
+ cpu_right_col_base = [mem.copy() for i in range(6)]
+ cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
+ cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
+ cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
+ cpu_text = Text('CPU', font_size=24)
+ cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ cpu.move_to([-2.5, -0.5, 0])
+ self.add(cpu)
+ gpu_base = [mem.copy() for i in range(1)]
+ gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
+ gpu_text = Text('GPU', font_size=24)
+ gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ gpu.align_to(cpu, DOWN)
+ gpu.set_x(gpu.get_x() - 1)
+ self.add(gpu)
+ model_base = [mem.copy() for i in range(6)]
+ model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
+ model_text = Text('Model', font_size=24)
+ model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ model.move_to([3, -1.0, 0])
+ self.play(Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1))
+ step_1 = MarkupText(f"First, an empty model skeleton is loaded\ninto memory without using much RAM.", font_size=24)
+ key = Square(side_length=2.2)
+ key.move_to([-5, 2, 0])
+ key_text = MarkupText(f"Key:\n\n● Empty Model", font_size=18)
+ key_text.move_to([-5, 2.4, 0])
+ step_1.move_to([2, 2, 0])
+ self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
+ self.add(model)
+ cpu_targs = []
+ first_animations = []
+ second_animations = []
+ for (i, rect) in enumerate(model_base):
+ cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
+ cpu_target.move_to(rect)
+ cpu_target.generate_target()
+ cpu_target.target.height = 0.46 / 4
+ cpu_target.target.width = 0.46 / 3
+ if i == 0:
+ cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
+ cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
+ elif i == 3:
+ cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
+ else:
+ cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
+ cpu_targs.append(cpu_target)
+ first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
+ second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
+ self.play(*first_animations)
+ self.play(*second_animations)
+ self.wait()
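+
+# Illustrative sketch (not a file from this repo): the step animated above is
+# what `init_empty_weights` does, building the model skeleton on the "meta"
+# device so its parameters take essentially no RAM. The toy model below is a
+# placeholder.
+import torch.nn as nn
+from accelerate import init_empty_weights
+
+with init_empty_weights():
+    # Parameters are created on the meta device; no real memory is allocated yet.
+    skeleton = nn.Sequential(*[nn.Linear(10_000, 10_000) for _ in range(20)])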
+
+# File: accelerate-main/manim_animations/big_model_inference/stage_2.py
+from manim import *
+
+class Stage2(Scene):
+
+ def construct(self):
+ mem = Rectangle(height=0.5, width=0.5)
+ fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
+ cpu_left_col_base = [mem.copy() for i in range(6)]
+ cpu_right_col_base = [mem.copy() for i in range(6)]
+ cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
+ cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
+ cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
+ cpu_text = Text('CPU', font_size=24)
+ cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ cpu.move_to([-2.5, -0.5, 0])
+ self.add(cpu)
+ gpu_base = [mem.copy() for i in range(4)]
+ gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
+ gpu_text = Text('GPU', font_size=24)
+ gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ gpu.move_to([-1, -1, 0])
+ self.add(gpu)
+ model_base = [mem.copy() for i in range(6)]
+ model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
+ model_text = Text('Model', font_size=24)
+ model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ model.move_to([3, -1.0, 0])
+ self.add(model)
+ cpu_targs = []
+ for (i, rect) in enumerate(model_base):
+ rect.set_stroke(YELLOW)
+ cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
+ if i == 0:
+ cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
+ cpu_target.set_x(cpu_target.get_x() + 0.1)
+ elif i == 3:
+ cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
+ else:
+ cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
+ self.add(cpu_target)
+ cpu_targs.append(cpu_target)
+ checkpoint_base = [mem.copy() for i in range(6)]
+ checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
+ checkpoint_text = Text('Loaded Checkpoint', font_size=24)
+ checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
+ checkpoint.move_to([3, 0.5, 0])
+ key = Square(side_length=2.2)
+ key.move_to([-5, 2, 0])
+ key_text = MarkupText(f"Key:\n\n● Empty Model", font_size=18)
+ key_text.move_to([-5, 2.4, 0])
+ self.add(key_text, key)
+ blue_text = MarkupText(f"● Checkpoint", font_size=18)
+ blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
+ step_2 = MarkupText(f'Next, a second model is loaded into memory,\nwith the weights of a single shard.', font_size=24)
+ step_2.move_to([2, 2, 0])
+ self.play(Write(step_2), Write(blue_text))
+ self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))
+ first_animations = []
+ second_animations = []
+ for (i, rect) in enumerate(checkpoint_base):
+ target = fill.copy().set_fill(BLUE, opacity=0.7)
+ target.move_to(rect)
+ first_animations.append(GrowFromCenter(target, run_time=1))
+ cpu_target = target.copy()
+ cpu_target.generate_target()
+ if i < 5:
+ cpu_target.target.move_to(cpu_left_col_base[i + 1])
+ else:
+ cpu_target.target.move_to(cpu_right_col_base[i - 5])
+ second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
+ self.play(*first_animations)
+ self.play(*second_animations)
+ self.wait()
+
+# File: accelerate-main/manim_animations/big_model_inference/stage_3.py
+from manim import *
+
+class Stage3(Scene):
+
+ def construct(self):
+ mem = Rectangle(height=0.5, width=0.5)
+ meta_mem = Rectangle(height=0.25, width=0.25)
+ fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
+ cpu_left_col_base = [mem.copy() for i in range(6)]
+ cpu_right_col_base = [mem.copy() for i in range(6)]
+ cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
+ cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
+ cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
+ cpu_text = Text('CPU', font_size=24)
+ cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ cpu.move_to([-2.5, -0.5, 0])
+ self.add(cpu)
+ gpu_base = [mem.copy() for i in range(4)]
+ gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
+ gpu_text = Text('GPU', font_size=24)
+ gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ gpu.move_to([-1, -1, 0])
+ self.add(gpu)
+ model_base = [mem.copy() for i in range(6)]
+ model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
+ model_text = Text('Model', font_size=24)
+ model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ model.move_to([3, -1.0, 0])
+ self.add(model)
+ model_arr = []
+ model_cpu_arr = []
+ model_meta_arr = []
+ for (i, rect) in enumerate(model_base):
+ rect.set_stroke(YELLOW)
+ cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
+ if i == 0:
+ cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
+ cpu_target.set_x(cpu_target.get_x() + 0.1)
+ elif i == 3:
+ cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
+ else:
+ cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
+ self.add(cpu_target)
+ model_cpu_arr.append(cpu_target)
+ self.add(*model_arr, *model_cpu_arr, *model_meta_arr)
+ checkpoint_base = [mem.copy() for i in range(6)]
+ checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
+ checkpoint_text = Text('Loaded Checkpoint', font_size=24)
+ checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ checkpoint.move_to([3, 0.5, 0])
+ self.add(checkpoint)
+ ckpt_arr = []
+ ckpt_cpu_arr = []
+ for (i, rect) in enumerate(checkpoint_base):
+ target = fill.copy().set_fill(BLUE, opacity=0.7)
+ target.move_to(rect)
+ ckpt_arr.append(target)
+ cpu_target = target.copy()
+ if i < 5:
+ cpu_target.move_to(cpu_left_col_base[i + 1])
+ else:
+ cpu_target.move_to(cpu_right_col_base[i - 5])
+ ckpt_cpu_arr.append(cpu_target)
+ self.add(*ckpt_arr, *ckpt_cpu_arr)
+ key = Square(side_length=2.2)
+ key.move_to([-5, 2, 0])
+ key_text = MarkupText(f"Key:\n\n● Empty Model", font_size=18)
+ key_text.move_to([-5, 2.4, 0])
+ self.add(key_text, key)
+ blue_text = MarkupText(f"● Checkpoint", font_size=18)
+ blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
+ self.add(blue_text)
+ step_3 = MarkupText(f'Based on the passed-in configuration, weights are stored in\na variety of np.memmaps on disk or sent to a particular device.', font_size=24)
+ step_3.move_to([2, 2, 0])
+ disk_left_col_base = [meta_mem.copy() for i in range(6)]
+ disk_right_col_base = [meta_mem.copy() for i in range(6)]
+ disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
+ disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
+ disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
+ disk_text = Text('Disk', font_size=24)
+ disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ disk.move_to([-4.0, -1.25, 0])
+ self.play(Write(step_3, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))
+ animations = []
+ for (i, rect) in enumerate(ckpt_cpu_arr):
+ target = rect.copy()
+ target.generate_target()
+ target.target.move_to(disk_left_col_base[i]).scale(0.5)
+ animations.append(MoveToTarget(target, run_time=1.5))
+ self.play(*animations)
+ self.play(FadeOut(step_3))
+ step_4 = MarkupText(f'Then, the checkpoint is removed from memory\nthrough garbage collection.', font_size=24)
+ step_4.move_to([2, 2, 0])
+ self.play(Write(step_4, run_time=3))
+ self.play(FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr))
+ self.wait()
+
+# File: accelerate-main/manim_animations/big_model_inference/stage_4.py
+from manim import *
+
+class Stage4(Scene):
+
+ def construct(self):
+ mem = Rectangle(height=0.5, width=0.5)
+ fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
+ meta_mem = Rectangle(height=0.25, width=0.25)
+ cpu_left_col_base = [mem.copy() for i in range(6)]
+ cpu_right_col_base = [mem.copy() for i in range(6)]
+ cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
+ cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
+ cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
+ cpu_text = Text('CPU', font_size=24)
+ cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ cpu.move_to([-2.5, -0.5, 0])
+ self.add(cpu)
+ gpu_base = [mem.copy() for i in range(4)]
+ gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
+ gpu_text = Text('GPU', font_size=24)
+ gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ gpu.move_to([-1, -1, 0])
+ self.add(gpu)
+ model_base = [mem.copy() for i in range(6)]
+ model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
+ model_text = Text('Model', font_size=24)
+ model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ model.move_to([3, -1.0, 0])
+ self.add(model)
+ model_cpu_arr = []
+ model_meta_arr = []
+ for (i, rect) in enumerate(model_base):
+ rect.set_stroke(YELLOW)
+ cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
+ if i == 0:
+ cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
+ cpu_target.set_x(cpu_target.get_x() + 0.1)
+ elif i == 3:
+ cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
+ else:
+ cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
+ self.add(cpu_target)
+ model_cpu_arr.append(cpu_target)
+ self.add(*model_cpu_arr, *model_meta_arr)
+ disk_left_col_base = [meta_mem.copy() for i in range(6)]
+ disk_right_col_base = [meta_mem.copy() for i in range(6)]
+ disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
+ disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
+ disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
+ disk_text = Text('Disk', font_size=24)
+ disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ disk.move_to([-4.0, -1.25, 0])
+ self.add(disk_text, disk_rects)
+ cpu_disk_arr = []
+ for i in range(6):
+ target = fill.copy().set_fill(BLUE, opacity=0.8)
+ target.move_to(disk_left_col_base[i]).scale(0.5)
+ cpu_disk_arr.append(target)
+ self.add(*cpu_disk_arr)
+ key = Square(side_length=2.2)
+ key.move_to([-5, 2, 0])
+ key_text = MarkupText(f"Key:\n\n● Empty Model", font_size=18)
+ key_text.move_to([-5, 2.4, 0])
+ self.add(key_text, key)
+ blue_text = MarkupText(f"● Checkpoint", font_size=18)
+ blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
+ self.add(blue_text)
+ step_5 = MarkupText(f'The offloaded weights are all sent to the CPU.', font_size=24)
+ step_5.move_to([2, 2, 0])
+ self.play(Write(step_5, run_time=3))
+ for i in range(6):
+ rect = cpu_disk_arr[i]
+ cp2 = rect.copy().set_fill(BLUE, opacity=0.8).scale(2.0)
+ cp2.generate_target()
+ cp2.target.move_to(model_base[i])
+ if i == 0:
+ rect.set_fill(BLUE, opacity=0.8)
+ rect.generate_target()
+ rect.target.move_to(cpu_left_col_base[0]).scale(2.0)
+ self.remove(*model_meta_arr, *model_cpu_arr)
+ else:
+ rect.generate_target()
+ rect.target.move_to(cpu_left_col_base[i]).scale(2.0)
+ self.play(MoveToTarget(rect), MoveToTarget(cp2), model_base[i].animate.set_stroke(WHITE))
+ self.play(FadeOut(step_5))
+ step_5 = MarkupText(f'Finally, hooks are added to each weight in the model\nto transfer the weights from CPU to GPU\n\t\tand back when needed.', font_size=24)
+ step_5.move_to([2, 2, 0])
+ self.play(Write(step_5, run_time=3))
+ arrows = []
+ animations = []
+ for i in range(6):
+ a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
+ a.next_to(model_base[i].get_left(), UP, buff=0.2)
+ arrows.append(a)
+ animations.append(Write(a))
+ self.play(*animations)
+ self.wait()
+
+# File: accelerate-main/manim_animations/big_model_inference/stage_5.py
+from manim import *
+
+class Stage5(Scene):
+
+ def construct(self):
+ mem = Rectangle(height=0.5, width=0.5)
+ fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
+ meta_mem = Rectangle(height=0.25, width=0.25)
+ cpu_left_col_base = [mem.copy() for i in range(6)]
+ cpu_right_col_base = [mem.copy() for i in range(6)]
+ cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
+ cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
+ cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
+ cpu_text = Text('CPU', font_size=24)
+ cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ cpu.move_to([-2.5, -0.5, 0])
+ self.add(cpu)
+ gpu_base = [mem.copy() for i in range(4)]
+ gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
+ gpu_text = Text('GPU', font_size=24)
+ gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ gpu.move_to([-1, -1, 0])
+ self.add(gpu)
+ model_base = [mem.copy() for i in range(6)]
+ model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
+ model_text = Text('Model', font_size=24)
+ model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ model.move_to([3, -1.0, 0])
+ self.add(model)
+ model_arr = []
+ model_cpu_arr = []
+ for (i, rect) in enumerate(model_base):
+ target = fill.copy().set_fill(BLUE, opacity=0.8)
+ target.move_to(rect)
+ model_arr.append(target)
+ cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
+ cpu_target.move_to(cpu_left_col_base[i])
+ model_cpu_arr.append(cpu_target)
+ self.add(*model_arr, *model_cpu_arr)
+ disk_left_col_base = [meta_mem.copy() for i in range(6)]
+ disk_right_col_base = [meta_mem.copy() for i in range(6)]
+ disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
+ disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
+ disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
+ disk_text = Text('Disk', font_size=24)
+ disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ disk.move_to([-4, -1.25, 0])
+ self.add(disk_text, disk_rects)
+ key = Square(side_length=2.2)
+ key.move_to([-5, 2, 0])
+ key_text = MarkupText(f"Key:\n\n● Empty Model", font_size=18)
+ key_text.move_to([-5, 2.4, 0])
+ self.add(key_text, key)
+ blue_text = MarkupText(f"● Checkpoint", font_size=18)
+ blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
+ self.add(blue_text)
+ step_6 = MarkupText(f'Now watch as an input is passed through the model\nand how the memory is utilized and handled.', font_size=24)
+ step_6.move_to([2, 2, 0])
+ self.play(Write(step_6))
+ input = Square(0.3)
+ input.set_fill(RED, opacity=1.0)
+ input.set_stroke(width=0.0)
+ input.next_to(model_base[0], LEFT, buff=0.5)
+ self.play(Write(input))
+ input.generate_target()
+ input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
+ self.play(MoveToTarget(input))
+ self.play(FadeOut(step_6))
+ a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
+ a.next_to(model_arr[0].get_left(), UP, buff=0.2)
+ model_cpu_arr[0].generate_target()
+ model_cpu_arr[0].target.move_to(gpu_rect[0])
+ step_7 = MarkupText(f'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.', font_size=24)
+ step_7.move_to([2, 2, 0])
+ self.play(Write(step_7, run_time=3))
+ circ_kwargs = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
+ self.play(Write(a), Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs), Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs), Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs))
+ self.play(MoveToTarget(model_cpu_arr[0]))
+ a_c = a.copy()
+ for i in range(6):
+ a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)
+ input.generate_target()
+ input.target.move_to(model_arr[i].get_right() + 0.02)
+ grp = AnimationGroup(FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2)
+ self.play(grp)
+ model_cpu_arr[i].generate_target()
+ model_cpu_arr[i].target.move_to(cpu_left_col_base[i])
+ if i < 5:
+ model_cpu_arr[i + 1].generate_target()
+ model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
+ if i >= 1:
+ circ_kwargs['run_time'] = 0.7
+ self.play(Circumscribe(model_arr[i], **circ_kwargs), Circumscribe(cpu_left_col_base[i], **circ_kwargs), Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs), Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs), Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs))
+ if i < 1:
+ self.play(MoveToTarget(model_cpu_arr[i]), MoveToTarget(model_cpu_arr[i + 1]))
+ else:
+ self.play(MoveToTarget(model_cpu_arr[i], run_time=0.7), MoveToTarget(model_cpu_arr[i + 1], run_time=0.7))
+ else:
+ model_cpu_arr[i].generate_target()
+ model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
+ input.generate_target()
+ input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)
+ self.play(Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs), Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs), Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs))
+ self.play(MoveToTarget(model_cpu_arr[i]))
+ a = a_c
+ a_c = a_c.copy()
+ input.generate_target()
+ input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
+ self.play(FadeOut(step_7), FadeOut(a, run_time=0.5))
+ step_8 = MarkupText(f'Inference on a model too large for GPU memory\nis successfully completed.', font_size=24)
+ step_8.move_to([2, 2, 0])
+ self.play(Write(step_8, run_time=3), MoveToTarget(input))
+ self.wait()
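+
+# Illustrative sketch (not a file from this repo): stages 2-5 above animate what
+# `load_checkpoint_and_dispatch` does. It loads the checkpoint shard by shard,
+# places weights on GPU/CPU/disk according to the device map, and installs
+# hooks (AlignDevicesHook) that move offloaded weights onto the GPU for the
+# forward pass and off again afterwards. The checkpoint path is a placeholder.
+import torch.nn as nn
+from accelerate import init_empty_weights, load_checkpoint_and_dispatch
+
+with init_empty_weights():
+    model = nn.Sequential(*[nn.Linear(4096, 4096) for _ in range(8)])
+
+model = load_checkpoint_and_dispatch(
+    model,
+    checkpoint="path/to/checkpoint",  # placeholder: weights saved for this model
+    device_map="auto",                # fill the GPU first, then CPU, then disk
+    offload_folder="offload",         # disk-offloaded weights become np.memmaps here
+)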
+
+# File: accelerate-main/manim_animations/dataloaders/stage_0.py
+from manim import *
+
+class Stage0(Scene):
+
+ def construct(self):
+ mascot = ImageMobject('mascot_bookie.png')
+ mascot.scale(0.35)
+ mascot.move_to([-3.75, -1, 0])
+ text = Paragraph('Distributed Training,\nHugging Face Accelerate,\nand PyTorch DataLoaders\n\nHow do they all interact?', font_size=36, line_spacing=1, alignment='center', weight=BOLD)
+ text.move_to([1.75, 0.5, 0])
+ self.add(mascot)
+ self.add(text)
+
+# File: accelerate-main/manim_animations/dataloaders/stage_1.py
+from manim import *
+
+class Stage01(Scene):
+
+ def construct(self):
+ mascot = ImageMobject('mascot_bookie.png')
+ mascot.scale(0.35)
+ mascot.move_to([-3.75, -1, 0])
+ text = Paragraph('Distributed Training,\nHugging Face Accelerate,\nand PyTorch DataLoaders\n\nHow do they all interact?', font_size=36, line_spacing=1, alignment='center', weight=BOLD)
+ text.move_to([1.75, 0.5, 0])
+ self.add(mascot)
+ self.add(text)
+
+# File: accelerate-main/manim_animations/dataloaders/stage_2.py
+from manim import *
+
+class Stage2(Scene):
+
+ def construct(self):
+ fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
+ columns = [VGroup(*[Rectangle(height=0.25, width=0.25, color='green') for i in range(8)]).arrange(RIGHT, buff=0) for j in range(4)]
+ dataset_recs = VGroup(*columns).arrange(UP, buff=0)
+ dataset_text = Text('Dataset', font_size=24)
+ dataset = Group(dataset_recs, dataset_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ dataset.move_to([-2, 0, 0])
+ self.add(dataset)
+ code = Code(code='dataloader = DataLoader(...)\nfor batch in dataloader():\n\t...', tab_width=4, background='window', language='Python', font='Monospace', font_size=14, corner_radius=0.2, insert_line_no=False, line_spacing=0.75, style=Code.styles_list[1])
+ code.move_to([-3.5, 2.5, 0])
+ self.add(code)
+ dataloader = Group(Rectangle(color='red', height=2, width=2), Text('DataLoader', font_size=24)).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ sampler = Group(Rectangle(color='blue', height=1, width=1), Text('Sampler', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN)
+ dataloader.move_to([1, 0, 0])
+ sampler.move_to([0.75, 0.25, 0])
+ self.add(dataloader)
+ self.add(sampler)
+ gpu_1 = Group(Rectangle(color='white', height=1, width=1), Text('GPU 1', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN).move_to([4, 2, 0])
+ gpu_2 = Group(Rectangle(color='white', height=1, width=1), Text('GPU 2', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN).move_to([4, 0.5, 0])
+ gpu_3 = Group(Rectangle(color='white', height=1, width=1), Text('GPU 3', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN).move_to([4, -1, 0])
+ gpu_4 = Group(Rectangle(color='white', height=1, width=1), Text('GPU 4', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN).move_to([4, -2.5, 0])
+ gpus = [gpu_1[0], gpu_2[0], gpu_3[0], gpu_4[0]]
+ self.add(gpu_1, gpu_2, gpu_3, gpu_4)
+ self.play(Create(gpu_1[0], run_time=0.5), Create(gpu_2[0], run_time=0.5), Create(gpu_3[0], run_time=0.5), Create(gpu_4[0], run_time=0.5), Create(dataset_recs, run_time=1), Create(sampler[0], run_time=1), Create(dataloader[0], run_time=1))
+ step_1 = MarkupText(f'Without any special care, \nthe same data is sent through each sampler, \nand the same samples are spit out on each GPU', font_size=18)
+ step_1.move_to([0, -2.5, 0])
+ self.play(Write(step_1, run_time=4))
+ first_animations = []
+ second_animations = []
+ colors = ['BLUE_E', 'DARK_BROWN', 'GOLD_E', 'GRAY_A']
+ current_color = colors[0]
+ buff = 0
+ lr_buff = 0.25
+ old_target = None
+ new_datasets = []
+ for (i, data) in enumerate(dataset_recs[-1]):
+ if i % 2 == 0:
+ current_color = 'BLUE_E'
+ dataset_target = Rectangle(height=0.46 / 2, width=0.46 / 2).set_stroke(width=0.0).set_fill(current_color, opacity=0.7)
+ dataset_target.move_to(data)
+ dataset_target.generate_target()
+ aligned_edge = ORIGIN
+ if i % 2 == 0:
+ old_target = dataset_target.target
+ buff -= 0.25
+ aligned_edge = LEFT
+ dataset_target.target.next_to(sampler, buff=buff, direction=UP, aligned_edge=LEFT)
+ else:
+ dataset_target.target.next_to(old_target, direction=RIGHT, buff=0.01)
+ new_datasets.append(dataset_target)
+ first_animations.append(data.animate(run_time=0.5).set_stroke(current_color))
+ second_animations.append(MoveToTarget(dataset_target, run_time=1.5))
+ self.play(*first_animations)
+ self.play(*second_animations)
+ self.wait()
+ move_animation = []
+ for (j, gpu) in enumerate(gpus):
+ buff = 0
+ for (i, data) in enumerate(new_datasets):
+ if i % 2 == 0:
+ current_color = colors[i // 2]
+ if j != 3:
+ data = data.copy()
+ data.generate_target()
+ aligned_edge = ORIGIN
+ if i % 2 == 0:
+ old_target = data.target
+ buff -= 0.25
+ aligned_edge = LEFT
+ data.target.next_to(gpu, buff=buff, direction=UP, aligned_edge=LEFT)
+ else:
+ data.target.next_to(old_target, direction=RIGHT, buff=0.01)
+ move_animation.append(MoveToTarget(data, run_time=1.5))
+ self.play(*move_animation)
+ self.remove(step_1)
+ step_2 = MarkupText(f'This behavior is undesirable, because we want\neach GPU to see different data for efficient training.', font_size=18)
+ step_2.move_to([0, -2.5, 0])
+ self.play(Write(step_2, run_time=2.5))
+ self.wait()
+
+# File: accelerate-main/manim_animations/dataloaders/stage_3.py
+from manim import *
+
+class Stage3(Scene):
+
+ def construct(self):
+ step_1 = MarkupText(f'To combat this, Accelerate employs one of two different\nSampler wrapper methods depending on the scenario:', font_size=24)
+ step_1.move_to([0, 1.5, 0])
+ self.add(step_1)
+ step_2 = MarkupText(f"1. Sharding the dataset before drawing:\n\t● IterableDatasetShard\n\t● BatchSamplerShard", font_size=24).next_to(step_1, direction=DOWN, aligned_edge=LEFT)
+ self.add(step_2)
+ step_3 = MarkupText(f"\n\n2. Splitting the batch after drawing:\n\t● DataLoaderDispatcher", font_size=24).next_to(step_2, direction=DOWN, aligned_edge=LEFT)
+ self.add(step_3)
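+
+# Editor's note (not part of the repo sources): neither wrapper is usually
+# instantiated directly. `accelerator.prepare(dataloader)` selects one for you:
+# the default configuration shards the sampler per process
+# (BatchSamplerShard / IterableDatasetShard, see stage_5), while
+# `dispatch_batches=True` switches to a DataLoaderDispatcher that splits the
+# batches drawn on process 0 (see stage_7).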
+
+# File: accelerate-main/manim_animations/dataloaders/stage_4.py
+from manim import *
+
+class Stage4(Scene):
+
+ def construct(self):
+ step_1 = MarkupText(f"To understand the next part fully, let's define two terms,\n`batch_size` and `global_batch_size`:", font_size=18)
+ step_1.move_to([0, 1.5, 0])
+ step_2 = MarkupText(f"\n\n● `batch_size`: \n\tThis will be defined as the batch size seen on a given\n\t*individual* GPU", font_size=18).next_to(step_1, direction=DOWN, aligned_edge=LEFT)
+ step_3 = MarkupText(f"\n\n● `global_batch_size`:\n\tThis will be defined as the *total* number of\n\tdifferent items seen in the dataset, across all GPUs", font_size=18).next_to(step_2, direction=DOWN, aligned_edge=LEFT)
+ step_4 = MarkupText(f'\n\nSo if we have a dataset of 64 items, 8 GPUs, \nand a `batch_size` of 8, each *step* will go through\nthe entire dataset one time as 8*8=64', font_size=18).next_to(step_3, direction=DOWN, aligned_edge=LEFT)
+ self.play(Write(step_1, run_time=4))
+ self.play(Write(step_2, run_time=4))
+ self.play(Write(step_3, run_time=4))
+ self.play(Write(step_4, run_time=6))
+ self.wait()
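+
+# Quick arithmetic check of the example above (editor's sketch, not part of the
+# repo sources). With the default behavior each process draws its own
+# `batch_size`, so a single step consumes `batch_size * num_processes` items.
+batch_size = 8                                       # per-GPU batch size
+num_processes = 8                                    # number of GPUs
+dataset_len = 64
+global_batch_size = batch_size * num_processes       # 8 * 8 = 64
+steps_per_epoch = dataset_len // global_batch_size   # 1: one step sees the whole dataset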
+
+# File: accelerate-main/manim_animations/dataloaders/stage_5.py
+from manim import *
+
+class Stage5(Scene):
+
+ def construct(self):
+ colors = ['BLUE_E', 'DARK_BROWN', 'GOLD_E', 'GRAY_A']
+ fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
+ columns = [VGroup(*[Rectangle(height=0.25, width=0.25, color=colors[j]) for i in range(8)]).arrange(RIGHT, buff=0) for j in range(4)]
+ dataset_recs = VGroup(*columns).arrange(UP, buff=0)
+ dataset_text = Text('Dataset', font_size=24)
+ dataset = Group(dataset_recs, dataset_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ dataset.move_to([-2, 0, 0])
+ self.add(dataset)
+ code = Code(code='# We enable this by default\naccelerator = Accelerator()\ndataloader = DataLoader(...)\ndataloader = accelerator.prepare(dataloader)\nfor batch in dataloader:\n\t...', tab_width=4, background='window', language='Python', font='Monospace', font_size=14, corner_radius=0.2, insert_line_no=False, line_spacing=0.75, style=Code.styles_list[1])
+ code.move_to([-3.5, 2.5, 0])
+ self.add(code)
+ sampler_1 = Group(Rectangle(color='blue', height=1, width=1), Text('Sampler GPU 1', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN)
+ sampler_2 = Group(Rectangle(color='blue', height=1, width=1), Text('Sampler GPU 2', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN)
+ sampler_3 = Group(Rectangle(color='blue', height=1, width=1), Text('Sampler GPU 3', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN)
+ sampler_4 = Group(Rectangle(color='blue', height=1, width=1), Text('Sampler GPU 4', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN)
+ sampler_1.move_to([2, 2, 0])
+ sampler_2.move_to([2, 0.5, 0])
+ sampler_3.move_to([2, -1.0, 0])
+ sampler_4.move_to([2, -2.5, 0])
+ self.add(sampler_1, sampler_2, sampler_3, sampler_4)
+ samplers = [sampler_1[0], sampler_2[0], sampler_3[0], sampler_4[0]]
+ gpu_1 = Group(Rectangle(color='white', height=1, width=1), Text('Output GPU 1', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN).move_to([4.5, 2, 0])
+ gpu_2 = Group(Rectangle(color='white', height=1, width=1), Text('Output GPU 2', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN).move_to([4.5, 0.5, 0])
+ gpu_3 = Group(Rectangle(color='white', height=1, width=1), Text('Output GPU 3', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN).move_to([4.5, -1, 0])
+ gpu_4 = Group(Rectangle(color='white', height=1, width=1), Text('Output GPU 4', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN).move_to([4.5, -2.5, 0])
+ gpus = [gpu_1[0], gpu_2[0], gpu_3[0], gpu_4[0]]
+ self.add(gpu_1, gpu_2, gpu_3, gpu_4)
+ self.play(Create(gpu_1[0], run_time=1), Create(gpu_2[0], run_time=1), Create(gpu_3[0], run_time=1), Create(gpu_4[0], run_time=1), Create(dataset_recs, run_time=1), Create(sampler_1[0], run_time=1), Create(sampler_2[0], run_time=1), Create(sampler_3[0], run_time=1), Create(sampler_4[0], run_time=1))
+ first_animations = []
+ second_animations = []
+ colors = ['BLUE_E', 'DARK_BROWN', 'GOLD_E', 'GRAY_A']
+ current_color = colors[0]
+ buff = 0
+ lr_buff = 0.25
+ old_target = None
+ new_datasets = []
+ for (i, row_data) in enumerate(dataset_recs):
+ new_row = []
+ current_color = colors[i]
+ if i == 0:
+ idx = -3
+ elif i == 1:
+ idx = -2
+ elif i == 2:
+ idx = -1
+ elif i == 3:
+ idx = 0
+ for (j, indiv_data) in enumerate(row_data):
+ dataset_target = Rectangle(height=0.46 / 2, width=0.46 / 2).set_stroke(width=0.0).set_fill(current_color, opacity=0.7)
+ dataset_target.move_to(indiv_data)
+ dataset_target.generate_target()
+ aligned_edge = ORIGIN
+ if j % 8 == 0:
+ aligned_edge = LEFT
+ dataset_target.target.next_to(samplers[abs(idx)].get_corner(UP + LEFT), buff=0.02, direction=RIGHT + DOWN)
+ dataset_target.target.set_x(dataset_target.target.get_x())
+ elif j % 4 == 0:
+ old_target = dataset_target.target
+ dataset_target.target.next_to(samplers[abs(idx)].get_corner(UP + LEFT), buff=0.02, direction=RIGHT + DOWN)
+ dataset_target.target.set_x(dataset_target.target.get_x())
+ dataset_target.target.set_y(dataset_target.target.get_y() - 0.25)
+ else:
+ dataset_target.target.next_to(old_target, direction=RIGHT, buff=0.02)
+ old_target = dataset_target.target
+ new_row.append(dataset_target)
+ first_animations.append(indiv_data.animate(run_time=0.5).set_stroke(current_color))
+ second_animations.append(MoveToTarget(dataset_target, run_time=1.5))
+ new_datasets.append(new_row)
+ step_1 = MarkupText(f"Since we split the dataset across the GPUs,\nthe model's weights can be averaged during `backward()`,\nacting as though we did one giant epoch\nvery quickly.", font_size=18)
+ step_1.move_to([-2.5, -2, 0])
+ self.play(Write(step_1, run_time=3))
+ self.play(*first_animations)
+ self.play(*second_animations)
+ self.wait(duration=0.5)
+ move_animation = []
+ import random
+ for (i, row) in enumerate(new_datasets):
+ current_color = colors[i]
+ if i == 0:
+ idx = -3
+ elif i == 1:
+ idx = -2
+ elif i == 2:
+ idx = -1
+ elif i == 3:
+ idx = 0
+ for (j, indiv_data) in enumerate(row):
+ indiv_data.generate_target()
+ aligned_edge = ORIGIN
+ if j % 8 == 0:
+ aligned_edge = LEFT
+ indiv_data.target.next_to(gpus[abs(idx)].get_corner(UP + LEFT), buff=0.02, direction=RIGHT + DOWN)
+ indiv_data.target.set_x(indiv_data.target.get_x())
+ elif j % 4 == 0:
+ indiv_data.target.next_to(gpus[abs(idx)].get_corner(UP + LEFT), buff=0.02, direction=RIGHT + DOWN)
+ indiv_data.target.set_x(indiv_data.target.get_x())
+ indiv_data.target.set_y(indiv_data.target.get_y() - 0.25)
+ else:
+ indiv_data.target.next_to(old_target, direction=RIGHT, buff=0.02)
+ old_target = indiv_data.target
+ move_animation.append(MoveToTarget(indiv_data, run_time=1.5))
+ self.play(*move_animation)
+ self.wait()
+
+# File: accelerate-main/manim_animations/dataloaders/stage_6.py
+from manim import *
+
+class Stage6(Scene):
+
+ def construct(self):
+ colors = ['BLUE_E', 'DARK_BROWN', 'GOLD_E', 'GRAY_A']
+ fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
+ columns = [VGroup(*[Rectangle(height=0.25, width=0.25, color=colors[j]) for i in range(8)]).arrange(RIGHT, buff=0) for j in range(4)]
+ dataset_recs = VGroup(*columns).arrange(UP, buff=0)
+ dataset_text = Text('Dataset', font_size=24)
+ dataset = Group(dataset_recs, dataset_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ dataset.move_to([-2, 0, 0])
+ self.add(dataset)
+ code = Code(code='# We enable this by default\naccelerator = Accelerator()\ndataloader = DataLoader(..., shuffle=True)\ndataloader = accelerator.prepare(dataloader)\nfor batch in dataloader:\n\t...', tab_width=4, background='window', language='Python', font='Monospace', font_size=14, corner_radius=0.2, insert_line_no=False, line_spacing=0.75, style=Code.styles_list[1])
+ code.move_to([-3.5, 2.5, 0])
+ self.add(code)
+ sampler_1 = Group(Rectangle(color='blue', height=1, width=1), Text('Sampler GPU 1', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN)
+ sampler_2 = Group(Rectangle(color='blue', height=1, width=1), Text('Sampler GPU 2', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN)
+ sampler_3 = Group(Rectangle(color='blue', height=1, width=1), Text('Sampler GPU 3', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN)
+ sampler_4 = Group(Rectangle(color='blue', height=1, width=1), Text('Sampler GPU 4', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN)
+ sampler_1.move_to([2, 2, 0])
+ sampler_2.move_to([2, 0.5, 0])
+ sampler_3.move_to([2, -1.0, 0])
+ sampler_4.move_to([2, -2.5, 0])
+ self.add(sampler_1, sampler_2, sampler_3, sampler_4)
+ samplers = [sampler_1[0], sampler_2[0], sampler_3[0], sampler_4[0]]
+ gpu_1 = Group(Rectangle(color='white', height=1, width=1), Text('Output GPU 1', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN).move_to([4.5, 2, 0])
+ gpu_2 = Group(Rectangle(color='white', height=1, width=1), Text('Output GPU 2', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN).move_to([4.5, 0.5, 0])
+ gpu_3 = Group(Rectangle(color='white', height=1, width=1), Text('Output GPU 3', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN).move_to([4.5, -1, 0])
+ gpu_4 = Group(Rectangle(color='white', height=1, width=1), Text('Output GPU 4', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN).move_to([4.5, -2.5, 0])
+ gpus = [gpu_1[0], gpu_2[0], gpu_3[0], gpu_4[0]]
+ self.add(gpu_1, gpu_2, gpu_3, gpu_4)
+ first_animations = []
+ second_animations = []
+ colors = ['BLUE_E', 'DARK_BROWN', 'GOLD_E', 'GRAY_A']
+ current_color = colors[0]
+ buff = 0
+ lr_buff = 0.25
+ old_target = None
+ new_datasets = []
+ for (i, row_data) in enumerate(dataset_recs):
+ new_row = []
+ current_color = colors[i]
+ if i == 0:
+ idx = -3
+ elif i == 1:
+ idx = -2
+ elif i == 2:
+ idx = -1
+ elif i == 3:
+ idx = 0
+ for (j, indiv_data) in enumerate(row_data):
+ dataset_target = Rectangle(height=0.46 / 2, width=0.46 / 2).set_stroke(width=0.0).set_fill(current_color, opacity=0.7)
+ dataset_target.move_to(indiv_data)
+ dataset_target.generate_target()
+ aligned_edge = ORIGIN
+ if j % 8 == 0:
+ aligned_edge = LEFT
+ old_target = dataset_target.target
+ dataset_target.target.next_to(samplers[abs(idx)].get_corner(UP + LEFT), buff=0.02, direction=RIGHT + DOWN)
+ dataset_target.target.set_x(dataset_target.target.get_x())
+ elif j % 4 == 0:
+ old_target = dataset_target.target
+ dataset_target.target.next_to(samplers[abs(idx)].get_corner(UP + LEFT), buff=0.02, direction=RIGHT + DOWN)
+ dataset_target.target.set_x(dataset_target.target.get_x())
+ dataset_target.target.set_y(dataset_target.target.get_y() - 0.25)
+ else:
+ dataset_target.target.next_to(old_target, direction=RIGHT, buff=0.02)
+ old_target = dataset_target.target
+ new_row.append(dataset_target)
+ first_animations.append(indiv_data.animate(run_time=0.5).set_stroke(current_color))
+ second_animations.append(MoveToTarget(dataset_target, run_time=1.5))
+ new_datasets.append(new_row)
+ step_1 = MarkupText(f"During shuffling, each mini-batch's\noutput order will be modified", font_size=18)
+ step_1.move_to([-1.5, -2, 0])
+ self.play(Write(step_1, run_time=3))
+ self.play(*first_animations)
+ self.play(*second_animations)
+ self.wait(duration=0.5)
+ move_animation = []
+ import random
+ for (i, row) in enumerate(new_datasets):
+ row = [row[k] for k in random.sample(range(8), 8)]
+ current_color = colors[i]
+ if i == 0:
+ idx = -3
+ elif i == 1:
+ idx = -2
+ elif i == 2:
+ idx = -1
+ elif i == 3:
+ idx = 0
+ for (j, indiv_data) in enumerate(row):
+ indiv_data.generate_target()
+ aligned_edge = ORIGIN
+ if j % 8 == 0:
+ aligned_edge = LEFT
+ indiv_data.target.next_to(gpus[abs(idx)].get_corner(UP + LEFT), buff=0.02, direction=RIGHT + DOWN)
+ indiv_data.target.set_x(indiv_data.target.get_x())
+ elif j % 4 == 0:
+ indiv_data.target.next_to(gpus[abs(idx)].get_corner(UP + LEFT), buff=0.02, direction=RIGHT + DOWN)
+ indiv_data.target.set_x(indiv_data.target.get_x())
+ indiv_data.target.set_y(indiv_data.target.get_y() - 0.25)
+ else:
+ indiv_data.target.next_to(old_target, direction=RIGHT, buff=0.02)
+ old_target = indiv_data.target
+ move_animation.append(MoveToTarget(indiv_data, run_time=1.5))
+ self.play(*move_animation)
+ self.wait()
+
+# File: accelerate-main/manim_animations/dataloaders/stage_7.py
+from manim import *
+
+class Stage7(Scene):
+
+ def construct(self):
+ code = Code(code='accelerator = Accelerator(dispatch_batches=True)\ndataloader = DataLoader(...)\ndataloader = accelerator.prepare(dataloader)\nfor batch in dataloader:\n\t...', tab_width=4, background='window', language='Python', font='Monospace', font_size=14, corner_radius=0.2, insert_line_no=False, line_spacing=0.75, style=Code.styles_list[1])
+ code.move_to([-3.5, 2.5, 0])
+ self.add(code)
+ colors = ['BLUE_E', 'DARK_BROWN', 'GOLD_E', 'GRAY_A']
+ fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
+ columns = [VGroup(*[Rectangle(height=0.25, width=0.25, color=colors[j]) for i in range(8)]).arrange(RIGHT, buff=0) for j in range(4)]
+ dataset_recs = VGroup(*columns).arrange(UP, buff=0)
+ dataset_text = Text('Dataset', font_size=24)
+ dataset = Group(dataset_recs, dataset_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
+ dataset.move_to([-2, 0, 0])
+ self.add(dataset)
+ sampler_1 = Group(Rectangle(color='blue', height=1.02, width=1.02), Text('Sampler GPU 1', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN)
+ sampler_2 = Group(Rectangle(color='blue', height=1.02, width=1.02), Text('Sampler GPU 2', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN)
+ sampler_3 = Group(Rectangle(color='blue', height=1.02, width=1.02), Text('Sampler GPU 3', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN)
+ sampler_4 = Group(Rectangle(color='blue', height=1.02, width=1.02), Text('Sampler GPU 4', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN)
+ sampler_1.move_to([2, 2, 0])
+ sampler_2.move_to([2, 0.5, 0])
+ sampler_3.move_to([2, -1.0, 0])
+ sampler_4.move_to([2, -2.5, 0])
+ self.add(sampler_1, sampler_2, sampler_3, sampler_4)
+ samplers = [sampler_1[0], sampler_2[0], sampler_3[0], sampler_4[0]]
+ gpu_1 = Group(Rectangle(color='white', height=1.02, width=0.98), Text('Output GPU 1', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN).move_to([4.5, 2, 0])
+ gpu_2 = Group(Rectangle(color='white', height=1.02, width=0.98), Text('Output GPU 2', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN).move_to([4.5, 0.5, 0])
+ gpu_3 = Group(Rectangle(color='white', height=1.02, width=0.98), Text('Output GPU 3', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN).move_to([4.5, -1, 0])
+ gpu_4 = Group(Rectangle(color='white', height=1.02, width=0.98), Text('Output GPU 4', font_size=12)).arrange(DOWN, buff=0.25, aligned_edge=DOWN).move_to([4.5, -2.5, 0])
+ gpus = [gpu_1[0], gpu_2[0], gpu_3[0], gpu_4[0]]
+ self.add(gpu_1, gpu_2, gpu_3, gpu_4)
+ step_1 = MarkupText(f"When using a `DataLoaderDispatcher`, all\nof the samples are collected from GPU 0's dataset,\nthen divided and sent to each GPU.\nAs a result, this will be slower.", font_size=18)
+ step_1.move_to([-2.5, -2, 0])
+ self.play(Write(step_1, run_time=3.5))
+ first_animations = []
+ second_animations = []
+ colors = ['BLUE_E', 'DARK_BROWN', 'GOLD_E', 'GRAY_A']
+ current_color = colors[0]
+ ud_buff = 0.01
+ lr_buff = 0.01
+ old_target = None
+ new_datasets = []
+ for (i, row_data) in enumerate(dataset_recs):
+ new_row = []
+ current_color = colors[i]
+ for (j, indiv_data) in enumerate(row_data):
+ dataset_target = Rectangle(height=0.46 / 4, width=0.46 / 2).set_stroke(width=0.0).set_fill(current_color, opacity=0.7)
+ dataset_target.move_to(indiv_data)
+ dataset_target.generate_target()
+ aligned_edge = ORIGIN
+ if j % 8 == 0:
+ aligned_edge = LEFT
+ dataset_target.target.next_to(samplers[0].get_corner(DOWN + LEFT), buff=0.0125, direction=RIGHT + UP)
+ dataset_target.target.set_x(dataset_target.target.get_x())
+ dataset_target.target.set_y(dataset_target.target.get_y() + 0.25 * i)
+ elif j % 4 == 0:
+ old_target = dataset_target.target
+ dataset_target.target.next_to(samplers[0].get_corner(DOWN + LEFT), buff=0.0125, direction=RIGHT + UP)
+ dataset_target.target.set_x(dataset_target.target.get_x())
+ dataset_target.target.set_y(dataset_target.target.get_y() + 0.125 + 0.25 * i)
+ else:
+ dataset_target.target.next_to(old_target, direction=RIGHT, buff=0.0125)
+ old_target = dataset_target.target
+ new_row.append(dataset_target)
+ first_animations.append(indiv_data.animate(run_time=0.5).set_stroke(current_color))
+ second_animations.append(MoveToTarget(dataset_target, run_time=1.5))
+ new_datasets.append(new_row)
+ self.play(*first_animations)
+ self.play(*second_animations)
+ move_animation = []
+ for (i, row) in enumerate(new_datasets):
+ current_color = colors[i]
+ if i == 0:
+ idx = -3
+ elif i == 1:
+ idx = -2
+ elif i == 2:
+ idx = -1
+ elif i == 3:
+ idx = 0
+ for (j, indiv_data) in enumerate(row):
+ indiv_data.generate_target()
+ indiv_data.animate.stretch_to_fit_height(0.46 / 2)
+ aligned_edge = ORIGIN
+ if j % 8 == 0:
+ aligned_edge = LEFT
+ indiv_data.target.next_to(gpus[abs(idx)].get_corner(UP + LEFT), buff=0.01, direction=RIGHT + DOWN)
+ indiv_data.target.set_x(indiv_data.target.get_x())
+ indiv_data.target.set_y(indiv_data.target.get_y() - 0.25)
+ elif j % 4 == 0:
+ indiv_data.target.next_to(gpus[abs(idx)].get_corner(UP + LEFT), buff=0.01, direction=RIGHT + DOWN)
+ indiv_data.target.set_x(indiv_data.target.get_x())
+ else:
+ indiv_data.target.next_to(old_target, direction=RIGHT, buff=0.01)
+ old_target = indiv_data.target
+ move_animation.append(MoveToTarget(indiv_data, run_time=1.5))
+ self.play(*move_animation)
+ self.wait()
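+
+# Illustrative sketch (not a file from this repo): the `dispatch_batches=True`
+# behavior animated above can also be requested through the
+# `DataLoaderConfiguration` object that `Accelerator.__init__` accepts as
+# `dataloader_config` (see accelerator.py below). The toy dataset is a
+# placeholder.
+import torch
+from torch.utils.data import DataLoader, TensorDataset
+from accelerate import Accelerator, DataLoaderConfiguration
+
+dataset = TensorDataset(torch.arange(64, dtype=torch.float32))
+dataloader = DataLoader(dataset, batch_size=8)
+
+accelerator = Accelerator(dataloader_config=DataLoaderConfiguration(dispatch_batches=True))
+dataloader = accelerator.prepare(dataloader)  # process 0 draws each batch, slices are dispatched
+for batch in dataloader:
+    ...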
+
+# File: accelerate-main/src/accelerate/__init__.py
+__version__ = '0.35.0.dev0'
+from .accelerator import Accelerator
+from .big_modeling import cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch
+from .data_loader import skip_first_batches
+from .inference import prepare_pippy
+from .launchers import debug_launcher, notebook_launcher
+from .state import PartialState
+from .utils import AutocastKwargs, DataLoaderConfiguration, DDPCommunicationHookType, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, ProfileKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states
+if is_rich_available():
+ from .utils import rich
+
+# File: accelerate-main/src/accelerate/accelerator.py
+from __future__ import annotations
+import contextlib
+import functools
+import json
+import math
+import os
+import re
+import shutil
+import sys
+import warnings
+from collections import OrderedDict
+from contextlib import contextmanager
+from functools import partial
+from types import MethodType
+from typing import Any, Callable, Union
+import torch
+import torch.utils.hooks as hooks
+from huggingface_hub import split_torch_state_dict_into_shards
+from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state
+from .data_loader import DataLoaderDispatcher, prepare_data_loader, skip_first_batches
+from .hooks import AlignDevicesHook
+from .logging import get_logger
+from .optimizer import AcceleratedOptimizer
+from .scheduler import AcceleratedScheduler
+from .state import AcceleratorState, GradientState, PartialState
+from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers
+from .utils import MODEL_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SAFE_WEIGHTS_PATTERN_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, WEIGHTS_PATTERN_NAME, AutocastKwargs, DataLoaderConfiguration, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FP8RecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProfileKwargs, ProjectConfiguration, RNGType, TorchDynamoPlugin, apply_fp8_autowrap, check_os_kernel, clean_state_dict_for_safetensors, compare_versions, convert_model, convert_outputs_to_fp32, extract_model_from_parallel, gather, gather_object, get_mixed_precision_context_manager, get_pretty_name, is_bf16_available, is_deepspeed_available, is_ipex_available, is_lomo_available, is_megatron_lm_available, is_mlu_available, is_msamp_available, is_musa_available, is_npu_available, is_torch_version, is_torch_xla_available, is_transformer_engine_available, is_xpu_available, load_fsdp_model, load_fsdp_optimizer, pad_across_processes, parse_choice_from_env, recursively_apply, reduce, release_memory, save, save_fsdp_model, save_fsdp_optimizer, wait_for_everyone
+from .utils.constants import FSDP_PYTORCH_VERSION, PROFILE_PATTERN_NAME
+from .utils.modeling import get_state_dict_offloaded_model
+from .utils.other import is_compiled_module
+if is_deepspeed_available():
+ from .utils import DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler
+if is_megatron_lm_available():
+ from .utils import MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, megatron_lm_initialize, megatron_lm_prepare_data_loader, megatron_lm_prepare_model_optimizer_scheduler
+from torch.distributed.algorithms.join import Join
+if is_torch_xla_available():
+ import torch_xla.amp as xamp
+ import torch_xla.core.xla_model as xm
+ import torch_xla.distributed.xla_multiprocessing as xmp
+if is_npu_available(check_device=False):
+ import torch_npu
+try:
+ from torch.optim.lr_scheduler import LRScheduler
+except ImportError:
+ from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
+logger = get_logger(__name__)
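+# Sentinel defaults: they let `Accelerator.__init__` detect whether these
+# dataloader options were explicitly passed by the caller instead of being
+# configured through `DataLoaderConfiguration`.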
+_split_batches = object()
+_dispatch_batches = object()
+_even_batches = object()
+_use_seedable_sampler = object()
+
+class Accelerator:
+
+ def __init__(self, device_placement: bool=True, split_batches: bool=_split_batches, mixed_precision: PrecisionType | str | None=None, gradient_accumulation_steps: int=1, cpu: bool=False, dataloader_config: DataLoaderConfiguration | None=None, deepspeed_plugin: DeepSpeedPlugin | dict[str, DeepSpeedPlugin] | None=None, fsdp_plugin: FullyShardedDataParallelPlugin | None=None, megatron_lm_plugin: MegatronLMPlugin | None=None, rng_types: list[str | RNGType] | None=None, log_with: str | LoggerType | GeneralTracker | list[str | LoggerType | GeneralTracker] | None=None, project_dir: str | os.PathLike | None=None, project_config: ProjectConfiguration | None=None, gradient_accumulation_plugin: GradientAccumulationPlugin | None=None, step_scheduler_with_optimizer: bool=True, kwargs_handlers: list[KwargsHandler] | None=None, dynamo_backend: DynamoBackend | str | None=None, deepspeed_plugins: DeepSpeedPlugin | dict[str, DeepSpeedPlugin] | None=None):
+ self.trackers = []
+ if project_config is not None:
+ self.project_configuration = project_config
+ else:
+ self.project_configuration = ProjectConfiguration(project_dir=project_dir)
+ if project_dir is not None and self.project_dir is None:
+ self.project_configuration.set_directories(project_dir)
+ if mixed_precision is not None:
+ mixed_precision = str(mixed_precision)
+ if mixed_precision not in PrecisionType:
+ raise ValueError(f'Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}')
+ dynamo_plugin = TorchDynamoPlugin() if dynamo_backend is None else TorchDynamoPlugin(backend=dynamo_backend)
+ if deepspeed_plugins is not None and deepspeed_plugin is not None:
+ raise ValueError('You cannot pass in both `deepspeed_plugins` and `deepspeed_plugin`.')
+ elif deepspeed_plugin is not None:
+ deepspeed_plugins = deepspeed_plugin
+ if deepspeed_plugins is None:
+ if PartialState._shared_state != {} and PartialState().distributed_type == DistributedType.DEEPSPEED:
+ deepspeed_plugins = AcceleratorState().deepspeed_plugins
+ else:
+ deepspeed_plugins = DeepSpeedPlugin() if os.environ.get('ACCELERATE_USE_DEEPSPEED', 'false') == 'true' else None
+ else:
+ if PartialState().distributed_type == DistributedType.DEEPSPEED and AcceleratorState._shared_state != {} and (AcceleratorState().deepspeed_plugins is not None):
+ raise NotImplementedError('You cannot pass in a `deepspeed_plugin` when creating a second `Accelerator`. Please make sure the first `Accelerator` is initialized with all the plugins you want to use.')
+ if isinstance(deepspeed_plugins, dict):
+ for plugin in deepspeed_plugins.values():
+ if not isinstance(plugin, DeepSpeedPlugin):
+ raise TypeError('`deepspeed_plugin` must be a DeepSpeedPlugin object.')
+ if deepspeed_plugins is not None:
+ os.environ['ACCELERATE_USE_DEEPSPEED'] = 'true'
+ if not is_deepspeed_available():
+ raise ImportError('DeepSpeed is not installed => run `pip install deepspeed` or build it from source.')
+ if is_mlu_available():
+ if compare_versions('deepspeed-mlu', '<', '0.10.1'):
+ raise ImportError('DeepSpeed MLU version must be >= 0.10.1. Please update DeepSpeed MLU.')
+ elif is_musa_available():
+ if compare_versions('deepspeed', '>', '0.14.3'):
+ raise ImportError('DeepSpeed MUSA version must be <= 0.14.3. Please downgrade DeepSpeed.')
+ elif compare_versions('deepspeed', '<', '0.9.3'):
+ raise ImportError('DeepSpeed version must be >= 0.9.3. Please update DeepSpeed.')
+ mixed_precision = os.environ.get('ACCELERATE_MIXED_PRECISION', 'no') if mixed_precision is None else mixed_precision
+ if not isinstance(deepspeed_plugins, dict):
+ deepspeed_plugins.set_mixed_precision(mixed_precision)
+ deepspeed_plugins.select(_from_accelerator_state=True)
+ else:
+ for plugin in deepspeed_plugins.values():
+ plugin.set_mixed_precision(mixed_precision)
+ first_plugin = next(iter(deepspeed_plugins.values()))
+ first_plugin.select(_from_accelerator_state=True)
+ self.deepspeed_engine_wrapped = None
+ if os.environ.get('ACCELERATE_USE_FSDP', 'false') == 'true' or isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):
+ if not is_torch_version('>=', FSDP_PYTORCH_VERSION):
+ raise ValueError(f'FSDP requires PyTorch >= {FSDP_PYTORCH_VERSION}')
+ if fsdp_plugin is None:
+ fsdp_plugin = FullyShardedDataParallelPlugin() if os.environ.get('ACCELERATE_USE_FSDP', 'false') == 'true' else None
+ else:
+ if not isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):
+ raise TypeError('`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.')
+ os.environ['ACCELERATE_USE_FSDP'] = 'true'
+ if megatron_lm_plugin is None:
+ megatron_lm_plugin = MegatronLMPlugin() if os.environ.get('ACCELERATE_USE_MEGATRON_LM', 'false') == 'true' else None
+ else:
+ if not isinstance(megatron_lm_plugin, MegatronLMPlugin):
+ raise TypeError('`megatron_lm_plugin` must be a MegatronLMPlugin object.')
+ os.environ['ACCELERATE_USE_MEGATRON_LM'] = 'true'
+ if megatron_lm_plugin:
+ if not is_megatron_lm_available():
+ raise ImportError('Megatron is not installed. Please build it from source.')
+ self.ddp_handler = None
+ self.scaler_handler = None
+ self.init_handler = None
+ self.fp8_recipe_handler = None
+ self.autocast_handler = None
+ self.profile_handler = None
+ self.has_lomo_optimizer = False
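+ # Route each passed KwargsHandler to its dedicated attribute; passing two
+ # handlers of the same type raises a ValueError below.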
+ if kwargs_handlers is not None:
+ for handler in kwargs_handlers:
+ assert isinstance(handler, KwargsHandler), f'Unsupported kwargs handler passed: {handler}, must be one that inherits `accelerate.utils.KwargsHandler`.'
+ if isinstance(handler, DistributedDataParallelKwargs):
+ if self.ddp_handler is not None:
+ raise ValueError('You can only pass one `DistributedDataParallelKwargs` in `kwargs_handler`.')
+ else:
+ self.ddp_handler = handler
+ elif isinstance(handler, GradScalerKwargs):
+ if self.scaler_handler is not None:
+ raise ValueError('You can only pass one `GradScalerKwargs` in `kwargs_handler`.')
+ else:
+ self.scaler_handler = handler
+ elif isinstance(handler, InitProcessGroupKwargs):
+ if self.init_handler is not None:
+ raise ValueError('You can only pass one `InitProcessGroupKwargs` in `kwargs_handler`.')
+ else:
+ self.init_handler = handler
+ elif isinstance(handler, FP8RecipeKwargs):
+ if self.fp8_recipe_handler is not None:
+ raise ValueError('You can only pass one `FP8RecipeKwargs` in `kwargs_handler`.')
+ else:
+ self.fp8_recipe_handler = handler
+ elif isinstance(handler, AutocastKwargs):
+ if self.autocast_handler is not None:
+ raise ValueError('You can only pass one `AutocastKwargs` in `kwargs_handler`.')
+ else:
+ self.autocast_handler = handler
+ elif isinstance(handler, ProfileKwargs):
+ if self.profile_handler is not None:
+ raise ValueError('You can only pass one `ProfileKwargs` in `kwargs_handler`.')
+ else:
+ self.profile_handler = handler
+ kwargs = self.init_handler.to_kwargs() if self.init_handler is not None else {}
+ self.state = AcceleratorState(mixed_precision=mixed_precision, cpu=cpu, dynamo_plugin=dynamo_plugin, deepspeed_plugin=deepspeed_plugins, fsdp_plugin=fsdp_plugin, megatron_lm_plugin=megatron_lm_plugin, _from_accelerator=True, **kwargs)
+ if self.state.mixed_precision == 'fp8' and self.fp8_recipe_handler is None:
+ self.fp8_recipe_handler = FP8RecipeKwargs()
+ self.delayed_fp8_autocast = False
+ if self.fp8_recipe_handler is not None:
+ if self.state.mixed_precision != 'fp8' and self.distributed_type not in (DistributedType.FSDP, DistributedType.DEEPSPEED):
+ raise ValueError("Passing in a `FP8RecipeKwargs` object requires setting `mixed_precision='fp8'`.")
+ self.delayed_fp8_autocast = self.fp8_recipe_handler.backend == 'TE' and self.distributed_type in (DistributedType.MULTI_GPU, DistributedType.FSDP)
+ trackers = filter_trackers(log_with, self.logging_dir)
+ if len(trackers) < 1 and log_with is not None:
+ warnings.warn(f'`log_with={log_with}` was passed but no supported trackers are currently installed.')
+ self.log_with = trackers
+ if mixed_precision != 'bf16' and getattr(self.state, 'downcast_bfloat', False) and (self.state.distributed_type != DistributedType.XLA):
+ raise ValueError("Can only use `downcast_bf16` when using `mixed_precision='bf16'` and on a TPU")
+ if gradient_accumulation_plugin is not None:
+ if gradient_accumulation_steps != 1:
+ raise ValueError('You can only pass one of `gradient_accumulation_steps` and `gradient_accumulation_plugin`. Please only pass in the created `GradientAccumulationPlugin` object.')
+ else:
+ gradient_accumulation_steps = int(parse_choice_from_env('ACCELERATE_GRADIENT_ACCUMULATION_STEPS', gradient_accumulation_steps))
+ gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=gradient_accumulation_steps)
+ self.gradient_state = GradientState(gradient_accumulation_plugin=gradient_accumulation_plugin)
+ self.device_placement = device_placement
+ if dataloader_config is None:
+ dataloader_config = DataLoaderConfiguration()
+ self.dataloader_config = dataloader_config
+ self.step_scheduler_with_optimizer = step_scheduler_with_optimizer
+ self.scaler = None
+ self.native_amp = False
+ if self.state.mixed_precision == 'fp16' and self.device.type != 'cpu' and (self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM)):
+ self.native_amp = True
+ if self.device.type not in ('xpu', 'cuda', 'npu', 'xla', 'mlu', 'musa') or is_torch_xla_available(check_is_tpu=True):
+ raise ValueError(f'fp16 mixed precision requires a GPU (not {self.device.type!r}).')
+ kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}
+ if self.distributed_type == DistributedType.FSDP:
+ from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
+ self.scaler = ShardedGradScaler(**kwargs)
+ elif is_torch_xla_available(check_is_gpu=True):
+ self.scaler = xamp.GradScaler(**kwargs)
+ elif is_mlu_available():
+ self.scaler = torch.mlu.amp.GradScaler(**kwargs)
+ elif is_musa_available():
+ self.scaler = torch.musa.amp.GradScaler(**kwargs)
+ elif is_npu_available():
+ self.scaler = torch.npu.amp.GradScaler(**kwargs)
+ elif is_xpu_available():
+ self.scaler = torch.amp.GradScaler('xpu', **kwargs)
+ else:
+ self.scaler = torch.cuda.amp.GradScaler(**kwargs)
+ elif self.state.mixed_precision == 'bf16' and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM):
+ if self.device.type in ['cpu', 'xpu']:
+ self.native_amp = True
+ else:
+ self.native_amp = is_bf16_available(True)
+ if mixed_precision == 'bf16' and (not self.native_amp) and (not is_torch_xla_available()):
+ raise ValueError('bf16 mixed precision requires PyTorch >= 1.10 and a supported device.')
+ elif self.state.mixed_precision == 'fp8':
+ self.native_amp = True
+ if self.fp8_backend == 'MSAMP':
+ if self.distributed_type == DistributedType.FSDP:
+ raise NotImplementedError('`accelerate` + `MS-AMP` + `FSDP` is not supported at this time. Please consider using DeepSpeed, which is supported.')
+ elif self.distributed_type != DistributedType.DEEPSPEED:
+ self.scaler = torch.cuda.amp.GradScaler()
+ self.step = 0
+ self._optimizers = []
+ self._models = []
+ self._schedulers = []
+ self._dataloaders = []
+ self._custom_objects = []
+ self._load_model_state_pre_hook = OrderedDict()
+ self._save_model_state_pre_hook = OrderedDict()
+ self.rng_types = rng_types
+ if self.rng_types is None:
+ self.rng_types = ['generator']
+ self.flag_tensor = None
+ check_os_kernel()
+
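+ # The read-only properties below simply proxy the shared `AcceleratorState` and the
+ # `DataLoaderConfiguration`, so user code can query the distributed setup (device,
+ # process indices, batch-handling flags) without reaching into internal state objects.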
+ @property
+ def deepspeed_plugin(self):
+ return self.state.deepspeed_plugin
+
+ @property
+ def use_distributed(self):
+ return self.state.use_distributed
+
+ @property
+ def distributed_type(self):
+ return self.state.distributed_type
+
+ @property
+ def num_processes(self):
+ return self.state.num_processes
+
+ @property
+ def process_index(self):
+ return self.state.process_index
+
+ @property
+ def local_process_index(self):
+ return self.state.local_process_index
+
+ @property
+ def device(self):
+ return self.state.device
+
+ @property
+ def split_batches(self):
+ return self.dataloader_config.split_batches
+
+ @property
+ def dispatch_batches(self):
+ return self.dataloader_config.dispatch_batches
+
+ @property
+ def even_batches(self):
+ return self.dataloader_config.even_batches
+
+ @even_batches.setter
+ def even_batches(self, value: bool):
+ self.dataloader_config.even_batches = value
+
+ @property
+ def use_seedable_sampler(self):
+ return self.dataloader_config.use_seedable_sampler
+
+ @property
+ def non_blocking(self):
+ return self.dataloader_config.non_blocking
+
+ @property
+ def use_stateful_dataloader(self):
+ if hasattr(self.dataloader_config, 'use_stateful_dataloader'):
+ return self.dataloader_config.use_stateful_dataloader
+ return False
+
+ @property
+ def project_dir(self):
+ return self.project_configuration.project_dir
+
+ @property
+ def logging_dir(self):
+ return self.project_configuration.logging_dir
+
+ @property
+ def save_iteration(self):
+ return self.project_configuration.iteration
+
+ @property
+ def is_main_process(self):
+ return self.state.is_main_process
+
+ @property
+ def is_local_main_process(self):
+ return self.state.is_local_main_process
+
+ @property
+ def is_last_process(self):
+ return self.process_index == self.num_processes - 1
+
+ @property
+ def mixed_precision(self):
+ return self.state.mixed_precision
+
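+ # Splits `inputs` so each process receives its own slice; with `apply_padding=True` the
+ # shorter slices are padded so every rank receives the same number of items.
+ # Illustrative usage sketch (the prompt list and `generate` helper are hypothetical,
+ # not part of this API):
+ #     with accelerator.split_between_processes(["cat", "dog", "bird"]) as prompts:
+ #         outputs = [generate(p) for p in prompts]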
+ @contextmanager
+ def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool=False):
+ with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs:
+ yield inputs
+
+ def on_main_process(self, function: Callable[..., Any]=None):
+ if function is None:
+ if 'Accelerator.' in self.__qualname__:
+ function = self
+ else:
+ raise ValueError('The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object.')
+
+ def _inner(*args, **kwargs):
+ return PartialState().on_main_process(function)(*args, **kwargs)
+ return _inner
+
+ def on_local_main_process(self, function: Callable[..., Any]=None):
+ if function is None:
+ if 'Accelerator.' in self.__qualname__:
+ function = self
+ else:
+ raise ValueError('The `on_local_main_process` decorator must be called with a function on an instantiated `Accelerator` object.')
+
+ def _inner(*args, **kwargs):
+ return PartialState().on_local_main_process(function)(*args, **kwargs)
+ return _inner
+
+ def on_last_process(self, function: Callable[..., Any]):
+ if function is None:
+ if 'Accelerator.' in self.__qualname__:
+ function = self
+ else:
+ raise ValueError('The `on_last_process` decorator must be called with a function on an instantiated `Accelerator` object.')
+
+ def _inner(*args, **kwargs):
+ return PartialState().on_last_process(function)(*args, **kwargs)
+ return _inner
+
+ def on_process(self, function: Callable[..., Any]=None, process_index: int=None):
+ if self is not None and process_index is not None and (function is None):
+ return partial(self.on_process, process_index=process_index)
+ if function is None:
+ if 'Accelerator.' in self.__qualname__:
+ function = self
+ else:
+ raise ValueError('The `on_process` decorator must be called with a function on an instantiated `Accelerator` object.')
+
+ def _inner(*args, **kwargs):
+ return PartialState().on_process(function, process_index)(*args, **kwargs)
+ return _inner
+
+ def on_local_process(self, function: Callable[..., Any]=None, local_process_index: int=None):
+ if self is not None and local_process_index is not None and (function is None):
+ return partial(self.on_local_process, local_process_index=local_process_index)
+ if function is None:
+ if 'Accelerator.' in self.__qualname__:
+ function = self
+ else:
+ raise ValueError('The `on_local_process` decorator must be called with a function on an instantiated `Accelerator` object.')
+
+ def _inner(*args, **kwargs):
+ return PartialState().on_local_process(function, local_process_index)(*args, **kwargs)
+ return _inner
+
+ @contextmanager
+ def main_process_first(self):
+ with self.state.main_process_first():
+ yield
+
+ @contextmanager
+ def local_main_process_first(self):
+ with self.state.local_main_process_first():
+ yield
+
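+ # Disables gradient synchronization for the wrapped `model` while the context is active,
+ # falling back to a null context when training is not distributed or the wrapper exposes
+ # no `no_sync` method.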
+ @contextmanager
+ def no_sync(self, model):
+ context = contextlib.nullcontext
+ if self.use_distributed:
+ context = getattr(model, 'no_sync', context)
+ with context():
+ yield
+
+ @staticmethod
+ @contextmanager
+ def trigger_sync_in_backward(model):
+ if not isinstance(model, torch.nn.parallel.DistributedDataParallel):
+ yield
+ return
+ old_require_backward_grad_sync = model.require_backward_grad_sync
+ old_require_forward_param_sync = model.require_forward_param_sync
+ model.require_backward_grad_sync = True
+ model.require_forward_param_sync = True
+ model.reducer.prepare_for_backward([])
+ try:
+ yield
+ finally:
+ model.require_backward_grad_sync = old_require_backward_grad_sync
+ model.require_forward_param_sync = old_require_forward_param_sync
+
+ def _do_sync(self):
+ if self.gradient_state.sync_with_dataloader and self.gradient_state.end_of_dataloader:
+ self.step = 0
+ self.gradient_state._set_sync_gradients(True)
+ else:
+ self.step += 1
+ self.gradient_state._set_sync_gradients(self.step % self.gradient_state.num_steps == 0)
+
+ @property
+ def sync_gradients(self):
+ return self.gradient_state.sync_gradients
+
+ @sync_gradients.setter
+ def sync_gradients(self, sync_gradients):
+ self.gradient_state.sync_gradients = sync_gradients
+
+ @property
+ def gradient_accumulation_steps(self):
+ return self.gradient_state.num_steps
+
+ @gradient_accumulation_steps.setter
+ def gradient_accumulation_steps(self, gradient_accumulation_steps):
+ self.gradient_state.plugin_kwargs.update({'num_steps': gradient_accumulation_steps})
+
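+ # `accumulate` is the public entry point for gradient accumulation: `_do_sync` above
+ # advances the step counter and decides whether gradients should synchronize this step,
+ # and non-sync steps run under `no_sync` (unless `sync_each_batch` is set) so DDP skips
+ # the all-reduce. Illustrative sketch; the model/optimizer/dataloader names are
+ # placeholders assumed to have been returned by `accelerator.prepare(...)`:
+ #     for batch in dataloader:
+ #         with accelerator.accumulate(model):
+ #             loss = compute_loss(model, batch)  # hypothetical loss helper
+ #             accelerator.backward(loss)
+ #             optimizer.step()
+ #             optimizer.zero_grad()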
+ @contextmanager
+ def accumulate(self, *models):
+ self._do_sync()
+ allow_gradient_sync = self.sync_gradients or (self.use_distributed and self.gradient_state.plugin_kwargs.get('sync_each_batch', False))
+ with contextlib.ExitStack() as cm_stack:
+ for m in models:
+ cm_stack.enter_context(contextlib.nullcontext() if allow_gradient_sync else self.no_sync(m))
+ yield
+
+ @contextmanager
+ def join_uneven_inputs(self, joinables, even_batches=None):
+ if self.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_XPU):
+ dl_even_batches_values = []
+ if even_batches is not None:
+ iterable_dl_seen = False
+ for (dl_idx, dl) in enumerate(self._dataloaders):
+ if isinstance(dl, DataLoaderDispatcher):
+ iterable_dl_seen = True
+ continue
+ dl_even_batches_values.append((dl_idx, dl.batch_sampler.even_batches))
+ dl.batch_sampler.even_batches = even_batches
+ if iterable_dl_seen:
+ warnings.warn('Overriding even_batches is only supported for map-style datasets, yet some of the dataloaders given were iterable')
+ else:
+ even_batches = self.even_batches
+ enable_join = False if even_batches else True
+ try:
+ with Join(joinables, enable=enable_join, throw_on_early_termination=False):
+ yield
+ finally:
+ for (dl_idx, even_batches_value) in dl_even_batches_values:
+ self._dataloaders[dl_idx].batch_sampler.even_batches = even_batches_value
+ else:
+ if self.distributed_type != DistributedType.NO:
+ warnings.warn('Joining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect.')
+ with contextlib.nullcontext(joinables):
+ yield
+
+ def print(self, *args, **kwargs):
+ self.state.print(*args, **kwargs)
+
+ def _prepare_one(self, obj, first_pass=False, device_placement=None):
+ if first_pass:
+ if isinstance(obj, torch.utils.data.DataLoader):
+ return self.prepare_data_loader(obj, device_placement=device_placement)
+ elif isinstance(obj, torch.nn.Module):
+ return self.prepare_model(obj, device_placement=device_placement)
+ elif isinstance(obj, torch.optim.Optimizer):
+ optimizer = self.prepare_optimizer(obj, device_placement=device_placement)
+ return optimizer
+ elif isinstance(obj, LRScheduler):
+ scheduler = self.prepare_scheduler(obj)
+ return scheduler
+ return obj
+
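+ # `prepare` dispatches each passed object to the matching `prepare_*` helper (or to the
+ # DeepSpeed/Megatron-LM code paths) and returns the prepared objects in the same order
+ # they were passed in, e.g. (sketch):
+ #     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)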
+ def prepare(self, *args, device_placement=None):
+ if device_placement is None:
+ device_placement = [None for _ in args]
+ elif self.distributed_type in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM):
+ raise ValueError("You can't customize device placements with DeepSpeed or Megatron-LM.")
+ elif len(device_placement) != len(args):
+ raise ValueError(f'`device_placement` should be a list with {len(args)} elements (the number of objects passed).')
+ for obj in args:
+ if isinstance(obj, torch.nn.Module) and self.verify_device_map(obj) and (self.distributed_type != DistributedType.NO) and (os.environ.get('ACCELERATE_BYPASS_DEVICE_MAP', 'false') != 'true'):
+ raise ValueError("You can't train a model that has been loaded with `device_map='auto'` in any distributed mode. Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`.")
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ model_count = 0
+ for obj in args:
+ if isinstance(obj, torch.nn.Module):
+ model_count += 1
+ if model_count > 1:
+ raise AssertionError("You can't use the same `Accelerator()` instance with multiple models when using DeepSpeed.")
+ if self.distributed_type == DistributedType.XLA:
+ (model_device, optimizer_device) = self._get_devices()
+ if model_device is not None and optimizer_device is not None and (model_device != optimizer_device):
+ raise ValueError('The model and the optimizer parameters are not on the same device, which probably means you created an optimizer around your model **before** putting on the device. Make sure the line model.to(device) is before the optimizer creation in your script or remove it entirely and use the flag default value for `device_placement` in your `Accelerator` to let it handle that part for you.')
+ tpu_should_fix_optimizer = self.device_placement and self.distributed_type == DistributedType.XLA
+ if tpu_should_fix_optimizer:
+ old_named_params = self._get_named_parameters(*args)
+ if self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]:
+ if self.device.type == 'cpu' and self.state.use_ipex:
+ args = self._prepare_ipex_or_xpu(*args)
+ elif self.device.type == 'xpu' and is_xpu_available():
+ args = self._prepare_ipex_or_xpu(*args)
+ if self.fp8_backend == 'TE':
+ args = self._prepare_te(*args)
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ result = self._prepare_deepspeed(*args)
+ elif self.distributed_type == DistributedType.MEGATRON_LM:
+ result = self._prepare_megatron_lm(*args)
+ else:
+ if self.fp8_backend == 'MSAMP':
+ (args, device_placement) = self._prepare_msamp(*args, device_placement=device_placement)
+ result = tuple((self._prepare_one(obj, first_pass=True, device_placement=d) for (obj, d) in zip(args, device_placement)))
+ result = tuple((self._prepare_one(obj, device_placement=d) for (obj, d) in zip(result, device_placement)))
+ if tpu_should_fix_optimizer:
+ new_named_params = self._get_named_parameters(*result)
+ mapping = {p: new_named_params[n] for (n, p) in old_named_params.items()}
+ for obj in result:
+ if isinstance(obj, torch.optim.Optimizer):
+ obj._switch_parameters(mapping)
+ for item in result:
+ if any((item in container for container in (self._dataloaders, self._models, self._optimizers, self._schedulers))):
+ item._is_accelerate_prepared = True
+ return result if len(result) > 1 else result[0]
+
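+ # Moves the model to the right device, wraps `forward` in an autocast context when native
+ # AMP is active, then applies the backend-specific wrapper (DDP, FSDP, XLA) and, if a
+ # dynamo backend is configured, `torch.compile`.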
+ def prepare_model(self, model: torch.nn.Module, device_placement: bool=None, evaluation_mode: bool=False):
+ if device_placement is None:
+ device_placement = self.device_placement and self.distributed_type != DistributedType.FSDP
+ self._models.append(model)
+ if self.verify_device_map(model) and self.distributed_type != DistributedType.NO and (os.environ.get('ACCELERATE_BYPASS_DEVICE_MAP', 'false') != 'true'):
+ raise ValueError("You can't train a model that has been loaded with `device_map='auto'` in any distributed mode. Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`.")
+ if self.native_amp:
+ model._original_forward = model.forward
+ autocast_context = get_mixed_precision_context_manager(self.native_amp, self.autocast_handler)
+ if self.fp8_backend == 'MSAMP' or not hasattr(model.forward, '__func__'):
+ model_forward_func = model.forward
+ model.forward = convert_outputs_to_fp32(autocast_context(model_forward_func))
+ else:
+ model_forward_func = model.forward.__func__
+ new_forward = autocast_context(model_forward_func)
+ model.forward = MethodType(new_forward, model)
+ model.forward = MethodType(convert_outputs_to_fp32(model.forward.__func__), model)
+ if self.fp8_backend == 'TE' and (not self.delayed_fp8_autocast):
+ model = apply_fp8_autowrap(model, self.fp8_recipe_handler)
+ if (getattr(model, 'is_loaded_in_8bit', False) or getattr(model, 'is_loaded_in_4bit', False)) and getattr(model, 'hf_device_map', False):
+ model_devices = set(model.hf_device_map.values())
+ if len(model_devices) > 1 and self.distributed_type != DistributedType.NO:
+ raise ValueError("You can't train a model that has been loaded in 8-bit precision on multiple devices in any distributed mode. In order to use 8-bit models that have been loaded across multiple GPUs the solution is to use Naive Pipeline Parallelism. Therefore you should not specify that you are under any distributed regime in your accelerate config.")
+ elif len(model_devices) == 1:
+ current_device = list(model_devices)[0]
+ current_device_index = current_device.index if isinstance(current_device, torch.device) else current_device
+ if torch.device(current_device_index) != self.device:
+ if self.device.index is not None or current_device_index != 0:
+ raise ValueError("You can't train a model that has been loaded in 8-bit precision on a different device than the one you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device()}` or `device_map={'':torch.xpu.current_device()}`")
+ if 'cpu' in model_devices or 'disk' in model_devices:
+ raise ValueError("You can't train a model that has been loaded in 8-bit precision with CPU or disk offload.")
+ elif device_placement and (not self.verify_device_map(model)):
+ model = model.to(self.device)
+ if not evaluation_mode:
+ if self.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU):
+ if any((p.requires_grad for p in model.parameters())):
+ kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}
+ if os.environ.get('ACCELERATE_BYPASS_DEVICE_MAP', 'false') != 'true':
+ (device_ids, output_device) = ([self.local_process_index], self.local_process_index)
+ else:
+ (device_ids, output_device) = (None, None)
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=device_ids, output_device=output_device, **kwargs)
+ if self.ddp_handler is not None:
+ self.ddp_handler.register_comm_hook(model)
+ elif self.distributed_type == DistributedType.FSDP:
+ from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
+ is_type_fsdp = isinstance(model, FSDP) or (is_compiled_module(model) and isinstance(model._orig_mod, FSDP))
+ if not is_type_fsdp:
+ self.state.fsdp_plugin.set_auto_wrap_policy(model)
+ fsdp_plugin = self.state.fsdp_plugin
+ kwargs = {'sharding_strategy': fsdp_plugin.sharding_strategy, 'cpu_offload': fsdp_plugin.cpu_offload, 'auto_wrap_policy': fsdp_plugin.auto_wrap_policy, 'mixed_precision': fsdp_plugin.mixed_precision_policy, 'sync_module_states': fsdp_plugin.sync_module_states, 'backward_prefetch': fsdp_plugin.backward_prefetch, 'forward_prefetch': fsdp_plugin.forward_prefetch, 'use_orig_params': fsdp_plugin.use_orig_params, 'param_init_fn': fsdp_plugin.param_init_fn, 'ignored_modules': fsdp_plugin.ignored_modules, 'limit_all_gathers': fsdp_plugin.limit_all_gathers, 'device_id': self.device}
+ model = FSDP(model, **kwargs)
+ if fsdp_plugin.activation_checkpointing:
+ from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import CheckpointImpl, apply_activation_checkpointing, checkpoint_wrapper
+ apply_activation_checkpointing(model, checkpoint_wrapper_fn=functools.partial(checkpoint_wrapper, checkpoint_impl=CheckpointImpl.NO_REENTRANT), auto_wrap_policy=fsdp_plugin.auto_wrap_policy)
+ if self.mixed_precision != 'no':
+ upcasted_log = []
+ for module in FSDP.fsdp_modules(model):
+ if not module._has_params:
+ continue
+ param = module._flat_param
+ if param.dtype != torch.float32 and param.device != torch.device('meta') and param.requires_grad:
+ name_param_log = (module.module.__class__.__name__, ', '.join(module._flat_param._fqns))
+ if name_param_log not in upcasted_log:
+ upcasted_log.append(name_param_log)
+ param.data = param.data.to(torch.float32)
+ module._handle._orig_param_dtype = torch.float32
+ if self.is_main_process:
+ for (name_log, param_log) in upcasted_log:
+ warnings.warn(f'Upcasted low precision parameters in {name_log} because mixed precision turned on in FSDP. Affects: {param_log}.')
+ if len(upcasted_log) > 0:
+ warnings.warn('FSDP upcast of low precision parameters may affect the precision of model checkpoints.')
+ if len(self._models) > 1 and self._models[-2] is self._models[-1]:
+ del self._models[-2]
+ self._models[-1] = model
+ elif self.distributed_type == DistributedType.MULTI_CPU:
+ kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}
+ model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)
+ if self.ddp_handler is not None:
+ self.ddp_handler.register_comm_hook(model)
+ elif self.distributed_type == DistributedType.XLA and self.state.fork_launched:
+ model = xmp.MpModelWrapper(model).to(self.device)
+ if self.delayed_fp8_autocast:
+ model = apply_fp8_autowrap(model, self.fp8_recipe_handler)
+ if self.state.dynamo_plugin.backend != DynamoBackend.NO and (not is_compiled_module(model)):
+ if not is_torch_version('>=', '2.0'):
+ raise ValueError('Using `torch.compile` requires PyTorch 2.0 or higher.')
+ model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs())
+ return model
+
+ def _prepare_te(self, *args):
+ if not is_transformer_engine_available():
+ raise ImportError('`transformer_engine` was not found on your system. Please ensure that `transformer_engine` is installed')
+ (model, optimizer) = (None, None)
+ (num_models, num_optimizers) = (0, 0)
+ result = [obj for obj in args]
+ for obj in result:
+ if isinstance(obj, torch.nn.Module):
+ model = obj
+ num_models += 1
+ elif isinstance(obj, torch.optim.Optimizer):
+ optimizer = obj
+ num_optimizers += 1
+ if optimizer is None and model is None:
+ return result
+ elif optimizer is None or model is None:
+ raise ValueError('You must pass a model and an optimizer together to `accelerate.prepare()` when using TransformerEngine.')
+ elif num_models > 1 or num_optimizers > 1:
+ raise ValueError(f"You can't use multiple models ({num_models}) or optimizers {num_optimizers} with TransformerEngine.")
+ old_named_params = self._get_named_parameters(model)
+ with torch.no_grad():
+ convert_model(model)
+ new_named_params = self._get_named_parameters(model)
+ mapping = {p: new_named_params[n] for (n, p) in old_named_params.items()}
+ for param_group in optimizer.param_groups:
+ param_group['params'] = [mapping[p] for p in param_group['params']]
+ return result
+
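+ # Builds the DeepSpeed engine: resolves `auto` entries in the DeepSpeed config from the
+ # passed dataloaders/model, validates the optimizer/scheduler against the config file,
+ # calls `deepspeed.initialize` (or the MS-AMP wrapper), and swaps the original objects
+ # for their engine-backed counterparts in the returned tuple.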
+ def _prepare_deepspeed(self, *args):
+ import deepspeed
+ ds_initialize = deepspeed.initialize
+ if self.fp8_backend == 'MSAMP':
+ from msamp import deepspeed as msamp_deepspeed
+ ds_initialize = msamp_deepspeed.initialize
+ deepspeed_plugin = self.deepspeed_plugin
+ is_dataloader_present = any((isinstance(obj, torch.utils.data.DataLoader) for obj in args))
+ result = [self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj for obj in args]
+ if deepspeed_plugin.is_auto('train_micro_batch_size_per_gpu'):
+ if is_dataloader_present:
+ batch_sizes = [obj.batch_size for obj in args if hasattr(obj, 'batch_size')]
+ if any((bs is None for bs in batch_sizes)):
+ raise ValueError("At least one of the dataloaders passed to `accelerate.prepare()` has `None` as batch size. Please set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`.")
+ if self.split_batches:
+ batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes]
+ batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)
+ if len(batch_sizes) > 1:
+ logger.info(f'Since you passed both train and evaluation dataloaders, `is_train_batch_min` (here {deepspeed_plugin.is_train_batch_min}) will decide the `train_batch_size` ({batch_size_per_device}).')
+ else:
+ raise ValueError("When using DeepSpeed, `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders with `batch_size` attribute returning an integer value or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`.")
+ else:
+ batch_size_per_device = deepspeed_plugin.get_value('train_micro_batch_size_per_gpu')
+ deepspeed_plugin.fill_match('gradient_accumulation_steps', must_match=False, gradient_accumulation_steps=self.gradient_accumulation_steps)
+ config_kwargs = {'gradient_clipping': 1.0, 'zero_optimization.stage3_gather_16bit_weights_on_model_save': False}
+ if batch_size_per_device is not None:
+ config_kwargs['train_micro_batch_size_per_gpu'] = batch_size_per_device
+ config_kwargs['train_batch_size'] = batch_size_per_device * deepspeed_plugin.get_value('gradient_accumulation_steps') * self.num_processes
+ model = None
+ optimizer = None
+ scheduler = None
+ for obj in result:
+ if isinstance(obj, torch.nn.Module):
+ model = obj
+ elif isinstance(obj, (torch.optim.Optimizer, DummyOptim)):
+ optimizer = obj
+ elif isinstance(obj, (LRScheduler, DummyScheduler)) or type(obj).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES:
+ scheduler = obj
+ if optimizer is not None:
+ if 'optimizer' in deepspeed_plugin.deepspeed_config and (not isinstance(optimizer, DummyOptim)):
+ raise ValueError('You cannot specify an optimizer in the config file and in the code at the same time. Please remove the optimizer from the config file or create `accelerate.utils.DummyOptim` in the code.')
+ elif 'optimizer' not in deepspeed_plugin.deepspeed_config and isinstance(optimizer, DummyOptim):
+ raise ValueError('You cannot create a `DummyOptim` without specifying an optimizer in the config file.')
+ if isinstance(optimizer, torch.optim.Optimizer):
+ deepspeed_plugin.deepspeed_config['zero_allow_untested_optimizer'] = True
+ if scheduler is not None:
+ if 'scheduler' in deepspeed_plugin.deepspeed_config and (not isinstance(scheduler, DummyScheduler)):
+ raise ValueError('You cannot specify a scheduler in the config file and in the code at the same time. Please remove the scheduler from the config file or create `accelerate.utils.DummyScheduler` in the code.')
+ elif 'scheduler' not in deepspeed_plugin.deepspeed_config and isinstance(scheduler, DummyScheduler) and (scheduler.lr_scheduler_callable is None):
+ raise ValueError('Either specify a scheduler in the config file or pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`.')
+ if optimizer is not None and scheduler is not None:
+ if isinstance(optimizer, DummyOptim) and (not isinstance(scheduler, DummyScheduler)):
+ raise ValueError('You can only specify `accelerate.utils.DummyScheduler` in the code when using `accelerate.utils.DummyOptim`.')
+ if model is not None:
+ if getattr(self.fp8_recipe_handler, 'backend', None) == 'TE':
+ model = apply_fp8_autowrap(model, self.fp8_recipe_handler)
+ deepspeed_plugin.set_moe_leaf_modules(model)
+ hidden_size_based_keys = ['zero_optimization.reduce_bucket_size', 'zero_optimization.stage3_prefetch_bucket_size', 'zero_optimization.stage3_param_persistence_threshold']
+ hidden_size_auto_keys = [x for x in hidden_size_based_keys if deepspeed_plugin.is_auto(x)]
+ if len(hidden_size_auto_keys) > 0:
+ reasoning = "therefore it's not possible to automatically fill out the following `auto` entries " + f'in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing ' + '`auto` values for these keys with an integer value of your choice.'
+ if not hasattr(model, 'config'):
+ raise ValueError("Can't find `model.config` entry, " + reasoning)
+ if hasattr(model.config, 'hidden_size'):
+ hidden_size = model.config.hidden_size
+ elif hasattr(model.config, 'hidden_sizes'):
+ hidden_size = max(model.config.hidden_sizes)
+ else:
+ raise ValueError('Can find neither `model.config.hidden_size` nor `model.config.hidden_sizes`, ' + reasoning)
+ config_kwargs.update({'zero_optimization.reduce_bucket_size': hidden_size * hidden_size, 'zero_optimization.stage3_prefetch_bucket_size': int(0.9 * hidden_size * hidden_size), 'zero_optimization.stage3_param_persistence_threshold': 10 * hidden_size})
+ if isinstance(optimizer, DummyOptim):
+ config_kwargs.update({'optimizer.params.lr': optimizer.lr, 'optimizer.params.weight_decay': optimizer.weight_decay})
+ if isinstance(scheduler, DummyScheduler) and scheduler.lr_scheduler_callable is None:
+ max_lr = getattr(scheduler.optimizer, 'lr', None) if getattr(scheduler.optimizer, 'defaults', None) is None else scheduler.optimizer.defaults['lr']
+ config_kwargs.update({'scheduler.params.warmup_min_lr': 0, 'scheduler.params.warmup_max_lr': max_lr, 'scheduler.params.warmup_num_steps': scheduler.warmup_num_steps})
+ if scheduler.total_num_steps is not None:
+ config_kwargs['scheduler.params.total_num_steps'] = math.ceil(scheduler.total_num_steps / self.num_processes) if not self.split_batches else scheduler.total_num_steps
+ deepspeed_plugin.deepspeed_config_process(must_match=False, **config_kwargs)
+ self.deepspeed_config = deepspeed_plugin.deepspeed_config
+ kwargs = dict(model=model, config_params=self.deepspeed_config)
+ if optimizer is not None:
+ if isinstance(optimizer, DummyOptim):
+ kwargs['model_parameters'] = optimizer.params
+ if isinstance(scheduler, DummyScheduler) and scheduler.lr_scheduler_callable is not None:
+ kwargs['lr_scheduler'] = scheduler.lr_scheduler_callable
+ else:
+ if self.deepspeed_config['zero_optimization'].get('offload_optimizer', {}).get('device', 'none') != 'none' and self.deepspeed_config.get('zero_force_ds_cpu_optimizer', True):
+ from deepspeed.ops.adam import DeepSpeedCPUAdam
+ defaults = {k: v for (k, v) in optimizer.defaults.items() if k in ['lr', 'weight_decay']}
+ optimizer = DeepSpeedCPUAdam(optimizer.param_groups, **defaults)
+ kwargs['optimizer'] = optimizer
+ if scheduler is not None:
+ if type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES:
+ kwargs['lr_scheduler'] = scheduler
+ (engine, optimizer, _, lr_scheduler) = ds_initialize(**kwargs)
+ if optimizer is not None:
+ optimizer = DeepSpeedOptimizerWrapper(optimizer)
+ if scheduler is not None:
+ if lr_scheduler is None:
+ scheduler = AcceleratedScheduler(scheduler, optimizer, step_with_optimizer=self.step_scheduler_with_optimizer, split_batches=self.split_batches)
+ else:
+ scheduler = DeepSpeedSchedulerWrapper(lr_scheduler, optimizer)
+ for i in range(len(result)):
+ if isinstance(result[i], torch.nn.Module):
+ result[i] = engine
+ elif isinstance(result[i], (torch.optim.Optimizer, DummyOptim)):
+ result[i] = optimizer
+ elif isinstance(result[i], (LRScheduler, DummyScheduler)) or type(result[i]).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES:
+ result[i] = scheduler
+ if self.deepspeed_engine_wrapped is None:
+ self.deepspeed_engine_wrapped = DeepSpeedEngineWrapper(engine)
+ else:
+ logger.warning('A wrapped DeepSpeed engine reference is currently tied to this `Accelerator()` instance. If you want to call `accelerator.backward()` referencing a new model/engine, please create a separate `Accelerator()` instance and call `accelerator.prepare()` on it.')
+ self._models.append(engine)
+ if optimizer is not None:
+ self._optimizers.append(optimizer)
+ if scheduler is not None:
+ self._schedulers.append(scheduler)
+ return tuple(result)
+
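+ # Mirrors `_prepare_deepspeed` for Megatron-LM: derives the micro-batch size and
+ # data-parallel degree, initializes Megatron-LM, and wraps model/optimizer/scheduler in
+ # their Megatron counterparts (only one model per `Accelerator` instance is allowed).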
+ def _prepare_megatron_lm(self, *args):
+ megatron_lm_plugin = self.state.megatron_lm_plugin
+ micro_batch_size = None
+ if not megatron_lm_plugin.megatron_dataset_flag:
+ batch_sizes = [obj.batch_size for obj in args if hasattr(obj, 'batch_size')]
+ if len(batch_sizes) == 0:
+ raise ValueError('You must specify a training or evaluation dataloader in `accelerate.prepare()` when using Megatron-LM.')
+ micro_batch_size = min(batch_sizes) if megatron_lm_plugin.is_train_batch_min else max(batch_sizes)
+ if len(batch_sizes) > 1:
+ logger.info(f'Since you passed both train and evaluation dataloaders, `is_train_batch_min` (here {megatron_lm_plugin.is_train_batch_min}) will decide the `train_batch_size` ({micro_batch_size}).')
+ else:
+ for obj in args:
+ if isinstance(obj, MegatronLMDummyDataLoader):
+ micro_batch_size = obj.dataset_args['micro_batch_size']
+ break
+ if micro_batch_size is not None:
+ dp_degree = self.num_processes // (megatron_lm_plugin.tp_degree * megatron_lm_plugin.pp_degree)
+ megatron_lm_plugin.set_training_args(micro_batch_size, dp_degree)
+ else:
+ raise ValueError('When you do not pass the dataloader parameter, the `data_parallel_size`, `micro_batch_size`, and `global_batch_size` megatron parameters will not be updated.')
+ model = None
+ optimizer = None
+ scheduler = None
+ batch_data = None
+ for obj in args:
+ if isinstance(obj, torch.utils.data.DataLoader) and batch_data is None:
+ batch_data = next(iter(obj))
+ elif isinstance(obj, torch.nn.Module):
+ model = obj
+ elif isinstance(obj, torch.optim.Optimizer):
+ optimizer = obj
+ elif isinstance(obj, (LRScheduler, MegatronLMDummyScheduler)):
+ scheduler = obj
+ if model is not None:
+ megatron_lm_plugin.set_network_size_args(model, batch_data)
+ if optimizer is not None:
+ megatron_lm_plugin.set_optimizer_type(optimizer)
+ if scheduler is not None:
+ if not isinstance(scheduler, MegatronLMDummyScheduler):
+ raise ValueError("You can't use a custom scheduler with Megatron-LM. Please use the `accelerate.utils.MegatronLMDummyScheduler` instead.")
+ megatron_lm_plugin.set_scheduler_args(scheduler)
+ megatron_lm_initialize(self, args_defaults=megatron_lm_plugin.megatron_lm_default_args)
+ (model, optimizer, scheduler) = megatron_lm_prepare_model_optimizer_scheduler(self)
+ self.wait_for_everyone()
+ counter = 0
+ result = []
+ for obj in args:
+ if isinstance(obj, torch.utils.data.DataLoader):
+ result.append(megatron_lm_prepare_data_loader(self, obj))
+ counter += 1
+ elif isinstance(obj, MegatronLMDummyDataLoader):
+ if counter == 0:
+ obj.set_megatron_data_args()
+ dataloaders = megatron_lm_prepare_data_loader(self, obj)
+ result.append(dataloaders[counter])
+ counter += 1
+ else:
+ result.append(obj)
+ if model is not None:
+ model = MegatronEngine(self, model, optimizer, scheduler)
+ if optimizer is not None:
+ optimizer = MegatronLMOptimizerWrapper(optimizer)
+ if scheduler is not None:
+ scheduler = MegatronLMSchedulerWrapper(scheduler, optimizer)
+ for i in range(len(result)):
+ if isinstance(result[i], torch.nn.Module):
+ result[i] = model
+ elif isinstance(result[i], torch.optim.Optimizer):
+ result[i] = optimizer
+ elif isinstance(result[i], MegatronLMDummyScheduler):
+ result[i] = scheduler
+ if model is not None:
+ self._models.append(model)
+ if len(self._models) > 1:
+ raise AssertionError("You can't use the same `Accelerator()` instance with multiple models when using Megatron-LM.")
+ if optimizer is not None:
+ self._optimizers.append(optimizer)
+ if scheduler is not None:
+ self._schedulers.append(scheduler)
+ return tuple(result)
+
+ def _prepare_ipex_or_xpu(self, *args):
+ if self.state.use_ipex:
+ if not is_ipex_available():
+ raise ImportError("IPEX is not installed or IPEX's version does not match current PyTorch version. Please refer to https://github.com/intel/intel-extension-for-pytorch.")
+ model = None
+ optimizer = None
+ result = [obj for obj in args]
+ for obj in result:
+ if isinstance(obj, torch.nn.Module):
+ model = obj
+ model.train()
+ elif isinstance(obj, torch.optim.Optimizer):
+ optimizer = obj
+ if optimizer is not None and model is not None:
+ dtype = torch.bfloat16 if self.state.mixed_precision == 'bf16' else None
+ if self.device.type == 'xpu':
+ model = model.to(self.device)
+ if is_ipex_available():
+ import intel_extension_for_pytorch as ipex
+ (model, optimizer) = ipex.optimize(model, optimizer=optimizer, dtype=dtype, inplace=True, level='O1')
+ for i in range(len(result)):
+ if isinstance(result[i], torch.nn.Module):
+ result[i] = model
+ elif isinstance(result[i], torch.optim.Optimizer):
+ result[i] = optimizer
+ return tuple(result)
+
+ def _prepare_msamp(self, *args, device_placement):
+ if not is_msamp_available():
+ raise ImportError("MS-AMP was not found on your system. Please ensure that MS-AMP is available or choose `'te'` as the backend for FP8 mixed precision training.")
+ import msamp
+ (model, optimizer) = (None, None)
+ optimizer_index = None
+ (num_models, num_optimizers) = (0, 0)
+ result = [obj for obj in args]
+ for (i, obj) in enumerate(result):
+ if isinstance(obj, torch.nn.Module):
+ model = obj
+ num_models += 1
+ elif isinstance(obj, torch.optim.Optimizer):
+ optimizer = obj
+ optimizer_index = i
+ num_optimizers += 1
+ if optimizer is None and model is None:
+ return (result, device_placement)
+ elif optimizer is None or model is None:
+ raise ValueError('You must pass a model and an optimizer together to `accelerate.prepare()` when using MS-AMP.')
+ elif num_models > 1 or num_optimizers > 1:
+ raise ValueError(f"You can't use multiple models ({num_models}) or optimizers {num_optimizers} with MS-AMP.")
+ else:
+ (model, optimizer) = msamp.initialize(model, optimizer, opt_level=self.fp8_recipe_handler.opt_level)
+ for i in range(len(result)):
+ if isinstance(result[i], torch.nn.Module):
+ result[i] = model
+ elif isinstance(result[i], torch.optim.Optimizer):
+ result[i] = optimizer
+ if optimizer_index is not None:
+ device_placement[optimizer_index] = False
+ return (tuple(result), device_placement)
+
+ def prepare_data_loader(self, data_loader: torch.utils.data.DataLoader, device_placement=None, slice_fn_for_dispatch=None):
+ if getattr(data_loader, '_is_accelerate_prepared', False):
+ if data_loader not in self._dataloaders:
+ self._dataloaders.append(data_loader)
+ return data_loader
+ if device_placement is None:
+ device_placement = self.device_placement if self.distributed_type != DistributedType.XLA else False
+ prepared_data_loader = prepare_data_loader(data_loader, self.device, num_processes=self.num_processes, process_index=self.process_index, split_batches=self.split_batches, put_on_device=device_placement, rng_types=self.rng_types.copy(), dispatch_batches=self.dispatch_batches, even_batches=self.even_batches, slice_fn_for_dispatch=slice_fn_for_dispatch, use_seedable_sampler=self.use_seedable_sampler, non_blocking=self.non_blocking, use_stateful_dataloader=self.use_stateful_dataloader)
+ self._dataloaders.append(prepared_data_loader)
+ return prepared_data_loader
+
+ def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None):
+ if is_lomo_available():
+ from lomo_optim import AdaLomo, Lomo
+ self.has_lomo_optimizer |= isinstance(optimizer, (Lomo, AdaLomo))
+ if getattr(optimizer, '_is_accelerate_prepared', False):
+ if optimizer not in self._optimizers:
+ self._optimizers.append(optimizer)
+ return optimizer
+ if device_placement is None:
+ device_placement = self.device_placement
+ scaler = None if self.fp8_backend == 'MSAMP' else self.scaler
+ optimizer = AcceleratedOptimizer(optimizer, device_placement=device_placement, scaler=scaler)
+ self._optimizers.append(optimizer)
+ return optimizer
+
+ def prepare_scheduler(self, scheduler: LRScheduler):
+ if getattr(scheduler, '_is_accelerate_prepared', False):
+ if scheduler not in self._schedulers:
+ self._schedulers.append(scheduler)
+ return scheduler
+ optimizer = self._optimizers
+ for opt in self._optimizers:
+ if getattr(scheduler, 'optimizer', None) == opt.optimizer:
+ optimizer = opt
+ break
+ scheduler = AcceleratedScheduler(scheduler, optimizer, step_with_optimizer=self.step_scheduler_with_optimizer, split_batches=self.split_batches)
+ self._schedulers.append(scheduler)
+ return scheduler
+
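+ # Scales the loss down by the number of gradient-accumulation steps (DeepSpeed handles
+ # this internally), then routes the backward pass through the DeepSpeed engine, the
+ # GradScaler, or the LOMO-specific path as appropriate. Typical call site (sketch):
+ #     accelerator.backward(loss)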
+ def backward(self, loss, **kwargs):
+ learning_rate = kwargs.get('learning_rate')
+ if self.distributed_type != DistributedType.DEEPSPEED:
+ loss = loss / self.gradient_accumulation_steps
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ self.deepspeed_engine_wrapped.backward(loss, **kwargs)
+ elif self.distributed_type == DistributedType.MEGATRON_LM:
+ return
+ elif self.scaler is not None:
+ self.scaler.scale(loss).backward(**kwargs)
+ elif learning_rate is not None and self.has_lomo_optimizer:
+ self.lomo_backward(loss, learning_rate)
+ else:
+ loss.backward(**kwargs)
+
+ def set_trigger(self):
+ self.flag_tensor = torch.tensor(1, device=self.device)
+
+ def check_trigger(self):
+ if self.flag_tensor is None:
+ self.flag_tensor = torch.tensor(0, device=self.device)
+ flag_tensor = self.reduce(self.flag_tensor)
+ if flag_tensor.item() >= 1:
+ self.flag_tensor = torch.tensor(0, device=self.device)
+ return True
+ return False
+
+ def unscale_gradients(self, optimizer=None):
+ if self.native_amp and self.mixed_precision == 'fp16':
+ if optimizer is None:
+ optimizer = self._optimizers
+ elif not isinstance(optimizer, (tuple, list)):
+ optimizer = [optimizer]
+ for opt in optimizer:
+ while isinstance(opt, AcceleratedOptimizer):
+ opt = opt.optimizer
+ self.scaler.unscale_(opt)
+
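+ # Drop-in replacement for `torch.nn.utils.clip_grad_norm_`: FSDP models clip through
+ # their own `clip_grad_norm_`, DeepSpeed clips via its config (so `None` is returned
+ # here), XLA all-reduces gradients first if needed, and fp16 gradients are unscaled
+ # before clipping.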
+ def clip_grad_norm_(self, parameters, max_norm, norm_type=2):
+ if self.distributed_type == DistributedType.FSDP:
+ self.unscale_gradients()
+ parameters = [p for p in parameters]
+ for model in self._models:
+ if parameters == [p for p in model.parameters()]:
+ return model.clip_grad_norm_(max_norm, norm_type)
+ elif self.distributed_type == DistributedType.DEEPSPEED:
+ return None
+ elif self.distributed_type == DistributedType.XLA:
+ for acc_opt in self._optimizers:
+ if not acc_opt.gradient_state.is_xla_gradients_synced:
+ opt = acc_opt
+ while isinstance(opt, AcceleratedOptimizer):
+ opt = opt.optimizer
+ gradients = xm._fetch_gradients(opt)
+ xm.all_reduce('sum', gradients, scale=1.0 / self.num_processes)
+ acc_opt.gradient_state.is_xla_gradients_synced = True
+ if os.environ.get('ACCELERATE_USE_FSDP', 'false') == 'true':
+ self.unscale_gradients()
+ parameters = [p for p in parameters]
+ for model in self._models:
+ if parameters == [p for p in model.parameters()]:
+ return model.clip_grad_norm_(max_norm, norm_type)
+ self.unscale_gradients()
+ return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)
+
+ def clip_grad_value_(self, parameters, clip_value):
+ if self.distributed_type in [DistributedType.DEEPSPEED, DistributedType.FSDP]:
+ raise Exception('DeepSpeed and FSDP do not support `clip_grad_value_`. Use `clip_grad_norm_` instead.')
+ self.unscale_gradients()
+ torch.nn.utils.clip_grad_value_(parameters, clip_value)
+
+ def gather(self, tensor):
+ return gather(tensor)
+
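+ # Gathers `input_data` from all processes (falling back to `gather_object` for
+ # non-tensor inputs) and, at the end of a dataloader, drops the duplicated samples that
+ # were added to make batches divisible, so metrics are computed on exactly the original
+ # dataset.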
+ def gather_for_metrics(self, input_data, use_gather_object=False):
+ try:
+ recursively_apply(lambda x: x, input_data, error_on_other_type=True)
+ all_tensors = True
+ except TypeError:
+ all_tensors = False
+ use_gather_object = use_gather_object or not all_tensors
+ if use_gather_object:
+ data = gather_object(input_data)
+ else:
+ data = self.gather(input_data)
+ try:
+ if self.gradient_state.end_of_dataloader:
+ if self.gradient_state.remainder == -1:
+ logger.info('The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.')
+ return data
+ elif self.gradient_state.remainder > 0:
+
+ def _adjust_samples(tensor):
+ return tensor[:self.gradient_state.remainder]
+ if use_gather_object:
+ return _adjust_samples(data)
+ else:
+ return recursively_apply(_adjust_samples, data)
+ else:
+ return data
+ else:
+ return data
+ except Exception:
+ return data
+
+ def reduce(self, tensor, reduction='sum', scale=1.0):
+ return reduce(tensor, reduction, scale)
+
+ def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):
+ return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first)
+
+ def unwrap_model(self, model, keep_fp32_wrapper: bool=True):
+ return extract_model_from_parallel(model, keep_fp32_wrapper)
+
+ def wait_for_everyone(self):
+ wait_for_everyone()
+
+ @on_main_process
+ def init_trackers(self, project_name: str, config: dict | None=None, init_kwargs: dict | None={}):
+ for tracker in self.log_with:
+ if issubclass(type(tracker), GeneralTracker):
+ self.trackers.append(tracker)
+ else:
+ tracker_init = LOGGER_TYPE_TO_CLASS[str(tracker)]
+ if tracker_init.requires_logging_directory:
+ self.trackers.append(tracker_init(project_name, self.logging_dir, **init_kwargs.get(str(tracker), {})))
+ else:
+ self.trackers.append(tracker_init(project_name, **init_kwargs.get(str(tracker), {})))
+ if config is not None:
+ for tracker in self.trackers:
+ tracker.store_init_configuration(config)
+
+ def get_tracker(self, name: str, unwrap: bool=False):
+ if len(self.trackers) > 0:
+ for tracker in self.trackers:
+ if tracker.name == name:
+ return tracker.tracker if unwrap else tracker
+ raise ValueError(f'{name} is not an available tracker stored inside the `Accelerator`.')
+ return GeneralTracker(_blank=True)
+
+ @on_main_process
+ def log(self, values: dict, step: int | None=None, log_kwargs: dict | None={}):
+ for tracker in self.trackers:
+ tracker.log(values, step=step, **log_kwargs.get(tracker.name, {}))
+
+ def end_training(self):
+ for tracker in self.trackers:
+ tracker.finish()
+ self.state.destroy_process_group()
+
+ def save(self, obj, f, safe_serialization=False):
+ save(obj, f, save_on_each_node=self.project_configuration.save_on_each_node, safe_serialization=safe_serialization)
+
+ def save_model(self, model: torch.nn.Module, save_directory: Union[str, os.PathLike], max_shard_size: Union[int, str]='10GB', safe_serialization: bool=True):
+ if os.path.isfile(save_directory):
+ logger.error(f'Provided path ({save_directory}) should be a directory, not a file')
+ return
+ os.makedirs(save_directory, exist_ok=True)
+ if any([module._hf_hook.offload for module in model.modules() if hasattr(module, '_hf_hook') and isinstance(module._hf_hook, AlignDevicesHook)]):
+ state_dict = get_state_dict_offloaded_model(model)
+ else:
+ if any((param.device == torch.device('meta') for param in model.parameters())):
+ raise RuntimeError("You can't save the model since some parameters are on the meta device.")
+ state_dict = self.get_state_dict(model)
+ if safe_serialization:
+ state_dict = clean_state_dict_for_safetensors(state_dict)
+ weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME
+ filename_pattern = SAFE_WEIGHTS_PATTERN_NAME if safe_serialization else WEIGHTS_PATTERN_NAME
+ state_dict_split = split_torch_state_dict_into_shards(state_dict, filename_pattern=filename_pattern, max_shard_size=max_shard_size)
+ for filename in os.listdir(save_directory):
+ full_filename = os.path.join(save_directory, filename)
+ weights_no_suffix = weights_name.replace('.bin', '')
+ filename_no_suffix = filename.replace('.bin', '')
+ reg = re.compile('(.*?)-\\d{5}-of-\\d{5}')
+ if filename.startswith(weights_no_suffix) and os.path.isfile(full_filename) and (filename not in state_dict_split.filename_to_tensors.keys()) and (reg.fullmatch(filename_no_suffix) is not None) and PartialState().is_main_process:
+ os.remove(full_filename)
+ for (filename, tensors) in state_dict_split.filename_to_tensors.items():
+ shard = {tensor: state_dict[tensor] for tensor in tensors}
+ self.save(shard, os.path.join(save_directory, filename), safe_serialization=safe_serialization)
+ if state_dict_split.is_sharded:
+ index = {'metadata': state_dict_split.metadata, 'weight_map': state_dict_split.tensor_to_filename}
+ save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME
+ save_index_file = os.path.join(save_directory, save_index_file)
+ with open(save_index_file, 'w', encoding='utf-8') as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + '\n'
+ f.write(content)
+ logger.info(f'The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be split in {len(state_dict_split.filename_to_tensors)} checkpoint shards. You can find where each parameter has been saved in the index located at {save_index_file}.')
+ else:
+ path_to_weights = os.path.join(save_directory, WEIGHTS_NAME)
+ logger.info(f'Model weights saved in {path_to_weights}')
+
+ def register_save_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle:
+ handle = hooks.RemovableHandle(self._save_model_state_pre_hook)
+ self._save_model_state_pre_hook[handle.id] = hook
+ return handle
+
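+ # Saves the full training state (models, optimizers, schedulers, dataloaders, RNG/scaler
+ # state and registered custom objects). With automatic checkpoint naming enabled, old
+ # checkpoints beyond `total_limit` are pruned and a new `checkpoint_<iteration>` folder
+ # is created under `<project_dir>/checkpoints`.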
+ def save_state(self, output_dir: str=None, safe_serialization: bool=True, **save_model_func_kwargs):
+ if self.project_configuration.automatic_checkpoint_naming:
+ output_dir = os.path.join(self.project_dir, 'checkpoints')
+ os.makedirs(output_dir, exist_ok=True)
+ if self.project_configuration.automatic_checkpoint_naming:
+ folders = [os.path.join(output_dir, folder) for folder in os.listdir(output_dir)]
+ if self.project_configuration.total_limit is not None and len(folders) + 1 > self.project_configuration.total_limit and self.is_main_process:
+
+ def _inner(folder):
+ return list(map(int, re.findall('[\\/]?([0-9]+)(?=[^\\/]*$)', folder)))[0]
+ folders.sort(key=_inner)
+ logger.warning(f'Deleting {len(folders) + 1 - self.project_configuration.total_limit} checkpoints to make room for new checkpoint.')
+ for folder in folders[:len(folders) + 1 - self.project_configuration.total_limit]:
+ shutil.rmtree(folder)
+ output_dir = os.path.join(output_dir, f'checkpoint_{self.save_iteration}')
+ if os.path.exists(output_dir):
+ raise ValueError(f'Checkpoint directory {output_dir} ({self.save_iteration}) already exists. Please manually override `self.save_iteration` with what iteration to start with.')
+ self.wait_for_everyone()
+ os.makedirs(output_dir, exist_ok=True)
+ logger.info(f'Saving current state to {output_dir}')
+ if self.distributed_type == DistributedType.XLA:
+ xm.mark_step()
+ weights = []
+ for (i, model) in enumerate(self._models):
+ if self.distributed_type == DistributedType.FSDP:
+ logger.info('Saving FSDP model')
+ save_fsdp_model(self.state.fsdp_plugin, self, model, output_dir, i)
+ logger.info(f'FSDP Model saved to output dir {output_dir}')
+ elif self.distributed_type == DistributedType.DEEPSPEED:
+ logger.info('Saving DeepSpeed Model and Optimizer')
+ ckpt_id = f'{MODEL_NAME}' if i == 0 else f'{MODEL_NAME}_{i}'
+ model.save_checkpoint(output_dir, ckpt_id, **save_model_func_kwargs)
+ logger.info(f'DeepSpeed Model and Optimizer saved to output dir {os.path.join(output_dir, ckpt_id)}')
+ elif self.distributed_type == DistributedType.MEGATRON_LM:
+ logger.info('Saving Megatron-LM Model, Optimizer and Scheduler')
+ model.save_checkpoint(output_dir)
+ logger.info(f'Megatron-LM Model, Optimizer and Scheduler saved to output dir {output_dir}')
+ else:
+ weights.append(self.get_state_dict(model, unwrap=False))
+ optimizers = []
+ if self.distributed_type == DistributedType.FSDP:
+ for (i, opt) in enumerate(self._optimizers):
+ logger.info('Saving FSDP Optimizer')
+ save_fsdp_optimizer(self.state.fsdp_plugin, self, opt, self._models[i], output_dir, i)
+ logger.info(f'FSDP Optimizer saved to output dir {output_dir}')
+ elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
+ optimizers = self._optimizers
+ schedulers = []
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ for (i, scheduler) in enumerate(self._schedulers):
+ if isinstance(scheduler, DeepSpeedSchedulerWrapper):
+ continue
+ schedulers.append(scheduler)
+ elif self.distributed_type not in [DistributedType.MEGATRON_LM]:
+ schedulers = self._schedulers
+ dataloaders = self._dataloaders
+ for hook in self._save_model_state_pre_hook.values():
+ hook(self._models, weights, output_dir)
+ save_location = save_accelerator_state(output_dir, weights, optimizers, schedulers, dataloaders, self.state.process_index, self.step, self.scaler, save_on_each_node=self.project_configuration.save_on_each_node, safe_serialization=safe_serialization)
+ for (i, obj) in enumerate(self._custom_objects):
+ save_custom_state(obj, output_dir, i, save_on_each_node=self.project_configuration.save_on_each_node)
+ self.project_configuration.iteration += 1
+ return save_location
+
+ def register_load_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle:
+ handle = hooks.RemovableHandle(self._load_model_state_pre_hook)
+ self._load_model_state_pre_hook[handle.id] = hook
+ return handle
+
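+ # Counterpart to `save_state`: restores every registered object from `input_dir`, or from
+ # the newest folder under `<project_dir>/checkpoints` when automatic checkpoint naming is
+ # enabled and no directory is given.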
+ def load_state(self, input_dir: str=None, **load_model_func_kwargs):
+ if input_dir is not None:
+ input_dir = os.path.expanduser(input_dir)
+ if not os.path.isdir(input_dir):
+ raise ValueError(f'Tried to find {input_dir} but folder does not exist')
+ elif self.project_configuration.automatic_checkpoint_naming:
+ input_dir = os.path.join(self.project_dir, 'checkpoints')
+ folders = [os.path.join(input_dir, folder) for folder in os.listdir(input_dir)]
+
+ def _inner(folder):
+ return list(map(int, re.findall('[\\/]?([0-9]+)(?=[^\\/]*$)', folder)))[0]
+ folders.sort(key=_inner)
+ input_dir = folders[-1]
+ else:
+ raise ValueError('No input_dir provided and automatic checkpoint naming is disabled.')
+ logger.info(f'Loading states from {input_dir}')
+ models = []
+ for (i, model) in enumerate(self._models):
+ if self.distributed_type == DistributedType.FSDP:
+ logger.info('Loading FSDP model')
+ load_fsdp_model(self.state.fsdp_plugin, self, model, input_dir, i)
+ logger.info(f'FSDP Model loaded from input dir {input_dir}')
+ elif self.distributed_type == DistributedType.DEEPSPEED:
+ logger.info('Loading DeepSpeed Model and Optimizer')
+ ckpt_id = f'{MODEL_NAME}' if i == 0 else f'{MODEL_NAME}_{i}'
+ model.load_checkpoint(input_dir, ckpt_id, **load_model_func_kwargs)
+ logger.info(f'DeepSpeed Model and Optimizer loaded from input dir {os.path.join(input_dir, ckpt_id)}')
+ elif self.distributed_type == DistributedType.MEGATRON_LM:
+ logger.info('Loading Megatron-LM Model, Optimizer and Scheduler')
+ model.load_checkpoint(input_dir)
+ logger.info(f'Megatron-LM Model, Optimizer and Scheduler loaded from input dir {input_dir}')
+ else:
+ models.append(model)
+ optimizers = []
+ if self.distributed_type == DistributedType.FSDP:
+ for (i, opt) in enumerate(self._optimizers):
+ logger.info('Loading FSDP Optimizer')
+ load_fsdp_optimizer(self.state.fsdp_plugin, self, opt, self._models[i], input_dir, i)
+ logger.info(f'FSDP Optimizer loaded from input dir {input_dir}')
+ elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
+ optimizers = self._optimizers
+ schedulers = []
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ for (i, scheduler) in enumerate(self._schedulers):
+ if isinstance(scheduler, DeepSpeedSchedulerWrapper):
+ continue
+ schedulers.append(scheduler)
+ elif self.distributed_type not in [DistributedType.MEGATRON_LM]:
+ schedulers = self._schedulers
+ dataloaders = self._dataloaders
+ for hook in self._load_model_state_pre_hook.values():
+ hook(models, input_dir)
+ map_location = load_model_func_kwargs.pop('map_location', None)
+ if map_location is None:
+ if self.num_processes > 1 and self.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU):
+ map_location = 'on_device'
+ else:
+ map_location = 'cpu'
+ override_attributes = load_accelerator_state(input_dir, models, optimizers, schedulers, dataloaders, self.state.process_index, self.scaler, map_location, **load_model_func_kwargs)
+ if 'step' in override_attributes:
+ self.step = override_attributes['step']
+ custom_checkpoints = [f for f in os.listdir(input_dir) if re.search('^custom_checkpoint_\\d+\\.pkl$', f) is not None]
+ if len(custom_checkpoints) != len(self._custom_objects):
+ err = f'Number of custom checkpoints in folder {input_dir} does not match the number of registered objects:'
+ err += f'\n\tFound checkpoints: {len(custom_checkpoints)}'
+ err += f'\n\tRegistered objects: {len(self._custom_objects)}\n'
+ err += 'Please make sure to only load checkpoints from folders that were created with the same set of registered objects, '
+ err += 'or avoid using `custom_checkpoint` in the filename for files in that same directory and load them in manually.'
+ raise RuntimeError(err)
+ else:
+ logger.info(f'Loading in {len(custom_checkpoints)} custom states')
+ for (index, obj) in enumerate(self._custom_objects):
+ load_custom_state(obj, input_dir, index)
+
+ def free_memory(self, *objects):
+ if hasattr(self, 'deepspeed_engine_wrapped'):
+ if self.deepspeed_engine_wrapped is not None:
+ self.deepspeed_engine_wrapped.engine.destroy()
+ self.deepspeed_engine_wrapped = None
+ objects = release_memory(*objects)
+ self._schedulers = []
+ self._optimizers = []
+ self._models = []
+ self._dataloaders = []
+ self.step = 0
+ return objects
+
+ def clear(self, *objects):
+ return self.free_memory(*objects)
+
+ def _get_named_parameters(self, *args):
+ named_parameters = {}
+ for obj in args:
+ if isinstance(obj, torch.nn.Module):
+ obj = extract_model_from_parallel(obj)
+ named_parameters.update({n: p for (n, p) in obj.named_parameters()})
+ return named_parameters
+
+ def _get_devices(self, *args):
+ model_device = None
+ optimizer_device = None
+ for obj in args:
+ if isinstance(obj, torch.nn.Module):
+ for param in obj.parameters():
+ model_device = param.device
+ break
+ if isinstance(obj, torch.optim.Optimizer):
+ for param_group in obj.param_groups:
+ if len(param_group['params']) > 0:
+ optimizer_device = param_group['params'][0].device
+ break
+ return (model_device, optimizer_device)
+
+ def get_state_dict(self, model, unwrap=True):
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ if self.deepspeed_config['zero_optimization']['stage'] == 3:
+ if model.zero_gather_16bit_weights_on_model_save():
+ state_dict = model._zero3_consolidated_16bit_state_dict()
+ else:
+ raise ValueError('Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. To save the model weights in 16bit, set `stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed config file or set `zero3_save_16bit_model` to True when using `accelerate config`. To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights.')
+ else:
+ from deepspeed.checkpoint.utils import clone_tensors_for_torch_save
+ state_dict = clone_tensors_for_torch_save(self.unwrap_model(model).state_dict())
+ elif self.distributed_type == DistributedType.FSDP:
+ from torch.distributed.fsdp import FullStateDictConfig, StateDictType
+ from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
+ full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
+ with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_state_dict_config):
+ state_dict = model.state_dict()
+ else:
+ if unwrap:
+ model = self.unwrap_model(model)
+ state_dict = model.state_dict()
+ return state_dict
+
+ def register_for_checkpointing(self, *objects):
+ invalid_objects = []
+ for obj in objects:
+ if not hasattr(obj, 'state_dict') or not hasattr(obj, 'load_state_dict'):
+ invalid_objects.append(obj)
+ if len(invalid_objects) > 0:
+ err = 'All `objects` must include a `state_dict` and `load_state_dict` function to be stored. The following inputs are invalid:'
+ for (index, obj) in enumerate(invalid_objects):
+ err += f'\n\t- Item at index {index}, `{get_pretty_name(obj)}`'
+ raise ValueError(err)
+ self._custom_objects.extend(objects)
+
+ @contextmanager
+ def autocast(self, autocast_handler: AutocastKwargs=None):
+ if autocast_handler is None:
+ autocast_handler = self.autocast_handler
+ autocast_context = get_mixed_precision_context_manager(self.native_amp, autocast_handler)
+ autocast_context.__enter__()
+ yield
+ autocast_context.__exit__(*sys.exc_info())
+
+ @contextmanager
+ def profile(self, profile_handler: ProfileKwargs | None=None):
+ profile_handler = profile_handler or self.profile_handler or ProfileKwargs()
+ with profile_handler.build() as profiler:
+ yield profiler
+ if profile_handler.output_trace_dir is None:
+ return
+ os.makedirs(profile_handler.output_trace_dir, exist_ok=True)
+ profiler.export_chrome_trace(os.path.join(profile_handler.output_trace_dir, PROFILE_PATTERN_NAME.format(suffix=self.process_index)))
+ self.wait_for_everyone()
+
+ @property
+ def optimizer_step_was_skipped(self):
+ for optimizer in self._optimizers:
+ if optimizer.step_was_skipped:
+ return True
+ return False
+
+ def skip_first_batches(self, dataloader, num_batches: int=0):
+ return skip_first_batches(dataloader, num_batches=num_batches)
+
+ def __deepcopy__(self, memo):
+ logger.info('Deep copying the `Accelerator` object; note that this returns the same original object rather than a copy.')
+ return self
+
+ def verify_device_map(self, model: torch.nn.Module) -> bool:
+ for m in model.modules():
+ if hasattr(m, 'hf_device_map') and len(m.hf_device_map) > 1:
+ return True
+ return False
+
+ def lomo_backward(self, loss: torch.Tensor, learning_rate: float) -> None:
+ if is_lomo_available():
+ from lomo_optim import AdaLomo, Lomo
+ if learning_rate is None:
+ raise ValueError('A learning rate must be passed in order to call backward pass with LOMO optimizers.')
+ _backward_called = False
+ for optimizer in self._optimizers:
+ if isinstance(optimizer.optimizer, (Lomo, AdaLomo)):
+ optimizer.optimizer.fused_backward(loss, learning_rate)
+ _backward_called = True
+ if not _backward_called:
+ raise ValueError('Backward pass not properly called on LOMO optimizers. Are you sure you passed a LOMO optimizer in accelerator.prepare()?')
+
+ @property
+ def fp8_backend(self):
+ if self.mixed_precision == 'fp8' and self.fp8_recipe_handler is not None:
+ return self.fp8_recipe_handler.backend
+ elif self.state.deepspeed_plugin is not None and self.state.deepspeed_plugin.enable_msamp:
+ return 'MSAMP'
+ return None
+
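+# A minimal usage sketch for the checkpointing and autocast pieces defined above
+# (`register_for_checkpointing`, `load_state`, `autocast`), assuming a plain
+# single-process run. The toy model, the 'ckpt' directory and the `_demo_*` name are
+# illustrative, and `save_state` is assumed to be the saving counterpart of the
+# `load_state` logic shown earlier in this class.
+def _demo_checkpointing(checkpoint_dir: str = 'ckpt'):
+    accelerator = Accelerator()
+    model = torch.nn.Linear(8, 8)
+    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
+    # Objects passed through `prepare` are tracked for checkpointing automatically.
+    model, optimizer = accelerator.prepare(model, optimizer)
+    # Anything else only needs `state_dict`/`load_state_dict` to be registered.
+    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)
+    accelerator.register_for_checkpointing(scheduler)
+    with accelerator.autocast():  # no-op context when mixed precision is disabled
+        loss = model(torch.randn(2, 8)).sum()
+    accelerator.backward(loss)
+    accelerator.save_state(checkpoint_dir)  # model/optimizer/custom objects/RNG states
+    accelerator.load_state(checkpoint_dir)  # restored via the code paths shown above
+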
+# File: accelerate-main/src/accelerate/big_modeling.py
+import logging
+import os
+from contextlib import contextmanager
+from functools import wraps
+from typing import Dict, List, Optional, Union
+import torch
+import torch.nn as nn
+from .hooks import AlignDevicesHook, CpuOffload, UserCpuOffloadHook, add_hook_to_module, attach_align_device_hook, attach_align_device_hook_on_blocks
+from .utils import OffloadedWeightsLoader, check_cuda_p2p_ib_support, check_device_map, extract_submodules_state_dict, find_tied_parameters, get_balanced_memory, infer_auto_device_map, is_mlu_available, is_musa_available, is_npu_available, is_torch_version, is_xpu_available, load_checkpoint_in_model, offload_state_dict, parse_flag_from_env, retie_parameters
+from .utils.other import recursive_getattr
+logger = logging.getLogger(__name__)
+
+@contextmanager
+def init_empty_weights(include_buffers: bool=None):
+ if include_buffers is None:
+ include_buffers = parse_flag_from_env('ACCELERATE_INIT_INCLUDE_BUFFERS', False)
+ with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:
+ yield f
+
+@contextmanager
+def init_on_device(device: torch.device, include_buffers: bool=None):
+ if include_buffers is None:
+ include_buffers = parse_flag_from_env('ACCELERATE_INIT_INCLUDE_BUFFERS', False)
+ if is_torch_version('>=', '2.0') and include_buffers:
+ with device:
+ yield
+ return
+ old_register_parameter = nn.Module.register_parameter
+ if include_buffers:
+ old_register_buffer = nn.Module.register_buffer
+
+ def register_empty_parameter(module, name, param):
+ old_register_parameter(module, name, param)
+ if param is not None:
+ param_cls = type(module._parameters[name])
+ kwargs = module._parameters[name].__dict__
+ kwargs['requires_grad'] = param.requires_grad
+ module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
+
+ def register_empty_buffer(module, name, buffer, persistent=True):
+ old_register_buffer(module, name, buffer, persistent=persistent)
+ if buffer is not None:
+ module._buffers[name] = module._buffers[name].to(device)
+ if include_buffers:
+ tensor_constructors_to_patch = {torch_function_name: getattr(torch, torch_function_name) for torch_function_name in ['empty', 'zeros', 'ones', 'full']}
+ else:
+ tensor_constructors_to_patch = {}
+
+ def patch_tensor_constructor(fn):
+
+ def wrapper(*args, **kwargs):
+ kwargs['device'] = device
+ return fn(*args, **kwargs)
+ return wrapper
+ try:
+ nn.Module.register_parameter = register_empty_parameter
+ if include_buffers:
+ nn.Module.register_buffer = register_empty_buffer
+ for torch_function_name in tensor_constructors_to_patch.keys():
+ setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
+ yield
+ finally:
+ nn.Module.register_parameter = old_register_parameter
+ if include_buffers:
+ nn.Module.register_buffer = old_register_buffer
+ for (torch_function_name, old_torch_function) in tensor_constructors_to_patch.items():
+ setattr(torch, torch_function_name, old_torch_function)
+
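+# A short sketch of how `init_empty_weights` above is typically used: parameters created
+# inside the context live on the meta device, so the skeleton costs essentially no RAM.
+# The toy Sequential model and the `_demo_*` name are illustrative.
+def _demo_init_empty_weights():
+    with init_empty_weights():
+        skeleton = nn.Sequential(nn.Linear(4096, 4096), nn.Linear(4096, 4096))
+    # Weights are meta tensors: shape and dtype exist, storage does not.
+    assert skeleton[0].weight.device.type == 'meta'
+    return skeleton
+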
+def cpu_offload(model: nn.Module, execution_device: Optional[torch.device]=None, offload_buffers: bool=False, state_dict: Optional[Dict[str, torch.Tensor]]=None, preload_module_classes: Optional[List[str]]=None):
+ if execution_device is None:
+ execution_device = next(iter(model.parameters())).device
+ if state_dict is None:
+ state_dict = {n: p.to('cpu') for (n, p) in model.state_dict().items()}
+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
+ attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict, preload_module_classes=preload_module_classes)
+ return model
+
+def cpu_offload_with_hook(model: torch.nn.Module, execution_device: Optional[Union[int, str, torch.device]]=None, prev_module_hook: Optional[UserCpuOffloadHook]=None):
+ hook = CpuOffload(execution_device=execution_device, prev_module_hook=prev_module_hook)
+ add_hook_to_module(model, hook, append=True)
+ user_hook = UserCpuOffloadHook(model, hook)
+ return (model, user_hook)
+
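+# A sketch of chaining `cpu_offload_with_hook` for a two-stage pipeline: each hook moves
+# the previous stage back to CPU when the next one runs. Assumes a CUDA device is
+# available; the toy Linear stages are illustrative.
+def _demo_cpu_offload_with_hook():
+    if not torch.cuda.is_available():
+        return None
+    device = torch.device('cuda')
+    stage_1, hook_1 = cpu_offload_with_hook(nn.Linear(16, 16), device)
+    stage_2, hook_2 = cpu_offload_with_hook(nn.Linear(16, 16), device, prev_module_hook=hook_1)
+    x = stage_1(torch.randn(4, 16))  # stage_1 is moved to the GPU for its forward pass
+    x = stage_2(x)                   # hook_2 first offloads stage_1 back to CPU
+    hook_2.offload()                 # the last stage has to be offloaded manually
+    return x
+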
+def disk_offload(model: nn.Module, offload_dir: Union[str, os.PathLike], execution_device: Optional[torch.device]=None, offload_buffers: bool=False, preload_module_classes: Optional[List[str]]=None):
+ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, 'index.json')):
+ offload_state_dict(offload_dir, model.state_dict())
+ if execution_device is None:
+ execution_device = next(iter(model.parameters())).device
+ weights_map = OffloadedWeightsLoader(save_folder=offload_dir)
+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
+ attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=weights_map, preload_module_classes=preload_module_classes)
+ return model
+
+def dispatch_model(model: nn.Module, device_map: Dict[str, Union[str, int, torch.device]], main_device: Optional[torch.device]=None, state_dict: Optional[Dict[str, torch.Tensor]]=None, offload_dir: Optional[Union[str, os.PathLike]]=None, offload_index: Optional[Dict[str, str]]=None, offload_buffers: bool=False, skip_keys: Optional[Union[str, List[str]]]=None, preload_module_classes: Optional[List[str]]=None, force_hooks: bool=False):
+ check_device_map(model, device_map)
+ is_bnb_quantized = (getattr(model, 'is_quantized', False) or getattr(model, 'is_loaded_in_8bit', False)) and getattr(model, 'quantization_method', 'bitsandbytes') == 'bitsandbytes'
+ if len(set(device_map.values())) > 1 or is_bnb_quantized or force_hooks:
+ if main_device is None:
+ if set(device_map.values()) == {'cpu'} or set(device_map.values()) == {'cpu', 'disk'}:
+ main_device = 'cpu'
+ else:
+ main_device = [d for d in device_map.values() if d not in ['cpu', 'disk']][0]
+ if main_device != 'cpu':
+ cpu_modules = [name for (name, device) in device_map.items() if device == 'cpu']
+ if state_dict is None and len(cpu_modules) > 0:
+ state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)
+ disk_modules = [name for (name, device) in device_map.items() if device == 'disk']
+ if offload_dir is None and offload_index is None and (len(disk_modules) > 0):
+ raise ValueError(f"We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules need to be offloaded: {', '.join(disk_modules)}.")
+ if len(disk_modules) > 0 and offload_index is None and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, 'index.json'))):
+ disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)
+ offload_state_dict(offload_dir, disk_state_dict)
+ execution_device = {name: main_device if device in ['cpu', 'disk'] else device for (name, device) in device_map.items()}
+ execution_device[''] = main_device
+ offloaded_devices = ['disk'] if main_device == 'cpu' or main_device == 'mps' else ['cpu', 'disk']
+ offload = {name: device in offloaded_devices for (name, device) in device_map.items()}
+ save_folder = offload_dir if len(disk_modules) > 0 else None
+ if state_dict is not None or save_folder is not None or offload_index is not None:
+ device = main_device if offload_index is not None else None
+ weights_map = OffloadedWeightsLoader(state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device)
+ else:
+ weights_map = None
+ tied_params = find_tied_parameters(model)
+ tied_params_map = {}
+ for group in tied_params:
+ for param_name in group:
+ data_ptr = recursive_getattr(model, param_name).data_ptr()
+ tied_params_map[data_ptr] = {}
+ attach_align_device_hook_on_blocks(model, execution_device=execution_device, offload=offload, offload_buffers=offload_buffers, weights_map=weights_map, skip_keys=skip_keys, preload_module_classes=preload_module_classes, tied_params_map=tied_params_map)
+ offloaded_devices_str = ' and '.join([device for device in set(device_map.values()) if device in ('cpu', 'disk')])
+ if len(offloaded_devices_str) > 0:
+ logger.warning(f'Some parameters are on the meta device because they were offloaded to the {offloaded_devices_str}.')
+ retie_parameters(model, tied_params)
+
+ def add_warning(fn, model):
+
+ @wraps(fn)
+ def wrapper(*args, **kwargs):
+ warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks."
+ if str(fn.__name__) == 'to':
+ to_device = torch._C._nn._parse_to(*args, **kwargs)[0]
+ if to_device is not None:
+ logger.warning(warning_msg)
+ else:
+ logger.warning(warning_msg)
+ for param in model.parameters():
+ if param.device == torch.device('meta'):
+ raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.")
+ return fn(*args, **kwargs)
+ return wrapper
+ model.to = add_warning(model.to, model)
+ if is_npu_available():
+ model.npu = add_warning(model.npu, model)
+ elif is_mlu_available():
+ model.mlu = add_warning(model.mlu, model)
+ elif is_musa_available():
+ model.musa = add_warning(model.musa, model)
+ elif is_xpu_available():
+ model.xpu = add_warning(model.xpu, model)
+ else:
+ model.cuda = add_warning(model.cuda, model)
+ use_multi_gpu = len([device for device in set(device_map.values()) if device not in ('cpu', 'disk')]) > 1
+ if use_multi_gpu and (not check_cuda_p2p_ib_support()):
+ logger.warning("We've detected an older driver with an RTX 4000 series GPU. These drivers have issues with P2P, which can affect multi-GPU inference when using the accelerate device_map. Please make sure to update your driver to the latest version, which resolves this.")
+ else:
+ device = list(device_map.values())[0]
+ if is_npu_available() and isinstance(device, int):
+ device = f'npu:{device}'
+ elif is_mlu_available() and isinstance(device, int):
+ device = f'mlu:{device}'
+ elif is_musa_available() and isinstance(device, int):
+ device = f'musa:{device}'
+ elif is_xpu_available() and isinstance(device, int):
+ device = f'xpu:{device}'
+ if device != 'disk':
+ model.to(device)
+ else:
+ raise ValueError('You are trying to offload the whole model to the disk. Please use the `disk_offload` function instead.')
+ model.hf_device_map = dict(device_map)
+ return model
+
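+# A sketch of `dispatch_model` with a hand-written `device_map`: submodule '0' of the toy
+# Sequential runs on GPU 0, while submodule '1' is offloaded to CPU and streamed to the
+# GPU only for its forward pass. Assumes at least one CUDA device is available.
+def _demo_dispatch_model():
+    if not torch.cuda.is_available():
+        return None
+    model = nn.Sequential(nn.Linear(16, 16), nn.Linear(16, 16))
+    device_map = {'0': 0, '1': 'cpu'}  # keys are submodule names, values are devices
+    model = dispatch_model(model, device_map=device_map)
+    # The attached AlignDevicesHooks move inputs and weights across devices as needed.
+    return model(torch.randn(2, 16))
+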
+def load_checkpoint_and_dispatch(model: nn.Module, checkpoint: Union[str, os.PathLike], device_map: Optional[Union[str, Dict[str, Union[int, str, torch.device]]]]=None, max_memory: Optional[Dict[Union[int, str], Union[int, str]]]=None, no_split_module_classes: Optional[List[str]]=None, offload_folder: Optional[Union[str, os.PathLike]]=None, offload_buffers: bool=False, dtype: Optional[Union[str, torch.dtype]]=None, offload_state_dict: Optional[bool]=None, skip_keys: Optional[Union[str, List[str]]]=None, preload_module_classes: Optional[List[str]]=None, force_hooks: bool=False, strict: bool=False):
+ if isinstance(device_map, str) and device_map not in ['auto', 'balanced', 'balanced_low_0', 'sequential']:
+ raise ValueError("If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or 'sequential'.")
+ if isinstance(device_map, str):
+ if device_map != 'sequential':
+ max_memory = get_balanced_memory(model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, dtype=dtype, low_zero=device_map == 'balanced_low_0')
+ device_map = infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, dtype=dtype, offload_buffers=offload_buffers)
+ if offload_state_dict is None and device_map is not None and ('disk' in device_map.values()):
+ offload_state_dict = True
+ load_checkpoint_in_model(model, checkpoint, device_map=device_map, offload_folder=offload_folder, dtype=dtype, offload_state_dict=offload_state_dict, offload_buffers=offload_buffers, strict=strict)
+ if device_map is None:
+ return model
+ return dispatch_model(model, device_map=device_map, offload_dir=offload_folder, offload_buffers=offload_buffers, skip_keys=skip_keys, preload_module_classes=preload_module_classes, force_hooks=force_hooks)
+
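+# An end-to-end sketch of the big-model-inference flow built from this file: create an
+# empty skeleton with `init_empty_weights`, then let `load_checkpoint_and_dispatch` place
+# and load the weights. The tiny checkpoint written below only keeps the sketch
+# self-contained; real use would point at an existing model checkpoint.
+def _demo_load_checkpoint_and_dispatch():
+    import tempfile
+    template = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 64))
+    with tempfile.TemporaryDirectory() as tmp:
+        ckpt = os.path.join(tmp, 'toy_ckpt.bin')
+        torch.save(template.state_dict(), ckpt)
+        with init_empty_weights():
+            model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 64))
+        # device_map='auto' spreads weights over available GPUs, then CPU, then disk.
+        model = load_checkpoint_and_dispatch(model, checkpoint=ckpt, device_map='auto')
+        x = torch.randn(2, 64).to(next(model.parameters()).device)
+        return model(x)
+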
+# File: accelerate-main/src/accelerate/checkpointing.py
+import random
+from pathlib import Path
+from typing import List
+import numpy as np
+import torch
+from safetensors.torch import load_model
+from torch.cuda.amp import GradScaler
+from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_MODEL_NAME, SAFE_WEIGHTS_NAME, SAMPLER_NAME, SCALER_NAME, SCHEDULER_NAME, WEIGHTS_NAME, get_pretty_name, is_mlu_available, is_torch_xla_available, is_xpu_available, save
+if is_torch_xla_available():
+ import torch_xla.core.xla_model as xm
+from .logging import get_logger
+from .state import PartialState
+logger = get_logger(__name__)
+
+def save_accelerator_state(output_dir: str, model_states: List[dict], optimizers: list, schedulers: list, dataloaders: list, process_index: int, step: int, scaler: GradScaler=None, save_on_each_node: bool=False, safe_serialization: bool=True):
+ output_dir = Path(output_dir)
+ for (i, state) in enumerate(model_states):
+ weights_name = WEIGHTS_NAME if not safe_serialization else SAFE_WEIGHTS_NAME
+ if i > 0:
+ weights_name = weights_name.replace('.', f'_{i}.')
+ output_model_file = output_dir.joinpath(weights_name)
+ save(state, output_model_file, save_on_each_node=save_on_each_node, safe_serialization=safe_serialization)
+ logger.info(f'Model weights saved in {output_model_file}')
+ for (i, opt) in enumerate(optimizers):
+ state = opt.state_dict()
+ optimizer_name = f'{OPTIMIZER_NAME}.bin' if i == 0 else f'{OPTIMIZER_NAME}_{i}.bin'
+ output_optimizer_file = output_dir.joinpath(optimizer_name)
+ save(state, output_optimizer_file, save_on_each_node=save_on_each_node, safe_serialization=False)
+ logger.info(f'Optimizer state saved in {output_optimizer_file}')
+ for (i, scheduler) in enumerate(schedulers):
+ state = scheduler.state_dict()
+ scheduler_name = f'{SCHEDULER_NAME}.bin' if i == 0 else f'{SCHEDULER_NAME}_{i}.bin'
+ output_scheduler_file = output_dir.joinpath(scheduler_name)
+ save(state, output_scheduler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
+ logger.info(f'Scheduler state saved in {output_scheduler_file}')
+ for (i, dataloader) in enumerate(dataloaders):
+ sampler_name = f'{SAMPLER_NAME}.bin' if i == 0 else f'{SAMPLER_NAME}_{i}.bin'
+ output_sampler_file = output_dir.joinpath(sampler_name)
+ from .data_loader import IterableDatasetShard, SeedableRandomSampler
+ if isinstance(dataloader.dataset, IterableDatasetShard):
+ sampler = dataloader.get_sampler()
+ if isinstance(sampler, SeedableRandomSampler):
+ save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
+ if getattr(dataloader, 'use_stateful_dataloader', False):
+ dataloader_state_dict_name = 'dl_state_dict.bin' if i == 0 else f'dl_state_dict_{i}.bin'
+ output_dataloader_state_dict_file = output_dir.joinpath(dataloader_state_dict_name)
+ state_dict = dataloader.state_dict()
+ torch.save(state_dict, output_dataloader_state_dict_file)
+ logger.info(f'Sampler state for dataloader {i} saved in {output_sampler_file}')
+ if scaler is not None:
+ state = scaler.state_dict()
+ output_scaler_file = output_dir.joinpath(SCALER_NAME)
+ torch.save(state, output_scaler_file)
+ logger.info(f'Gradient scaler state saved in {output_scaler_file}')
+ states = {}
+ states_name = f'{RNG_STATE_NAME}_{process_index}.pkl'
+ states['step'] = step
+ states['random_state'] = random.getstate()
+ states['numpy_random_seed'] = np.random.get_state()
+ states['torch_manual_seed'] = torch.get_rng_state()
+ if is_xpu_available():
+ states['torch_xpu_manual_seed'] = torch.xpu.get_rng_state_all()
+ if is_mlu_available():
+ states['torch_mlu_manual_seed'] = torch.mlu.get_rng_state_all()
+ else:
+ states['torch_cuda_manual_seed'] = torch.cuda.get_rng_state_all()
+ if is_torch_xla_available():
+ states['xm_seed'] = xm.get_rng_state()
+ output_states_file = output_dir.joinpath(states_name)
+ torch.save(states, output_states_file)
+ logger.info(f'Random states saved in {output_states_file}')
+ return output_dir
+
+def load_accelerator_state(input_dir, models, optimizers, schedulers, dataloaders, process_index, scaler=None, map_location=None, **load_model_func_kwargs):
+ override_attributes = dict()
+ if map_location not in [None, 'cpu', 'on_device']:
+ raise TypeError("Unsupported optimizer map location passed, please choose one of `None`, `'cpu'`, or `'on_device'`")
+ if map_location is None:
+ map_location = 'cpu'
+ elif map_location == 'on_device':
+ map_location = PartialState().device
+ input_dir = Path(input_dir)
+ for (i, model) in enumerate(models):
+ ending = f'_{i}' if i > 0 else ''
+ input_model_file = input_dir.joinpath(f'{SAFE_MODEL_NAME}{ending}.safetensors')
+ if input_model_file.exists():
+ load_model(model, input_model_file, device=str(map_location), **load_model_func_kwargs)
+ else:
+ input_model_file = input_dir.joinpath(f'{MODEL_NAME}{ending}.bin')
+ state_dict = torch.load(input_model_file, map_location=map_location)
+ model.load_state_dict(state_dict, **load_model_func_kwargs)
+ logger.info('All model weights loaded successfully')
+ for (i, opt) in enumerate(optimizers):
+ optimizer_name = f'{OPTIMIZER_NAME}.bin' if i == 0 else f'{OPTIMIZER_NAME}_{i}.bin'
+ input_optimizer_file = input_dir.joinpath(optimizer_name)
+ optimizer_state = torch.load(input_optimizer_file, map_location=map_location)
+ optimizers[i].load_state_dict(optimizer_state)
+ logger.info('All optimizer states loaded successfully')
+ for (i, scheduler) in enumerate(schedulers):
+ scheduler_name = f'{SCHEDULER_NAME}.bin' if i == 0 else f'{SCHEDULER_NAME}_{i}.bin'
+ input_scheduler_file = input_dir.joinpath(scheduler_name)
+ scheduler.load_state_dict(torch.load(input_scheduler_file))
+ logger.info('All scheduler states loaded successfully')
+ for (i, dataloader) in enumerate(dataloaders):
+ sampler_name = f'{SAMPLER_NAME}.bin' if i == 0 else f'{SAMPLER_NAME}_{i}.bin'
+ input_sampler_file = input_dir.joinpath(sampler_name)
+ from .data_loader import IterableDatasetShard, SeedableRandomSampler
+ if isinstance(dataloader.dataset, IterableDatasetShard):
+ sampler = dataloader.get_sampler()
+ if isinstance(sampler, SeedableRandomSampler):
+ sampler = dataloader.set_sampler(torch.load(input_sampler_file))
+ if getattr(dataloader, 'use_stateful_dataloader', False):
+ dataloader_state_dict_name = 'dl_state_dict.bin' if i == 0 else f'dl_state_dict_{i}.bin'
+ input_dataloader_state_dict_file = input_dir.joinpath(dataloader_state_dict_name)
+ if input_dataloader_state_dict_file.exists():
+ state_dict = torch.load(input_dataloader_state_dict_file)
+ dataloader.load_state_dict(state_dict)
+ logger.info('All dataloader sampler states loaded successfully')
+ if scaler is not None:
+ input_scaler_file = input_dir.joinpath(SCALER_NAME)
+ scaler.load_state_dict(torch.load(input_scaler_file))
+ logger.info('GradScaler state loaded successfully')
+ try:
+ states = torch.load(input_dir.joinpath(f'{RNG_STATE_NAME}_{process_index}.pkl'))
+ if 'step' in states:
+ override_attributes['step'] = states['step']
+ random.setstate(states['random_state'])
+ np.random.set_state(states['numpy_random_seed'])
+ torch.set_rng_state(states['torch_manual_seed'])
+ if is_xpu_available():
+ torch.xpu.set_rng_state_all(states['torch_xpu_manual_seed'])
+ if is_mlu_available():
+ torch.mlu.set_rng_state_all(states['torch_mlu_manual_seed'])
+ else:
+ torch.cuda.set_rng_state_all(states['torch_cuda_manual_seed'])
+ if is_torch_xla_available():
+ xm.set_rng_state(states['xm_seed'])
+ logger.info('All random states loaded successfully')
+ except Exception:
+ logger.info('Could not load random states')
+ return override_attributes
+
+def save_custom_state(obj, path, index: int=0, save_on_each_node: bool=False):
+ save_location = Path(path) / f'custom_checkpoint_{index}.pkl'
+ logger.info(f'Saving the state of {get_pretty_name(obj)} to {save_location}')
+ save(obj.state_dict(), save_location, save_on_each_node=save_on_each_node)
+
+def load_custom_state(obj, path, index: int=0):
+ load_location = f'{path}/custom_checkpoint_{index}.pkl'
+ logger.info(f'Loading the state of {get_pretty_name(obj)} from {load_location}')
+ obj.load_state_dict(torch.load(load_location, map_location='cpu'))
+
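+# A small sketch of the custom-state helpers above: any object exposing `state_dict` and
+# `load_state_dict` round-trips through `custom_checkpoint_<index>.pkl`. The counter
+# class and the temporary directory are illustrative.
+def _demo_custom_state():
+    import tempfile
+
+    class Counter:
+        def __init__(self):
+            self.value = 0
+
+        def state_dict(self):
+            return {'value': self.value}
+
+        def load_state_dict(self, state):
+            self.value = state['value']
+
+    source, target = Counter(), Counter()
+    source.value = 7
+    with tempfile.TemporaryDirectory() as tmp:
+        save_custom_state(source, tmp, index=0)   # writes tmp/custom_checkpoint_0.pkl
+        load_custom_state(target, tmp, index=0)
+    assert target.value == 7
+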
+# File: accelerate-main/src/accelerate/commands/accelerate_cli.py
+from accelerate.commands.config import get_config_parser
+from accelerate.commands.env import env_command_parser
+from accelerate.commands.estimate import estimate_command_parser
+from accelerate.commands.launch import launch_command_parser
+from accelerate.commands.merge import merge_command_parser
+from accelerate.commands.test import test_command_parser
+from accelerate.commands.tpu import tpu_command_parser
+from accelerate.commands.utils import CustomArgumentParser
+
+def main():
+ parser = CustomArgumentParser('Accelerate CLI tool', usage='accelerate <command> [<args>]', allow_abbrev=False)
+ subparsers = parser.add_subparsers(help='accelerate command helpers')
+ get_config_parser(subparsers=subparsers)
+ estimate_command_parser(subparsers=subparsers)
+ env_command_parser(subparsers=subparsers)
+ launch_command_parser(subparsers=subparsers)
+ merge_command_parser(subparsers=subparsers)
+ tpu_command_parser(subparsers=subparsers)
+ test_command_parser(subparsers=subparsers)
+ args = parser.parse_args()
+ if not hasattr(args, 'func'):
+ parser.print_help()
+ exit(1)
+ args.func(args)
+if __name__ == '__main__':
+ main()
+
+# File: accelerate-main/src/accelerate/commands/config/__init__.py
+import argparse
+from .config import config_command_parser
+from .config_args import default_config_file, load_config_from_file
+from .default import default_command_parser
+from .update import update_command_parser
+
+def get_config_parser(subparsers=None):
+ parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
+ config_parser = config_command_parser(subparsers)
+ subcommands = config_parser.add_subparsers(title='subcommands', dest='subcommand')
+ default_command_parser(subcommands, parents=[parent_parser])
+ update_command_parser(subcommands, parents=[parent_parser])
+ return config_parser
+
+def main():
+ config_parser = get_config_parser()
+ args = config_parser.parse_args()
+ if not hasattr(args, 'func'):
+ config_parser.print_help()
+ exit(1)
+ args.func(args)
+if __name__ == '__main__':
+ main()
+
+# File: accelerate-main/src/accelerate/commands/config/cluster.py
+import os
+from ...utils import ComputeEnvironment, DistributedType, is_deepspeed_available, is_fp8_available, is_mlu_available, is_mps_available, is_msamp_available, is_musa_available, is_npu_available, is_transformer_engine_available, is_transformers_available, is_xpu_available
+from ...utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, TORCH_DYNAMO_MODES
+from .config_args import ClusterConfig
+from .config_utils import DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_distributed_mode, _convert_dynamo_backend, _convert_fp8_backend, _convert_mixed_precision, _convert_yes_no_to_bool
+
+def get_cluster_input():
+ distributed_type = _ask_options('Which type of machine are you using?', ['No distributed training', 'multi-CPU', 'multi-XPU', 'multi-GPU', 'multi-NPU', 'multi-MLU', 'multi-MUSA', 'TPU'], _convert_distributed_mode)
+ machine_rank = 0
+ num_machines = 1
+ num_processes = 1
+ gpu_ids = None
+ main_process_ip = None
+ main_process_port = None
+ rdzv_backend = 'static'
+ same_network = True
+ debug = False
+ if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU]:
+ num_machines = _ask_field('How many different machines will you use (use more than 1 for multi-node training)? [1]: ', int, default=1)
+ if num_machines > 1:
+ machine_rank = _ask_options('What is the rank of this machine?', list(range(num_machines)), int)
+ main_process_ip = _ask_field('What is the IP address of the machine that will host the main process? ')
+ main_process_port = _ask_field('What is the port you will use to communicate with the main process? ', int)
+ same_network = _ask_field('Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: ', _convert_yes_no_to_bool, default=True, error_message='Please enter yes or no.')
+ if not same_network:
+ rdzv_backend = _ask_field("What rendezvous backend will you use? ('static', 'c10d', ...): ", default='static')
+ debug = _ask_field('Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if distributed_type == DistributedType.NO:
+ use_cpu = _ask_field('Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)? [yes/NO]:', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ elif distributed_type == DistributedType.MULTI_CPU:
+ use_cpu = True
+ else:
+ use_cpu = False
+ ipex_config = {}
+ mpirun_config = {}
+ if use_cpu:
+ ipex_config['ipex'] = _ask_field('Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if distributed_type == DistributedType.MULTI_CPU:
+ use_mpirun = _ask_field('Do you want accelerate to launch mpirun? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if use_mpirun:
+ mpirun_hostfile = _ask_field('Please enter the path to the hostfile to use with mpirun [~/hostfile]: ', str, default='~/hostfile')
+ mpirun_config['mpirun_hostfile'] = os.path.expanduser(mpirun_hostfile.strip())
+ mpirun_config['mpirun_ccl'] = _ask_field('Enter the number of oneCCL worker threads [1]: ', default=1)
+ if not use_cpu and is_xpu_available() and (distributed_type not in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.XLA, DistributedType.MULTI_MUSA]):
+ ipex_config['use_xpu'] = _ask_field('Do you want to use XPU plugin to speed up training on XPU? [yes/NO]:', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ dynamo_config = {}
+ use_dynamo = _ask_field('Do you wish to optimize your script with torch dynamo? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if use_dynamo:
+ prefix = 'dynamo_'
+ dynamo_config[prefix + 'backend'] = _ask_options('Which dynamo backend would you like to use?', [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2)
+ use_custom_options = _ask_field('Do you want to customize the defaults sent to torch.compile? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if use_custom_options:
+ dynamo_config[prefix + 'mode'] = _ask_options('Which mode do you want to use?', TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default=0)
+ dynamo_config[prefix + 'use_fullgraph'] = _ask_field('Do you want to use fullgraph mode, or is it OK to break the model into several subgraphs? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ dynamo_config[prefix + 'use_dynamic'] = _ask_field('Do you want to enable dynamic shape tracing? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ use_mps = not use_cpu and is_mps_available()
+ deepspeed_config = {}
+ if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_XPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.NO] and (not use_mps):
+ use_deepspeed = _ask_field('Do you want to use DeepSpeed? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if use_deepspeed:
+ distributed_type = DistributedType.DEEPSPEED
+ assert is_deepspeed_available(), 'DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source'
+ if distributed_type == DistributedType.DEEPSPEED:
+ use_deepspeed_config = _ask_field('Do you want to specify a json file to a DeepSpeed config? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if use_deepspeed_config:
+ deepspeed_config['deepspeed_config_file'] = _ask_field('Please enter the path to the json DeepSpeed config file: ', str, default='none')
+ else:
+ deepspeed_config['zero_stage'] = _ask_options("What should be your DeepSpeed's ZeRO optimization stage?", [0, 1, 2, 3], int, default=2)
+ deepspeed_devices = ['none', 'cpu', 'nvme']
+ if deepspeed_config['zero_stage'] >= 2:
+ deepspeed_config['offload_optimizer_device'] = _ask_options('Where to offload optimizer states?', deepspeed_devices, lambda x: deepspeed_devices[int(x)])
+ deepspeed_config['offload_param_device'] = _ask_options('Where to offload parameters?', deepspeed_devices, lambda x: deepspeed_devices[int(x)])
+ if deepspeed_config['offload_param_device'] == 'nvme':
+ deepspeed_config['offload_param_nvme_path'] = _ask_field('NVMe path to offload parameters? ', str, default='/nvme')
+ if deepspeed_config['offload_optimizer_device'] == 'nvme':
+ deepspeed_config['offload_optimizer_nvme_path'] = _ask_field('NVMe path to offload optimizer states? ', str, default='/nvme')
+ deepspeed_config['gradient_accumulation_steps'] = _ask_field('How many gradient accumulation steps are you passing in your script? [1]: ', int, default=1)
+ use_gradient_clipping = _ask_field('Do you want to use gradient clipping? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if use_gradient_clipping:
+ deepspeed_config['gradient_clipping'] = _ask_field('What is the gradient clipping value? [1.0]: ', float, default=1.0)
+ if deepspeed_config['zero_stage'] == 3:
+ deepspeed_config['zero3_save_16bit_model'] = _ask_field('Do you want to save 16-bit model weights when using ZeRO Stage-3? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ deepspeed_config['zero3_init_flag'] = _ask_field('Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if deepspeed_config['zero3_init_flag']:
+ if not is_transformers_available():
+ raise Exception('When `zero3_init_flag` is set, it requires Transformers to be installed. Please run `pip3 install transformers`.')
+ use_moe = _ask_field('Do you want to enable Mixture-of-Experts training (MoE)? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if use_moe:
+ deepspeed_config['deepspeed_moe_layer_cls_names'] = _ask_field('Specify the comma-separated list of transformers MoE layer class names (case-sensitive), e.g.: `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ...: ', str)
+ if num_machines > 1:
+ launcher_query = 'Which type of launcher do you want to use?'
+ deepspeed_config['deepspeed_multinode_launcher'] = _ask_options(launcher_query, DEEPSPEED_MULTINODE_LAUNCHERS, lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)])
+ if deepspeed_config['deepspeed_multinode_launcher'] != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
+ deepspeed_config['deepspeed_hostfile'] = _ask_field('DeepSpeed configures multi-node compute resources with a hostfile. Each row is of the format `hostname slots=[num_gpus]`, e.g., `localhost slots=2`; for more information please refer to the official [documentation](https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). Please specify the location of the hostfile: ', str)
+ is_exclusion_filter = _ask_field('Do you want to specify exclusion filter string? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if is_exclusion_filter:
+ deepspeed_config['deepspeed_exclusion_filter'] = _ask_field('DeepSpeed exclusion filter string: ', str)
+ is_inclusion_filter = _ask_field('Do you want to specify inclusion filter string? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if is_inclusion_filter:
+ deepspeed_config['deepspeed_inclusion_filter'] = _ask_field('DeepSpeed inclusion filter string: ', str)
+ fsdp_config = {}
+ if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_XPU]:
+ use_fsdp = _ask_field('Do you want to use FullyShardedDataParallel? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if use_fsdp:
+ distributed_type = DistributedType.FSDP
+ if distributed_type == DistributedType.FSDP:
+ sharding_strategy_query = 'What should be your sharding strategy?'
+ fsdp_config['fsdp_sharding_strategy'] = _ask_options(sharding_strategy_query, FSDP_SHARDING_STRATEGY, lambda x: FSDP_SHARDING_STRATEGY[int(x)])
+ fsdp_config['fsdp_offload_params'] = _ask_field('Do you want to offload parameters and gradients to CPU? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ fsdp_wrap_query = 'What should be your auto wrap policy?'
+ fsdp_config['fsdp_auto_wrap_policy'] = _ask_options(fsdp_wrap_query, FSDP_AUTO_WRAP_POLICY, lambda x: FSDP_AUTO_WRAP_POLICY[int(x)])
+ if fsdp_config['fsdp_auto_wrap_policy'] == FSDP_AUTO_WRAP_POLICY[0]:
+ use_no_split_modules = _ask_field("Do you want to use the model's `_no_split_modules` to wrap? Only applicable for 🤗 Transformers. [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if not use_no_split_modules:
+ fsdp_config['fsdp_transformer_layer_cls_to_wrap'] = _ask_field('Specify the comma-separated list of transformer layer class names (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput` ...: ', str)
+ elif fsdp_config['fsdp_auto_wrap_policy'] == FSDP_AUTO_WRAP_POLICY[1]:
+ fsdp_config['fsdp_min_num_params'] = _ask_field("What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? [1e8]: ", int, default=100000000)
+ fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy?"
+ fsdp_config['fsdp_backward_prefetch'] = _ask_options(fsdp_backward_prefetch_query, FSDP_BACKWARD_PREFETCH, lambda x: FSDP_BACKWARD_PREFETCH[int(x)])
+ fsdp_state_dict_type_query = "What should be your FSDP's state dict type?"
+ fsdp_config['fsdp_state_dict_type'] = _ask_options(fsdp_state_dict_type_query, FSDP_STATE_DICT_TYPE, lambda x: FSDP_STATE_DICT_TYPE[int(x)], default=2)
+ fsdp_config['fsdp_forward_prefetch'] = _ask_field("Do you want to enable FSDP's forward prefetch policy? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ fsdp_config['fsdp_use_orig_params'] = _ask_field("Do you want to enable FSDP's `use_orig_params` feature? [YES/no]: ", _convert_yes_no_to_bool, default=True, error_message='Please enter yes or no.')
+ fsdp_config['fsdp_cpu_ram_efficient_loading'] = _ask_field('Do you want to enable CPU RAM efficient model loading? Only applicable for 🤗 Transformers models. [YES/no]: ', _convert_yes_no_to_bool, default=True, error_message='Please enter yes or no.')
+ if fsdp_config['fsdp_cpu_ram_efficient_loading']:
+ fsdp_config['fsdp_sync_module_states'] = True
+ else:
+ fsdp_config['fsdp_sync_module_states'] = _ask_field('Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ', _convert_yes_no_to_bool, default=True, error_message='Please enter yes or no.')
+ fsdp_config['fsdp_activation_checkpointing'] = _ask_field('Do you want to enable FSDP activation checkpointing? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ megatron_lm_config = {}
+ if distributed_type in [DistributedType.MULTI_GPU]:
+ use_megatron_lm = _ask_field('Do you want to use Megatron-LM? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if use_megatron_lm:
+ distributed_type = DistributedType.MEGATRON_LM
+ if distributed_type == DistributedType.MEGATRON_LM:
+ prefix = 'megatron_lm_'
+ megatron_lm_config[prefix + 'tp_degree'] = _ask_field('What is the Tensor Parallelism degree/size? [1]:', int, default=1, error_message='Please enter an integer.')
+ if megatron_lm_config[prefix + 'tp_degree'] > 1:
+ megatron_lm_config[prefix + 'sequence_parallelism'] = _ask_field('Do you want to enable Sequence Parallelism? [YES/no]: ', _convert_yes_no_to_bool, default=True, error_message='Please enter yes or no.')
+ megatron_lm_config[prefix + 'pp_degree'] = _ask_field('What is the Pipeline Parallelism degree/size? [1]:', int, default=1, error_message='Please enter an integer.')
+ if megatron_lm_config[prefix + 'pp_degree'] > 1:
+ megatron_lm_config[prefix + 'num_micro_batches'] = _ask_field('What is the number of micro-batches? [1]:', int, default=1, error_message='Please enter an integer.')
+ megatron_lm_config[prefix + 'recompute_activations'] = _ask_field('Do you want to enable selective activation recomputation? [YES/no]: ', _convert_yes_no_to_bool, default=True, error_message='Please enter yes or no.')
+ megatron_lm_config[prefix + 'use_distributed_optimizer'] = _ask_field('Do you want to use distributed optimizer which shards optimizer state and gradients across data parallel ranks? [YES/no]: ', _convert_yes_no_to_bool, default=True, error_message='Please enter yes or no.')
+ megatron_lm_config[prefix + 'gradient_clipping'] = _ask_field('What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: ', float, default=1.0)
+ tpu_commands = None
+ tpu_command_file = None
+ tpu_downcast_bf16 = 'no'
+ tpu_env = []
+ tpu_name = None
+ tpu_vm = None
+ tpu_zone = None
+ tpu_use_sudo = False
+ tpu_use_cluster = False
+ if distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU, DistributedType.XLA]:
+ machine_type = str(distributed_type).split('.')[1].replace('MULTI_', '')
+ if machine_type == 'TPU':
+ machine_type += ' cores'
+ elif machine_type == 'CPU':
+ machine_type = 'processes'
+ else:
+ machine_type += '(s)'
+ num_processes = _ask_field(f'How many {machine_type} should be used for distributed training? [1]:', int, default=1, error_message='Please enter an integer.')
+ elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
+ num_processes = _ask_field('How many GPU(s) should be used for distributed training? [1]:', int, default=1, error_message='Please enter an integer.')
+ else:
+ num_processes = 1
+ if distributed_type == DistributedType.MULTI_GPU and num_machines == 1 and (num_processes == 1):
+ raise ValueError(f'Specified distributed type {distributed_type} but only using 1 GPU on a single machine. Please select `No distributed training` for the type of machine you are using.')
+ if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.NO] and (not use_cpu) and (not use_mps):
+ if is_npu_available():
+ machine_type = 'NPU(s)'
+ elif is_mlu_available():
+ machine_type = 'MLU(s)'
+ elif is_musa_available():
+ machine_type = 'MUSA(s)'
+ else:
+ machine_type = 'GPU(s)'
+ gpu_ids = _ask_field(f'What {machine_type} (by id) should be used for training on this machine as a comma-separated list? [all]:', default='all')
+ enable_cpu_affinity = False
+ if distributed_type in (DistributedType.NO, DistributedType.MULTI_GPU) and (not use_cpu) and (not use_mps):
+ enable_cpu_affinity = _ask_field('Would you like to enable NUMA efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ fp8_config = None
+ if distributed_type == DistributedType.XLA:
+ mixed_precision = 'no'
+ main_training_function = _ask_field('What is the name of the function in your script that should be launched in all parallel scripts? [main]: ', default='main')
+ tpu_use_cluster = _ask_field('Are you using a TPU cluster? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if tpu_use_cluster:
+ tpu_name = _ask_field('What is the name of your TPU cluster? ', default=None, error_message='Please enter the name of your TPU cluster.')
+ tpu_zone = _ask_field('What is the zone of your TPU cluster? ', default=None, error_message='Please enter the zone of your TPU cluster.')
+ tpu_use_sudo = _ask_field('To run a python script in a TPU pod, should `sudo` be used? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ run_commands = _ask_field('Do you have code you wish to run on startup in each pod? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if run_commands:
+ use_command_file = _ask_field('Is this code located in a bash script? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if use_command_file:
+ tpu_command_file = _ask_field('What is the path to your bash script? ', default=None, error_message='Please enter the path to your bash script.')
+ tpu_command_file = os.path.abspath(tpu_command_file)
+ else:
+ print('Please enter each command you wish to run on startup in each pod, one at a time.')
+ tpu_commands = []
+ another_command = True
+ while another_command:
+ tpu_commands.append(_ask_field('Please enter a single command to be run: ', default=None, error_message='Please enter the commands you wish to run on startup in each pod as a single string.'))
+ another_command = _ask_field('Do you wish to add another command? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ tpu_vm = _ask_field('If not using an instance group, what are the names of the Compute VM instances to be used, separated by a comma: ', default='').split(',')
+ tpu_env = _ask_field('What environment variables do you wish to set in each pod, separated by a comma: ', default='').split(',')
+ else:
+ main_training_function = 'main'
+ if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:
+ mixed_precision = None
+ else:
+ mixed_precision = _ask_options('Do you wish to use mixed precision?', ['no', 'fp16', 'bf16', 'fp8'], _convert_mixed_precision)
+ if mixed_precision == 'fp8':
+ if not is_fp8_available():
+ raise ValueError('FP8 (either Transformer Engine or MSAMP) is not installed on this machine.')
+ fp8_config = {}
+ fp8_config['backend'] = _ask_options('Which FP8 backend do you want to use?', ['te', 'msamp'], _convert_fp8_backend)
+ if fp8_config['backend'] == 'TE':
+ if not is_transformer_engine_available():
+ raise ValueError('TransformersEngine was selected, but it is not installed on this machine.')
+ fp8_config['use_autocast_during_eval'] = _ask_field('Do you want to use FP8 autocast during eval mode? Generally better metrics are found when this is disabled [yes/NO]: ', _convert_yes_no_to_bool, default=False)
+ fp8_config['margin'] = _ask_field('What margin should be used for gradient scaling? [0]: ', int, default=0)
+ fp8_config['interval'] = _ask_field('What interval should be used for how often the scaling factor is recomputed? [1]: ', int, default=1)
+ fp8_config['fp8_format'] = _ask_options('Which weight format should be used?', ['HYBRID', 'E4M3'], lambda x: 'HYBRID' if x == 0 else 'E4M3', default=0)
+ fp8_config['amax_history_length'] = _ask_field('What length of history should be used for the amax scaling factor computation? [1024]: ', int, default=1024)
+ fp8_config['amax_compute_algorithm'] = _ask_options('Which algorithm should be used for the amax scaling factor computation?', ['max', 'most_recent'], lambda x: 'max' if x == 0 else 'most_recent', default=0)
+ fp8_config['override_linear_precision'] = _ask_field('Do you want to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision? [yes/NO]: ', _convert_yes_no_to_bool, default=False)
+ if fp8_config['override_linear_precision']:
+ fprop = _ask_field('Should `fprop` be executed in higher precision? [yes/NO]: ', _convert_yes_no_to_bool, default=False)
+ dgrad = _ask_field('Should `dgrad` be executed in higher precision? [yes/NO]: ', _convert_yes_no_to_bool, default=False)
+ wgrad = _ask_field('Should `wgrad` be executed in higher precision? [yes/NO]: ', _convert_yes_no_to_bool, default=False)
+ fp8_config['override_linear_precision'] = (fprop, dgrad, wgrad)
+ elif fp8_config['backend'] == 'MSAMP':
+ if not is_msamp_available():
+ raise ValueError('MSAMP was selected, but it is not installed on this machine.')
+ fp8_config['optimization_level'] = _ask_options('Which optimization level should be used?', ['O1', 'O2'], lambda x: 'O1' if x == 0 else 'O2', default=1)
+ if use_dynamo and mixed_precision == 'no' and (not use_cpu):
+ print('Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.')
+ if distributed_type == DistributedType.XLA and mixed_precision == 'bf16':
+ tpu_downcast_bf16 = _ask_field('Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?', default='no')
+ return ClusterConfig(compute_environment=ComputeEnvironment.LOCAL_MACHINE, distributed_type=distributed_type, num_processes=num_processes, gpu_ids=gpu_ids, mixed_precision=mixed_precision, downcast_bf16=tpu_downcast_bf16, machine_rank=machine_rank, num_machines=num_machines, main_process_ip=main_process_ip, main_process_port=main_process_port, main_training_function=main_training_function, fp8_config=fp8_config, deepspeed_config=deepspeed_config, fsdp_config=fsdp_config, megatron_lm_config=megatron_lm_config, ipex_config=ipex_config, mpirun_config=mpirun_config, use_cpu=use_cpu, rdzv_backend=rdzv_backend, same_network=same_network, commands=tpu_commands, command_file=tpu_command_file, tpu_env=tpu_env, tpu_name=tpu_name, tpu_vm=tpu_vm, tpu_zone=tpu_zone, tpu_use_sudo=tpu_use_sudo, tpu_use_cluster=tpu_use_cluster, dynamo_config=dynamo_config, debug=debug, enable_cpu_affinity=enable_cpu_affinity)
+
+# File: accelerate-main/src/accelerate/commands/config/config.py
+import argparse
+import os
+from accelerate.utils import ComputeEnvironment
+from .cluster import get_cluster_input
+from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file
+from .config_utils import _ask_field, _ask_options, _convert_compute_environment
+from .sagemaker import get_sagemaker_input
+description = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine.'
+
+def get_user_input():
+ compute_environment = _ask_options('In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment)
+ if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
+ config = get_sagemaker_input()
+ else:
+ config = get_cluster_input()
+ return config
+
+def config_command_parser(subparsers=None):
+ if subparsers is not None:
+ parser = subparsers.add_parser('config', description=description)
+ else:
+ parser = argparse.ArgumentParser('Accelerate config command', description=description)
+ parser.add_argument('--config_file', default=None, help="The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed with 'huggingface'.")
+ if subparsers is not None:
+ parser.set_defaults(func=config_command)
+ return parser
+
+def config_command(args):
+ config = get_user_input()
+ if args.config_file is not None:
+ config_file = args.config_file
+ else:
+ if not os.path.isdir(cache_dir):
+ os.makedirs(cache_dir)
+ config_file = default_yaml_config_file
+ if config_file.endswith('.json'):
+ config.to_json_file(config_file)
+ else:
+ config.to_yaml_file(config_file)
+ print(f'accelerate configuration saved at {config_file}')
+
+def main():
+ parser = config_command_parser()
+ args = parser.parse_args()
+ config_command(args)
+if __name__ == '__main__':
+ main()
+
+# File: accelerate-main/src/accelerate/commands/config/config_args.py
+import json
+import os
+from dataclasses import dataclass
+from enum import Enum
+from typing import List, Optional, Union
+import yaml
+from ...utils import ComputeEnvironment, DistributedType, SageMakerDistributedType
+from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSION, SAGEMAKER_TRANSFORMERS_VERSION
+hf_cache_home = os.path.expanduser(os.environ.get('HF_HOME', os.path.join(os.environ.get('XDG_CACHE_HOME', '~/.cache'), 'huggingface')))
+cache_dir = os.path.join(hf_cache_home, 'accelerate')
+default_json_config_file = os.path.join(cache_dir, 'default_config.yaml')
+default_yaml_config_file = os.path.join(cache_dir, 'default_config.yaml')
+if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):
+ default_config_file = default_yaml_config_file
+else:
+ default_config_file = default_json_config_file
+
+def load_config_from_file(config_file):
+ if config_file is not None:
+ if not os.path.isfile(config_file):
+ raise FileNotFoundError(f'The passed configuration file `{config_file}` does not exist. Please pass an existing file to `accelerate launch`, or use the default one created through `accelerate config` and run `accelerate launch` without the `--config_file` argument.')
+ else:
+ config_file = default_config_file
+ with open(config_file, encoding='utf-8') as f:
+ if config_file.endswith('.json'):
+ if json.load(f).get('compute_environment', ComputeEnvironment.LOCAL_MACHINE) == ComputeEnvironment.LOCAL_MACHINE:
+ config_class = ClusterConfig
+ else:
+ config_class = SageMakerConfig
+ return config_class.from_json_file(json_file=config_file)
+ else:
+ if yaml.safe_load(f).get('compute_environment', ComputeEnvironment.LOCAL_MACHINE) == ComputeEnvironment.LOCAL_MACHINE:
+ config_class = ClusterConfig
+ else:
+ config_class = SageMakerConfig
+ return config_class.from_yaml_file(yaml_file=config_file)
+
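+# A hedged sketch of reading back whatever `accelerate config` previously wrote. The call
+# is guarded because no default config may exist yet; the attributes printed come from
+# the config dataclasses defined below, and the `_demo_*` name is illustrative.
+def _demo_load_default_config():
+    if not os.path.isfile(default_config_file):
+        return None
+    config = load_config_from_file(None)  # `None` falls back to the default location
+    print(config.distributed_type, config.mixed_precision)
+    return config
+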
+@dataclass
+class BaseConfig:
+ compute_environment: ComputeEnvironment
+ distributed_type: Union[DistributedType, SageMakerDistributedType]
+ mixed_precision: str
+ use_cpu: bool
+ debug: bool
+
+ def to_dict(self):
+ result = self.__dict__
+
+ def _convert_enums(value):
+ if isinstance(value, Enum):
+ return value.value
+ if isinstance(value, dict):
+ if not bool(value):
+ return None
+ for (key1, value1) in value.items():
+ value[key1] = _convert_enums(value1)
+ return value
+ for (key, value) in result.items():
+ result[key] = _convert_enums(value)
+ result = {k: v for (k, v) in result.items() if v is not None}
+ return result
+
+ @staticmethod
+ def process_config(config_dict):
+ if 'compute_environment' not in config_dict:
+ config_dict['compute_environment'] = ComputeEnvironment.LOCAL_MACHINE
+ if 'distributed_type' not in config_dict:
+ raise ValueError('A `distributed_type` must be specified in the config file.')
+ if 'num_processes' not in config_dict and config_dict['distributed_type'] == DistributedType.NO:
+ config_dict['num_processes'] = 1
+ if 'mixed_precision' not in config_dict:
+ config_dict['mixed_precision'] = 'fp16' if 'fp16' in config_dict and config_dict['fp16'] else None
+ if 'fp16' in config_dict:
+ del config_dict['fp16']
+ if 'dynamo_backend' in config_dict:
+ dynamo_backend = config_dict.pop('dynamo_backend')
+ config_dict['dynamo_config'] = {} if dynamo_backend == 'NO' else {'dynamo_backend': dynamo_backend}
+ if 'use_cpu' not in config_dict:
+ config_dict['use_cpu'] = False
+ if 'debug' not in config_dict:
+ config_dict['debug'] = False
+ if 'enable_cpu_affinity' not in config_dict:
+ config_dict['enable_cpu_affinity'] = False
+ return config_dict
+
+ @classmethod
+ def from_json_file(cls, json_file=None):
+ json_file = default_json_config_file if json_file is None else json_file
+ with open(json_file, encoding='utf-8') as f:
+ config_dict = json.load(f)
+ config_dict = cls.process_config(config_dict)
+ extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
+ if len(extra_keys) > 0:
+ raise ValueError(f'The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate` version or fix (and potentially remove) these keys from your config file.')
+ return cls(**config_dict)
+
+ def to_json_file(self, json_file):
+ with open(json_file, 'w', encoding='utf-8') as f:
+ content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + '\n'
+ f.write(content)
+
+ @classmethod
+ def from_yaml_file(cls, yaml_file=None):
+ yaml_file = default_yaml_config_file if yaml_file is None else yaml_file
+ with open(yaml_file, encoding='utf-8') as f:
+ config_dict = yaml.safe_load(f)
+ config_dict = cls.process_config(config_dict)
+ extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
+ if len(extra_keys) > 0:
+ raise ValueError(f'The config file at {yaml_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate` version or fix (and potentially remove) these keys from your config file.')
+ return cls(**config_dict)
+
+ def to_yaml_file(self, yaml_file):
+ with open(yaml_file, 'w', encoding='utf-8') as f:
+ yaml.safe_dump(self.to_dict(), f)
+
+ def __post_init__(self):
+ if isinstance(self.compute_environment, str):
+ self.compute_environment = ComputeEnvironment(self.compute_environment)
+ if isinstance(self.distributed_type, str):
+ if self.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
+ self.distributed_type = SageMakerDistributedType(self.distributed_type)
+ else:
+ self.distributed_type = DistributedType(self.distributed_type)
+ if getattr(self, 'dynamo_config', None) is None:
+ self.dynamo_config = {}
+
+@dataclass
+class ClusterConfig(BaseConfig):
+ num_processes: int = -1
+ machine_rank: int = 0
+ num_machines: int = 1
+ gpu_ids: Optional[str] = None
+ main_process_ip: Optional[str] = None
+ main_process_port: Optional[int] = None
+ rdzv_backend: Optional[str] = 'static'
+ same_network: Optional[bool] = False
+ main_training_function: str = 'main'
+ enable_cpu_affinity: bool = False
+ fp8_config: dict = None
+ deepspeed_config: dict = None
+ fsdp_config: dict = None
+ megatron_lm_config: dict = None
+ ipex_config: dict = None
+ mpirun_config: dict = None
+ downcast_bf16: bool = False
+ tpu_name: str = None
+ tpu_zone: str = None
+ tpu_use_cluster: bool = False
+ tpu_use_sudo: bool = False
+ command_file: str = None
+ commands: List[str] = None
+ tpu_vm: List[str] = None
+ tpu_env: List[str] = None
+ dynamo_config: dict = None
+
+ def __post_init__(self):
+ if self.deepspeed_config is None:
+ self.deepspeed_config = {}
+ if self.fsdp_config is None:
+ self.fsdp_config = {}
+ if self.megatron_lm_config is None:
+ self.megatron_lm_config = {}
+ if self.ipex_config is None:
+ self.ipex_config = {}
+ if self.mpirun_config is None:
+ self.mpirun_config = {}
+ if self.fp8_config is None:
+ self.fp8_config = {}
+ return super().__post_init__()
+
+@dataclass
+class SageMakerConfig(BaseConfig):
+ ec2_instance_type: str
+ iam_role_name: str
+ image_uri: Optional[str] = None
+ profile: Optional[str] = None
+ region: str = 'us-east-1'
+ num_machines: int = 1
+ gpu_ids: str = 'all'
+ base_job_name: str = f'accelerate-sagemaker-{num_machines}'
+ pytorch_version: str = SAGEMAKER_PYTORCH_VERSION
+ transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION
+ py_version: str = SAGEMAKER_PYTHON_VERSION
+ sagemaker_inputs_file: str = None
+ sagemaker_metrics_file: str = None
+ additional_args: dict = None
+ dynamo_config: dict = None
+ enable_cpu_affinity: bool = False
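+ # Example (editor's sketch, not part of the original module): round-tripping a minimal
+ # ClusterConfig through the YAML helpers defined above. Field values are illustrative.
+ #
+ #     config = ClusterConfig(
+ #         compute_environment=ComputeEnvironment.LOCAL_MACHINE,
+ #         distributed_type=DistributedType.NO,
+ #         mixed_precision='no',
+ #         use_cpu=True,
+ #         debug=False,
+ #         num_processes=1,
+ #     )
+ #     config.to_yaml_file('/tmp/accelerate_default_config.yaml')
+ #     reloaded = load_config_from_file('/tmp/accelerate_default_config.yaml')
+ #     assert isinstance(reloaded, ClusterConfig)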
+
+# File: accelerate-main/src/accelerate/commands/config/config_utils.py
+import argparse
+from ...utils.dataclasses import ComputeEnvironment, DistributedType, DynamoBackend, FP8BackendType, PrecisionType, SageMakerDistributedType
+from ..menu import BulletMenu
+DYNAMO_BACKENDS = ['EAGER', 'AOT_EAGER', 'INDUCTOR', 'AOT_TS_NVFUSER', 'NVPRIMS_NVFUSER', 'CUDAGRAPHS', 'OFI', 'FX2TRT', 'ONNXRT', 'TENSORRT', 'AOT_TORCHXLA_TRACE_ONCE', 'TORCHXLA_TRACE_ONCE', 'IPEX', 'TVM']
+
+def _ask_field(input_text, convert_value=None, default=None, error_message=None):
+ ask_again = True
+ while ask_again:
+ result = input(input_text)
+ try:
+ if default is not None and len(result) == 0:
+ return default
+ return convert_value(result) if convert_value is not None else result
+ except Exception:
+ if error_message is not None:
+ print(error_message)
+
+def _ask_options(input_text, options=[], convert_value=None, default=0):
+ menu = BulletMenu(input_text, options)
+ result = menu.run(default_choice=default)
+ return convert_value(result) if convert_value is not None else result
+
+def _convert_compute_environment(value):
+ value = int(value)
+ return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value])
+
+def _convert_distributed_mode(value):
+ value = int(value)
+ return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'MULTI_MLU', 'MULTI_MUSA', 'XLA'][value])
+
+def _convert_dynamo_backend(value):
+ value = int(value)
+ return DynamoBackend(DYNAMO_BACKENDS[value]).value
+
+def _convert_mixed_precision(value):
+ value = int(value)
+ return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value])
+
+def _convert_sagemaker_distributed_mode(value):
+ value = int(value)
+ return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value])
+
+def _convert_fp8_backend(value):
+ value = int(value)
+ return FP8BackendType(['TE', 'MSAMP'][value])
+
+def _convert_yes_no_to_bool(value):
+ return {'yes': True, 'no': False}[value.lower()]
+
+class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
+
+ def _format_usage(self, usage, actions, groups, prefix):
+ usage = super()._format_usage(usage, actions, groups, prefix)
+ usage = usage.replace('<command> [<args>] ', '')
+ return usage
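+ # Example (editor's sketch): the prompt helpers above pair free-form input with an
+ # optional converter and default, e.g.
+ #
+ #     num_machines = _ask_field('How many machines do you want to use? [1]: ', int, default=1)
+ #     use_fp16 = _ask_field('Do you want to use FP16? [yes/NO]: ', _convert_yes_no_to_bool, default=False)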
+
+# File: accelerate-main/src/accelerate/commands/config/default.py
+from pathlib import Path
+import torch
+from ...utils import is_mlu_available, is_musa_available, is_npu_available, is_xpu_available
+from .config_args import ClusterConfig, default_json_config_file
+from .config_utils import SubcommandHelpFormatter
+description = 'Create a default config file for Accelerate with only a few flags set.'
+
+def write_basic_config(mixed_precision='no', save_location: str=default_json_config_file, use_xpu: bool=False):
+ path = Path(save_location)
+ path.parent.mkdir(parents=True, exist_ok=True)
+ if path.exists():
+ print(f'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.')
+ return False
+ mixed_precision = mixed_precision.lower()
+ if mixed_precision not in ['no', 'fp16', 'bf16', 'fp8']:
+ raise ValueError(f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}")
+ config = {'compute_environment': 'LOCAL_MACHINE', 'mixed_precision': mixed_precision}
+ if is_mlu_available():
+ num_mlus = torch.mlu.device_count()
+ config['num_processes'] = num_mlus
+ config['use_cpu'] = False
+ if num_mlus > 1:
+ config['distributed_type'] = 'MULTI_MLU'
+ else:
+ config['distributed_type'] = 'NO'
+ elif is_musa_available():
+ num_musas = torch.musa.device_count()
+ config['num_processes'] = num_musas
+ config['use_cpu'] = False
+ if num_musas > 1:
+ config['distributed_type'] = 'MULTI_MUSA'
+ else:
+ config['distributed_type'] = 'NO'
+ elif torch.cuda.is_available():
+ num_gpus = torch.cuda.device_count()
+ config['num_processes'] = num_gpus
+ config['use_cpu'] = False
+ if num_gpus > 1:
+ config['distributed_type'] = 'MULTI_GPU'
+ else:
+ config['distributed_type'] = 'NO'
+ elif is_xpu_available() and use_xpu:
+ num_xpus = torch.xpu.device_count()
+ config['num_processes'] = num_xpus
+ config['use_cpu'] = False
+ if num_xpus > 1:
+ config['distributed_type'] = 'MULTI_XPU'
+ else:
+ config['distributed_type'] = 'NO'
+ elif is_npu_available():
+ num_npus = torch.npu.device_count()
+ config['num_processes'] = num_npus
+ config['use_cpu'] = False
+ if num_npus > 1:
+ config['distributed_type'] = 'MULTI_NPU'
+ else:
+ config['distributed_type'] = 'NO'
+ else:
+ num_xpus = 0
+ config['use_cpu'] = True
+ config['num_processes'] = 1
+ config['distributed_type'] = 'NO'
+ config['debug'] = False
+ config['enable_cpu_affinity'] = False
+ config = ClusterConfig(**config)
+ config.to_json_file(path)
+ return path
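+ # Example (editor's sketch): write_basic_config can also be called from Python to
+ # bootstrap a config file non-interactively (the save location below is illustrative):
+ #
+ #     write_basic_config(mixed_precision='bf16', save_location='/tmp/accelerate_config.json')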
+
+def default_command_parser(parser, parents):
+ parser = parser.add_parser('default', parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
+ parser.add_argument('--config_file', default=default_json_config_file, help="The path to use to store the config file. Will default to a file named default_config.yaml in the cache location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed with 'huggingface'.", dest='save_location')
+ parser.add_argument('--mixed_precision', choices=['no', 'fp16', 'bf16'], type=str, help='Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.', default='no')
+ parser.set_defaults(func=default_config_command)
+ return parser
+
+def default_config_command(args):
+ config_file = write_basic_config(args.mixed_precision, args.save_location)
+ if config_file:
+ print(f'accelerate configuration saved at {config_file}')
+
+# File: accelerate-main/src/accelerate/commands/config/sagemaker.py
+import json
+import os
+from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
+from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
+from ...utils.imports import is_boto3_available
+from .config_args import SageMakerConfig
+from .config_utils import DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool
+if is_boto3_available():
+ import boto3
+
+def _create_iam_role_for_sagemaker(role_name):
+ iam_client = boto3.client('iam')
+ sagemaker_trust_policy = {'Version': '2012-10-17', 'Statement': [{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}]}
+ try:
+ iam_client.create_role(RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2))
+ policy_document = {'Version': '2012-10-17', 'Statement': [{'Effect': 'Allow', 'Action': ['sagemaker:*', 'ecr:GetDownloadUrlForLayer', 'ecr:BatchGetImage', 'ecr:BatchCheckLayerAvailability', 'ecr:GetAuthorizationToken', 'cloudwatch:PutMetricData', 'cloudwatch:GetMetricData', 'cloudwatch:GetMetricStatistics', 'cloudwatch:ListMetrics', 'logs:CreateLogGroup', 'logs:CreateLogStream', 'logs:DescribeLogStreams', 'logs:PutLogEvents', 'logs:GetLogEvents', 's3:CreateBucket', 's3:ListBucket', 's3:GetBucketLocation', 's3:GetObject', 's3:PutObject'], 'Resource': '*'}]}
+ iam_client.put_role_policy(RoleName=role_name, PolicyName=f'{role_name}_policy_permission', PolicyDocument=json.dumps(policy_document, indent=2))
+ except iam_client.exceptions.EntityAlreadyExistsException:
+ print(f'Role {role_name} already exists. Using the existing one.')
+
+def _get_iam_role_arn(role_name):
+ iam_client = boto3.client('iam')
+ return iam_client.get_role(RoleName=role_name)['Role']['Arn']
+
+def get_sagemaker_input():
+ credentials_configuration = _ask_options('How do you want to authorize?', ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '], int)
+ aws_profile = None
+ if credentials_configuration == 0:
+ aws_profile = _ask_field('Enter your AWS Profile name: [default] ', default='default')
+ os.environ['AWS_PROFILE'] = aws_profile
+ else:
+ print('Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with `accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`')
+ aws_access_key_id = _ask_field('AWS Access Key ID: ')
+ os.environ['AWS_ACCESS_KEY_ID'] = aws_access_key_id
+ aws_secret_access_key = _ask_field('AWS Secret Access Key: ')
+ os.environ['AWS_SECRET_ACCESS_KEY'] = aws_secret_access_key
+ aws_region = _ask_field('Enter your AWS Region: [us-east-1]', default='us-east-1')
+ os.environ['AWS_DEFAULT_REGION'] = aws_region
+ role_management = _ask_options('Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?', ['Provide IAM Role name', 'Create new IAM role using credentials'], int)
+ if role_management == 0:
+ iam_role_name = _ask_field('Enter your IAM role name: ')
+ else:
+ iam_role_name = 'accelerate_sagemaker_execution_role'
+ print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
+ _create_iam_role_for_sagemaker(iam_role_name)
+ is_custom_docker_image = _ask_field('Do you want to use custom Docker image? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ docker_image = None
+ if is_custom_docker_image:
+ docker_image = _ask_field('Enter your Docker image: ', lambda x: str(x).lower())
+ is_sagemaker_inputs_enabled = _ask_field('Do you want to provide SageMaker input channels with data locations? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ sagemaker_inputs_file = None
+ if is_sagemaker_inputs_enabled:
+ sagemaker_inputs_file = _ask_field('Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ', lambda x: str(x).lower())
+ is_sagemaker_metrics_enabled = _ask_field('Do you want to enable SageMaker metrics? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ sagemaker_metrics_file = None
+ if is_sagemaker_metrics_enabled:
+ sagemaker_metrics_file = _ask_field('Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ', lambda x: str(x).lower())
+ distributed_type = _ask_options('What is the distributed mode?', ['No distributed training', 'Data parallelism'], _convert_sagemaker_distributed_mode)
+ dynamo_config = {}
+ use_dynamo = _ask_field('Do you wish to optimize your script with torch dynamo? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if use_dynamo:
+ prefix = 'dynamo_'
+ dynamo_config[prefix + 'backend'] = _ask_options('Which dynamo backend would you like to use?', [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2)
+ use_custom_options = _ask_field('Do you want to customize the defaults sent to torch.compile? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ if use_custom_options:
+ dynamo_config[prefix + 'mode'] = _ask_options('Which mode do you want to use?', TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default='default')
+ dynamo_config[prefix + 'use_fullgraph'] = _ask_field('Do you want to use the fullgraph mode, or is it OK to break the model into several subgraphs? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ dynamo_config[prefix + 'use_dynamic'] = _ask_field('Do you want to enable dynamic shape tracing? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ ec2_instance_query = 'Which EC2 instance type do you want to use for your training?'
+ if distributed_type != SageMakerDistributedType.NO:
+ ec2_instance_type = _ask_options(ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)])
+ else:
+ ec2_instance_query += ' [ml.p3.2xlarge]:'
+ ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default='ml.p3.2xlarge')
+ debug = False
+ if distributed_type != SageMakerDistributedType.NO:
+ debug = _ask_field('Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.')
+ num_machines = 1
+ if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
+ num_machines = _ask_field('How many machines do you want to use? [1]: ', int, default=1)
+ mixed_precision = _ask_options('Do you wish to use FP16 or BF16 (mixed precision)?', ['no', 'fp16', 'bf16', 'fp8'], _convert_mixed_precision)
+ if use_dynamo and mixed_precision == 'no':
+ print('Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.')
+ return SageMakerConfig(image_uri=docker_image, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=distributed_type, use_cpu=False, dynamo_config=dynamo_config, ec2_instance_type=ec2_instance_type, profile=aws_profile, region=aws_region, iam_role_name=iam_role_name, mixed_precision=mixed_precision, num_machines=num_machines, sagemaker_inputs_file=sagemaker_inputs_file, sagemaker_metrics_file=sagemaker_metrics_file, debug=debug)
+
+# File: accelerate-main/src/accelerate/commands/config/update.py
+from pathlib import Path
+from .config_args import default_config_file, load_config_from_file
+from .config_utils import SubcommandHelpFormatter
+description = 'Update an existing config file with the latest defaults while maintaining the old configuration.'
+
+def update_config(args):
+ config_file = args.config_file
+ if config_file is None and Path(default_config_file).exists():
+ config_file = default_config_file
+ elif not Path(config_file).exists():
+ raise ValueError(f"The passed config file located at {config_file} doesn't exist.")
+ config = load_config_from_file(config_file)
+ if config_file.endswith('.json'):
+ config.to_json_file(config_file)
+ else:
+ config.to_yaml_file(config_file)
+ return config_file
+
+def update_command_parser(parser, parents):
+ parser = parser.add_parser('update', parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
+ parser.add_argument('--config_file', default=None, help="The path to the config file to update. Will default to a file named default_config.yaml in the cache location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed with 'huggingface'.")
+ parser.set_defaults(func=update_config_command)
+ return parser
+
+def update_config_command(args):
+ config_file = update_config(args)
+ print(f'Successfully updated the configuration file at {config_file}.')
+
+# File: accelerate-main/src/accelerate/commands/env.py
+import argparse
+import os
+import platform
+import subprocess
+import numpy as np
+import psutil
+import torch
+from accelerate import __version__ as version
+from accelerate.commands.config import default_config_file, load_config_from_file
+from ..utils import is_mlu_available, is_musa_available, is_npu_available, is_xpu_available
+
+def env_command_parser(subparsers=None):
+ if subparsers is not None:
+ parser = subparsers.add_parser('env')
+ else:
+ parser = argparse.ArgumentParser('Accelerate env command')
+ parser.add_argument('--config_file', default=None, help='The config file to use for the default values in the launching script.')
+ if subparsers is not None:
+ parser.set_defaults(func=env_command)
+ return parser
+
+def env_command(args):
+ pt_version = torch.__version__
+ pt_cuda_available = torch.cuda.is_available()
+ pt_xpu_available = is_xpu_available()
+ pt_mlu_available = is_mlu_available()
+ pt_musa_available = is_musa_available()
+ pt_npu_available = is_npu_available()
+ accelerate_config = 'Not found'
+ if args.config_file is not None or os.path.isfile(default_config_file):
+ accelerate_config = load_config_from_file(args.config_file).to_dict()
+ command = None
+ bash_location = 'Not found'
+ if os.name == 'nt':
+ command = ['where', 'accelerate']
+ elif os.name == 'posix':
+ command = ['which', 'accelerate']
+ if command is not None:
+ bash_location = subprocess.check_output(command, text=True, stderr=subprocess.STDOUT).strip()
+ info = {'`Accelerate` version': version, 'Platform': platform.platform(), '`accelerate` bash location': bash_location, 'Python version': platform.python_version(), 'Numpy version': np.__version__, 'PyTorch version (GPU?)': f'{pt_version} ({pt_cuda_available})', 'PyTorch XPU available': str(pt_xpu_available), 'PyTorch NPU available': str(pt_npu_available), 'PyTorch MLU available': str(pt_mlu_available), 'PyTorch MUSA available': str(pt_musa_available), 'System RAM': f'{psutil.virtual_memory().total / 1024 ** 3:.2f} GB'}
+ if pt_cuda_available:
+ info['GPU type'] = torch.cuda.get_device_name()
+ if pt_mlu_available:
+ info['MLU type'] = torch.mlu.get_device_name()
+ if pt_npu_available:
+ info['CANN version'] = torch.version.cann
+ print('\nCopy-and-paste the text below in your GitHub issue\n')
+ print('\n'.join([f'- {prop}: {val}' for (prop, val) in info.items()]))
+ print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:')
+ accelerate_config_str = '\n'.join([f'\t- {prop}: {val}' for (prop, val) in accelerate_config.items()]) if isinstance(accelerate_config, dict) else f'\t{accelerate_config}'
+ print(accelerate_config_str)
+ info['`Accelerate` configs'] = accelerate_config
+ return info
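+ # Editor's note: this backs the `accelerate env` command; passing `--config_file`
+ # reports against a specific config instead of the default one, which is useful when
+ # filing bug reports.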
+
+def main() -> int:
+ parser = env_command_parser()
+ args = parser.parse_args()
+ env_command(args)
+ return 0
+if __name__ == '__main__':
+ raise SystemExit(main())
+
+# File: accelerate-main/src/accelerate/commands/estimate.py
+from huggingface_hub import model_info
+from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
+from accelerate import init_empty_weights
+from accelerate.commands.utils import CustomArgumentParser
+from accelerate.utils import calculate_maximum_sizes, convert_bytes, is_timm_available, is_transformers_available
+if is_transformers_available():
+ import transformers
+ from transformers import AutoConfig, AutoModel
+if is_timm_available():
+ import timm
+
+def verify_on_hub(repo: str, token: str=None):
+ try:
+ return model_info(repo, token=token)
+ except (OSError, GatedRepoError):
+ return 'gated'
+ except RepositoryNotFoundError:
+ return 'repo'
+
+def check_has_model(error):
+ if is_timm_available() and isinstance(error, RuntimeError) and ('Unknown model' in error.args[0]):
+ return 'timm'
+ elif is_transformers_available() and isinstance(error, OSError) and ('does not appear to have a file named' in error.args[0]):
+ return 'transformers'
+ else:
+ return 'unknown'
+
+def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool=False, access_token: str=None):
+ model_info = verify_on_hub(model_name, access_token)
+ if model_info == 'gated':
+ raise GatedRepoError(f'Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`.')
+ elif model_info == 'repo':
+ raise RepositoryNotFoundError(f'Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo, make sure you are authenticated via `huggingface-cli login` and have access.')
+ if library_name is None:
+ library_name = getattr(model_info, 'library_name', False)
+ if not library_name:
+ raise ValueError(f'Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)')
+ if library_name == 'transformers':
+ if not is_transformers_available():
+ raise ImportError(f'To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`')
+ print(f'Loading pretrained config for `{model_name}` from `transformers`...')
+ if model_info.config is None:
+ raise RuntimeError(f'Tried to load `{model_name}` with `transformers` but it does not have any metadata.')
+ auto_map = model_info.config.get('auto_map', False)
+ config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token)
+ with init_empty_weights():
+ constructor = AutoModel
+ if isinstance(auto_map, dict):
+ value = None
+ for key in auto_map.keys():
+ if key.startswith('AutoModelFor'):
+ value = key
+ break
+ if value is not None:
+ constructor = getattr(transformers, value)
+ model = constructor.from_config(config, trust_remote_code=trust_remote_code)
+ elif library_name == 'timm':
+ if not is_timm_available():
+ raise ImportError(f'To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`')
+ print(f'Loading pretrained config for `{model_name}` from `timm`...')
+ with init_empty_weights():
+ model = timm.create_model(model_name, pretrained=False)
+ else:
+ raise ValueError(f'Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support.')
+ return model
+
+def create_ascii_table(headers: list, rows: list, title: str):
+ (sep_char, in_between) = ('│', '─')
+ column_widths = []
+ for i in range(len(headers)):
+ column_values = [row[i] for row in rows] + [headers[i]]
+ max_column_width = max((len(value) for value in column_values))
+ column_widths.append(max_column_width)
+ formats = [f'%{column_widths[i]}s' for i in range(len(rows[0]))]
+ pattern = f'{sep_char}{sep_char.join(formats)}{sep_char}'
+ diff = 0
+
+ def make_row(left_char, middle_char, right_char):
+ return f'{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}'
+ separator = make_row('├', '┼', '┤')
+ if len(title) > sum(column_widths):
+ diff = abs(len(title) - len(separator))
+ column_widths[-1] += diff
+ separator = make_row('├', '┼', '┤')
+ initial_rows = [make_row('┌', in_between, '┐'), f'{sep_char}{title.center(len(separator) - 2)}{sep_char}', make_row('├', '┬', '┤')]
+ table = '\n'.join(initial_rows) + '\n'
+ column_widths[-1] += diff
+ centered_line = [text.center(column_widths[i]) for (i, text) in enumerate(headers)]
+ table += f'{pattern % tuple(centered_line)}\n{separator}\n'
+ for (i, line) in enumerate(rows):
+ centered_line = [t.center(column_widths[i]) for (i, t) in enumerate(line)]
+ table += f'{pattern % tuple(centered_line)}\n'
+ table += f"└{'┴'.join([in_between * n for n in column_widths])}┘"
+ return table
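+ # Example (editor's sketch): a minimal call to the table helper above; values are illustrative.
+ #
+ #     print(create_ascii_table(
+ #         headers=['dtype', 'Total Size'],
+ #         rows=[['float32', '10.0 GB'], ['float16', '5.0 GB']],
+ #         title='Memory Usage for loading `some-model`',
+ #     ))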
+
+def estimate_command_parser(subparsers=None):
+ if subparsers is not None:
+ parser = subparsers.add_parser('estimate-memory')
+ else:
+ parser = CustomArgumentParser(description='Model size estimator for fitting a model onto CUDA memory.')
+ parser.add_argument('model_name', type=str, help='The model name on the Hugging Face Hub.')
+ parser.add_argument('--library_name', type=str, help='The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.', choices=['timm', 'transformers'])
+ parser.add_argument('--dtypes', type=str, nargs='+', default=['float32', 'float16', 'int8', 'int4'], help='The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`', choices=['float32', 'float16', 'int8', 'int4'])
+ parser.add_argument('--trust_remote_code', action='store_true', help='Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag\n should only be used for repositories you trust and in which you have read the code, as it will execute\n code present on the Hub on your local machine.', default=False)
+ if subparsers is not None:
+ parser.set_defaults(func=estimate_command)
+ return parser
+
+def estimate_training_usage(bytes: int, mixed_precision: str, msamp_config: str=None) -> dict:
+ memory_sizes = {'model': -1, 'optimizer': -1, 'gradients': -1, 'step': -1}
+ fp32_size = bytes
+ fp16_size = bytes // 2
+ if mixed_precision == 'float32':
+ memory_sizes['model'] = fp32_size
+ memory_sizes['gradients'] = fp32_size
+ memory_sizes['optimizer'] = fp32_size * 2
+ memory_sizes['step'] = fp32_size * 4
+ elif mixed_precision in ('float16', 'bfloat16') or (mixed_precision == 'fp8' and msamp_config is None):
+ memory_sizes['model'] = fp32_size
+ memory_sizes['gradients'] = fp32_size + fp16_size
+ memory_sizes['optimizer'] = fp32_size * 2
+ memory_sizes['step'] = memory_sizes['optimizer']
+ return memory_sizes
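+ # Worked example (editor's note): for 1 GB of fp32 weights trained in full float32,
+ # the branches above give model 1 GB, gradients 1 GB, Adam optimizer state 2 GB and a
+ # peak "step" footprint of 4 GB, i.e. roughly 4x the weight size before activations.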
+
+def gather_data(args):
+ try:
+ model = create_empty_model(args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code)
+ except (RuntimeError, OSError) as e:
+ library = check_has_model(e)
+ if library != 'unknown':
+ raise RuntimeError(f'Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo.')
+ raise e
+ (total_size, largest_layer) = calculate_maximum_sizes(model)
+ data = []
+ for dtype in args.dtypes:
+ dtype_total_size = total_size
+ dtype_largest_layer = largest_layer[0]
+ dtype_training_size = estimate_training_usage(dtype_total_size, dtype)
+ if dtype == 'float16':
+ dtype_total_size /= 2
+ dtype_largest_layer /= 2
+ elif dtype == 'int8':
+ dtype_total_size /= 4
+ dtype_largest_layer /= 4
+ elif dtype == 'int4':
+ dtype_total_size /= 8
+ dtype_largest_layer /= 8
+ data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size])
+ return data
+
+def estimate_command(args):
+ data = gather_data(args)
+ for row in data:
+ for (i, item) in enumerate(row):
+ if isinstance(item, (int, float)):
+ row[i] = convert_bytes(item)
+ elif isinstance(item, dict):
+ training_usage = max(item.values())
+ row[i] = convert_bytes(training_usage) if training_usage != -1 else 'N/A'
+ headers = ['dtype', 'Largest Layer', 'Total Size', 'Training using Adam']
+ title = f'Memory Usage for loading `{args.model_name}`'
+ table = create_ascii_table(headers, data, title)
+ print(table)
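+ # Example (editor's sketch): this is normally reached through the CLI subcommand
+ # registered above, e.g.
+ #
+ #     accelerate estimate-memory bert-base-cased --library_name transformers --dtypes float32 float16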
+
+def main():
+ parser = estimate_command_parser()
+ args = parser.parse_args()
+ estimate_command(args)
+if __name__ == '__main__':
+ main()
+
+# File: accelerate-main/src/accelerate/commands/launch.py
+import argparse
+import importlib
+import logging
+import os
+import subprocess
+import sys
+from pathlib import Path
+import psutil
+import torch
+from accelerate.commands.config import default_config_file, load_config_from_file
+from accelerate.commands.config.config_args import SageMakerConfig
+from accelerate.commands.config.config_utils import DYNAMO_BACKENDS
+from accelerate.commands.utils import CustomArgumentParser
+from accelerate.state import get_int_from_env
+from accelerate.utils import ComputeEnvironment, DistributedType, PrepareForLaunch, _filter_args, check_cuda_p2p_ib_support, convert_dict_to_env_variables, is_bf16_available, is_deepspeed_available, is_mlu_available, is_musa_available, is_npu_available, is_rich_available, is_sagemaker_available, is_torch_version, is_torch_xla_available, is_xpu_available, patch_environment, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, str_to_bool
+from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES
+if is_rich_available():
+ from rich import get_console
+ from rich.logging import RichHandler
+ FORMAT = '%(message)s'
+ logging.basicConfig(format=FORMAT, datefmt='[%X]', handlers=[RichHandler()])
+logger = logging.getLogger(__name__)
+options_to_group = {'multi_gpu': 'Distributed GPUs', 'tpu': 'TPU', 'use_deepspeed': 'DeepSpeed Arguments', 'use_fsdp': 'FSDP Arguments', 'use_megatron_lm': 'Megatron-LM Arguments', 'fp8_backend': 'FP8 Arguments'}
+
+def clean_option(option):
+ if 'fp8_backend' in option:
+ option = '--fp8_backend'
+ if option.startswith('--'):
+ return option[2:].replace('-', '_')
+
+class CustomHelpFormatter(argparse.HelpFormatter):
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.titles = ['Hardware Selection Arguments', 'Resource Selection Arguments', 'Training Paradigm Arguments', 'positional arguments', 'optional arguments']
+
+ def add_argument(self, action: argparse.Action):
+ if 'accelerate' in sys.argv[0] and 'launch' in sys.argv[1:]:
+ args = sys.argv[2:]
+ else:
+ args = sys.argv[1:]
+ if len(args) > 1:
+ args = list(map(clean_option, args))
+ used_platforms = [arg for arg in args if arg in options_to_group.keys()]
+ used_titles = [options_to_group[o] for o in used_platforms]
+ if action.container.title not in self.titles + used_titles:
+ action.help = argparse.SUPPRESS
+ elif action.container.title == 'Hardware Selection Arguments':
+ if set(action.option_strings).isdisjoint(set(args)):
+ action.help = argparse.SUPPRESS
+ else:
+ action.help = action.help + ' (currently selected)'
+ elif action.container.title == 'Training Paradigm Arguments':
+ if set(action.option_strings).isdisjoint(set(args)):
+ action.help = argparse.SUPPRESS
+ else:
+ action.help = action.help + ' (currently selected)'
+ action.option_strings = [s for s in action.option_strings if '-' not in s[2:]]
+ super().add_argument(action)
+
+ def end_section(self):
+ if len(self._current_section.items) < 2:
+ self._current_section.items = []
+ self._current_section.heading = ''
+ super().end_section()
+
+def launch_command_parser(subparsers=None):
+ description = 'Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)'
+ if subparsers is not None:
+ parser = subparsers.add_parser('launch', description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter)
+ else:
+ parser = CustomArgumentParser('Accelerate launch command', description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter)
+ parser.add_argument('-h', '--help', action='help', help='Show this help message and exit.')
+ parser.add_argument('--config_file', default=None, help='The config file to use for the default values in the launching script.')
+ parser.add_argument('--quiet', '-q', action='store_true', help='Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)')
+ hardware_args = parser.add_argument_group('Hardware Selection Arguments', 'Arguments for selecting the hardware to be used.')
+ hardware_args.add_argument('--cpu', default=False, action='store_true', help='Whether or not to force the training on the CPU.')
+ hardware_args.add_argument('--multi_gpu', default=False, action='store_true', help='Whether or not this should launch a distributed GPU training.')
+ hardware_args.add_argument('--tpu', default=False, action='store_true', help='Whether or not this should launch a TPU training.')
+ hardware_args.add_argument('--ipex', default=False, action='store_true', help='Whether or not this should launch an Intel PyTorch Extension (IPEX) training.')
+ resource_args = parser.add_argument_group('Resource Selection Arguments', 'Arguments for fine-tuning how available hardware should be used.')
+ resource_args.add_argument('--mixed_precision', type=str, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether or not to use mixed precision training. Choose between FP16 and BF16 (bfloat16) training. BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.')
+ resource_args.add_argument('--num_processes', type=int, default=None, help='The total number of processes to be launched in parallel.')
+ resource_args.add_argument('--num_machines', type=int, default=None, help='The total number of machines used in this training.')
+ resource_args.add_argument('--num_cpu_threads_per_process', type=int, default=None, help='The number of CPU threads per process. Can be tuned for optimal performance.')
+ resource_args.add_argument('--enable_cpu_affinity', default=False, action='store_true', help='Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.')
+ resource_args.add_argument('--dynamo_backend', type=str, choices=['no'] + [b.lower() for b in DYNAMO_BACKENDS], help='Choose a backend to optimize your training with dynamo, see more at https://github.com/pytorch/torchdynamo.')
+ resource_args.add_argument('--dynamo_mode', type=str, default='default', choices=TORCH_DYNAMO_MODES, help='Choose a mode to optimize your training with dynamo.')
+ resource_args.add_argument('--dynamo_use_fullgraph', default=False, action='store_true', help='Whether to use full graph mode for dynamo, or if it is OK to break the model into several subgraphs')
+ resource_args.add_argument('--dynamo_use_dynamic', default=False, action='store_true', help='Whether to enable dynamic shape tracing.')
+ paradigm_args = parser.add_argument_group('Training Paradigm Arguments', 'Arguments for selecting which training paradigm to be used.')
+ paradigm_args.add_argument('--use_deepspeed', default=False, action='store_true', help='Whether to use deepspeed.')
+ paradigm_args.add_argument('--use_fsdp', default=False, action='store_true', help='Whether to use fsdp.')
+ paradigm_args.add_argument('--use_megatron_lm', default=False, action='store_true', help='Whether to use Megatron-LM.')
+ paradigm_args.add_argument('--use_xpu', default=False, action='store_true', help='Whether to use IPEX plugin to speed up training on XPU specifically.')
+ distributed_args = parser.add_argument_group('Distributed GPUs', 'Arguments related to distributed GPU training.')
+ distributed_args.add_argument('--gpu_ids', default=None, help='What GPUs (by id) should be used for training on this machine as a comma-separated list')
+ distributed_args.add_argument('--same_network', default=False, action='store_true', help='Whether all machines used for multinode training exist on the same local network.')
+ distributed_args.add_argument('--machine_rank', type=int, default=None, help='The rank of the machine on which this script is launched.')
+ distributed_args.add_argument('--main_process_ip', type=str, default=None, help='The IP address of the machine of rank 0.')
+ distributed_args.add_argument('--main_process_port', type=int, default=None, help='The port to use to communicate with the machine of rank 0.')
+ distributed_args.add_argument('-t', '--tee', default='0', type=str, help='Tee std streams into a log file and also to console.')
+ distributed_args.add_argument('--log_dir', type=str, default=None, help='Base directory to use for log files when using torchrun/torch.distributed.run as launcher. Use with --tee to redirect std streams info log files.')
+ distributed_args.add_argument('--role', type=str, default='default', help='User-defined role for the workers.')
+ distributed_args.add_argument('--rdzv_backend', type=str, default='static', help="The rendezvous method to use, such as 'static' (the default) or 'c10d'")
+ distributed_args.add_argument('--rdzv_conf', type=str, default='', help='Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).')
+ distributed_args.add_argument('--max_restarts', type=int, default=0, help='Maximum number of worker group restarts before failing.')
+ distributed_args.add_argument('--monitor_interval', type=float, default=0.1, help='Interval, in seconds, to monitor the state of workers.')
+ parser.add_argument('-m', '--module', action='store_true', help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.")
+ parser.add_argument('--no_python', action='store_true', help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.")
+ tpu_args = parser.add_argument_group('TPU', 'Arguments related to TPU.')
+ tpu_args.add_argument('--tpu_cluster', action='store_true', dest='tpu_use_cluster', help='Whether to use a GCP TPU pod for training.')
+ tpu_args.add_argument('--no_tpu_cluster', action='store_false', dest='tpu_use_cluster', help='Should not be passed explicitly, this is for internal use only.')
+ tpu_args.add_argument('--tpu_use_sudo', action='store_true', help='Whether to use `sudo` when running the TPU training script in each pod.')
+ tpu_args.add_argument('--vm', type=str, action='append', help='List of single Compute VM instance names. If not provided we assume usage of instance groups. For TPU pods.')
+ tpu_args.add_argument('--env', type=str, action='append', help='List of environment variables to set on the Compute VM instances. For TPU pods.')
+ tpu_args.add_argument('--main_training_function', type=str, default=None, help='The name of the main function to be executed in your script (only for TPU training).')
+ tpu_args.add_argument('--downcast_bf16', action='store_true', help='Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.')
+ deepspeed_args = parser.add_argument_group('DeepSpeed Arguments', 'Arguments related to DeepSpeed.')
+ deepspeed_args.add_argument('--deepspeed_config_file', default=None, type=str, help='DeepSpeed config file.')
+ deepspeed_args.add_argument('--zero_stage', default=None, type=int, help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). If unspecified, will default to `2`.")
+ deepspeed_args.add_argument('--offload_optimizer_device', default=None, type=str, help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). If unspecified, will default to 'none'.")
+ deepspeed_args.add_argument('--offload_param_device', default=None, type=str, help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). If unspecified, will default to 'none'.")
+ deepspeed_args.add_argument('--offload_optimizer_nvme_path', default=None, type=str, help="Decides Nvme Path to offload optimizer states (useful only when `use_deepspeed` flag is passed). If unspecified, will default to 'none'.")
+ deepspeed_args.add_argument('--offload_param_nvme_path', default=None, type=str, help="Decides Nvme Path to offload parameters (useful only when `use_deepspeed` flag is passed). If unspecified, will default to 'none'.")
+ deepspeed_args.add_argument('--gradient_accumulation_steps', default=None, type=int, help='Number of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). If unspecified, will default to `1`.')
+ deepspeed_args.add_argument('--gradient_clipping', default=None, type=float, help='gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). If unspecified, will default to `1.0`.')
+ deepspeed_args.add_argument('--zero3_init_flag', default=None, type=str, help='Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.')
+ deepspeed_args.add_argument('--zero3_save_16bit_model', default=None, type=str, help='Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.')
+ deepspeed_args.add_argument('--deepspeed_hostfile', default=None, type=str, help='DeepSpeed hostfile for configuring multi-node compute resources.')
+ deepspeed_args.add_argument('--deepspeed_exclusion_filter', default=None, type=str, help='DeepSpeed exclusion filter string when using multi-node setup.')
+ deepspeed_args.add_argument('--deepspeed_inclusion_filter', default=None, type=str, help='DeepSpeed inclusion filter string when using multi-node setup.')
+ deepspeed_args.add_argument('--deepspeed_multinode_launcher', default=None, type=str, help='DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.')
+ deepspeed_args.add_argument('--deepspeed_moe_layer_cls_names', default=None, type=str, help='comma-separated list of transformer MoE layer class names (case-sensitive) to wrap, e.g. `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ... (useful only when `use_deepspeed` flag is passed).')
+ fsdp_args = parser.add_argument_group('FSDP Arguments', 'Arguments related to Fully Sharded Data Parallelism.')
+ fsdp_args.add_argument('--fsdp_offload_params', default='false', type=str, help='Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).')
+ fsdp_args.add_argument('--fsdp_min_num_params', type=int, default=100000000, help="FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).")
+ fsdp_args.add_argument('--fsdp_sharding_strategy', type=str, default='FULL_SHARD', help="FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).")
+ fsdp_args.add_argument('--fsdp_auto_wrap_policy', type=str, default=None, help="FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).")
+ fsdp_args.add_argument('--fsdp_transformer_layer_cls_to_wrap', default=None, type=str, help='Transformer layer class name (case-sensitive) to wrap, e.g. `BertLayer`, `GPTJBlock`, `T5Block` ... (useful only when `use_fsdp` flag is passed).')
+ fsdp_args.add_argument('--fsdp_backward_prefetch', default=None, type=str, help="FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).")
+ fsdp_args.add_argument('--fsdp_state_dict_type', default=None, type=str, help="FSDP's state dict type. (useful only when `use_fsdp` flag is passed).")
+ fsdp_args.add_argument('--fsdp_forward_prefetch', default='false', type=str, help='If True, then FSDP explicitly prefetches the next upcoming all-gather while executing in the forward pass (useful only when `use_fsdp` flag is passed).')
+ fsdp_args.add_argument('--fsdp_use_orig_params', default='true', type=str, help='If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. (useful only when `use_fsdp` flag is passed).')
+ fsdp_args.add_argument('--fsdp_cpu_ram_efficient_loading', default='true', type=str, help='If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. Only applicable for 🤗 Transformers. When using this, `--fsdp_sync_module_states` needs to be True. (useful only when `use_fsdp` flag is passed).')
+ fsdp_args.add_argument('--fsdp_sync_module_states', default='true', type=str, help='If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0. (useful only when `use_fsdp` flag is passed).')
+ fsdp_args.add_argument('--fsdp_activation_checkpointing', default='false', type=str, help='Decides Whether (true|false) intermediate activations are freed during the forward pass, and a checkpoint is left as a placeholder. (useful only when `use_fsdp` flag is passed).')
+ megatron_lm_args = parser.add_argument_group('Megatron-LM Arguments', 'Arguments related to Megatron-LM.')
+ megatron_lm_args.add_argument('--megatron_lm_tp_degree', type=int, default=1, help="Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).")
+ megatron_lm_args.add_argument('--megatron_lm_pp_degree', type=int, default=1, help="Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).")
+ megatron_lm_args.add_argument('--megatron_lm_num_micro_batches', type=int, default=None, help="Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).")
+ megatron_lm_args.add_argument('--megatron_lm_sequence_parallelism', default=None, type=str, help='Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. (useful only when `use_megatron_lm` flag is passed).')
+ megatron_lm_args.add_argument('--megatron_lm_recompute_activations', default=None, type=str, help='Decides Whether (true|false) to enable Selective Activation Recomputation. (useful only when `use_megatron_lm` flag is passed).')
+ megatron_lm_args.add_argument('--megatron_lm_use_distributed_optimizer', default=None, type=str, help='Decides Whether (true|false) to use distributed optimizer which shards optimizer state and gradients across Data Parallel (DP) ranks. (useful only when `use_megatron_lm` flag is passed).')
+ megatron_lm_args.add_argument('--megatron_lm_gradient_clipping', default=1.0, type=float, help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). (useful only when `use_megatron_lm` flag is passed).")
+ fp8_args = parser.add_argument_group('FP8 Arguments', 'Arguments related to FP8 training (requires `--mixed_precision=fp8`)')
+ fp8_args.add_argument('--fp8_backend', type=str, choices=['te', 'msamp'], help='Choose a backend to train with FP8 (te: TransformerEngine, msamp: MS-AMP)')
+ fp8_args.add_argument('--fp8_use_autocast_during_eval', default=False, action='store_true', help='Whether to use FP8 autocast during eval mode (useful only when `--fp8_backend=te` is passed). Generally better metrics are found when this is not passed.')
+ fp8_args.add_argument('--fp8_margin', type=int, default=0, help='The margin to use for the gradient scaling (useful only when `--fp8_backend=te` is passed).')
+ fp8_args.add_argument('--fp8_interval', type=int, default=1, help='The interval to use for how often the scaling factor is recomputed (useful only when `--fp8_backend=te` is passed).')
+ fp8_args.add_argument('--fp8_format', type=str, default='E4M3', choices=['E4M3', 'HYBRID'], help='The format to use for the FP8 recipe (useful only when `--fp8_backend=te` is passed).')
+ fp8_args.add_argument('--fp8_amax_history_len', type=int, default=1024, help='The length of the history to use for the scaling factor computation (useful only when `--fp8_backend=te` is passed).')
+ fp8_args.add_argument('--fp8_amax_compute_algo', type=str, default='most_recent', choices=['max', 'most_recent'], help='The algorithm to use for the scaling factor computation. (useful only when `--fp8_backend=te` is passed).')
+ fp8_args.add_argument('--fp8_override_linear_precision', type=lambda x: tuple(map(str_to_bool, x.split(','))), default=(False, False, False), help='Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision. Should be passed in a comma-separated string of booleans (useful only when `--fp8_backend=te` is passed).')
+ fp8_args.add_argument('--fp8_opt_level', type=str, default='O2', choices=['O1', 'O2'], help='What level of 8-bit collective communication should be used with MS-AMP (useful only when `--fp8_backend=msamp` is passed).')
+ aws_args = parser.add_argument_group('AWS Arguments', 'Arguments related to AWS.')
+ aws_args.add_argument('--aws_access_key_id', type=str, default=None, help='The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job')
+ aws_args.add_argument('--aws_secret_access_key', type=str, default=None, help='The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.')
+ parser.add_argument('--debug', action='store_true', help='Whether to print out the torch.distributed stack trace when something fails.')
+ parser.add_argument('training_script', type=str, help='The full path to the script to be launched in parallel, followed by all the arguments for the training script.')
+ mpirun_args = parser.add_argument_group('MPI Arguments', 'Arguments related to mpirun for Multi-CPU')
+ mpirun_args.add_argument('--mpirun_hostfile', type=str, default=None, help='Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. This will get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.')
+ mpirun_args.add_argument('--mpirun_ccl', type=int, default=1, help='The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.')
+ parser.add_argument('training_script_args', nargs=argparse.REMAINDER, help='Arguments of the training script.')
+ if subparsers is not None:
+ parser.set_defaults(func=launch_command)
+ return parser
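+ # Example (editor's sketch): a typical invocation of the parser defined above through
+ # the CLI, using only flags declared in launch_command_parser; `train.py` and
+ # `--batch_size 32` are illustrative placeholders for the training script and its args.
+ #
+ #     accelerate launch --multi_gpu --num_processes 2 --mixed_precision bf16 train.py --batch_size 32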
+
+def simple_launcher(args):
+ (cmd, current_env) = prepare_simple_launcher_cmd_env(args)
+ process = subprocess.Popen(cmd, env=current_env)
+ process.wait()
+ if process.returncode != 0:
+ if not args.quiet:
+ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
+ else:
+ sys.exit(1)
+
+def multi_gpu_launcher(args):
+ import torch.distributed.run as distrib_run
+ current_env = prepare_multi_gpu_env(args)
+ if not check_cuda_p2p_ib_support():
+ message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
+ warn = False
+ if 'NCCL_P2P_DISABLE' not in current_env:
+ current_env['NCCL_P2P_DISABLE'] = '1'
+ warn = True
+ if 'NCCL_IB_DISABLE' not in current_env:
+ current_env['NCCL_IB_DISABLE'] = '1'
+ warn = True
+ if warn:
+ logger.warning(message)
+ debug = getattr(args, 'debug', False)
+ args = _filter_args(args, distrib_run.get_args_parser(), ['--training_script', args.training_script, '--training_script_args', args.training_script_args])
+ with patch_environment(**current_env):
+ try:
+ distrib_run.run(args)
+ except Exception:
+ if is_rich_available() and debug:
+ console = get_console()
+ console.print('\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]')
+ console.print_exception(suppress=[__file__], show_locals=False)
+ else:
+ raise
+
+def deepspeed_launcher(args):
+ import torch.distributed.run as distrib_run
+ if not is_deepspeed_available():
+ raise ImportError('DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.')
+ else:
+ from deepspeed.launcher.runner import DEEPSPEED_ENVIRONMENT_NAME
+ (cmd, current_env) = prepare_deepspeed_cmd_env(args)
+ if not check_cuda_p2p_ib_support():
+ message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
+ warn = False
+ if 'NCCL_P2P_DISABLE' not in current_env:
+ current_env['NCCL_P2P_DISABLE'] = '1'
+ warn = True
+ if 'NCCL_IB_DISABLE' not in current_env:
+ current_env['NCCL_IB_DISABLE'] = '1'
+ warn = True
+ if warn:
+ logger.warning(message)
+ if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
+ with open(DEEPSPEED_ENVIRONMENT_NAME, 'a') as f:
+ valid_env_items = convert_dict_to_env_variables(current_env)
+ if len(valid_env_items) > 1:
+ f.writelines(valid_env_items)
+ process = subprocess.Popen(cmd, env=current_env)
+ process.wait()
+ if process.returncode != 0:
+ if not args.quiet:
+ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
+ else:
+ sys.exit(1)
+ else:
+ debug = getattr(args, 'debug', False)
+ args = _filter_args(args, distrib_run.get_args_parser(), ['--training_script', args.training_script, '--training_script_args', args.training_script_args])
+ with patch_environment(**current_env):
+ try:
+ distrib_run.run(args)
+ except Exception:
+ if is_rich_available() and debug:
+ console = get_console()
+ console.print('\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]')
+ console.print_exception(suppress=[__file__], show_locals=False)
+ else:
+ raise
+
+def tpu_launcher(args):
+ import torch_xla.distributed.xla_multiprocessing as xmp
+ if args.no_python:
+ raise ValueError('--no_python cannot be used with TPU launcher')
+ (args, current_env) = prepare_tpu(args, {})
+ if args.module:
+ mod_name = args.training_script
+ else:
+ script_path = Path(args.training_script)
+ sys.path.append(str(script_path.parent.resolve()))
+ mod_name = script_path.stem
+ mod = importlib.import_module(mod_name)
+ if not hasattr(mod, args.main_training_function):
+ raise ValueError(f'Your training script should have a function named {args.main_training_function}, or you should pass a different value to `--main_training_function`.')
+ sys.argv = [mod.__file__] + args.training_script_args
+ main_function = getattr(mod, args.main_training_function)
+ with patch_environment(**current_env):
+ xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes)
+
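+# Launches training on a TPU pod by having `torch_xla.distributed.xla_dist` run `accelerate-launch`
+# on every worker, forwarding the prepared environment variables through `new_args.env`.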
+def tpu_pod_launcher(args):
+ from torch_xla.distributed import xla_dist
+ current_env = {}
+ (args, current_env) = prepare_tpu(args, current_env, True)
+ debug = getattr(args, 'debug', False)
+ training_script = args.training_script
+ training_script_args = args.training_script_args
+ new_args = _filter_args(args, xla_dist.get_args_parser(), ['--tpu', args.tpu_name, '--positional', '', '--restart-tpuvm-pod-server'])
+ if args.tpu_use_sudo:
+ new_cmd = ['sudo']
+ else:
+ new_cmd = []
+ new_cmd += ['accelerate-launch', '--tpu', '--no_tpu_cluster', '--num_machines', '1', '--mixed_precision', 'no', '--dynamo_backend', 'no', '--num_processes', str(args.num_processes), '--main_training_function', str(args.main_training_function), training_script] + training_script_args
+ new_args.positional = new_cmd
+ bad_flags = ''
+ for arg in vars(new_args):
+ if arg.startswith('docker_'):
+ value = getattr(new_args, arg)
+ if value != '' and value is not None:
+ bad_flags += f'{arg}="{value}"\n'
+ if bad_flags != '':
+ raise ValueError(f'Docker containers are not supported for TPU pod launcher currently, please remove the following flags:\n{bad_flags}')
+ new_args.env = [f'{k}={v}' for (k, v) in current_env.items()]
+ new_args.env.append('ACCELERATE_IN_TPU_POD=1')
+ try:
+ xla_dist.resolve_and_execute(new_args)
+ except Exception:
+ if is_rich_available() and debug:
+ console = get_console()
+ console.print('\n[bold red]Using --debug, `torch_xla.xla_dist` Stack Trace:[/bold red]')
+ console.print_exception(suppress=[__file__], show_locals=False)
+ else:
+ raise
+
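+# Submits the training script to Amazon SageMaker as a HuggingFace estimator job.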
+def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):
+ if not is_sagemaker_available():
+ raise ImportError('Please install the SageMaker dependencies with `pip install accelerate[sagemaker]` to be able to launch training on Amazon SageMaker.')
+ if args.module or args.no_python:
+ raise ValueError('SageMaker requires a python training script file and cannot be used with --module or --no_python')
+ from sagemaker.huggingface import HuggingFace
+ (args, sagemaker_inputs) = prepare_sagemager_args_inputs(sagemaker_config, args)
+ huggingface_estimator = HuggingFace(**args)
+ huggingface_estimator.fit(inputs=sagemaker_inputs)
+ print(f'You can find your model data at: {huggingface_estimator.model_data}')
+
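+# Fills in launch arguments that were not passed on the command line, using the saved `accelerate config`
+# file when available and hardware-detected defaults otherwise, and collects a warning for every value
+# that had to be defaulted.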
+def _validate_launch_command(args):
+ if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1:
+ raise ValueError('You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time.')
+ if args.multi_gpu and args.num_processes is not None and (args.num_processes < 2):
+ raise ValueError('You need to use at least 2 processes to use `--multi_gpu`.')
+ defaults = None
+ warned = []
+ mp_from_config_flag = False
+ if args.config_file is not None or (os.path.isfile(default_config_file) and (not args.cpu)):
+ defaults = load_config_from_file(args.config_file)
+ if not args.multi_gpu and (not args.tpu) and (not args.tpu_use_cluster) and (not args.use_deepspeed) and (not args.use_fsdp) and (not args.use_megatron_lm):
+ args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED
+ args.multi_gpu = True if defaults.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_XPU) else False
+ args.tpu = defaults.distributed_type == DistributedType.XLA
+ args.use_fsdp = defaults.distributed_type == DistributedType.FSDP
+ args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM
+ args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False
+ if args.gpu_ids is None:
+ if defaults.gpu_ids is not None:
+ args.gpu_ids = defaults.gpu_ids
+ else:
+ args.gpu_ids = 'all'
+ if args.multi_gpu and args.num_machines is None:
+ args.num_machines = defaults.num_machines
+ if len(args.gpu_ids.split(',')) < 2 and args.gpu_ids != 'all' and args.multi_gpu and (args.num_machines <= 1):
+ raise ValueError("Less than two GPU ids were configured and tried to run on on multiple GPUs. Please ensure at least two are specified for `--gpu_ids`, or use `--gpu_ids='all'`.")
+ if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:
+ for (name, attr) in defaults.__dict__.items():
+ if isinstance(attr, dict):
+ for k in defaults.deepspeed_config:
+ setattr(args, k, defaults.deepspeed_config[k])
+ for k in defaults.fsdp_config:
+ arg_to_set = k
+ if 'fsdp' not in arg_to_set:
+ arg_to_set = 'fsdp_' + arg_to_set
+ setattr(args, arg_to_set, defaults.fsdp_config[k])
+ for k in defaults.megatron_lm_config:
+ setattr(args, k, defaults.megatron_lm_config[k])
+ for k in defaults.dynamo_config:
+ setattr(args, k, defaults.dynamo_config[k])
+ for k in defaults.ipex_config:
+ setattr(args, k, defaults.ipex_config[k])
+ for k in defaults.mpirun_config:
+ setattr(args, k, defaults.mpirun_config[k])
+ continue
+ if name not in ['compute_environment', 'mixed_precision', 'distributed_type'] and getattr(args, name, None) is None:
+ setattr(args, name, attr)
+ if not args.debug:
+ args.debug = defaults.debug
+ if not args.mixed_precision:
+ if defaults.mixed_precision is None:
+ args.mixed_precision = 'no'
+ else:
+ args.mixed_precision = defaults.mixed_precision
+ mp_from_config_flag = True
+ else:
+ if args.use_cpu or (args.use_xpu and torch.xpu.is_available()):
+ native_amp = is_torch_version('>=', '1.10')
+ else:
+ native_amp = is_bf16_available(True)
+ if args.mixed_precision == 'bf16' and (not native_amp) and (not (args.tpu and is_torch_xla_available(check_is_tpu=True))):
+ raise ValueError('bf16 mixed precision requires PyTorch >= 1.10 and a supported device.')
+ if args.dynamo_backend is None:
+ args.dynamo_backend = 'no'
+ if args.num_processes == -1:
+ raise ValueError('You need to manually pass in `--num_processes` using this config yaml.')
+ else:
+ if args.num_processes is None:
+ if args.use_xpu and is_xpu_available():
+ args.num_processes = torch.xpu.device_count()
+ elif is_mlu_available():
+ args.num_processes = torch.mlu.device_count()
+ elif is_musa_available():
+ args.num_processes = torch.musa.device_count()
+ elif is_npu_available():
+ args.num_processes = torch.npu.device_count()
+ else:
+ args.num_processes = torch.cuda.device_count()
+ warned.append(f'\t`--num_processes` was set to a value of `{args.num_processes}`')
+ if args.debug is None:
+ args.debug = False
+ if not args.multi_gpu and args.num_processes > 1 and (args.use_xpu and is_xpu_available() and (torch.xpu.device_count() > 1) or (is_mlu_available() and torch.mlu.device_count() > 1) or (is_musa_available() and torch.musa.device_count() > 1) or (is_npu_available() and torch.npu.device_count() > 1) or (torch.cuda.device_count() > 1)):
+ warned.append('\t\tMore than one GPU was found, enabling multi-GPU training.\n\t\tIf this was unintended please pass in `--num_processes=1`.')
+ args.multi_gpu = True
+ if args.num_machines is None:
+ warned.append('\t`--num_machines` was set to a value of `1`')
+ args.num_machines = 1
+ if args.mixed_precision is None:
+ warned.append("\t`--mixed_precision` was set to a value of `'no'`")
+ args.mixed_precision = 'no'
+ if not hasattr(args, 'use_cpu'):
+ args.use_cpu = args.cpu
+ if args.dynamo_backend is None:
+ warned.append("\t`--dynamo_backend` was set to a value of `'no'`")
+ args.dynamo_backend = 'no'
+ if args.debug:
+ logger.debug('Running script in debug mode, expect distributed operations to be slightly slower.')
+ is_aws_env_disabled = defaults is None or (defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER)
+ if is_aws_env_disabled and args.num_cpu_threads_per_process is None:
+ args.num_cpu_threads_per_process = get_int_from_env(['OMP_NUM_THREADS'], 1)
+ if args.use_cpu and args.num_processes >= 1 and (get_int_from_env(['OMP_NUM_THREADS'], 0) == 0):
+ local_size = get_int_from_env(['MPI_LOCALNRANKS', 'OMPI_COMM_WORLD_LOCAL_SIZE', 'MV2_COMM_WORLD_LOCAL_SIZE'], max(int(args.num_processes / args.num_machines), 1))
+ threads_per_process = int(psutil.cpu_count(logical=False) / local_size)
+ if threads_per_process > 1:
+ args.num_cpu_threads_per_process = threads_per_process
+ warned.append(f'\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs')
+ if any(warned):
+ message = 'The following values were not passed to `accelerate launch` and had defaults used instead:\n'
+ message += '\n'.join(warned)
+ message += '\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`.'
+ logger.warning(message)
+ return (args, defaults, mp_from_config_flag)
+
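+# Entry point for `accelerate launch`: validates the arguments and dispatches to the DeepSpeed,
+# multi-GPU (also used for FSDP and Megatron-LM), TPU, SageMaker, or simple launcher.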
+def launch_command(args):
+ (args, defaults, mp_from_config_flag) = _validate_launch_command(args)
+ if args.use_deepspeed and (not args.cpu):
+ args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else []
+ if mp_from_config_flag:
+ args.deepspeed_fields_from_accelerate_config.append('mixed_precision')
+ args.deepspeed_fields_from_accelerate_config = ','.join(args.deepspeed_fields_from_accelerate_config)
+ deepspeed_launcher(args)
+ elif args.use_fsdp and (not args.cpu):
+ multi_gpu_launcher(args)
+ elif args.use_megatron_lm and (not args.cpu):
+ multi_gpu_launcher(args)
+ elif args.multi_gpu and (not args.cpu):
+ multi_gpu_launcher(args)
+ elif args.tpu and (not args.cpu):
+ if args.tpu_use_cluster:
+ tpu_pod_launcher(args)
+ else:
+ tpu_launcher(args)
+ elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
+ sagemaker_launcher(defaults, args)
+ else:
+ simple_launcher(args)
+
+def main():
+ parser = launch_command_parser()
+ args = parser.parse_args()
+ launch_command(args)
+if __name__ == '__main__':
+ main()
+
+# File: accelerate-main/src/accelerate/commands/menu/cursor.py
+""""""
+import os
+import sys
+from contextlib import contextmanager
+if os.name == 'nt':
+ import ctypes
+ import msvcrt
+
+ class CursorInfo(ctypes.Structure):
+ _fields_ = [('size', ctypes.c_int), ('visible', ctypes.c_byte)]
+
+def hide_cursor():
+ if os.name == 'nt':
+ ci = CursorInfo()
+ handle = ctypes.windll.kernel32.GetStdHandle(-11)
+ ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
+ ci.visible = False
+ ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
+ elif os.name == 'posix':
+ sys.stdout.write('\x1b[?25l')
+ sys.stdout.flush()
+
+def show_cursor():
+ if os.name == 'nt':
+ ci = CursorInfo()
+ handle = ctypes.windll.kernel32.GetStdHandle(-11)
+ ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
+ ci.visible = True
+ ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
+ elif os.name == 'posix':
+ sys.stdout.write('\x1b[?25h')
+ sys.stdout.flush()
+
+@contextmanager
+def hide():
+ try:
+ hide_cursor()
+ yield
+ finally:
+ show_cursor()
+
+# File: accelerate-main/src/accelerate/commands/menu/helpers.py
+""""""
+import enum
+import shutil
+import sys
+(TERMINAL_WIDTH, _) = shutil.get_terminal_size()
+CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
+
+class Direction(enum.Enum):
+ UP = 0
+ DOWN = 1
+
+def forceWrite(content, end=''):
+ sys.stdout.write(str(content) + end)
+ sys.stdout.flush()
+
+def writeColor(content, color, end=''):
+ forceWrite(f'\x1b[{color}m{content}\x1b[0m', end)
+
+def reset_cursor():
+ forceWrite('\r')
+
+def move_cursor(num_lines: int, direction: str):
+ forceWrite(f'\x1b[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}')
+
+def clear_line():
+ forceWrite(' ' * TERMINAL_WIDTH)
+ reset_cursor()
+
+def linebreak():
+ reset_cursor()
+ forceWrite('-' * TERMINAL_WIDTH)
+
+# File: accelerate-main/src/accelerate/commands/menu/input.py
+""""""
+from typing import List
+from .keymap import KEYMAP, get_character
+
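+# `mark` and `mark_multiple` tag a method with the key code(s) it handles so that the `KeyHandler`
+# metaclass can register it in the class's `key_handler` table.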
+def mark(key: str):
+
+ def decorator(func):
+ handle = getattr(func, 'handle_key', [])
+ handle += [key]
+ func.handle_key = handle
+ return func
+ return decorator
+
+def mark_multiple(*keys: List[str]):
+
+ def decorator(func):
+ handle = getattr(func, 'handle_key', [])
+ handle += keys
+ func.handle_key = handle
+ return func
+ return decorator
+
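+# Metaclass backing `register`: it gathers the decorated handlers into `key_handler` and adds
+# `handle_input`, which reads one key and dispatches to the matching handler (or returns None).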
+class KeyHandler(type):
+
+ def __new__(cls, name, bases, attrs):
+ new_cls = super().__new__(cls, name, bases, attrs)
+ if not hasattr(new_cls, 'key_handler'):
+ new_cls.key_handler = {}
+ new_cls.handle_input = KeyHandler.handle_input
+ for value in attrs.values():
+ handled_keys = getattr(value, 'handle_key', [])
+ for key in handled_keys:
+ new_cls.key_handler[key] = value
+ return new_cls
+
+ @staticmethod
+ def handle_input(cls):
+ char = get_character()
+ if char != KEYMAP['undefined']:
+ char = ord(char)
+ handler = cls.key_handler.get(char)
+ if handler:
+ cls.current_selection = char
+ return handler(cls)
+ else:
+ return None
+
+def register(cls):
+ return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
+
+# File: accelerate-main/src/accelerate/commands/menu/keymap.py
+""""""
+import os
+import string
+import sys
+ARROW_KEY_FLAG = 1 << 8
+KEYMAP = {'tab': ord('\t'), 'newline': ord('\r'), 'esc': 27, 'up': 65 + ARROW_KEY_FLAG, 'down': 66 + ARROW_KEY_FLAG, 'right': 67 + ARROW_KEY_FLAG, 'left': 68 + ARROW_KEY_FLAG, 'mod_int': 91, 'undefined': sys.maxsize, 'interrupt': 3, 'insert': 50, 'delete': 51, 'pg_up': 53, 'pg_down': 54}
+KEYMAP['arrow_begin'] = KEYMAP['up']
+KEYMAP['arrow_end'] = KEYMAP['left']
+if sys.platform == 'win32':
+ WIN_CH_BUFFER = []
+ WIN_KEYMAP = {b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG, b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG, b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG, b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG, b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG, b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG, b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG, b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG}
+for i in range(10):
+ KEYMAP[str(i)] = ord(str(i))
+
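+# Reads a single raw keypress: on Windows this decodes msvcrt scan codes into the escape sequences
+# expected by `get_character`; on POSIX it temporarily switches stdin into raw mode.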
+def get_raw_chars():
+ if os.name == 'nt':
+ import msvcrt
+ encoding = 'mbcs'
+ while msvcrt.kbhit():
+ msvcrt.getch()
+ if len(WIN_CH_BUFFER) == 0:
+ ch = msvcrt.getch()
+ if ch in (b'\x00', b'\xe0'):
+ ch2 = ch + msvcrt.getch()
+ try:
+ chx = chr(WIN_KEYMAP[ch2])
+ WIN_CH_BUFFER.append(chr(KEYMAP['mod_int']))
+ WIN_CH_BUFFER.append(chx)
+ if ord(chx) in (KEYMAP['insert'] - 1 << 9, KEYMAP['delete'] - 1 << 9, KEYMAP['pg_up'] - 1 << 9, KEYMAP['pg_down'] - 1 << 9):
+ WIN_CH_BUFFER.append(chr(126))
+ ch = chr(KEYMAP['esc'])
+ except KeyError:
+ ch = ch2[1]
+ else:
+ ch = ch.decode(encoding)
+ else:
+ ch = WIN_CH_BUFFER.pop(0)
+ elif os.name == 'posix':
+ import termios
+ import tty
+ fd = sys.stdin.fileno()
+ old_settings = termios.tcgetattr(fd)
+ try:
+ tty.setraw(fd)
+ ch = sys.stdin.read(1)
+ finally:
+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+ return ch
+
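+# Normalizes one keypress: printable characters are returned as-is, arrow keys are remapped with
+# ARROW_KEY_FLAG, and anything unrecognized becomes KEYMAP['undefined'].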
+def get_character():
+ char = get_raw_chars()
+ if ord(char) in [KEYMAP['interrupt'], KEYMAP['newline']]:
+ return char
+ elif ord(char) == KEYMAP['esc']:
+ combo = get_raw_chars()
+ if ord(combo) == KEYMAP['mod_int']:
+ key = get_raw_chars()
+ if ord(key) >= KEYMAP['arrow_begin'] - ARROW_KEY_FLAG and ord(key) <= KEYMAP['arrow_end'] - ARROW_KEY_FLAG:
+ return chr(ord(key) + ARROW_KEY_FLAG)
+ else:
+ return KEYMAP['undefined']
+ else:
+ return get_raw_chars()
+ elif char in string.printable:
+ return char
+ else:
+ return KEYMAP['undefined']
+
+# File: accelerate-main/src/accelerate/commands/menu/selection_menu.py
+""""""
+import builtins
+import sys
+from ...utils.imports import _is_package_available
+from . import cursor, input
+from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
+from .keymap import KEYMAP
+in_colab = False
+try:
+ in_colab = _is_package_available('google.colab')
+except ModuleNotFoundError:
+ pass
+
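+# Interactive bullet-style menu: navigate with the arrow or number keys and confirm with enter.
+# Illustrative usage (hypothetical choices): `BulletMenu('Pick one:', ['a', 'b']).run()` returns the
+# index of the selected entry.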
+@input.register
+class BulletMenu:
+
+ def __init__(self, prompt: str=None, choices: list=[]):
+ self.position = 0
+ self.choices = choices
+ self.prompt = prompt
+ if sys.platform == 'win32':
+ self.arrow_char = '*'
+ else:
+ self.arrow_char = '➔ '
+
+ def write_choice(self, index, end: str=''):
+ if sys.platform != 'win32':
+ writeColor(self.choices[index], 32, end)
+ else:
+ forceWrite(self.choices[index], end)
+
+ def print_choice(self, index: int):
+ if index == self.position:
+ forceWrite(f' {self.arrow_char} ')
+ self.write_choice(index)
+ else:
+ forceWrite(f' {self.choices[index]}')
+ reset_cursor()
+
+ def move_direction(self, direction: Direction, num_spaces: int=1):
+ old_position = self.position
+ if direction == Direction.DOWN:
+ if self.position + 1 >= len(self.choices):
+ return
+ self.position += num_spaces
+ else:
+ if self.position - 1 < 0:
+ return
+ self.position -= num_spaces
+ clear_line()
+ self.print_choice(old_position)
+ move_cursor(num_spaces, direction.name)
+ self.print_choice(self.position)
+
+ @input.mark(KEYMAP['up'])
+ def move_up(self):
+ self.move_direction(Direction.UP)
+
+ @input.mark(KEYMAP['down'])
+ def move_down(self):
+ self.move_direction(Direction.DOWN)
+
+ @input.mark(KEYMAP['newline'])
+ def select(self):
+ move_cursor(len(self.choices) - self.position, 'DOWN')
+ return self.position
+
+ @input.mark(KEYMAP['interrupt'])
+ def interrupt(self):
+ move_cursor(len(self.choices) - self.position, 'DOWN')
+ raise KeyboardInterrupt
+
+ @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
+ def select_row(self):
+ index = int(chr(self.current_selection))
+ movement = index - self.position
+ if index == self.position:
+ return
+ if index < len(self.choices):
+ if self.position > index:
+ self.move_direction(Direction.UP, -movement)
+ elif self.position < index:
+ self.move_direction(Direction.DOWN, movement)
+ else:
+ return
+ else:
+ return
+
+ def run(self, default_choice: int=0):
+ if self.prompt:
+ linebreak()
+ forceWrite(self.prompt, '\n')
+ if in_colab:
+ forceWrite('Please input a choice index (starting from 0), and press enter', '\n')
+ else:
+ forceWrite('Please select a choice using the arrow or number keys, and select with enter', '\n')
+ self.position = default_choice
+ for i in range(len(self.choices)):
+ self.print_choice(i)
+ forceWrite('\n')
+ move_cursor(len(self.choices) - self.position, 'UP')
+ with cursor.hide():
+ while True:
+ if in_colab:
+ try:
+ choice = int(builtins.input())
+ except ValueError:
+ choice = default_choice
+ else:
+ choice = self.handle_input()
+ if choice is not None:
+ reset_cursor()
+ for _ in range(len(self.choices) + 1):
+ move_cursor(1, 'UP')
+ clear_line()
+ self.write_choice(choice, '\n')
+ return choice
+
+# File: accelerate-main/src/accelerate/commands/merge.py
+from accelerate.commands.utils import CustomArgumentParser
+from accelerate.utils import merge_fsdp_weights
+description = 'Utility to merge the weights from multiple FSDP checkpoints into a single combined checkpoint. Should be used if\n`SHARDED_STATE_DICT` was used for the model. Weights will be saved to `{output_path}`.\n\nThis is a CPU-bound process and requires enough RAM to load the entire model state dict.'
+
+def merge_command(args):
+ merge_fsdp_weights(args.checkpoint_directory, args.output_path, not args.unsafe_serialization, args.remove_checkpoint_dir)
+
+def merge_command_parser(subparsers=None):
+ if subparsers is not None:
+ parser = subparsers.add_parser('merge-weights', description=description)
+ else:
+ parser = CustomArgumentParser(description=description)
+ parser.add_argument('checkpoint_directory', type=str, help='A directory containing sharded weights saved by FSDP.')
+ parser.add_argument('output_path', type=str, help='The path to save the merged weights. Defaults to the current directory. ')
+ parser.add_argument('--unsafe_serialization', action='store_true', default=False, help='Whether to save the merged weights as `.bin` rather than `.safetensors` (not recommended).')
+ parser.add_argument('--remove_checkpoint_dir', action='store_true', help='Whether to remove the checkpoint directory after merging.', default=False)
+ if subparsers is not None:
+ parser.set_defaults(func=merge_command)
+ return parser
+
+def main():
+ parser = merge_command_parser()
+ args = parser.parse_args()
+ merge_command(args)
+if __name__ == '__main__':
+ main()
+
+# File: accelerate-main/src/accelerate/commands/tpu.py
+import argparse
+import os
+import subprocess
+from packaging.version import Version, parse
+from accelerate.commands.config.config_args import default_config_file, load_config_from_file
+_description = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
+
+def tpu_command_parser(subparsers=None):
+ if subparsers is not None:
+ parser = subparsers.add_parser('tpu-config', description=_description)
+ else:
+ parser = argparse.ArgumentParser('Accelerate tpu-config command', description=_description)
+ config_args = parser.add_argument_group('Config Arguments', 'Arguments that can be configured through `accelerate config`.')
+ config_args.add_argument('--config_file', type=str, default=None, help='Path to the config file to use for accelerate.')
+ config_args.add_argument('--tpu_name', default=None, help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.')
+ config_args.add_argument('--tpu_zone', default=None, help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.')
+ pod_args = parser.add_argument_group('TPU Arguments', 'Arguments for options ran inside the TPU.')
+ pod_args.add_argument('--use_alpha', action='store_true', help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.')
+ pod_args.add_argument('--command_file', default=None, help='The path to the file containing the commands to run on the pod on startup.')
+ pod_args.add_argument('--command', action='append', nargs='+', help='A command to run on the pod. Can be passed multiple times.')
+ pod_args.add_argument('--install_accelerate', action='store_true', help='Whether to install accelerate on the pod. Defaults to False.')
+ pod_args.add_argument('--accelerate_version', default='latest', help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.")
+ pod_args.add_argument('--debug', action='store_true', help='If set, will print the command that would be run instead of running it.')
+ if subparsers is not None:
+ parser.set_defaults(func=tpu_command_launcher)
+ return parser
+
+def tpu_command_launcher(args):
+ defaults = None
+ if args.config_file is not None or os.path.isfile(default_config_file):
+ defaults = load_config_from_file(args.config_file)
+ if not args.command_file and defaults.command_file is not None and (not args.command):
+ args.command_file = defaults.command_file
+ if not args.command and defaults.commands is not None:
+ args.command = defaults.commands
+ if not args.tpu_name:
+ args.tpu_name = defaults.tpu_name
+ if not args.tpu_zone:
+ args.tpu_zone = defaults.tpu_zone
+ if args.accelerate_version == 'dev':
+ args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
+ elif args.accelerate_version == 'latest':
+ args.accelerate_version = 'accelerate -U'
+ elif isinstance(parse(args.accelerate_version), Version):
+ args.accelerate_version = f'accelerate=={args.accelerate_version}'
+ if not args.command_file and (not args.command):
+ raise ValueError('You must specify either a command file or a command to run on the pod.')
+ if args.command_file:
+ with open(args.command_file) as f:
+ args.command = [f.read().splitlines()]
+ if isinstance(args.command[0], list):
+ args.command = [line for cmd in args.command for line in cmd]
+ new_cmd = ['cd /usr/share']
+ if args.install_accelerate:
+ new_cmd += [f'pip install {args.accelerate_version}']
+ new_cmd += args.command
+ args.command = '; '.join(new_cmd)
+ cmd = ['gcloud']
+ if args.use_alpha:
+ cmd += ['alpha']
+ cmd += ['compute', 'tpus', 'tpu-vm', 'ssh', args.tpu_name, '--zone', args.tpu_zone, '--command', args.command, '--worker', 'all']
+ if args.debug:
+ print(f"Running {' '.join(cmd)}")
+ return
+ subprocess.run(cmd)
+ print('Successfully set up pod.')
+
+def main():
+ parser = tpu_command_parser()
+ args = parser.parse_args()
+ tpu_command_launcher(args)
+
+# File: accelerate-main/src/accelerate/commands/utils.py
+import argparse
+
+class _StoreAction(argparse.Action):
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ new_option_strings = []
+ for option_string in self.option_strings:
+ new_option_strings.append(option_string)
+ if '_' in option_string[2:]:
+ new_option_strings.append(option_string.replace('_', '-'))
+ self.option_strings = new_option_strings
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ setattr(namespace, self.dest, values)
+
+class _StoreConstAction(_StoreAction):
+
+ def __init__(self, option_strings, dest, const, default=None, required=False, help=None):
+ super().__init__(option_strings=option_strings, dest=dest, nargs=0, const=const, default=default, required=required, help=help)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ setattr(namespace, self.dest, self.const)
+
+class _StoreTrueAction(_StoreConstAction):
+
+ def __init__(self, option_strings, dest, default=None, required=False, help=None):
+ super().__init__(option_strings=option_strings, dest=dest, const=True, default=default, required=required, help=help)
+
+class CustomArgumentGroup(argparse._ArgumentGroup):
+
+ def _add_action(self, action):
+ args = vars(action)
+ if isinstance(action, argparse._StoreTrueAction):
+ action = _StoreTrueAction(args['option_strings'], args['dest'], args['default'], args['required'], args['help'])
+ elif isinstance(action, argparse._StoreConstAction):
+ action = _StoreConstAction(args['option_strings'], args['dest'], args['const'], args['default'], args['required'], args['help'])
+ elif isinstance(action, argparse._StoreAction):
+ action = _StoreAction(**args)
+ action = super()._add_action(action)
+ return action
+
+class CustomArgumentParser(argparse.ArgumentParser):
+
+ def add_argument(self, *args, **kwargs):
+ if 'action' in kwargs:
+ if kwargs['action'] == 'store_true':
+ kwargs['action'] = _StoreTrueAction
+ else:
+ kwargs['action'] = _StoreAction
+ super().add_argument(*args, **kwargs)
+
+ def add_argument_group(self, *args, **kwargs):
+ group = CustomArgumentGroup(self, *args, **kwargs)
+ self._action_groups.append(group)
+ return group
+
+# File: accelerate-main/src/accelerate/data_loader.py
+import math
+from contextlib import suppress
+from typing import Callable, List, Optional, Union
+import torch
+from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler
+from .logging import get_logger
+from .state import DistributedType, GradientState, PartialState, is_torch_xla_available
+from .utils import RNGType, broadcast, broadcast_object_list, concatenate, find_batch_size, get_data_structure, initialize_tensors, is_torch_version, is_torchdata_stateful_dataloader_available, send_to_device, slice_tensors, synchronize_rng_states
+logger = get_logger(__name__)
+_PYTORCH_DATALOADER_KWARGS = {'batch_size': 1, 'shuffle': False, 'sampler': None, 'batch_sampler': None, 'num_workers': 0, 'collate_fn': None, 'pin_memory': False, 'drop_last': False, 'timeout': 0, 'worker_init_fn': None, 'multiprocessing_context': None, 'generator': None, 'prefetch_factor': 2, 'persistent_workers': False}
+_PYTORCH_DATALOADER_ADDITIONAL_KWARGS = {}
+for (v, additional_kwargs) in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items():
+ if is_torch_version('>=', v):
+ _PYTORCH_DATALOADER_KWARGS.update(additional_kwargs)
+
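+# RandomSampler whose shuffling is seeded with `initial_seed + epoch`, so processes sharing the same
+# seed and epoch produce identical permutations across epochs.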
+class SeedableRandomSampler(RandomSampler):
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.epoch = 0
+ self.initial_seed = torch.random.initial_seed()
+
+ def __iter__(self):
+ if self.generator is None:
+ self.generator = torch.Generator()
+ self.generator.manual_seed(self.initial_seed)
+ seed = self.epoch + self.initial_seed
+ self.generator.manual_seed(seed)
+ yield from super().__iter__()
+ self.set_epoch(self.epoch + 1)
+
+ def set_epoch(self, epoch: int):
+ self.epoch = epoch
+
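+# Wraps a `BatchSampler` so each process only yields its own shard of batches: either a slice of every
+# batch (`split_batches=True`) or whole batches handed out round-robin, with optional padding from the
+# first batches to keep all processes in lockstep (`even_batches=True`).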
+class BatchSamplerShard(BatchSampler):
+
+ def __init__(self, batch_sampler: BatchSampler, num_processes: int=1, process_index: int=0, split_batches: bool=False, even_batches: bool=True):
+ if split_batches and batch_sampler.batch_size % num_processes != 0:
+ raise ValueError(f'To use `BatchSamplerShard` in `split_batches` mode, the batch size ({batch_sampler.batch_size}) needs to be a round multiple of the number of processes ({num_processes}).')
+ self.batch_sampler = batch_sampler
+ self.num_processes = num_processes
+ self.process_index = process_index
+ self.split_batches = split_batches
+ self.even_batches = even_batches
+ self.batch_size = getattr(batch_sampler, 'batch_size', None)
+ self.drop_last = getattr(batch_sampler, 'drop_last', False)
+ if self.batch_size is None and self.even_batches:
+ raise ValueError('You need to use `even_batches=False` when the batch sampler has no batch size. If you are not calling this method directly, set `accelerator.even_batches=False` instead.')
+
+ @property
+ def total_length(self):
+ return len(self.batch_sampler)
+
+ def __len__(self):
+ if self.split_batches:
+ return len(self.batch_sampler)
+ if len(self.batch_sampler) % self.num_processes == 0:
+ return len(self.batch_sampler) // self.num_processes
+ length = len(self.batch_sampler) // self.num_processes
+ if self.drop_last:
+ return length
+ elif self.even_batches:
+ return length + 1
+ else:
+ return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length
+
+ def __iter__(self):
+ return self._iter_with_split() if self.split_batches else self._iter_with_no_split()
+
+ def _iter_with_split(self):
+ initial_data = []
+ batch_length = self.batch_sampler.batch_size // self.num_processes
+ for (idx, batch) in enumerate(self.batch_sampler):
+ if idx == 0:
+ initial_data = batch
+ if len(batch) == self.batch_size:
+ yield batch[batch_length * self.process_index:batch_length * (self.process_index + 1)]
+ if not self.drop_last and len(initial_data) > 0 and (len(batch) < self.batch_size):
+ if not self.even_batches:
+ if len(batch) > batch_length * self.process_index:
+ yield batch[batch_length * self.process_index:batch_length * (self.process_index + 1)]
+ else:
+ while len(initial_data) < self.batch_size:
+ initial_data += initial_data
+ batch = batch + initial_data
+ yield batch[batch_length * self.process_index:batch_length * (self.process_index + 1)]
+
+ def _iter_with_no_split(self):
+ initial_data = []
+ batch_to_yield = []
+ for (idx, batch) in enumerate(self.batch_sampler):
+ if not self.drop_last and idx < self.num_processes:
+ initial_data += batch
+ if idx % self.num_processes == self.process_index:
+ batch_to_yield = batch
+ if idx % self.num_processes == self.num_processes - 1 and (self.batch_size is None or len(batch) == self.batch_size):
+ yield batch_to_yield
+ batch_to_yield = []
+ if not self.drop_last and len(initial_data) > 0:
+ if not self.even_batches:
+ if len(batch_to_yield) > 0:
+ yield batch_to_yield
+ else:
+ if len(batch_to_yield) == self.batch_size:
+ yield batch_to_yield
+ while len(initial_data) < self.num_processes * self.batch_size:
+ initial_data += initial_data
+ if len(batch) == self.batch_size:
+ batch = []
+ idx += 1
+ cycle_index = 0
+ while idx % self.num_processes != 0 or len(batch) > 0:
+ end_index = cycle_index + self.batch_size - len(batch)
+ batch += initial_data[cycle_index:end_index]
+ if idx % self.num_processes == self.process_index:
+ yield batch
+ cycle_index = end_index
+ batch = []
+ idx += 1
+
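+# Shards an `IterableDataset` across processes by buffering a full "real" batch and yielding only the
+# slice that belongs to this process, looping back over the first batch when `drop_last=False` and the
+# final batch comes up short.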
+class IterableDatasetShard(IterableDataset):
+
+ def __init__(self, dataset: IterableDataset, batch_size: int=1, drop_last: bool=False, num_processes: int=1, process_index: int=0, split_batches: bool=False):
+ if split_batches and batch_size > 1 and (batch_size % num_processes != 0):
+ raise ValueError(f'To use `IterableDatasetShard` in `split_batches` mode, the batch size ({batch_size}) needs to be a round multiple of the number of processes ({num_processes}).')
+ self.dataset = dataset
+ self.batch_size = batch_size
+ self.drop_last = drop_last
+ self.num_processes = num_processes
+ self.process_index = process_index
+ self.split_batches = split_batches
+ self.epoch = 0
+
+ def set_epoch(self, epoch):
+ self.epoch = epoch
+ if hasattr(self.dataset, 'set_epoch'):
+ self.dataset.set_epoch(epoch)
+
+ def __len__(self):
+ if self.drop_last:
+ return len(self.dataset) // (self.batch_size * self.num_processes) * self.batch_size
+ else:
+ return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size
+
+ def __iter__(self):
+ if not hasattr(self.dataset, 'set_epoch') and hasattr(self.dataset, 'generator') and isinstance(self.dataset.generator, torch.Generator):
+ self.dataset.generator.manual_seed(self.epoch)
+ real_batch_size = self.batch_size if self.split_batches else self.batch_size * self.num_processes
+ process_batch_size = self.batch_size // self.num_processes if self.split_batches else self.batch_size
+ process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size)
+ first_batch = None
+ current_batch = []
+ for element in self.dataset:
+ current_batch.append(element)
+ if len(current_batch) == real_batch_size:
+ for i in process_slice:
+ yield current_batch[i]
+ if first_batch is None:
+ first_batch = current_batch.copy()
+ current_batch = []
+ if not self.drop_last and len(current_batch) > 0:
+ if first_batch is None:
+ first_batch = current_batch.copy()
+ while len(current_batch) < real_batch_size:
+ current_batch += first_batch
+ for i in process_slice:
+ yield current_batch[i]
+
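+# Mixin that tracks end-of-dataloader state and the size of the last (possibly short) batch, and
+# registers/unregisters the dataloader with the shared GradientState in `begin`/`end`.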
+class DataLoaderStateMixin:
+
+ def __init_subclass__(cls, **kwargs):
+ cls.end_of_dataloader = False
+ cls.remainder = -1
+
+ def reset(self):
+ self.end_of_dataloader = False
+ self.remainder = -1
+
+ def begin(self):
+ self.reset()
+ with suppress(Exception):
+ if not self._drop_last:
+ length = getattr(self.dataset, 'total_dataset_length', len(self.dataset))
+ self.remainder = length % self.total_batch_size
+ self.gradient_state._add_dataloader(self)
+
+ def end(self):
+ self.gradient_state._remove_dataloader(self)
+
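+# Thin wrapper around either a regular `DataLoader` or, when requested and available, torchdata's
+# `StatefulDataLoader`, exposing a common `state_dict`/`load_state_dict` interface.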
+class DataLoaderAdapter:
+
+ def __init__(self, dataset, use_stateful_dataloader=False, batch_sampler=None, **kwargs):
+ self.use_stateful_dataloader = use_stateful_dataloader
+ if is_torchdata_stateful_dataloader_available():
+ from torchdata.stateful_dataloader import StatefulDataLoader
+ if use_stateful_dataloader and (not is_torchdata_stateful_dataloader_available()):
+ raise ImportError('StatefulDataLoader is not available. Please install torchdata version 0.8.0 or higher to use it.')
+ if use_stateful_dataloader:
+ self.base_dataloader = StatefulDataLoader(dataset, batch_sampler=batch_sampler, **kwargs)
+ else:
+ self.base_dataloader = DataLoader(dataset, batch_sampler=batch_sampler, **kwargs)
+ if hasattr(self.base_dataloader, 'state_dict'):
+ self.dl_state_dict = self.base_dataloader.state_dict()
+
+ def __getattr__(self, name):
+ if name == 'base_dataloader':
+ raise AttributeError()
+ return getattr(self.base_dataloader, name)
+
+ def state_dict(self):
+ return self.dl_state_dict
+
+ def load_state_dict(self, state_dict):
+ self.base_dataloader.load_state_dict(state_dict)
+
+ @property
+ def __class__(self):
+ return self.base_dataloader.__class__
+
+ def __len__(self):
+ return len(self.base_dataloader)
+
+ def adjust_state_dict_for_prefetch(self):
+ if PartialState().distributed_type != DistributedType.NO:
+ factor = PartialState().num_processes - 1
+ if self.dl_state_dict['_sampler_iter_yielded'] > 0:
+ self.dl_state_dict['_sampler_iter_yielded'] -= factor
+ if self.dl_state_dict['_num_yielded'] > 0:
+ self.dl_state_dict['_num_yielded'] -= factor
+ if self.dl_state_dict['_index_sampler_state'] is not None:
+ if 'samples_yielded' in self.dl_state_dict['_index_sampler_state'] and self.dl_state_dict['_index_sampler_state']['samples_yielded'] > 0:
+ self.dl_state_dict['_index_sampler_state']['samples_yielded'] -= self.batch_size * factor
+
+ def _update_state_dict(self):
+ if hasattr(self.base_dataloader, 'state_dict'):
+ self.dl_state_dict = self.base_dataloader.state_dict()
+ self.adjust_state_dict_for_prefetch()
+ self.dl_state_dict['_iterator_finished'] = self.end_of_dataloader
+
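+# Dataloader used when every process iterates over its own shard: synchronizes RNG state at the start
+# of each epoch, moves batches to `device`, supports skipping the first batches, and prefetches one
+# batch ahead so the end of the dataloader can be detected.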
+class DataLoaderShard(DataLoaderAdapter, DataLoaderStateMixin):
+
+ def __init__(self, dataset, device=None, rng_types=None, synchronized_generator=None, skip_batches=0, use_stateful_dataloader=False, _drop_last: bool=False, _non_blocking: bool=False, **kwargs):
+ super().__init__(dataset, use_stateful_dataloader=use_stateful_dataloader, **kwargs)
+ self.device = device
+ self.rng_types = rng_types
+ self.synchronized_generator = synchronized_generator
+ self.skip_batches = skip_batches
+ self.gradient_state = GradientState()
+ self._drop_last = _drop_last
+ self._non_blocking = _non_blocking
+ self.iteration = 0
+
+ def __iter__(self):
+ if self.rng_types is not None:
+ synchronize_rng_states(self.rng_types, self.synchronized_generator)
+ self.begin()
+ self.set_epoch(self.iteration)
+ dataloader_iter = self.base_dataloader.__iter__()
+ try:
+ current_batch = next(dataloader_iter)
+ except StopIteration:
+ yield
+ batch_index = 0
+ while True:
+ try:
+ if self.device is not None:
+ current_batch = send_to_device(current_batch, self.device, non_blocking=self._non_blocking)
+ self._update_state_dict()
+ next_batch = next(dataloader_iter)
+ if batch_index >= self.skip_batches:
+ yield current_batch
+ batch_index += 1
+ current_batch = next_batch
+ except StopIteration:
+ self.end_of_dataloader = True
+ self._update_state_dict()
+ if batch_index >= self.skip_batches:
+ yield current_batch
+ break
+ self.iteration += 1
+ self.end()
+
+ def __reduce__(self):
+ args = super().__reduce__()
+ return (DataLoaderShard, *args[1:])
+
+ def set_epoch(self, epoch: int):
+ if self.iteration != epoch:
+ self.iteration = epoch
+ if hasattr(self.batch_sampler, 'sampler') and hasattr(self.batch_sampler.sampler, 'set_epoch'):
+ self.batch_sampler.sampler.set_epoch(epoch)
+ elif hasattr(self.dataset, 'set_epoch'):
+ self.dataset.set_epoch(epoch)
+
+ @property
+ def total_batch_size(self):
+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler
+ return batch_sampler.batch_size if getattr(batch_sampler, 'split_batches', False) else batch_sampler.batch_size * getattr(batch_sampler, 'num_processes', 1)
+
+ @property
+ def total_dataset_length(self):
+ if hasattr(self.dataset, 'total_length'):
+ return self.dataset.total_length
+ else:
+ return len(self.dataset)
+
+ def get_sampler(self):
+ return get_sampler(self)
+
+ def set_sampler(self, sampler):
+ sampler_is_batch_sampler = isinstance(self.sampler, BatchSampler)
+ if sampler_is_batch_sampler:
+ self.sampler.sampler = sampler
+ else:
+ self.batch_sampler.sampler = sampler
+ if hasattr(self.batch_sampler, 'batch_sampler'):
+ self.batch_sampler.batch_sampler.sampler = sampler
+if is_torch_xla_available():
+ import torch_xla.distributed.parallel_loader as xpl
+
+ class MpDeviceLoaderWrapper(xpl.MpDeviceLoader):
+
+ def __init__(self, dataloader: DataLoaderShard, device: torch.device):
+ super().__init__(dataloader, device)
+ self._rng_types = self._loader.rng_types
+ self._loader.rng_types = None
+ self.device = device
+
+ def __iter__(self):
+ if self._rng_types is not None:
+ synchronize_rng_states(self._rng_types, self._loader.synchronized_generator)
+ return super().__iter__()
+
+ def set_epoch(self, epoch: int):
+ if hasattr(self.dataloader, 'set_epoch'):
+ self.dataloader.set_epoch(epoch)
+
+ @property
+ def total_batch_size(self):
+ return self._loader.total_batch_size
+
+ @property
+ def total_dataset_length(self):
+ return self._loader.total_dataset_length
+
+ @property
+ def batch_sampler(self):
+ return self._loader.batch_sampler
+
+ @property
+ def dataloader(self):
+ return self._loader
+
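+# Dataloader where process 0 fetches the batches and broadcasts them; every process then slices out its
+# own portion. With `split_batches=True` a single batch is split, otherwise one batch per process is
+# fetched and concatenated before slicing.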
+class DataLoaderDispatcher(DataLoaderAdapter, DataLoaderStateMixin):
+
+ def __init__(self, dataset, split_batches: bool=False, skip_batches=0, use_stateful_dataloader=False, _drop_last: bool=False, _non_blocking: bool=False, slice_fn=None, **kwargs):
+ shuffle = False
+ if is_torch_version('>=', '1.11.0'):
+ from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe
+ if isinstance(dataset, ShufflerIterDataPipe):
+ shuffle = dataset._shuffle_enabled
+ super().__init__(dataset, use_stateful_dataloader=use_stateful_dataloader, **kwargs)
+ self.split_batches = split_batches
+ if shuffle:
+ torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)
+ self.gradient_state = GradientState()
+ self.state = PartialState()
+ self._drop_last = _drop_last
+ self._non_blocking = _non_blocking
+ self.skip_batches = skip_batches
+ self.slice_fn = slice_tensors if slice_fn is None else slice_fn
+ self.iteration = 0
+
+ def _fetch_batches(self, iterator):
+ (batches, batch) = (None, None)
+ if self.state.process_index == 0:
+ try:
+ if self.split_batches:
+ self._update_state_dict()
+ batch = next(iterator)
+ else:
+ batches = []
+ for _ in range(self.state.num_processes):
+ self._update_state_dict()
+ batches.append(next(iterator))
+ try:
+ batch = concatenate(batches, dim=0)
+ except RuntimeError as e:
+ raise RuntimeError("You can't use batches of different size with `dispatch_batches=True` or when using an `IterableDataset`.either pass `dispatch_batches=False` and have each process fetch its own batch or pass `split_batches=True`. By doing so, the main process will fetch a full batch and slice it into `num_processes` batches for each process.") from e
+ batch_info = [get_data_structure(batch), False]
+ except StopIteration:
+ batch_info = [None, True]
+ else:
+ batch_info = [None, self._stop_iteration]
+ broadcast_object_list(batch_info)
+ self._stop_iteration = batch_info[1]
+ if self._stop_iteration:
+ if not self.split_batches and (not self._drop_last):
+ if self.state.process_index == 0 and len(batches) > 0:
+ batch = concatenate(batches, dim=0)
+ batch_info = [get_data_structure(batch), False]
+ else:
+ batch_info = [None, True]
+ broadcast_object_list(batch_info)
+ return (batch, batch_info)
+
+ def __iter__(self):
+ self.begin()
+ self.set_epoch(self.iteration)
+ main_iterator = None
+ if is_torch_version('>=', '2.0.1'):
+ main_iterator = self.base_dataloader.__iter__()
+ elif self.state.process_index == 0:
+ main_iterator = self.base_dataloader.__iter__()
+ stop_iteration = False
+ self._stop_iteration = False
+ first_batch = None
+ (next_batch, next_batch_info) = self._fetch_batches(main_iterator)
+ batch_index = 0
+ while not stop_iteration:
+ (batch, batch_info) = (next_batch, next_batch_info)
+ if self.state.process_index != 0:
+ batch = initialize_tensors(batch_info[0])
+ batch = send_to_device(batch, self.state.device, non_blocking=self._non_blocking)
+ batch = broadcast(batch, from_process=0)
+ if not self._drop_last and first_batch is None:
+ first_batch = self.slice_fn(batch, slice(0, self.state.num_processes), process_index=self.state.process_index, num_processes=self.state.num_processes)
+ if batch is None:
+ raise ValueError(f'Batch does not contain any data (`{batch}`). The end of all available iterable data was reached before the expected stop iteration.')
+ observed_batch_size = find_batch_size(batch)
+ batch_size = observed_batch_size // self.state.num_processes
+ stop_iteration = self._stop_iteration
+ if not stop_iteration:
+ (next_batch, next_batch_info) = self._fetch_batches(main_iterator)
+ if self._stop_iteration and next_batch_info[0] is None:
+ stop_iteration = True
+ if not self._drop_last and stop_iteration and (observed_batch_size % self.state.num_processes != 0):
+ batch = concatenate([batch, first_batch], dim=0)
+ batch_size += 1
+ data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size)
+ batch = self.slice_fn(batch, data_slice, process_index=self.state.process_index, num_processes=self.state.num_processes)
+ if stop_iteration:
+ self.end_of_dataloader = True
+ self._update_state_dict()
+ self.remainder = observed_batch_size
+ if batch_index >= self.skip_batches:
+ yield batch
+ batch_index += 1
+ self.iteration += 1
+ self.end()
+
+ def set_epoch(self, epoch: int):
+ if self.iteration != epoch:
+ self.iteration = epoch
+ if hasattr(self.batch_sampler, 'sampler') and hasattr(self.batch_sampler.sampler, 'set_epoch'):
+ self.batch_sampler.sampler.set_epoch(epoch)
+ elif hasattr(self.dataset, 'set_epoch'):
+ self.dataset.set_epoch(epoch)
+
+ def __len__(self):
+ whole_length = len(self.base_dataloader)
+ if self.split_batches:
+ return whole_length
+ elif self._drop_last:
+ return whole_length // self.state.num_processes
+ else:
+ return math.ceil(whole_length / self.state.num_processes)
+
+ def __reduce__(self):
+ args = super().__reduce__()
+ return (DataLoaderDispatcher, *args[1:])
+
+ @property
+ def total_batch_size(self):
+ return self.dataset.batch_size if self.split_batches else self.dataset.batch_size * self.dataset.num_processes
+
+ @property
+ def total_dataset_length(self):
+ return len(self.dataset)
+
+ def get_sampler(self):
+ return get_sampler(self)
+
+ def set_sampler(self, sampler):
+ sampler_is_batch_sampler = isinstance(self.sampler, BatchSampler)
+ if sampler_is_batch_sampler:
+ self.sampler.sampler = sampler
+ else:
+ self.batch_sampler.sampler = sampler
+ if hasattr(self.batch_sampler, 'batch_sampler'):
+ self.batch_sampler.batch_sampler.sampler = sampler
+
+def get_sampler(dataloader):
+ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
+ if sampler_is_batch_sampler:
+ sampler = getattr(dataloader.sampler, 'sampler', None)
+ else:
+ sampler = getattr(dataloader.batch_sampler, 'sampler', None)
+ return sampler
+
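+# Rebuilds a `DataLoader` for distributed use: shards the dataset or batch sampler across processes,
+# optionally moves batches onto the right device, and returns a DataLoaderShard, DataLoaderDispatcher,
+# or (on XLA) an MpDeviceLoaderWrapper.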
+def prepare_data_loader(dataloader: DataLoader, device: Optional[torch.device]=None, num_processes: Optional[int]=None, process_index: Optional[int]=None, split_batches: bool=False, put_on_device: bool=False, rng_types: Optional[List[Union[str, RNGType]]]=None, dispatch_batches: Optional[bool]=None, even_batches: bool=True, slice_fn_for_dispatch: Optional[Callable]=None, use_seedable_sampler: bool=False, non_blocking: bool=False, use_stateful_dataloader: bool=False) -> DataLoader:
+ if dispatch_batches is None:
+ if not put_on_device:
+ dispatch_batches = False
+ else:
+ dispatch_batches = isinstance(dataloader.dataset, IterableDataset)
+ if dispatch_batches and (not put_on_device):
+ raise ValueError('Using `dispatch_batches=True` requires `put_on_device=True`.')
+ state = PartialState()
+ if num_processes is None:
+ num_processes = state.num_processes
+ if process_index is None:
+ process_index = state.process_index
+ if split_batches:
+ if dataloader.batch_size is not None:
+ batch_size_for_check = dataloader.batch_size
+ elif hasattr(dataloader.batch_sampler, 'batch_size'):
+ batch_size_for_check = dataloader.batch_sampler.batch_size
+ else:
+ raise ValueError(f'In order to use `split_batches==True` you must have a `batch_size` attribute either in the passed `dataloader` or `dataloader.batch_sampler` objects, and it has to return a natural number. Your `dataloader.batch_size` is None and `dataloader.batch_sampler` (`{type(dataloader.batch_sampler)}`) does not have the `batch_size` attribute set.')
+ if batch_size_for_check > 1 and batch_size_for_check % num_processes != 0:
+ raise ValueError(f'To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) needs to be a round multiple of the number of processes ({num_processes}).')
+ new_dataset = dataloader.dataset
+ new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None
+ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
+ synchronized_generator = None
+ sampler = get_sampler(dataloader)
+ if isinstance(sampler, RandomSampler) and use_seedable_sampler:
+ sampler = SeedableRandomSampler(data_source=sampler.data_source, replacement=sampler.replacement, num_samples=sampler._num_samples, generator=getattr(sampler, 'generator', torch.Generator()))
+ if isinstance(dataloader.sampler, RandomSampler) and state.distributed_type == DistributedType.XLA:
+ generator = torch.Generator().manual_seed(42)
+ dataloader.generator = generator
+ dataloader.sampler.generator = generator
+ if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and (not dispatch_batches):
+ if isinstance(new_dataset, IterableDataset):
+ if getattr(dataloader.dataset, 'generator', None) is not None:
+ synchronized_generator = dataloader.dataset.generator
+ new_dataset = IterableDatasetShard(new_dataset, batch_size=dataloader.batch_size, drop_last=dataloader.drop_last, num_processes=num_processes, process_index=process_index, split_batches=split_batches)
+ else:
+ if not use_seedable_sampler and hasattr(sampler, 'generator'):
+ if sampler.generator is None:
+ sampler.generator = torch.Generator()
+ synchronized_generator = sampler.generator
+ batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
+ new_batch_sampler = BatchSamplerShard(batch_sampler, num_processes=num_processes, process_index=process_index, split_batches=split_batches, even_batches=even_batches)
+ ignore_kwargs = ['batch_size', 'shuffle', 'sampler', 'batch_sampler', 'drop_last']
+ if rng_types is not None and synchronized_generator is None and ('generator' in rng_types):
+ rng_types.remove('generator')
+ kwargs = {k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS if k not in ignore_kwargs}
+ if new_batch_sampler is None:
+ kwargs['drop_last'] = dataloader.drop_last
+ kwargs['batch_size'] = dataloader.batch_size // num_processes if split_batches and (not dispatch_batches) else dataloader.batch_size
+ if dispatch_batches:
+ kwargs.pop('generator')
+ dataloader = DataLoaderDispatcher(new_dataset, split_batches=split_batches, batch_sampler=new_batch_sampler, _drop_last=dataloader.drop_last, _non_blocking=non_blocking, slice_fn=slice_fn_for_dispatch, use_stateful_dataloader=use_stateful_dataloader, **kwargs)
+ elif sampler_is_batch_sampler:
+ dataloader = DataLoaderShard(new_dataset, device=device if put_on_device and state.distributed_type != DistributedType.XLA else None, sampler=new_batch_sampler, batch_size=dataloader.batch_size, rng_types=rng_types, _drop_last=dataloader.drop_last, _non_blocking=non_blocking, synchronized_generator=synchronized_generator, use_stateful_dataloader=use_stateful_dataloader, **kwargs)
+ else:
+ dataloader = DataLoaderShard(new_dataset, device=device if put_on_device and state.distributed_type != DistributedType.XLA else None, batch_sampler=new_batch_sampler, rng_types=rng_types, synchronized_generator=synchronized_generator, _drop_last=dataloader.drop_last, _non_blocking=non_blocking, use_stateful_dataloader=use_stateful_dataloader, **kwargs)
+ if isinstance(sampler, SeedableRandomSampler) and use_seedable_sampler:
+ dataloader.set_sampler(sampler)
+ if state.distributed_type == DistributedType.XLA:
+ return MpDeviceLoaderWrapper(dataloader, device)
+ return dataloader
+
+class SkipBatchSampler(BatchSampler):
+
+ def __init__(self, batch_sampler, skip_batches=0):
+ self.batch_sampler = batch_sampler
+ self.skip_batches = skip_batches
+
+ def __iter__(self):
+ for (index, samples) in enumerate(self.batch_sampler):
+ if index >= self.skip_batches:
+ yield samples
+
+ @property
+ def total_length(self):
+ return len(self.batch_sampler)
+
+ def __len__(self):
+ return len(self.batch_sampler) - self.skip_batches
+
+class SkipDataLoader(DataLoaderAdapter, DataLoaderStateMixin):
+
+ def __init__(self, dataset, skip_batches=0, use_stateful_dataloader=False, **kwargs):
+ super().__init__(dataset, use_stateful_dataloader=use_stateful_dataloader, **kwargs)
+ self.skip_batches = skip_batches
+ self.gradient_state = GradientState()
+
+ def __iter__(self):
+ self.begin()
+ for (index, batch) in enumerate(self.base_dataloader.__iter__()):
+ if index >= self.skip_batches:
+ self._update_state_dict()
+ yield batch
+ self.end()
+
+ def __len__(self):
+ return len(self.base_dataloader) - self.skip_batches
+
+ def __reduce__(self):
+ args = super().__reduce__()
+ return (SkipDataLoader, *args[1:])
+
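+# Builds a new dataloader that skips the first `num_batches` batches of the given one, e.g. when
+# resuming training mid-epoch. Illustrative usage: `resumed_dl = skip_first_batches(train_dataloader, 120)`.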
+def skip_first_batches(dataloader, num_batches=0):
+ state = PartialState()
+ if state.distributed_type == DistributedType.XLA:
+ device = dataloader.device
+ dataloader = dataloader.dataloader
+ dataset = dataloader.dataset
+ sampler_is_batch_sampler = False
+ if isinstance(dataset, IterableDataset):
+ new_batch_sampler = None
+ else:
+ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
+ batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
+ new_batch_sampler = SkipBatchSampler(batch_sampler, skip_batches=num_batches)
+ ignore_kwargs = ['batch_size', 'shuffle', 'sampler', 'batch_sampler', 'drop_last']
+ kwargs = {k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS if k not in ignore_kwargs}
+ if new_batch_sampler is None:
+ kwargs['drop_last'] = dataloader.drop_last
+ kwargs['batch_size'] = dataloader.batch_size
+ if isinstance(dataloader, DataLoaderDispatcher):
+ if new_batch_sampler is None:
+ kwargs['skip_batches'] = num_batches
+ dataloader = DataLoaderDispatcher(dataset, split_batches=dataloader.split_batches, batch_sampler=new_batch_sampler, _drop_last=dataloader._drop_last, **kwargs)
+ elif isinstance(dataloader, DataLoaderShard):
+ if new_batch_sampler is None:
+ kwargs['skip_batches'] = num_batches
+ elif sampler_is_batch_sampler:
+ kwargs['sampler'] = new_batch_sampler
+ kwargs['batch_size'] = dataloader.batch_size
+ else:
+ kwargs['batch_sampler'] = new_batch_sampler
+ dataloader = DataLoaderShard(dataset, device=dataloader.device, rng_types=dataloader.rng_types, synchronized_generator=dataloader.synchronized_generator, **kwargs)
+ elif new_batch_sampler is None:
+ dataloader = SkipDataLoader(dataset, skip_batches=num_batches, **kwargs)
+ else:
+ dataloader = DataLoader(dataset, batch_sampler=new_batch_sampler, **kwargs)
+ if state.distributed_type == DistributedType.XLA:
+ dataloader = MpDeviceLoaderWrapper(dataloader, device)
+ return dataloader
+
+# File: accelerate-main/src/accelerate/hooks.py
+import functools
+from typing import Dict, List, Mapping, Optional, Union
+import torch
+import torch.nn as nn
+from .state import PartialState
+from .utils import PrefixedDataset, find_device, named_module_tensors, send_to_device, set_module_tensor_to_device
+from .utils.memory import clear_device_cache
+from .utils.modeling import get_non_persistent_buffers
+from .utils.other import recursive_getattr
+_accelerate_added_attributes = ['to', 'cuda', 'npu', 'xpu', 'mlu', 'musa']
+
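+# Base class for hooks attached with `add_hook_to_module`: subclasses can customize init/detach and
+# pre-/post-forward behaviour, and setting `no_grad = True` runs the wrapped forward under `torch.no_grad()`.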
+class ModelHook:
+ no_grad = False
+
+ def init_hook(self, module):
+ return module
+
+ def pre_forward(self, module, *args, **kwargs):
+ return (args, kwargs)
+
+ def post_forward(self, module, output):
+ return output
+
+ def detach_hook(self, module):
+ return module
+
+class SequentialHook(ModelHook):
+
+ def __init__(self, *hooks):
+ self.hooks = hooks
+
+ def init_hook(self, module):
+ for hook in self.hooks:
+ module = hook.init_hook(module)
+ return module
+
+ def pre_forward(self, module, *args, **kwargs):
+ for hook in self.hooks:
+ (args, kwargs) = hook.pre_forward(module, *args, **kwargs)
+ return (args, kwargs)
+
+ def post_forward(self, module, output):
+ for hook in self.hooks:
+ output = hook.post_forward(module, output)
+ return output
+
+ def detach_hook(self, module):
+ for hook in self.hooks:
+ module = hook.detach_hook(module)
+ return module
+
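+# Attaches a ModelHook to a module by swapping in a wrapped forward; with `append=True` the new hook is
+# chained after any existing one via SequentialHook.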
+def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool=False):
+ if append and getattr(module, '_hf_hook', None) is not None:
+ old_hook = module._hf_hook
+ remove_hook_from_module(module)
+ hook = SequentialHook(old_hook, hook)
+ if hasattr(module, '_hf_hook') and hasattr(module, '_old_forward'):
+ old_forward = module._old_forward
+ else:
+ old_forward = module.forward
+ module._old_forward = old_forward
+ module = hook.init_hook(module)
+ module._hf_hook = hook
+
+ def new_forward(module, *args, **kwargs):
+ (args, kwargs) = module._hf_hook.pre_forward(module, *args, **kwargs)
+ if module._hf_hook.no_grad:
+ with torch.no_grad():
+ output = module._old_forward(*args, **kwargs)
+ else:
+ output = module._old_forward(*args, **kwargs)
+ return module._hf_hook.post_forward(module, output)
+ if 'GraphModuleImpl' in str(type(module)):
+ module.__class__.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward)
+ else:
+ module.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward)
+ return module
+
+def remove_hook_from_module(module: nn.Module, recurse=False):
+ if hasattr(module, '_hf_hook'):
+ module._hf_hook.detach_hook(module)
+ delattr(module, '_hf_hook')
+ if hasattr(module, '_old_forward'):
+ if 'GraphModuleImpl' in str(type(module)):
+ module.__class__.forward = module._old_forward
+ else:
+ module.forward = module._old_forward
+ delattr(module, '_old_forward')
+ for attr in _accelerate_added_attributes:
+ module.__dict__.pop(attr, None)
+ if recurse:
+ for child in module.children():
+ remove_hook_from_module(child, recurse)
+ return module
+
+class AlignDevicesHook(ModelHook):
+
+ def __init__(self, execution_device: Optional[Union[int, str, torch.device]]=None, offload: bool=False, io_same_device: bool=False, weights_map: Optional[Mapping]=None, offload_buffers: bool=False, place_submodules: bool=False, skip_keys: Optional[Union[str, List[str]]]=None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]]=None):
+ self.execution_device = execution_device
+ self.offload = offload
+ self.io_same_device = io_same_device
+ self.weights_map = weights_map
+ self.offload_buffers = offload_buffers
+ self.place_submodules = place_submodules
+ self.skip_keys = skip_keys
+ self.input_device = None
+ self.param_original_devices = {}
+ self.buffer_original_devices = {}
+ self.tied_params_names = set()
+ self.tied_params_map = tied_params_map
+
+ def __repr__(self):
+ return f'AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})'
+
+ def init_hook(self, module):
+ if self.execution_device == 'meta' or self.execution_device == torch.device('meta'):
+ self.tied_params_map = None
+ if not self.offload and self.execution_device is not None:
+ for (name, _) in named_module_tensors(module, recurse=self.place_submodules):
+ set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map)
+ elif self.offload:
+ self.original_devices = {name: param.device for (name, param) in named_module_tensors(module, recurse=self.place_submodules)}
+ if self.weights_map is None:
+ self.weights_map = {name: param.to('cpu') for (name, param) in named_module_tensors(module, include_buffers=self.offload_buffers, recurse=self.place_submodules)}
+ for (name, _) in named_module_tensors(module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True):
+ if self.tied_params_map is not None and recursive_getattr(module, name).data_ptr() in self.tied_params_map:
+ self.tied_params_names.add(name)
+ set_module_tensor_to_device(module, name, 'meta')
+ if not self.offload_buffers and self.execution_device is not None:
+ for (name, _) in module.named_buffers(recurse=self.place_submodules):
+ set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map)
+ elif self.offload_buffers and self.execution_device is not None:
+ for name in get_non_persistent_buffers(module, recurse=self.place_submodules):
+ set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map)
+ return module
+
+ def pre_forward(self, module, *args, **kwargs):
+ if self.io_same_device:
+ self.input_device = find_device([args, kwargs])
+ if self.offload:
+ self.tied_pointers_to_remove = set()
+ for (name, _) in named_module_tensors(module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True):
+ fp16_statistics = None
+ value = self.weights_map[name]
+ if 'weight' in name and name.replace('weight', 'SCB') in self.weights_map.keys():
+ if value.dtype == torch.int8:
+ fp16_statistics = self.weights_map[name.replace('weight', 'SCB')]
+ if name in self.tied_params_names and value.data_ptr() not in self.tied_params_map:
+ self.tied_params_map[value.data_ptr()] = {}
+ if value is not None and self.tied_params_map is not None and (value.data_ptr() in self.tied_params_map) and (self.execution_device not in self.tied_params_map[value.data_ptr()]):
+ self.tied_pointers_to_remove.add((value.data_ptr(), self.execution_device))
+ set_module_tensor_to_device(module, name, self.execution_device, value=value, fp16_statistics=fp16_statistics, tied_params_map=self.tied_params_map)
+ return (send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device, skip_keys=self.skip_keys))
+
+ def post_forward(self, module, output):
+ if self.offload:
+ for (name, _) in named_module_tensors(module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True):
+ set_module_tensor_to_device(module, name, 'meta')
+ if type(module).__name__ == 'Linear8bitLt':
+ module.state.SCB = None
+ module.state.CxB = None
+ for (value_pointer, device) in self.tied_pointers_to_remove:
+ del self.tied_params_map[value_pointer][device]
+ self.tied_pointers_to_remove = set()
+ if self.io_same_device and self.input_device is not None:
+ output = send_to_device(output, self.input_device, skip_keys=self.skip_keys)
+ return output
+
+ def detach_hook(self, module):
+ if self.offload:
+ for (name, device) in self.original_devices.items():
+ if device != torch.device('meta'):
+ set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None))
+ return module
+
+def attach_execution_device_hook(module: torch.nn.Module, execution_device: Union[int, str, torch.device], skip_keys: Optional[Union[str, List[str]]]=None, preload_module_classes: Optional[List[str]]=None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]]=None):
+ if not hasattr(module, '_hf_hook') and len(module.state_dict()) > 0:
+ add_hook_to_module(module, AlignDevicesHook(execution_device, skip_keys=skip_keys, tied_params_map=tied_params_map))
+ if preload_module_classes is not None and module.__class__.__name__ in preload_module_classes:
+ return
+ for child in module.children():
+ attach_execution_device_hook(child, execution_device, skip_keys=skip_keys, tied_params_map=tied_params_map)
+
+def attach_align_device_hook(module: torch.nn.Module, execution_device: Optional[torch.device]=None, offload: bool=False, weights_map: Optional[Mapping]=None, offload_buffers: bool=False, module_name: str='', skip_keys: Optional[Union[str, List[str]]]=None, preload_module_classes: Optional[List[str]]=None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]]=None):
+ directs = named_module_tensors(module)
+ full_offload = offload and preload_module_classes is not None and (module.__class__.__name__ in preload_module_classes)
+ if len(list(directs)) > 0 or full_offload:
+ if weights_map is not None:
+ prefix = f'{module_name}.' if len(module_name) > 0 else ''
+ prefixed_weights_map = PrefixedDataset(weights_map, prefix)
+ else:
+ prefixed_weights_map = None
+ hook = AlignDevicesHook(execution_device=execution_device, offload=offload, weights_map=prefixed_weights_map, offload_buffers=offload_buffers, place_submodules=full_offload, skip_keys=skip_keys, tied_params_map=tied_params_map)
+ add_hook_to_module(module, hook, append=True)
+ if full_offload:
+ return
+ for (child_name, child) in module.named_children():
+ child_name = f'{module_name}.{child_name}' if len(module_name) > 0 else child_name
+ attach_align_device_hook(child, execution_device=execution_device, offload=offload, weights_map=weights_map, offload_buffers=offload_buffers, module_name=child_name, preload_module_classes=preload_module_classes, skip_keys=skip_keys, tied_params_map=tied_params_map)
+
+def remove_hook_from_submodules(module: nn.Module):
+ remove_hook_from_module(module)
+ for child in module.children():
+ remove_hook_from_submodules(child)
+
+def attach_align_device_hook_on_blocks(module: nn.Module, execution_device: Optional[Union[torch.device, Dict[str, torch.device]]]=None, offload: Union[bool, Dict[str, bool]]=False, weights_map: Mapping=None, offload_buffers: bool=False, module_name: str='', skip_keys: Optional[Union[str, List[str]]]=None, preload_module_classes: Optional[List[str]]=None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]]=None):
+ if not isinstance(execution_device, Mapping) and (not isinstance(offload, dict)):
+ if not offload:
+ hook = AlignDevicesHook(execution_device=execution_device, io_same_device=True, skip_keys=skip_keys, place_submodules=True, tied_params_map=tied_params_map)
+ add_hook_to_module(module, hook)
+ else:
+ attach_align_device_hook(module, execution_device=execution_device, offload=True, weights_map=weights_map, offload_buffers=offload_buffers, module_name=module_name, skip_keys=skip_keys, tied_params_map=tied_params_map)
+ return
+ if not isinstance(execution_device, Mapping):
+ execution_device = {key: execution_device for key in offload.keys()}
+ if not isinstance(offload, Mapping):
+ offload = {key: offload for key in execution_device.keys()}
+ if module_name in execution_device and module_name in offload and (not offload[module_name]):
+ hook = AlignDevicesHook(execution_device=execution_device[module_name], offload_buffers=offload_buffers, io_same_device=module_name == '', place_submodules=True, skip_keys=skip_keys, tied_params_map=tied_params_map)
+ add_hook_to_module(module, hook)
+ attach_execution_device_hook(module, execution_device[module_name], skip_keys=skip_keys, tied_params_map=tied_params_map)
+ elif module_name in execution_device and module_name in offload:
+ attach_align_device_hook(module, execution_device=execution_device[module_name], offload=True, weights_map=weights_map, offload_buffers=offload_buffers, module_name=module_name, skip_keys=skip_keys, preload_module_classes=preload_module_classes, tied_params_map=tied_params_map)
+ if not hasattr(module, '_hf_hook'):
+ hook = AlignDevicesHook(execution_device=execution_device[module_name], io_same_device=module_name == '', skip_keys=skip_keys, tied_params_map=tied_params_map)
+ add_hook_to_module(module, hook)
+ attach_execution_device_hook(module, execution_device[module_name], preload_module_classes=preload_module_classes, skip_keys=skip_keys, tied_params_map=tied_params_map)
+ elif module_name == '':
+ hook = AlignDevicesHook(execution_device=execution_device.get(''), io_same_device=True, skip_keys=skip_keys, tied_params_map=tied_params_map)
+ add_hook_to_module(module, hook)
+ for (child_name, child) in module.named_children():
+ child_name = f'{module_name}.{child_name}' if len(module_name) > 0 else child_name
+ attach_align_device_hook_on_blocks(child, execution_device=execution_device, offload=offload, weights_map=weights_map, offload_buffers=offload_buffers, module_name=child_name, preload_module_classes=preload_module_classes, skip_keys=skip_keys, tied_params_map=tied_params_map)
+
+class CpuOffload(ModelHook):
+
+ def __init__(self, execution_device: Optional[Union[str, int, torch.device]]=None, prev_module_hook: Optional['UserCpuOffloadHook']=None):
+ self.prev_module_hook = prev_module_hook
+ self.execution_device = execution_device if execution_device is not None else PartialState().default_device
+
+ def init_hook(self, module):
+ return module.to('cpu')
+
+ def pre_forward(self, module, *args, **kwargs):
+ if self.prev_module_hook is not None:
+ self.prev_module_hook.offload()
+ clear_device_cache()
+ module.to(self.execution_device)
+ return (send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device))
+
+class UserCpuOffloadHook:
+
+ def __init__(self, model, hook):
+ self.model = model
+ self.hook = hook
+
+ def offload(self):
+ self.hook.init_hook(self.model)
+
+ def remove(self):
+ remove_hook_from_module(self.model)
+
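+# A minimal usage sketch (not part of the library source above): attach a small
+# debugging `ModelHook` to a module, run a forward pass, then detach it. It only
+# assumes `torch` and the public helpers from `accelerate.hooks` defined above.
+if __name__ == '__main__':
+    import torch
+    import torch.nn as nn
+    from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module
+
+    class PrintDeviceHook(ModelHook):
+        def pre_forward(self, module, *args, **kwargs):
+            # Runs right before every forward call of the hooked module.
+            print(f'forward on {module.__class__.__name__}, input device: {args[0].device}')
+            return args, kwargs
+
+    layer = nn.Linear(4, 4)
+    add_hook_to_module(layer, PrintDeviceHook())
+    layer(torch.randn(2, 4))           # prints the device of the input tensor
+    remove_hook_from_module(layer)     # restores the original forward
+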
+# File: accelerate-main/src/accelerate/inference.py
+import math
+from types import MethodType
+from typing import Any, Dict, List, Optional, Tuple, Union
+from .state import PartialState
+from .utils import calculate_maximum_sizes, convert_bytes, copy_tensor_to_devices, ignorant_find_batch_size, infer_auto_device_map, is_pippy_available, pad_input_tensors, send_to_device
+
+def generate_device_map(model, num_processes: int=1, no_split_module_classes=None, max_memory: dict=None):
+ if num_processes == 1:
+ return infer_auto_device_map(model, no_split_module_classes=no_split_module_classes, clean_result=False)
+ if max_memory is None:
+ (model_size, shared) = calculate_maximum_sizes(model)
+ memory = (model_size + shared[0]) / num_processes
+ memory = convert_bytes(memory)
+ (value, ending) = memory.split(' ')
+ memory = math.ceil(float(value)) * 1.1
+ memory = f'{memory} {ending}'
+ max_memory = {i: memory for i in range(num_processes)}
+ device_map = infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, clean_result=False)
+ return device_map
+
+def find_pippy_batch_size(args, kwargs):
+ found_batch_size = None
+ if args is not None:
+ for arg in args:
+ found_batch_size = ignorant_find_batch_size(arg)
+ if found_batch_size is not None:
+ break
+ if kwargs is not None and found_batch_size is None:
+ for kwarg in kwargs.values():
+ found_batch_size = ignorant_find_batch_size(kwarg)
+ if found_batch_size is not None:
+ break
+ return found_batch_size
+
+def build_pipeline(model, split_points, args, kwargs, num_chunks):
+ from torch.distributed.pipelining import ScheduleGPipe, SplitPoint, pipeline
+ state = PartialState()
+ split_spec = {split_point: SplitPoint.BEGINNING for split_point in split_points}
+ pipe = pipeline(model, mb_args=args, mb_kwargs=kwargs, split_spec=split_spec)
+ stage = pipe.build_stage(state.local_process_index, device=state.device)
+ schedule = ScheduleGPipe(stage, num_chunks)
+ return schedule
+
+def pippy_forward(forward, num_chunks, gather_output, *args, **kwargs):
+ state = PartialState()
+ output = None
+ if state.num_processes == 1:
+ output = forward(*args, **kwargs)
+ elif state.is_local_main_process:
+ found_batch_size = find_pippy_batch_size(args, kwargs)
+ if found_batch_size is None:
+ raise ValueError('Could not find batch size from args or kwargs')
+ elif found_batch_size != num_chunks:
+ args = pad_input_tensors(args, found_batch_size, num_chunks)
+ kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks)
+ forward(*args, **kwargs)
+ elif state.is_last_process:
+ output = forward()
+ else:
+ forward()
+ if gather_output:
+ output = copy_tensor_to_devices(output)
+ return output
+
+def prepare_pippy(model, split_points: Optional[Union[str, List[str]]]='auto', no_split_module_classes: Optional[List[str]]=None, example_args: Optional[Tuple[Any]]=(), example_kwargs: Optional[Dict[str, Any]]=None, num_chunks: Optional[int]=None, gather_output: Optional[bool]=False):
+ if not is_pippy_available():
+ raise ImportError('Using `torch.distributed.pipelining` requires PyTorch 2.4.0 or later.')
+ state = PartialState()
+ example_args = send_to_device(example_args, 'cpu')
+ example_kwargs = send_to_device(example_kwargs, 'cpu')
+ if num_chunks is None:
+ num_chunks = state.num_processes
+ if split_points == 'auto':
+ device_map = generate_device_map(model, num_chunks, no_split_module_classes=no_split_module_classes)
+ split_points = []
+ for i in range(1, num_chunks):
+ split_points.append(next((k for (k, v) in device_map.items() if v == i)))
+ model.hf_split_points = split_points
+ stage = build_pipeline(model, split_points, example_args, example_kwargs, num_chunks)
+ model._original_forward = model.forward
+ model._original_call = model.__call__
+ model.pippy_stage = stage
+ model.hf_split_points = split_points
+
+ def forward(*args, **kwargs):
+ return pippy_forward(stage.step, num_chunks, gather_output, *args, **kwargs)
+ model_forward = MethodType(forward, model)
+ forward.__wrapped__ = model_forward
+ model.forward = forward
+ return model
+
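+# A hedged usage sketch (assumptions: PyTorch >= 2.4 with `torch.distributed.pipelining`,
+# at least two devices, and launching via `accelerate launch --num_processes 2 demo.py`).
+# Each rank executes its own pipeline stage; unless `gather_output=True` is passed, the
+# output is only materialized on the last rank.
+if __name__ == '__main__':
+    import torch
+    import torch.nn as nn
+    from accelerate.inference import prepare_pippy
+
+    model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
+    example_input = torch.randn(2, 16)
+    # Split points are inferred automatically from the per-rank device map.
+    model = prepare_pippy(model, example_args=(example_input,))
+    with torch.no_grad():
+        output = model(example_input)  # populated on the last rank only
+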
+# File: accelerate-main/src/accelerate/launchers.py
+import os
+import sys
+import tempfile
+import torch
+from .state import AcceleratorState, PartialState
+from .utils import PrecisionType, PrepareForLaunch, are_libraries_initialized, check_cuda_p2p_ib_support, get_gpu_info, is_mps_available, is_torch_version, patch_environment
+from .utils.constants import ELASTIC_LOG_LINE_PREFIX_TEMPLATE_PYTORCH_VERSION
+
+def test_launch():
+ _ = PartialState()
+
+def notebook_launcher(function, args=(), num_processes=None, mixed_precision='no', use_port='29500', master_addr='127.0.0.1', node_rank=0, num_nodes=1, rdzv_backend='static', rdzv_endpoint='', rdzv_conf=None, rdzv_id='none', max_restarts=0, monitor_interval=0.1, log_line_prefix_template=None):
+ in_colab = False
+ in_kaggle = False
+ if any((key.startswith('KAGGLE') for key in os.environ.keys())):
+ in_kaggle = True
+ elif 'IPython' in sys.modules:
+ in_colab = 'google.colab' in str(sys.modules['IPython'].get_ipython())
+ try:
+ mixed_precision = PrecisionType(mixed_precision.lower())
+ except ValueError:
+ raise ValueError(f'Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.')
+ if (in_colab or in_kaggle) and os.environ.get('TPU_NAME', None) is not None:
+ import torch_xla.distributed.xla_multiprocessing as xmp
+ if len(AcceleratorState._shared_state) > 0:
+ raise ValueError('To train on TPU in Colab or a Kaggle Kernel, the `Accelerator` should only be initialized inside your training function. Restart your notebook and make sure no cell initializes an `Accelerator`.')
+ if num_processes is None:
+ num_processes = 8
+ launcher = PrepareForLaunch(function, distributed_type='TPU')
+ print(f'Launching training on {num_processes} TPU cores.')
+ xmp.spawn(launcher, args=args, nprocs=num_processes, start_method='fork')
+ elif in_colab and get_gpu_info()[1] < 2:
+ if torch.cuda.is_available():
+ print('Launching training on one GPU.')
+ else:
+ print('Launching training on one CPU.')
+ function(*args)
+ else:
+ if num_processes is None:
+ raise ValueError('You must specify the number of GPUs you would like to use; add `num_processes=...` to your call.')
+ if node_rank >= num_nodes:
+ raise ValueError('The node_rank must be less than the number of nodes.')
+ if num_processes > 1:
+ from torch.distributed.launcher.api import LaunchConfig, elastic_launch
+ from torch.multiprocessing import start_processes
+ from torch.multiprocessing.spawn import ProcessRaisedException
+ if len(AcceleratorState._shared_state) > 0:
+ raise ValueError('To launch multi-GPU training from your notebook, the `Accelerator` should only be initialized inside your training function. Restart your notebook and make sure no cell initializes an `Accelerator`.')
+ problematic_imports = are_libraries_initialized('bitsandbytes')
+ if len(problematic_imports) > 0:
+ err = 'Could not start distributed process. Libraries known to initialize CUDA upon import have already been imported. Please keep these imports inside your training function:'
+ for lib_name in problematic_imports:
+ err += f'\n\t* `{lib_name}`'
+ raise RuntimeError(err)
+ patched_env = dict(nproc=num_processes, node_rank=node_rank, world_size=num_nodes * num_processes, master_addr=master_addr, master_port=use_port, mixed_precision=mixed_precision)
+ if not check_cuda_p2p_ib_support():
+ patched_env['nccl_p2p_disable'] = '1'
+ patched_env['nccl_ib_disable'] = '1'
+ with patch_environment(**patched_env):
+ if os.environ.get('ACCELERATE_DEBUG_MODE', 'false').lower() == 'true':
+ launcher = PrepareForLaunch(test_launch, distributed_type='MULTI_GPU')
+ try:
+ start_processes(launcher, args=(), nprocs=num_processes, start_method='fork')
+ except ProcessRaisedException as e:
+ err = 'An issue was found when verifying a stable environment for the notebook launcher.'
+ if 'Cannot re-initialize CUDA in forked subprocess' in e.args[0]:
+ raise RuntimeError(f'{err} This likely stems from an outside import causing issues once `notebook_launcher()` is called. Please review your imports and test them when running `notebook_launcher()` to identify which one is problematic and causing CUDA to be initialized.') from e
+ else:
+ raise RuntimeError(f'{err} The following error was raised: {e}') from e
+ launcher = PrepareForLaunch(function, distributed_type='MULTI_GPU')
+ print(f'Launching training on {num_processes} GPUs.')
+ try:
+ if rdzv_conf is None:
+ rdzv_conf = {}
+ if rdzv_backend == 'static':
+ rdzv_conf['rank'] = node_rank
+ if not rdzv_endpoint:
+ rdzv_endpoint = f'{master_addr}:{use_port}'
+ launch_config_kwargs = dict(min_nodes=num_nodes, max_nodes=num_nodes, nproc_per_node=num_processes, run_id=rdzv_id, rdzv_endpoint=rdzv_endpoint, rdzv_backend=rdzv_backend, rdzv_configs=rdzv_conf, max_restarts=max_restarts, monitor_interval=monitor_interval, start_method='fork')
+ if is_torch_version('>=', ELASTIC_LOG_LINE_PREFIX_TEMPLATE_PYTORCH_VERSION):
+ launch_config_kwargs['log_line_prefix_template'] = log_line_prefix_template
+ elastic_launch(config=LaunchConfig(**launch_config_kwargs), entrypoint=function)(*args)
+ except ProcessRaisedException as e:
+ if 'Cannot re-initialize CUDA in forked subprocess' in e.args[0]:
+ raise RuntimeError('CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. This likely stems from an outside import causing issues once the `notebook_launcher()` is called. Please review your imports and test them when running the `notebook_launcher()` to identify which one is problematic and causing CUDA to be initialized.') from e
+ else:
+ raise RuntimeError(f'An issue was found when launching the training: {e}') from e
+ else:
+ if is_mps_available():
+ os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
+ print('Launching training on MPS.')
+ elif torch.cuda.is_available():
+ print('Launching training on one GPU.')
+ else:
+ print('Launching training on CPU.')
+ function(*args)
+
+def debug_launcher(function, args=(), num_processes=2):
+ from torch.multiprocessing import start_processes
+ with tempfile.NamedTemporaryFile() as tmp_file:
+ with patch_environment(world_size=num_processes, master_addr='127.0.0.1', master_port='29500', accelerate_mixed_precision='no', accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu='yes'):
+ launcher = PrepareForLaunch(function, debug=True)
+ start_processes(launcher, args=args, nprocs=num_processes, start_method='fork')
+
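+# A minimal usage sketch for notebooks (assumption: the training function creates its
+# own `Accelerator` and takes only picklable arguments). With `num_processes=1` the
+# function simply runs in the current process; with more processes one worker is
+# forked per GPU.
+if __name__ == '__main__':
+    from accelerate import Accelerator, notebook_launcher
+
+    def training_function():
+        accelerator = Accelerator()
+        accelerator.print(f'Hello from process {accelerator.process_index}')
+
+    notebook_launcher(training_function, args=(), num_processes=1)
+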
+# File: accelerate-main/src/accelerate/local_sgd.py
+import torch
+from accelerate import Accelerator, DistributedType
+
+class LocalSGD:
+
+ def __enter__(self):
+ if self.enabled:
+ self.model_sync_obj = self.model.no_sync()
+ self.model_sync_obj.__enter__()
+ return self
+
+ def __exit__(self, type, value, tb):
+ if self.enabled:
+ self._sync_and_avg_model_params()
+ self.model_sync_obj.__exit__(type, value, tb)
+
+ def __init__(self, accelerator: Accelerator, model: torch.nn.Module, local_sgd_steps: int, enabled: bool=True):
+ if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU]:
+ raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)')
+ self.enabled = enabled and accelerator.distributed_type != DistributedType.NO
+ self.num_steps = 0
+ if self.enabled:
+ self.accelerator = accelerator
+ self.model = model
+ self.local_sgd_steps = local_sgd_steps
+
+ def step(self):
+ self.num_steps += 1
+ if not self.enabled:
+ return
+ if self.num_steps % self.local_sgd_steps == 0:
+ self._sync_and_avg_model_params()
+
+ def _sync_and_avg_model_params(self):
+ self.accelerator.wait_for_everyone()
+ with self.accelerator.autocast():
+ for param in self.model.parameters():
+ param.data = self.accelerator.reduce(param.data, reduction='mean')
+
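+# A minimal usage sketch (assumption: a standard training loop already prepared with
+# `Accelerator`). `LocalSGD` only averages parameters every `local_sgd_steps` steps in
+# a multi-process run; on a single process it is a no-op, so this also runs locally.
+if __name__ == '__main__':
+    import torch
+    from accelerate import Accelerator
+    from accelerate.local_sgd import LocalSGD
+
+    accelerator = Accelerator()
+    model = torch.nn.Linear(4, 1)
+    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
+    model, optimizer = accelerator.prepare(model, optimizer)
+    batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(4)]
+    with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=2, enabled=True) as local_sgd:
+        for x, y in batches:
+            x, y = x.to(accelerator.device), y.to(accelerator.device)
+            optimizer.zero_grad()
+            loss = torch.nn.functional.mse_loss(model(x), y)
+            accelerator.backward(loss)
+            optimizer.step()
+            local_sgd.step()  # averages parameters across workers every 2 steps
+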
+# File: accelerate-main/src/accelerate/logging.py
+import functools
+import logging
+import os
+from .state import PartialState
+
+class MultiProcessAdapter(logging.LoggerAdapter):
+
+ @staticmethod
+ def _should_log(main_process_only):
+ state = PartialState()
+ return not main_process_only or (main_process_only and state.is_main_process)
+
+ def log(self, level, msg, *args, **kwargs):
+ if PartialState._shared_state == {}:
+ raise RuntimeError('You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.')
+ main_process_only = kwargs.pop('main_process_only', True)
+ in_order = kwargs.pop('in_order', False)
+ kwargs.setdefault('stacklevel', 2)
+ if self.isEnabledFor(level):
+ if self._should_log(main_process_only):
+ (msg, kwargs) = self.process(msg, kwargs)
+ self.logger.log(level, msg, *args, **kwargs)
+ elif in_order:
+ state = PartialState()
+ for i in range(state.num_processes):
+ if i == state.process_index:
+ (msg, kwargs) = self.process(msg, kwargs)
+ self.logger.log(level, msg, *args, **kwargs)
+ state.wait_for_everyone()
+
+ @functools.lru_cache(None)
+ def warning_once(self, *args, **kwargs):
+ self.warning(*args, **kwargs)
+
+def get_logger(name: str, log_level: str=None):
+ if log_level is None:
+ log_level = os.environ.get('ACCELERATE_LOG_LEVEL', None)
+ logger = logging.getLogger(name)
+ if log_level is not None:
+ logger.setLevel(log_level.upper())
+ logger.root.setLevel(log_level.upper())
+ return MultiProcessAdapter(logger, {})
+
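+# A minimal usage sketch: `get_logger` returns a process-aware logger that only emits
+# on the main process by default; pass `main_process_only=False` to log from every
+# rank. The accelerate state must be initialized first (here via `PartialState()`).
+if __name__ == '__main__':
+    import logging
+    from accelerate import PartialState
+    from accelerate.logging import get_logger
+
+    logging.basicConfig(level=logging.INFO)
+    PartialState()  # initialize the (possibly single-process) state
+    logger = get_logger(__name__, log_level='INFO')
+    logger.info('logged on the main process only')
+    logger.info('logged on every process', main_process_only=False)
+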
+# File: accelerate-main/src/accelerate/optimizer.py
+import inspect
+import torch
+from .state import AcceleratorState, GradientState
+from .utils import DistributedType, honor_type, is_lomo_available, is_torch_xla_available
+if is_torch_xla_available():
+ import torch_xla.core.xla_model as xm
+
+def move_to_device(state, device):
+ if isinstance(state, (list, tuple)):
+ return honor_type(state, (move_to_device(t, device) for t in state))
+ elif isinstance(state, dict):
+ return type(state)({k: move_to_device(v, device) for (k, v) in state.items()})
+ elif isinstance(state, torch.Tensor):
+ return state.to(device)
+ return state
+
+class AcceleratedOptimizer(torch.optim.Optimizer):
+
+ def __init__(self, optimizer, device_placement=True, scaler=None):
+ self.optimizer = optimizer
+ self.scaler = scaler
+ self.accelerator_state = AcceleratorState()
+ self.gradient_state = GradientState()
+ self.device_placement = device_placement
+ self._is_overflow = False
+ if self.scaler is not None:
+ self._accelerate_step_called = False
+ self._optimizer_original_step_method = self.optimizer.step
+ self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
+ if device_placement:
+ state_dict = self.optimizer.state_dict()
+ if self.accelerator_state.distributed_type == DistributedType.XLA:
+ xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
+ else:
+ state_dict = move_to_device(state_dict, self.accelerator_state.device)
+ self.optimizer.load_state_dict(state_dict)
+
+ @property
+ def state(self):
+ return self.optimizer.state
+
+ @state.setter
+ def state(self, state):
+ self.optimizer.state = state
+
+ @property
+ def param_groups(self):
+ return self.optimizer.param_groups
+
+ @param_groups.setter
+ def param_groups(self, param_groups):
+ self.optimizer.param_groups = param_groups
+
+ @property
+ def defaults(self):
+ return self.optimizer.defaults
+
+ @defaults.setter
+ def defaults(self, defaults):
+ self.optimizer.defaults = defaults
+
+ def add_param_group(self, param_group):
+ self.optimizer.add_param_group(param_group)
+
+ def load_state_dict(self, state_dict):
+ if self.accelerator_state.distributed_type == DistributedType.XLA and self.device_placement:
+ xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
+ self.optimizer.load_state_dict(state_dict)
+
+ def state_dict(self):
+ return self.optimizer.state_dict()
+
+ def zero_grad(self, set_to_none=None):
+ if self.gradient_state.sync_gradients:
+ accept_arg = 'set_to_none' in inspect.signature(self.optimizer.zero_grad).parameters
+ if accept_arg:
+ if set_to_none is None:
+ set_to_none = True
+ self.optimizer.zero_grad(set_to_none=set_to_none)
+ else:
+ if set_to_none is not None:
+ raise ValueError('`set_to_none` for `Optimizer.zero_grad` is not supported by this optimizer.')
+ self.optimizer.zero_grad()
+
+ def train(self):
+ if hasattr(self.optimizer, 'train') and callable(self.optimizer.train):
+ self.optimizer.train()
+
+ def eval(self):
+ if hasattr(self.optimizer, 'eval') and callable(self.optimizer.eval):
+ self.optimizer.eval()
+
+ def step(self, closure=None):
+ if is_lomo_available():
+ from lomo_optim import AdaLomo, Lomo
+ if not self.gradient_state.is_xla_gradients_synced and self.accelerator_state.distributed_type == DistributedType.XLA:
+ gradients = xm._fetch_gradients(self.optimizer)
+ xm.all_reduce('sum', gradients, scale=1.0 / xm.xrt_world_size())
+ self.gradient_state.is_xla_gradients_synced = True
+ if is_lomo_available():
+ if isinstance(self.optimizer, (Lomo, AdaLomo)):
+ return
+ if self.gradient_state.sync_gradients:
+ if self.scaler is not None:
+ self.optimizer.step = self._optimizer_patched_step_method
+ self.scaler.step(self.optimizer, closure)
+ self.scaler.update()
+ if not self._accelerate_step_called:
+ self._is_overflow = True
+ else:
+ self._is_overflow = False
+ self.optimizer.step = self._optimizer_original_step_method
+ self._accelerate_step_called = False
+ else:
+ self.optimizer.step(closure)
+ if self.accelerator_state.distributed_type == DistributedType.XLA:
+ self.gradient_state.is_xla_gradients_synced = False
+
+ def _switch_parameters(self, parameters_map):
+ for param_group in self.optimizer.param_groups:
+ param_group['params'] = [parameters_map.get(p, p) for p in param_group['params']]
+
+ @property
+ def step_was_skipped(self):
+ return self._is_overflow
+
+ def __getstate__(self):
+ _ignored_keys = ['_accelerate_step_called', '_optimizer_original_step_method', '_optimizer_patched_step_method']
+ return {k: v for (k, v) in self.__dict__.items() if k not in _ignored_keys}
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ if self.scaler is not None:
+ self._accelerate_step_called = False
+ self._optimizer_original_step_method = self.optimizer.step
+ self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
+
+def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method):
+
+ def patched_step(*args, **kwargs):
+ accelerated_optimizer._accelerate_step_called = True
+ return method(*args, **kwargs)
+ return patched_step
+
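+# A minimal usage sketch (assumption: the wrapper is normally obtained through
+# `accelerator.prepare(...)` rather than constructed directly). After a step,
+# `step_was_skipped` reports whether the GradScaler skipped the update because of an
+# inf/nan overflow, which is useful for deciding whether to step the scheduler.
+if __name__ == '__main__':
+    import torch
+    from accelerate import Accelerator
+
+    accelerator = Accelerator()
+    model = torch.nn.Linear(4, 1).to(accelerator.device)
+    optimizer = accelerator.prepare(torch.optim.SGD(model.parameters(), lr=0.1))
+    loss = model(torch.randn(2, 4, device=accelerator.device)).sum()
+    accelerator.backward(loss)
+    optimizer.step()
+    print('step skipped due to overflow:', optimizer.step_was_skipped)
+    optimizer.zero_grad()
+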
+# File: accelerate-main/src/accelerate/scheduler.py
+import warnings
+from .state import AcceleratorState, GradientState
+warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
+
+class AcceleratedScheduler:
+
+ def __init__(self, scheduler, optimizers, step_with_optimizer: bool=True, split_batches: bool=False):
+ self.scheduler = scheduler
+ self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
+ self.split_batches = split_batches
+ self.step_with_optimizer = step_with_optimizer
+ self.gradient_state = GradientState()
+
+ def step(self, *args, **kwargs):
+ if not self.step_with_optimizer:
+ self.scheduler.step(*args, **kwargs)
+ return
+ if not self.gradient_state.sync_gradients:
+ if self.gradient_state.adjust_scheduler:
+ self.scheduler._step_count += 1
+ return
+ for opt in self.optimizers:
+ if opt.step_was_skipped:
+ return
+ if self.split_batches:
+ self.scheduler.step(*args, **kwargs)
+ else:
+ num_processes = AcceleratorState().num_processes
+ for _ in range(num_processes):
+ if hasattr(self.scheduler, 'total_steps'):
+ if self.scheduler._step_count <= self.scheduler.total_steps:
+ self.scheduler.step(*args, **kwargs)
+ else:
+ self.scheduler.step(*args, **kwargs)
+
+ def get_last_lr(self):
+ return self.scheduler.get_last_lr()
+
+ def state_dict(self):
+ return self.scheduler.state_dict()
+
+ def load_state_dict(self, state_dict):
+ self.scheduler.load_state_dict(state_dict)
+
+ def get_lr(self):
+ return self.scheduler.get_lr()
+
+ def print_lr(self, *args, **kwargs):
+ return self.scheduler.print_lr(*args, **kwargs)
+
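+# A minimal usage sketch: when a scheduler is prepared alongside its optimizer, the
+# wrapper steps it `num_processes` times per call (unless `split_batches=True`) and
+# skips the step entirely if the optimizer step was skipped due to a gradient overflow.
+if __name__ == '__main__':
+    import torch
+    from accelerate import Accelerator
+
+    accelerator = Accelerator()
+    model = torch.nn.Linear(2, 2)
+    optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
+    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.5)
+    model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
+    optimizer.step()
+    scheduler.step()
+    print('learning rate:', scheduler.get_last_lr())
+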
+# File: accelerate-main/src/accelerate/state.py
+from __future__ import annotations
+import logging
+import os
+import threading
+import warnings
+from contextlib import contextmanager
+from functools import partial
+from typing import Any, Callable, Optional
+import torch
+from .utils import DistributedType, DynamoBackend, GradientAccumulationPlugin, check_cuda_p2p_ib_support, check_fp8_capability, deepspeed_required, get_ccl_version, get_cpu_distributed_information, get_int_from_env, is_ccl_available, is_datasets_available, is_deepspeed_available, is_fp8_available, is_ipex_available, is_mlu_available, is_mps_available, is_musa_available, is_npu_available, is_torch_xla_available, is_xpu_available, parse_choice_from_env, parse_flag_from_env, set_numa_affinity
+from .utils.dataclasses import SageMakerDistributedType
+if is_torch_xla_available():
+ import torch_xla.core.xla_model as xm
+if is_mlu_available(check_device=False):
+ import torch_mlu
+if is_musa_available(check_device=False):
+ import torch_musa
+if is_npu_available(check_device=False):
+ import torch_npu
+logger = logging.getLogger(__name__)
+
+def is_initialized() -> bool:
+ return AcceleratorState._shared_state != {}
+
+def do_nothing(*args, **kwargs):
+ return None
+
+class ThreadLocalSharedDict(threading.local):
+
+ def __init__(self, thread_local: bool=False):
+ self._storage = {}
+
+ def __get__(self, obj, objtype=None):
+ return self._storage
+
+ def __set__(self, obj, value):
+ self._storage = value
+SharedDict = dict if not is_torch_xla_available() else ThreadLocalSharedDict
+
+class PartialState:
+ _shared_state = SharedDict()
+ _known_attrs = ['_cpu', '_mixed_precision', '_shared_state', 'backend', 'debug', 'device', 'distributed_type', 'fork_launched', 'local_process_index', 'num_processes', 'process_index']
+
+ def __init__(self, cpu: bool=False, **kwargs):
+ self.__dict__ = self._shared_state
+ if not self.initialized:
+ self._cpu = cpu
+ self.backend = None
+ env_device = os.environ.get('ACCELERATE_TORCH_DEVICE', None)
+ self.device = torch.device(env_device) if env_device is not None else None
+ self.debug = parse_flag_from_env('ACCELERATE_DEBUG_MODE')
+ use_sagemaker_dp = kwargs.pop('_use_sagemaker_dp', None)
+ dist_information = None
+ if use_sagemaker_dp is None:
+ use_sagemaker_dp = os.environ.get('ACCELERATE_USE_SAGEMAKER', 'false') == 'true' and os.environ.get('ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE') != SageMakerDistributedType.NO
+ original_backend = kwargs.pop('backend', None)
+ (backend, distributed_type) = self._prepare_backend(cpu, use_sagemaker_dp, original_backend)
+ if original_backend is not None and backend != original_backend:
+ raise ValueError(f'Your assigned backend {original_backend} is not available; please use {backend} instead.')
+ self.backend = backend
+ self.distributed_type = distributed_type
+ use_deepspeed = False
+ if not cpu and self.backend != 'xla':
+ if int(os.environ.get('LOCAL_RANK', -1)) != -1:
+ if os.environ.get('ACCELERATE_USE_DEEPSPEED', 'false') == 'true':
+ if not is_deepspeed_available():
+ raise ImportError('DeepSpeed is not available; install it with `pip3 install deepspeed` or build it from source.')
+ from deepspeed import comm as dist
+ if not dist.is_initialized():
+ dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)
+ use_deepspeed = True
+ elif self.distributed_type not in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU) and (not torch.distributed.is_initialized()):
+ torch.distributed.init_process_group(backend=self.backend, **kwargs)
+ if self.distributed_type in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU):
+ dist_information = get_cpu_distributed_information()
+ os.environ['RANK'] = str(dist_information.rank)
+ os.environ['WORLD_SIZE'] = str(dist_information.world_size)
+ os.environ['LOCAL_RANK'] = str(dist_information.local_rank)
+ os.environ['LOCAL_WORLD_SIZE'] = str(dist_information.local_world_size)
+ if not os.environ.get('MASTER_PORT', None):
+ os.environ['MASTER_PORT'] = '29500'
+ if not os.environ.get('MASTER_ADDR', None) and dist_information.local_world_size != dist_information.world_size and (self.backend != 'mpi'):
+ raise ValueError("Tried to launch distributed training across multiple nodes, but `MASTER_ADDR` was not set. Please export rank 0's hostname as `MASTER_ADDR`.")
+ kwargs['rank'] = dist_information.rank
+ kwargs['world_size'] = dist_information.world_size
+ if self.distributed_type == DistributedType.MULTI_CPU and get_int_from_env(['OMP_NUM_THREADS'], 0) == 0:
+ import psutil
+ num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / dist_information.local_world_size)
+ if num_cpu_threads_per_process == 0:
+ num_cpu_threads_per_process = 1
+ torch.set_num_threads(num_cpu_threads_per_process)
+ warnings.warn(f'OMP_NUM_THREADS/MKL_NUM_THREADS unset, setting it to {num_cpu_threads_per_process} to improve out-of-the-box performance.')
+ if not torch.distributed.is_initialized():
+ torch.distributed.init_process_group(backend=self.backend, **kwargs)
+ if self.backend is None:
+ self.distributed_type = DistributedType.NO
+ self.num_processes = 1
+ self.process_index = 0
+ self.local_process_index = 0
+ elif self.backend == 'xla':
+ self.set_device()
+ xm.set_replication(self.device, xm.get_xla_supported_devices())
+ self.num_processes = xm.xrt_world_size()
+ self.process_index = xm.get_ordinal()
+ if is_torch_xla_available(check_is_tpu=True):
+ self.local_process_index = xm.get_local_ordinal()
+ else:
+ self.local_process_index = int(os.environ.get('LOCAL_RANK', -1))
+ else:
+ self.num_processes = torch.distributed.get_world_size()
+ self.process_index = torch.distributed.get_rank()
+ self.local_process_index = int(os.environ.get('LOCAL_RANK', -1)) if dist_information is None else dist_information.local_rank
+ self.set_device()
+ if use_deepspeed:
+ self.distributed_type = DistributedType.DEEPSPEED
+ if parse_flag_from_env('ACCELERATE_CPU_AFFINITY', False):
+ set_numa_affinity(self.local_process_index)
+ if self.device.type == 'cuda' and (not check_cuda_p2p_ib_support()):
+ if 'NCCL_P2P_DISABLE' not in os.environ or 'NCCL_IB_DISABLE' not in os.environ:
+ raise NotImplementedError('The RTX 4000 series does not support faster communication via P2P or IB. Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1"`, or use `accelerate launch` which will do this automatically.')
+ self.fork_launched = parse_flag_from_env('FORK_LAUNCHED', 0)
+
+ def __repr__(self) -> str:
+ return f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend if self.backend else '')}\nNum processes: {self.num_processes}\nProcess index: {self.process_index}\nLocal process index: {self.local_process_index}\nDevice: {self.device}\n"
+
+ @staticmethod
+ def _reset_state():
+ PartialState._shared_state.clear()
+
+ @property
+ def initialized(self) -> bool:
+ return self._shared_state != {}
+
+ @property
+ def use_distributed(self):
+ return self.distributed_type != DistributedType.NO and self.num_processes > 1
+
+ @property
+ def is_last_process(self) -> bool:
+ return self.process_index == self.num_processes - 1
+
+ @property
+ def is_main_process(self) -> bool:
+ return self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process
+
+ @property
+ def is_local_main_process(self) -> bool:
+ return self.local_process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process
+
+ def wait_for_everyone(self):
+ if self.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU, DistributedType.DEEPSPEED, DistributedType.FSDP):
+ torch.distributed.barrier()
+ elif self.distributed_type == DistributedType.XLA:
+ xm.rendezvous('accelerate.utils.wait_for_everyone')
+
+ def _goes_first(self, is_main: bool):
+ if not is_main:
+ self.wait_for_everyone()
+ yield
+ if is_main:
+ self.wait_for_everyone()
+
+ @contextmanager
+ def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool=False):
+ if self.num_processes == 1:
+ yield inputs
+ return
+ length = len(inputs)
+ if isinstance(inputs, dict):
+ length = len(inputs[list(inputs.keys())[0]])
+ if not all((len(v) == length for v in inputs.values())):
+ raise ValueError('All values in the dictionary must have the same length')
+ (num_samples_per_process, num_extras) = divmod(length, self.num_processes)
+ start_index = self.process_index * num_samples_per_process + min(self.process_index, num_extras)
+ end_index = start_index + num_samples_per_process + (1 if self.process_index < num_extras else 0)
+
+ def _split_values(inputs, start_index, end_index):
+ if isinstance(inputs, (list, tuple, torch.Tensor)):
+ if start_index >= len(inputs):
+ result = inputs[-1:]
+ else:
+ result = inputs[start_index:end_index]
+ if apply_padding:
+ if isinstance(result, torch.Tensor):
+ from accelerate.utils import pad_across_processes, send_to_device
+ tensorized_result = send_to_device(result, self.device)
+ result = pad_across_processes(tensorized_result, pad_index=inputs[-1])
+ else:
+ result += [result[-1]] * (num_samples_per_process + 1 - len(result))
+ return result
+ elif isinstance(inputs, dict):
+ for key in inputs.keys():
+ inputs[key] = _split_values(inputs[key], start_index, end_index)
+ return inputs
+ else:
+ if is_datasets_available():
+ from datasets import Dataset
+ if isinstance(inputs, Dataset):
+ if start_index >= len(inputs):
+ start_index = len(inputs) - 1
+ if end_index > len(inputs):
+ end_index = len(inputs)
+ result_idcs = list(range(start_index, end_index))
+ if apply_padding:
+ result_idcs += [end_index - 1] * (num_samples_per_process + 1 - len(result_idcs))
+ return inputs.select(result_idcs)
+ return inputs
+ yield _split_values(inputs, start_index, end_index)
+
+ @contextmanager
+ def main_process_first(self):
+ yield from self._goes_first(self.is_main_process)
+
+ @contextmanager
+ def local_main_process_first(self):
+ yield from self._goes_first(self.is_local_main_process)
+
+ def on_main_process(self, function: Callable[..., Any]=None):
+ if not self.initialized:
+ raise ValueError('The `PartialState` or `Accelerator` must be initialized before calling this function.')
+ if self.is_main_process or not self.use_distributed:
+ return function
+ return do_nothing
+
+ def on_local_main_process(self, function: Callable[..., Any]=None):
+ if self.is_local_main_process or not self.use_distributed:
+ return function
+ return do_nothing
+
+ def on_last_process(self, function: Callable[..., Any]):
+ if self.is_last_process or not self.use_distributed:
+ return function
+ return do_nothing
+
+ def on_process(self, function: Callable[..., Any]=None, process_index: int=None):
+ if function is None:
+ return partial(self.on_process, process_index=process_index)
+ if self.process_index == process_index or not self.use_distributed:
+ return function
+ return do_nothing
+
+ def on_local_process(self, function: Callable[..., Any]=None, local_process_index: int=None):
+ if function is None:
+ return partial(self.on_local_process, local_process_index=local_process_index)
+ if self.local_process_index == local_process_index or not self.use_distributed:
+ return function
+ return do_nothing
+
+ def print(self, *args, **kwargs):
+ if self.is_local_main_process:
+ print(*args, **kwargs)
+
+ @property
+ def default_device(self) -> torch.device:
+ if is_mps_available():
+ os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
+ return torch.device('mps')
+ elif is_mlu_available():
+ return torch.device('mlu')
+ elif is_musa_available():
+ return torch.device('musa')
+ elif is_npu_available():
+ return torch.device('npu')
+ elif torch.cuda.is_available():
+ return torch.device('cuda')
+ elif is_xpu_available():
+ return torch.device('xpu:0')
+ else:
+ return torch.device('cpu')
+
+ def _prepare_backend(self, cpu: bool=False, sagemaker_dp=False, backend: str=None) -> tuple[str, DistributedType]:
+ distributed_type = None
+ if sagemaker_dp:
+ import smdistributed.dataparallel.torch.torch_smddp
+ backend = 'smddp'
+ distributed_type = DistributedType.MULTI_GPU
+ elif is_torch_xla_available():
+ backend = 'xla'
+ distributed_type = DistributedType.XLA
+ elif int(os.environ.get('LOCAL_RANK', -1)) != -1 and (not cpu):
+ if is_mlu_available():
+ backend = 'cncl'
+ distributed_type = DistributedType.MULTI_MLU
+ elif is_musa_available():
+ backend = 'mccl'
+ distributed_type = DistributedType.MULTI_MUSA
+ elif is_npu_available():
+ backend = 'hccl'
+ distributed_type = DistributedType.MULTI_NPU
+ elif torch.cuda.is_available():
+ if backend is None:
+ backend = 'nccl'
+ distributed_type = DistributedType.MULTI_GPU
+ if distributed_type is None and (int(os.environ.get('LOCAL_RANK', -1)) != -1 or get_int_from_env(['PMI_SIZE', 'OMPI_COMM_WORLD_SIZE', 'MV2_COMM_WORLD_SIZE', 'WORLD_SIZE'], 1) > 1):
+ if not cpu and is_xpu_available():
+ distributed_type = DistributedType.MULTI_XPU
+ else:
+ distributed_type = DistributedType.MULTI_CPU
+ if backend in (None, 'ccl') and is_ccl_available() and (get_int_from_env(['CCL_WORKER_COUNT'], 0) > 0 or distributed_type == DistributedType.MULTI_XPU):
+ if get_ccl_version() >= '1.12':
+ import oneccl_bindings_for_pytorch
+ else:
+ import torch_ccl
+ backend = 'ccl'
+ elif backend in (None, 'mpi') and torch.distributed.is_mpi_available():
+ backend = 'mpi'
+ else:
+ backend = 'gloo'
+ if distributed_type is None:
+ distributed_type = DistributedType.NO
+ return (backend, distributed_type)
+
+ def set_device(self):
+ if self.device is not None:
+ return
+ if self.distributed_type == DistributedType.NO:
+ self.device = torch.device('cpu') if self._cpu else self.default_device
+ return
+ device = str(self.distributed_type).split('.')[-1].replace('MULTI_', '').lower()
+ if device not in ('cpu', 'gpu', 'mlu', 'musa', 'npu', 'xpu', 'xla'):
+ raise ValueError(f"Can't set device for {self.distributed_type} ({device}); verify we should be calling `set_device()` for it!")
+ if device == 'xla':
+ self.device = xm.xla_device()
+ else:
+ if device == 'gpu':
+ device = 'cuda'
+ device_module = getattr(torch, device)
+ device_index = self.local_process_index % device_module.device_count()
+ self.device = torch.device(device, device_index)
+ device_module.set_device(self.device)
+
+ def destroy_process_group(self, group=None):
+ if self.fork_launched and group is None:
+ return
+ if torch.distributed.is_initialized():
+ torch.distributed.destroy_process_group(group)
+
+ def __getattr__(self, name: str):
+ if name in self._known_attrs:
+ raise AttributeError(f'`PartialState` object has no attribute `{name}`. This happens if `PartialState._reset_state()` was called and an `Accelerator` or `PartialState` was not reinitialized.')
+ raise AttributeError(f"'PartialState' object has no attribute '{name}'")
+
+class AcceleratorState:
+ _shared_state = SharedDict()
+ _known_attrs = PartialState._known_attrs + ['deepspeed_plugin', 'use_ipex', 'fsdp_plugin', 'megatron_lm_plugin', 'dynamo_plugin']
+
+ def __init__(self, mixed_precision: str=None, cpu: bool=False, dynamo_plugin=None, deepspeed_plugin=None, fsdp_plugin=None, megatron_lm_plugin=None, _from_accelerator: bool=False, **kwargs):
+ self.__dict__ = self._shared_state
+ if parse_flag_from_env('ACCELERATE_USE_CPU'):
+ cpu = True
+ if PartialState._shared_state == {}:
+ PartialState(cpu, **kwargs)
+ self.__dict__.update(PartialState._shared_state)
+ self._check_initialized(mixed_precision, cpu)
+ if not self.initialized:
+ self.deepspeed_plugins = None
+ self.use_ipex = None
+ mixed_precision = parse_choice_from_env('ACCELERATE_MIXED_PRECISION', 'no') if mixed_precision is None else mixed_precision.lower()
+ if mixed_precision == 'fp8':
+ if not is_fp8_available():
+ raise ValueError('Using `fp8` precision requires `transformer_engine` or `MS-AMP` to be installed.')
+ elif not check_fp8_capability():
+ logger.warning(f'The current device has compute capability {torch.cuda.get_device_capability()}, which is insufficient for FP8 mixed precision training (requires a Hopper/Ada Lovelace GPU or newer, i.e. compute capability 8.9 or higher). Will use FP16 instead.')
+ mixed_precision = 'fp16'
+ self.dynamo_plugin = dynamo_plugin
+ if not _from_accelerator:
+ raise ValueError('Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` before using any functionality from the `accelerate` library.')
+ self._mixed_precision = 'no' if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision
+ if self.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_tpu=True):
+ if mixed_precision == 'bf16':
+ if os.environ.get('ACCELERATE_DOWNCAST_BF16'):
+ os.environ['XLA_USE_BF16'] = str(0)
+ os.environ['XLA_DOWNCAST_BF16'] = str(1)
+ self.downcast_bfloat = True
+ else:
+ os.environ['XLA_USE_BF16'] = str(1)
+ os.environ['XLA_DOWNCAST_BF16'] = str(0)
+ self.downcast_bfloat = False
+ elif os.environ.get('ACCELERATE_USE_DEEPSPEED', 'false') == 'true' and (not cpu):
+ self.deepspeed_plugins = deepspeed_plugin
+ self.distributed_type = DistributedType.DEEPSPEED
+ elif self.distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU]:
+ if os.environ.get('ACCELERATE_USE_FSDP', 'false') == 'true' or fsdp_plugin is not None:
+ self.distributed_type = DistributedType.FSDP
+ if self._mixed_precision != 'no':
+ fsdp_plugin.set_mixed_precision(self._mixed_precision)
+ self.fsdp_plugin = fsdp_plugin
+ if os.environ.get('ACCELERATE_USE_MEGATRON_LM', 'false') == 'true' and self.distributed_type not in [DistributedType.MULTI_XPU]:
+ self.distributed_type = DistributedType.MEGATRON_LM
+ megatron_lm_plugin.set_mixed_precision(self._mixed_precision)
+ self.megatron_lm_plugin = megatron_lm_plugin
+ elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]:
+ if is_ipex_available():
+ self.use_ipex = parse_flag_from_env('ACCELERATE_USE_IPEX', default=True)
+ else:
+ self.use_ipex = False
+ if self.dynamo_plugin.backend != DynamoBackend.NO and self._mixed_precision == 'no' and (self.device.type == 'cuda'):
+ torch.backends.cuda.matmul.allow_tf32 = True
+ if self.dynamo_plugin.backend != DynamoBackend.NO and self._mixed_precision == 'no' and (self.device.type == 'musa'):
+ torch.backends.musa.matmul.allow_tf32 = True
+ PartialState._shared_state['distributed_type'] = self.distributed_type
+
+ @property
+ def initialized(self) -> bool:
+ return self._shared_state != PartialState._shared_state
+
+ def __repr__(self):
+ repr = PartialState().__repr__() + f'\nMixed precision type: {self.mixed_precision}\n'
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ repr += f'ds_config: {self.deepspeed_plugin.deepspeed_config}\n'
+ return repr
+
+ def _check_initialized(self, mixed_precision=None, cpu=None):
+ if self.initialized:
+ err = 'AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerator()`.'
+ if cpu and self.device.type != 'cpu':
+ raise ValueError(err.format(flag='cpu=True'))
+ if mixed_precision is not None and mixed_precision != self._mixed_precision and (self.distributed_type != DistributedType.DEEPSPEED):
+ raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'"))
+
+ @property
+ def mixed_precision(self):
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ config = self.deepspeed_plugin.deepspeed_config
+ if config.get('fp16', {}).get('enabled', False):
+ mixed_precision = 'fp16'
+ elif config.get('bf16', {}).get('enabled', False):
+ mixed_precision = 'bf16'
+ else:
+ mixed_precision = 'no'
+ else:
+ mixed_precision = self._mixed_precision
+ return mixed_precision
+
+ @staticmethod
+ def _reset_state(reset_partial_state: bool=False):
+ AcceleratorState._shared_state.clear()
+ if reset_partial_state:
+ PartialState._reset_state()
+
+ def destroy_process_group(self, group=None):
+ PartialState().destroy_process_group(group)
+
+ @property
+ def fork_launched(self):
+ return PartialState().fork_launched
+
+ @property
+ def use_distributed(self):
+ return PartialState().use_distributed
+
+ @property
+ def is_last_process(self) -> bool:
+ return PartialState().is_last_process
+
+ @property
+ def is_main_process(self) -> bool:
+ return PartialState().is_main_process
+
+ @property
+ def is_local_main_process(self) -> bool:
+ return PartialState().is_local_main_process
+
+ def wait_for_everyone(self):
+ PartialState().wait_for_everyone()
+
+ @contextmanager
+ def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool=False):
+ with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs:
+ yield inputs
+
+ @contextmanager
+ def main_process_first(self):
+ with PartialState().main_process_first():
+ yield
+
+ @contextmanager
+ def local_main_process_first(self):
+ with PartialState().local_main_process_first():
+ yield
+
+ @property
+ def deepspeed_plugin(self):
+ if self.distributed_type != DistributedType.DEEPSPEED:
+ return None
+ from accelerate.utils.deepspeed import get_active_deepspeed_plugin
+ return get_active_deepspeed_plugin(self)
+
+ @deepspeed_required
+ def get_deepspeed_plugin(self, name: str):
+ return self.deepspeed_plugins[name]
+
+ @deepspeed_required
+ def select_deepspeed_plugin(self, name: str=None):
+ for (key, plugin) in self.deepspeed_plugins.items():
+ if key != name:
+ plugin._unselect()
+ self.deepspeed_plugins[name].select(_from_accelerator_state=True)
+
+ def print(self, *args, **kwargs):
+ PartialState().print(*args, **kwargs)
+
+ def __getattr__(self, name: str):
+ if name in self._known_attrs:
+ raise AttributeError(f'`AcceleratorState` object has no attribute `{name}`. This happens if `AcceleratorState._reset_state()` was called and an `Accelerator` or `PartialState` was not reinitialized.')
+ raise AttributeError(f"'AcceleratorState' object has no attribute '{name}'")
+
+class GradientState:
+ _shared_state = SharedDict()
+
+ def __init__(self, gradient_accumulation_plugin: Optional[GradientAccumulationPlugin]=None):
+ self.__dict__ = self._shared_state
+ if not self.initialized:
+ self.sync_gradients = True
+ self.active_dataloader = None
+ self.dataloader_references = [None]
+ self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs() if gradient_accumulation_plugin is not None else {}
+ self._is_xla_gradients_synced = False
+ if gradient_accumulation_plugin is not None and self.plugin_kwargs != gradient_accumulation_plugin.to_kwargs():
+ self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs()
+
+ @property
+ def num_steps(self) -> int:
+ return self.plugin_kwargs.get('num_steps', 1)
+
+ @property
+ def adjust_scheduler(self) -> bool:
+ return self.plugin_kwargs.get('adjust_scheduler', False)
+
+ @property
+ def sync_with_dataloader(self) -> bool:
+ return self.plugin_kwargs.get('sync_with_dataloader', True)
+
+ @property
+ def initialized(self) -> bool:
+ return GradientState._shared_state != {}
+
+ @property
+ def end_of_dataloader(self) -> bool:
+ if not self.in_dataloader:
+ return False
+ return self.active_dataloader.end_of_dataloader
+
+ @property
+ def remainder(self) -> int:
+ if not self.in_dataloader:
+ return -1
+ return self.active_dataloader.remainder
+
+ def __repr__(self):
+ return f'Sync Gradients: {self.sync_gradients}\nAt end of current dataloader: {self.end_of_dataloader}\nExtra samples added: {self.remainder}\nGradient accumulation plugin: {self.plugin_kwargs}\n'
+
+ @property
+ def is_xla_gradients_synced(self):
+ if parse_flag_from_env('ACCELERATE_USE_FSDP', default=False):
+ return True
+ return self._is_xla_gradients_synced
+
+ @is_xla_gradients_synced.setter
+ def is_xla_gradients_synced(self, is_synced):
+ self._is_xla_gradients_synced = is_synced
+
+ def _set_sync_gradients(self, sync_gradients):
+ self.sync_gradients = sync_gradients
+ if self.sync_gradients and is_torch_xla_available(check_is_tpu=True) and (PartialState().distributed_type == DistributedType.XLA):
+ xm.mark_step()
+
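+ # `dataloader_references` behaves like a stack: nested prepared dataloaders
+ # are pushed while iterating and the outer one becomes active again when an
+ # inner one is removed.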
+ def _add_dataloader(self, dataloader):
+ self.active_dataloader = dataloader
+ self.dataloader_references.append(self.active_dataloader)
+
+ def _remove_dataloader(self, dataloader):
+ self.dataloader_references.remove(dataloader)
+ self.active_dataloader = self.dataloader_references[-1]
+
+ @property
+ def in_dataloader(self) -> bool:
+ return self.active_dataloader is not None
+
+ @staticmethod
+ def _reset_state():
+ GradientState._shared_state.clear()
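+
+# Usage sketch (not part of the original module) illustrating the shared-state
+# behaviour: every GradientState() instance reads and writes the same underlying
+# dict, so a flag set through one instance is visible through any other.
+#
+# state = GradientState()
+# other = GradientState()
+# state._set_sync_gradients(False)
+# assert other.sync_gradients is False
+# GradientState._reset_state()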
+
+# File: accelerate-main/src/accelerate/tracking.py
+import json
+import os
+import time
+from functools import wraps
+from typing import Any, Dict, List, Optional, Union
+import yaml
+from .logging import get_logger
+from .state import PartialState
+from .utils import LoggerType, is_aim_available, is_clearml_available, is_comet_ml_available, is_dvclive_available, is_mlflow_available, is_tensorboard_available, is_wandb_available, listify
+_available_trackers = []
+if is_tensorboard_available():
+ _available_trackers.append(LoggerType.TENSORBOARD)
+if is_wandb_available():
+ _available_trackers.append(LoggerType.WANDB)
+if is_comet_ml_available():
+ _available_trackers.append(LoggerType.COMETML)
+if is_aim_available():
+ _available_trackers.append(LoggerType.AIM)
+if is_mlflow_available():
+ _available_trackers.append(LoggerType.MLFLOW)
+if is_clearml_available():
+ _available_trackers.append(LoggerType.CLEARML)
+if is_dvclive_available():
+ _available_trackers.append(LoggerType.DVCLIVE)
+logger = get_logger(__name__)
+
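+# Decorator for tracker methods: when the tracker sets `main_process_only`, the
+# wrapped method is routed through `PartialState().on_main_process` so it only
+# executes on the main process; otherwise it runs on every process.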
+def on_main_process(function):
+
+ @wraps(function)
+ def execute_on_main_process(self, *args, **kwargs):
+ if getattr(self, 'main_process_only', False):
+ return PartialState().on_main_process(function)(self, *args, **kwargs)
+ else:
+ return function(self, *args, **kwargs)
+ return execute_on_main_process
+
+def get_available_trackers():
+ return _available_trackers
+
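+# Base class for experiment trackers. Subclasses must define `name`,
+# `requires_logging_directory` and a `tracker` attribute or property; unless
+# constructed with `_blank=True`, __init__ raises NotImplementedError listing
+# whichever of these is missing.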
+class GeneralTracker:
+ main_process_only = True
+
+ def __init__(self, _blank=False):
+ if not _blank:
+ err = ''
+ if not hasattr(self, 'name'):
+ err += '`name`'
+ if not hasattr(self, 'requires_logging_directory'):
+ if len(err) > 0:
+ err += ', '
+ err += '`requires_logging_directory`'
+ if 'tracker' not in dir(self):
+ if len(err) > 0:
+ err += ', '
+ err += '`tracker`'
+ if len(err) > 0:
+ raise NotImplementedError(f'The implementation for this tracker class is missing the following required attributes. Please define them in the class definition: {err}')
+
+ def store_init_configuration(self, values: dict):
+ pass
+
+ def log(self, values: dict, step: Optional[int], **kwargs):
+ pass
+
+ def finish(self):
+ pass
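+
+# Sketch of a minimal custom tracker (illustrative only, not part of the original
+# module): a subclass just needs the attributes checked in GeneralTracker.__init__
+# plus whichever logging hooks it wants to support. `PrintTracker` is a made-up
+# name used here for illustration.
+#
+# class PrintTracker(GeneralTracker):
+#     name = "print"
+#     requires_logging_directory = False
+#
+#     def __init__(self, run_name: str):
+#         super().__init__()
+#         self.run_name = run_name
+#
+#     @property
+#     def tracker(self):
+#         return None
+#
+#     @on_main_process
+#     def log(self, values: dict, step: Optional[int] = None, **kwargs):
+#         print(f"[{self.run_name}] step={step}: {values}")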
+
+class TensorBoardTracker(GeneralTracker):
+ name = 'tensorboard'
+ requires_logging_directory = True
+
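+ # Falls back to tensorboardX when torch.utils.tensorboard is unavailable;
+ # events are written under `logging_dir/run_name`.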
+ @on_main_process
+ def __init__(self, run_name: str, logging_dir: Union[str, os.PathLike], **kwargs):
+ try:
+ from torch.utils import tensorboard
+ except ModuleNotFoundError:
+ import tensorboardX as tensorboard
+ super().__init__()
+ self.run_name = run_name
+ self.logging_dir = os.path.join(logging_dir, run_name)
+ self.writer = tensorboard.SummaryWriter(self.logging_dir, **kwargs)
+ logger.debug(f'Initialized TensorBoard project {self.run_name} logging to {self.logging_dir}')
+ logger.debug('Make sure to log any initial configurations with `self.store_init_configuration` before training!')
+
+ @property
+ def tracker(self):
+ return self.writer
+
+ @on_main_process
+ def store_init_configuration(self, values: dict):
+ self.writer.add_hparams(values, metric_dict={})
+ self.writer.flush()
+ project_run_name = time.time()
+ dir_name = os.path.join(self.logging_dir, str(project_run_name))
+ os.makedirs(dir_name, exist_ok=True)
+ with open(os.path.join(dir_name, 'hparams.yml'), 'w') as outfile:
+ try:
+ yaml.dump(values, outfile)
+ except yaml.representer.RepresenterError:
+ logger.error('Serialization to store hyperparameters failed')
+ raise
+ logger.debug('Stored initial configuration hyperparameters to TensorBoard and hparams yaml file')
+
+ @on_main_process
+ def log(self, values: dict, step: Optional[int]=None, **kwargs):
+ values = listify(values)
+ for (k, v) in values.items():
+ if isinstance(v, (int, float)):
+ self.writer.add_scalar(k, v, global_step=step, **kwargs)
+ elif isinstance(v, str):
+ self.writer.add_text(k, v, global_step=step, **kwargs)
+ elif isinstance(v, dict):
+ self.writer.add_scalars(k, v, global_step=step, **kwargs)
+ self.writer.flush()
+ logger.debug('Successfully logged to TensorBoard')
+
+ @on_main_process
+ def log_images(self, values: dict, step: Optional[int], **kwargs):
+ for (k, v) in values.items():
+ self.writer.add_images(k, v, global_step=step, **kwargs)
+ logger.debug('Successfully logged images to TensorBoard')
+
+ @on_main_process
+ def finish(self):
+ self.writer.close()
+ logger.debug('TensorBoard writer closed')
+
+class WandBTracker(GeneralTracker):
+ name = 'wandb'
+ requires_logging_directory = False
+ main_process_only = False
+
+ @on_main_process
+ def __init__(self, run_name: str, **kwargs):
+ super().__init__()
+ self.run_name = run_name
+ import wandb
+ self.run = wandb.init(project=self.run_name, **kwargs)
+ logger.debug(f'Initialized WandB project {self.run_name}')
+ logger.debug('Make sure to log any initial configurations with `self.store_init_configuration` before training!')
+
+ @property
+ def tracker(self):
+ return self.run
+
+ @on_main_process
+ def store_init_configuration(self, values: dict):
+ import wandb
+ wandb.config.update(values, allow_val_change=True)
+ logger.debug('Stored initial configuration hyperparameters to WandB')
+
+ @on_main_process
+ def log(self, values: dict, step: Optional[int]=None, **kwargs):
+ self.run.log(values, step=step, **kwargs)
+ logger.debug('Successfully logged to WandB')
+
+ @on_main_process
+ def log_images(self, values: dict, step: Optional[int]=None, **kwargs):
+ import wandb
+ for (k, v) in values.items():
+ self.log({k: [wandb.Image(image) for image in v]}, step=step, **kwargs)
+ logger.debug('Successfully logged images to WandB')
+
+ @on_main_process
+ def log_table(self, table_name: str, columns: List[str]=None, data: List[List[Any]]=None, dataframe: Any=None, step: Optional[int]=None, **kwargs):
+ import wandb
+ values = {table_name: wandb.Table(columns=columns, data=data, dataframe=dataframe)}
+ self.log(values, step=step, **kwargs)
+
+ @on_main_process
+ def finish(self):
+ self.run.finish()
+ logger.debug('WandB run closed')
+
+class CometMLTracker(GeneralTracker):
+ name = 'comet_ml'
+ requires_logging_directory = False
+
+ @on_main_process
+ def __init__(self, run_name: str, **kwargs):
+ super().__init__()
+ self.run_name = run_name
+ from comet_ml import Experiment
+ self.writer = Experiment(project_name=run_name, **kwargs)
+ logger.debug(f'Initialized CometML project {self.run_name}')
+ logger.debug('Make sure to log any initial configurations with `self.store_init_configuration` before training!')
+
+ @property
+ def tracker(self):
+ return self.writer
+
+ @on_main_process
+ def store_init_configuration(self, values: dict):
+ self.writer.log_parameters(values)
+ logger.debug('Stored initial configuration hyperparameters to CometML')
+
+ @on_main_process
+ def log(self, values: dict, step: Optional[int]=None, **kwargs):
+ if step is not None:
+ self.writer.set_step(step)
+ for (k, v) in values.items():
+ if isinstance(v, (int, float)):
+ self.writer.log_metric(k, v, step=step, **kwargs)
+ elif isinstance(v, str):
+ self.writer.log_other(k, v, **kwargs)
+ elif isinstance(v, dict):
+ self.writer.log_metrics(v, step=step, **kwargs)
+ logger.debug('Successfully logged to CometML')
+
+ @on_main_process
+ def finish(self):
+ self.writer.end()
+ logger.debug('CometML run closed')
+
+class AimTracker(GeneralTracker):
+ name = 'aim'
+ requires_logging_directory = True
+
+ @on_main_process
+ def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]]='.', **kwargs):
+ self.run_name = run_name
+ from aim import Run
+ self.writer = Run(repo=logging_dir, **kwargs)
+ self.writer.name = self.run_name
+ logger.debug(f'Initialized Aim project {self.run_name}')
+ logger.debug('Make sure to log any initial configurations with `self.store_init_configuration` before training!')
+
+ @property
+ def tracker(self):
+ return self.writer
+
+ @on_main_process
+ def store_init_configuration(self, values: dict):
+ self.writer['hparams'] = values
+
+ @on_main_process
+ def log(self, values: dict, step: Optional[int], **kwargs):
+ for (key, value) in values.items():
+ self.writer.track(value, name=key, step=step, **kwargs)
+
+ @on_main_process
+ def log_images(self, values: dict, step: Optional[int]=None, kwargs: Optional[Dict[str, dict]]=None):
+ import aim
+ aim_image_kw = {}
+ track_kw = {}
+ if kwargs is not None:
+ aim_image_kw = kwargs.get('aim_image', {})
+ track_kw = kwargs.get('track', {})
+ for (key, value) in values.items():
+ if isinstance(value, tuple):
+ (img, caption) = value
+ else:
+ (img, caption) = (value, '')
+ aim_image = aim.Image(img, caption=caption, **aim_image_kw)
+ self.writer.track(aim_image, name=key, step=step, **track_kw)
+
+ @on_main_process
+ def finish(self):
+ self.writer.close()
+
+class MLflowTracker(GeneralTracker):
+ name = 'mlflow'
+ requires_logging_directory = False
+
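+ # Constructor arguments can be overridden through the MLFLOW_EXPERIMENT_NAME,
+ # MLFLOW_RUN_ID, MLFLOW_TAGS and MLFLOW_NESTED_RUN environment variables; an
+ # existing experiment with the given name is reused, otherwise one is created.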
+ @on_main_process
+ def __init__(self, experiment_name: str=None, logging_dir: Optional[Union[str, os.PathLike]]=None, run_id: Optional[str]=None, tags: Optional[Union[Dict[str, Any], str]]=None, nested_run: Optional[bool]=False, run_name: Optional[str]=None, description: Optional[str]=None):
+ experiment_name = os.environ.get('MLFLOW_EXPERIMENT_NAME', experiment_name)
+ run_id = os.environ.get('MLFLOW_RUN_ID', run_id)
+ tags = os.environ.get('MLFLOW_TAGS', tags)
+ if isinstance(tags, str):
+ tags = json.loads(tags)
+ nested_run = os.environ.get('MLFLOW_NESTED_RUN', nested_run)
+ import mlflow
+ exps = mlflow.search_experiments(filter_string=f"name = '{experiment_name}'")
+ if len(exps) > 0:
+ if len(exps) > 1:
+ logger.warning('Multiple experiments with the same name found. Using the first one.')
+ experiment_id = exps[0].experiment_id
+ else:
+ experiment_id = mlflow.create_experiment(name=experiment_name, artifact_location=logging_dir, tags=tags)
+ self.active_run = mlflow.start_run(run_id=run_id, experiment_id=experiment_id, run_name=run_name, nested=nested_run, tags=tags, description=description)
+ logger.debug(f'Initialized mlflow experiment {experiment_name}')
+ logger.debug('Make sure to log any initial configurations with `self.store_init_configuration` before training!')
+
+ @property
+ def tracker(self):
+ return self.active_run
+
+ @on_main_process
+ def store_init_configuration(self, values: dict):
+ import mlflow
+ for (name, value) in list(values.items()):
+ if len(str(value)) > mlflow.utils.validation.MAX_PARAM_VAL_LENGTH:
+ logger.warning_once(f'''Accelerate is attempting to log a value of "{value}" for key "{name}" as a parameter. MLflow's log_param() only accepts values no longer than {mlflow.utils.validation.MAX_PARAM_VAL_LENGTH} characters so we dropped this attribute.''')
+ del values[name]
+ values_list = list(values.items())
+ for i in range(0, len(values_list), mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH):
+ mlflow.log_params(dict(values_list[i:i + mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH]))
+ logger.debug('Stored initial configuration hyperparameters to MLflow')
+
+ @on_main_process
+ def log(self, values: dict, step: Optional[int]):
+ metrics = {}
+ for (k, v) in values.items():
+ if isinstance(v, (int, float)):
+ metrics[k] = v
+ else:
+ logger.warning_once(f'''MLflowTracker is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a metric. MLflow's log_metric() only accepts float and int types so we dropped this attribute.''')
+ import mlflow
+ mlflow.log_metrics(metrics, step=step)
+ logger.debug('Successfully logged to mlflow')
+
+ @on_main_process
+ def finish(self):
+ import mlflow
+ mlflow.end_run()
+
+class ClearMLTracker(GeneralTracker):
+ name = 'clearml'
+ requires_logging_directory = False
+
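+ # If a ClearML task is already running (e.g. created outside Accelerate), it
+ # is reused and left open by finish(); otherwise a new task is initialized
+ # from the CLEARML_PROJECT/CLEARML_TASK env vars or the run name.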
+ @on_main_process
+ def __init__(self, run_name: str=None, **kwargs):
+ from clearml import Task
+ current_task = Task.current_task()
+ self._initialized_externally = False
+ if current_task:
+ self._initialized_externally = True
+ self.task = current_task
+ return
+ kwargs.setdefault('project_name', os.environ.get('CLEARML_PROJECT', run_name))
+ kwargs.setdefault('task_name', os.environ.get('CLEARML_TASK', run_name))
+ self.task = Task.init(**kwargs)
+
+ @property
+ def tracker(self):
+ return self.task
+
+ @on_main_process
+ def store_init_configuration(self, values: dict):
+ return self.task.connect_configuration(values)
+
+ @on_main_process
+ def log(self, values: Dict[str, Union[int, float]], step: Optional[int]=None, **kwargs):
+ clearml_logger = self.task.get_logger()
+ for (k, v) in values.items():
+ if not isinstance(v, (int, float)):
+ logger.warning_once(f'''Accelerator is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a scalar. This invocation of ClearML logger's report_scalar() is incorrect so we dropped this attribute.''')
+ continue
+ if step is None:
+ clearml_logger.report_single_value(name=k, value=v, **kwargs)
+ continue
+ (title, series) = ClearMLTracker._get_title_series(k)
+ clearml_logger.report_scalar(title=title, series=series, value=v, iteration=step, **kwargs)
+
+ @on_main_process
+ def log_images(self, values: dict, step: Optional[int]=None, **kwargs):
+ clearml_logger = self.task.get_logger()
+ for (k, v) in values.items():
+ (title, series) = ClearMLTracker._get_title_series(k)
+ clearml_logger.report_image(title=title, series=series, iteration=step, image=v, **kwargs)
+
+ @on_main_process
+ def log_table(self, table_name: str, columns: List[str]=None, data: List[List[Any]]=None, dataframe: Any=None, step: Optional[int]=None, **kwargs):
+ to_report = dataframe
+ if dataframe is None:
+ if data is None:
+ raise ValueError('`ClearMLTracker.log_table` requires `data` to be supplied if `dataframe` is `None`')
+ to_report = [columns] + data if columns else data
+ (title, series) = ClearMLTracker._get_title_series(table_name)
+ self.task.get_logger().report_table(title=title, series=series, table_plot=to_report, iteration=step, **kwargs)
+
+ @on_main_process
+ def finish(self):
+ if self.task and (not self._initialized_externally):
+ self.task.close()
+
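+ # Splits metric names with an 'eval_'/'test_'/'train_' prefix into a
+ # (title, series) pair, e.g. 'eval_loss' -> ('loss', 'eval'); anything else
+ # defaults to the 'train' series.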
+ @staticmethod
+ def _get_title_series(name):
+ for prefix in ['eval', 'test', 'train']:
+ if name.startswith(prefix + '_'):
+ return (name[len(prefix) + 1:], prefix)
+ return (name, 'train')
+
+class DVCLiveTracker(GeneralTracker):
+ name = 'dvclive'
+ requires_logging_directory = False
+
+ @on_main_process
+ def __init__(self, run_name: Optional[str]=None, live: Optional[Any]=None, **kwargs):
+ from dvclive import Live
+ super().__init__()
+ self.live = live if live is not None else Live(**kwargs)
+
+ @property
+ def tracker(self):
+ return self.live
+
+ @on_main_process
+ def store_init_configuration(self, values: dict):
+ self.live.log_params(values)
+
+ @on_main_process
+ def log(self, values: dict, step: Optional[int]=None, **kwargs):
+ from dvclive.plots import Metric
+ if step is not None:
+ self.live.step = step
+ for (k, v) in values.items():
+ if Metric.could_log(v):
+ self.live.log_metric(k, v, **kwargs)
+ else:
+ logger.warning_once(f'''Accelerator attempted to log a value of "{v}" of type {type(v)} for key "{k}" as a scalar. This invocation of DVCLive's Live.log_metric() is incorrect so we dropped this attribute.''')
+ self.live.next_step()
+
+ @on_main_process
+ def finish(self):
+ self.live.end()
+LOGGER_TYPE_TO_CLASS = {'aim': AimTracker, 'comet_ml': CometMLTracker, 'mlflow': MLflowTracker, 'tensorboard': TensorBoardTracker, 'wandb': WandBTracker, 'clearml': ClearMLTracker, 'dvclive': DVCLiveTracker}
+
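+# Resolves the `log_with` argument into a list of tracker types: custom
+# GeneralTracker instances are kept as-is, 'all' expands to every tracker whose
+# package is installed, trackers requiring a logging directory raise if
+# `logging_dir` is not provided, and unavailable packages are skipped with a
+# debug message.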
+def filter_trackers(log_with: List[Union[str, LoggerType, GeneralTracker]], logging_dir: Union[str, os.PathLike]=None):
+ loggers = []
+ if log_with is not None:
+ if not isinstance(log_with, (list, tuple)):
+ log_with = [log_with]
+ if 'all' in log_with or LoggerType.ALL in log_with:
+ loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers()
+ else:
+ for log_type in log_with:
+ if log_type not in LoggerType and (not issubclass(type(log_type), GeneralTracker)):
+ raise ValueError(f'Unsupported logging capability: {log_type}. Choose between {LoggerType.list()}')
+ if issubclass(type(log_type), GeneralTracker):
+ loggers.append(log_type)
+ else:
+ log_type = LoggerType(log_type)
+ if log_type not in loggers:
+ if log_type in get_available_trackers():
+ tracker_init = LOGGER_TYPE_TO_CLASS[str(log_type)]
+ if tracker_init.requires_logging_directory:
+ if logging_dir is None:
+ raise ValueError(f'Logging with `{log_type}` requires a `logging_dir` to be passed in.')
+ loggers.append(log_type)
+ else:
+ logger.debug(f'Tried adding logger {log_type}, but package is unavailable in the system.')
+ return loggers
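+
+# Usage sketch (not part of the original module); it assumes the tensorboard
+# package is installed so that LoggerType.TENSORBOARD is among the available
+# trackers.
+#
+# trackers = filter_trackers(["tensorboard"], logging_dir="./logs")
+# # -> [LoggerType.TENSORBOARD]
+# filter_trackers(["tensorboard"])  # raises ValueError: a `logging_dir` is required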
+