fucking crazy zshrc
Browse files- .zshrc +352 -65
- password +14 -2
- steal_sdscripts_metadata +103 -0
.zshrc
CHANGED
@@ -1,9 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
source $HOME/toolkit/git-wrapper.zsh
|
2 |
|
|
|
3 |
export ZSH="$HOME/.oh-my-zsh"
|
4 |
|
|
|
5 |
plugins=(git autojump conda-env)
|
6 |
|
|
|
7 |
ZSH_THEME="kade"
|
8 |
# CASE_SENSITIVE="true"
|
9 |
# HYPHEN_INSENSITIVE="true"
|
@@ -14,38 +28,89 @@ ZSH_THEME="kade"
|
|
14 |
# COMPLETION_WAITING_DOTS="true"
|
15 |
# DISABLE_UNTRACKED_FILES_DIRTY="true"
|
16 |
|
|
|
17 |
export LANG=ja_JP.UTF-8
|
18 |
export LC_ALL=ja_JP.UTF-8
|
|
|
19 |
export HISTSIZE=500000
|
20 |
-
|
21 |
export COMFYUI_PATH="$HOME/ComfyUI"
|
|
|
22 |
export BNB_CUDA_VERSION=126
|
23 |
-
|
24 |
export RUST_BACKTRACE=1
|
25 |
-
|
26 |
export DOTNET_CLI_TELEMETRY_OPTOUT=1
|
|
|
27 |
export CLICOLOR=126
|
28 |
|
29 |
# ⚠️ TODO: This needs to be benched but I'm too bad at this!
|
|
|
|
|
|
|
|
|
30 |
export NUMEXPR_MAX_THREADS=24
|
|
|
|
|
|
|
31 |
export VECLIB_MAXIMUM_THREADS=24
|
|
|
|
|
|
|
32 |
export MKL_NUM_THREADS=24
|
|
|
|
|
|
|
33 |
export OMP_NUM_THREADS=24
|
34 |
|
|
|
|
|
|
|
|
|
|
|
35 |
export TOKENIZERS_PARALLELISM=false
|
36 |
|
|
|
|
|
|
|
|
|
|
|
|
|
37 |
source $ZSH/oh-my-zsh.sh
|
38 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
39 |
export PATH=$PATH:$HOME/source/repos/dataset-tools/target/x86_64-unknown-linux-gnu/release:$HOME/.cargo/bin:$HOME/miniconda3/bin:$HOME/toolkit:$HOME/db/redis-stable/src:$HOME/db/postgresql/bin:$HOME/.local/bin:/opt/cuda/bin
|
|
|
|
|
|
|
|
|
|
|
40 |
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CONDA_PREFIX/lib:/opt/cuda/targets/x86_64-linux/lib
|
|
|
|
|
|
|
41 |
export COMFYUI_MODEL_PATH=/home/kade/ComfyUI/models
|
42 |
|
|
|
43 |
alias upx='/home/kade/.local/bin/upx'
|
|
|
44 |
alias ls='ls --color=always'
|
|
|
45 |
alias ll="ls -lah --color=always"
|
|
|
46 |
alias cp='cp --reflink=auto'
|
|
|
47 |
alias t="tensorboard --logdir=$HOME/output_dir/logs"
|
|
|
48 |
alias rt="vim ~/.tmux.conf && echo \"Reloading tmux config\" && tmux source ~/.tmux.conf"
|
|
|
49 |
alias zr="vim ~/.zshrc && echo \"Reloading zsh config\" && source ~/.zshrc"
|
50 |
|
51 |
# The kanji 接 (せつ) [setsu] means "touch," "contact," "adjoin," or "piece together."
|
@@ -56,28 +121,45 @@ alias zr="vim ~/.zshrc && echo \"Reloading zsh config\" && source ~/.zshrc"
|
|
56 |
# 2. Type "setsu" (せつ) in hiragana.
|
57 |
# 3. Press the spacebar to convert it to the kanji 接.
|
58 |
alias 接="tmux attach"
|
|
|
|
|
59 |
alias ta="tmux attach"
|
60 |
|
|
|
61 |
alias ga="git add . && git commit -avs && git push"
|
|
|
62 |
alias gs="git status"
|
|
|
63 |
alias wd="git diff --word-diff-regex='[^,]+' --patience"
|
|
|
64 |
alias vim="nvim"
|
|
|
65 |
alias vi="nvim"
|
|
|
66 |
alias v="nvim"
|
|
|
67 |
alias grh='git reset --hard'
|
|
|
68 |
alias gcs='git clone --recurse-submodules'
|
|
|
69 |
alias grabber="Grabber-cli"
|
70 |
|
71 |
# 'pie' is a shortcut for installing a Python package in editable mode
|
72 |
# using the pip command with the --use-pep517 option.
|
73 |
alias pie='pip install -e . --use-pep517'
|
|
|
74 |
alias gc="git commit -avs --verbose"
|
|
|
75 |
alias dir="dir --color=always"
|
76 |
|
|
|
77 |
alias ezc="nvim ~/.zshrc && source ~/.zshrc"
|
78 |
|
|
|
79 |
source /home/kade/.config/broot/launcher/bash/br
|
80 |
|
|
|
|
|
81 |
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
|
82 |
|
83 |
# >>> conda initialize >>>
|
@@ -110,7 +192,39 @@ display_git_help() {
|
|
110 |
}
|
111 |
display_git_help
|
112 |
|
113 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
114 |
# It searches for all .txt files in the specified directory and its subdirectories.
|
115 |
# If a file contains the specified tag, the function removes the tag from its original position
|
116 |
# and prepends it to the beginning of the file.
|
@@ -162,6 +276,32 @@ update_conda() {
|
|
162 |
echo "All environments have been upgraded."
|
163 |
}
|
164 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
165 |
list_word_freqs() {
|
166 |
local target_dir=$1
|
167 |
if [[ -z "$target_dir" ]]; then
|
@@ -210,6 +350,19 @@ list_word_freqs() {
|
|
210 |
rm "$combined_file"
|
211 |
}
|
212 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
213 |
sample_prompts() {
|
214 |
local target_directory="$1"
|
215 |
|
@@ -254,6 +407,11 @@ replace_comma_with_keep_tags() {
|
|
254 |
done
|
255 |
}
|
256 |
|
|
|
|
|
|
|
|
|
|
|
257 |
display_custom_help() {
|
258 |
echo "----------------------------------------------------------------------------------------------------------------------"
|
259 |
printf "%s\n" "$(conda env list)"
|
@@ -421,10 +579,6 @@ oui() {
|
|
421 |
open-webui serve --port 6969
|
422 |
}
|
423 |
|
424 |
-
llama() {
|
425 |
-
~/models/Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile -cb -np 4 -a llama-3-8b --embedding --port 11434
|
426 |
-
}
|
427 |
-
|
428 |
# Function to copy matching .caption files
|
429 |
copy_matching_caption_files() {
|
430 |
# Define the target directory
|
@@ -533,6 +687,45 @@ update_dir() {
|
|
533 |
fi
|
534 |
}
|
535 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
536 |
chop_lora() {
|
537 |
local input_file="$1"
|
538 |
local base_name="${input_file:r}" # Remove extension
|
@@ -559,6 +752,25 @@ chop_lora() {
|
|
559 |
done
|
560 |
}
|
561 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
562 |
swch() {
|
563 |
if [ -z "$1" ]; then
|
564 |
echo "Please provide a branch name."
|
@@ -568,6 +780,26 @@ swch() {
|
|
568 |
git clean -fxd && git pull && git checkout $branchname
|
569 |
}
|
570 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
571 |
extract_iframes() {
|
572 |
# Assign input arguments
|
573 |
input_file="$1"
|
@@ -580,56 +812,27 @@ extract_iframes() {
|
|
580 |
/usr/bin/ffmpeg -i "$input_file" -f image2 -vf "select=eq(pict_type\,PICT_TYPE_I)*gt(scene\,$scene_change_fraction),showinfo" -fps_mode vfr "${base_name}-%06d.png"
|
581 |
}
|
582 |
|
583 |
-
|
584 |
-
|
585 |
-
|
586 |
-
|
587 |
-
|
588 |
-
|
589 |
-
|
590 |
-
|
591 |
-
|
592 |
-
|
593 |
-
|
594 |
-
|
595 |
-
|
596 |
-
|
597 |
-
|
598 |
-
|
599 |
-
|
600 |
-
|
601 |
-
|
602 |
-
|
603 |
-
|
604 |
-
|
605 |
-
echo "Conversion complete."
|
606 |
-
}
|
607 |
-
|
608 |
-
convert_pxl_to_png() {
|
609 |
-
local target_directory="$1"
|
610 |
-
|
611 |
-
# Ensure the target directory exists
|
612 |
-
if [[ ! -d "$target_directory" ]]; then
|
613 |
-
echo "The specified directory does not exist: $target_directory" >&2
|
614 |
-
return 1
|
615 |
-
fi
|
616 |
-
|
617 |
-
# Find all PXL files in the target directory and all subdirectories
|
618 |
-
find "$target_directory" -type f -name "*.pxl" | while read -r file; do
|
619 |
-
input_path="$file"
|
620 |
-
output_path="${file%.pxl}.png"
|
621 |
-
|
622 |
-
# Convert PXL to PNG using ImageMagick
|
623 |
-
if magick convert "$input_path" "$output_path"; then
|
624 |
-
echo "Converted: $input_path -> $output_path"
|
625 |
-
else
|
626 |
-
echo "Failed to convert $input_path" >&2
|
627 |
-
fi
|
628 |
-
done
|
629 |
-
|
630 |
-
echo "Conversion complete."
|
631 |
-
}
|
632 |
-
|
633 |
seed() {
|
634 |
local filePath="$1"
|
635 |
python3 -c "
|
@@ -637,27 +840,111 @@ import safetensors, json
|
|
637 |
filePath = '$filePath'
|
638 |
print(json.loads(safetensors.safe_open(filePath, 'np').metadata().get('ss_seed', 'Not found')))"
|
639 |
}
|
640 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
641 |
png2mp4() {
|
642 |
-
ffmpeg -framerate 8 -pattern_type glob -i '*.png' -vf scale=
|
643 |
-c:v libx264 -pix_fmt yuv420p out.mp4
|
644 |
}
|
645 |
|
646 |
-
|
647 |
-
|
648 |
-
|
649 |
-
|
650 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
651 |
c() {
|
652 |
cd ~/ComfyUI &&
|
653 |
conda activate comfyui
|
654 |
python main.py --listen 0.0.0.0 --preview-method taesd --use-pytorch-cross-attention --disable-xformers --front-end-version Comfy-Org/ComfyUI_frontend@latest --fast
|
655 |
}
|
656 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
657 |
conda_prompt_info() {
|
658 |
if [[ -n "$CONDA_DEFAULT_ENV" ]]; then
|
659 |
echo "(${CONDA_DEFAULT_ENV})"
|
660 |
fi
|
661 |
}
|
662 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
663 |
display_custom_help
|
|
|
1 |
+
# Configuration for Oh My Zsh and custom settings
|
2 |
+
# ----------------------------------------------
|
3 |
+
# 1. Source the custom git wrapper script
|
4 |
+
# 2. Set the path for Oh My Zsh installation
|
5 |
+
# 3. Define plugins for extended functionality:
|
6 |
+
# - git: Provides aliases and functions for Git
|
7 |
+
# - autojump: Enables quick navigation to frequently visited directories
|
8 |
+
# - conda-env: Adds support for Conda environment management
|
9 |
+
# 4. Set the custom theme for the shell prompt
|
10 |
+
|
11 |
+
# Load the custom git wrapper script
|
12 |
source $HOME/toolkit/git-wrapper.zsh
|
13 |
|
14 |
+
# Set the path to the Oh My Zsh installation directory
|
15 |
export ZSH="$HOME/.oh-my-zsh"
|
16 |
|
17 |
+
# Enable Oh My Zsh plugins for additional features
|
18 |
plugins=(git autojump conda-env)
|
19 |
|
20 |
+
# Set the custom theme for the shell prompt
|
21 |
ZSH_THEME="kade"
|
22 |
# CASE_SENSITIVE="true"
|
23 |
# HYPHEN_INSENSITIVE="true"
|
|
|
28 |
# COMPLETION_WAITING_DOTS="true"
|
29 |
# DISABLE_UNTRACKED_FILES_DIRTY="true"
|
30 |
|
31 |
+
# Set the system language and locale to Japanese UTF-8
|
32 |
export LANG=ja_JP.UTF-8
|
33 |
export LC_ALL=ja_JP.UTF-8
|
34 |
+
# Set the maximum number of commands to store in the shell history
|
35 |
export HISTSIZE=500000
|
36 |
+
# Set the path to the ComfyUI installation
|
37 |
export COMFYUI_PATH="$HOME/ComfyUI"
|
38 |
+
# Set the CUDA version for bitsandbytes library
|
39 |
export BNB_CUDA_VERSION=126
|
40 |
+
# Enable full backtrace for Rust programs
|
41 |
export RUST_BACKTRACE=1
|
42 |
+
# Opt out of .NET CLI telemetry data collection
|
43 |
export DOTNET_CLI_TELEMETRY_OPTOUT=1
|
44 |
+
# Enable color output in the terminal (value might need adjustment)
|
45 |
export CLICOLOR=126
|
46 |
|
47 |
# ⚠️ TODO: This needs to be benched but I'm too bad at this!
|
48 |
+
|
49 |
+
# Set the maximum number of threads for NumExpr library
|
50 |
+
# NumExpr is used for fast numerical array operations
|
51 |
+
# This setting can improve performance for multi-threaded NumPy operations
|
52 |
export NUMEXPR_MAX_THREADS=24
|
53 |
+
# Set the maximum number of threads for Apple's Accelerate framework (VecLib)
|
54 |
+
# This affects performance of vector and matrix operations on macOS
|
55 |
+
# Note: This setting may not have an effect on non-macOS systems
|
56 |
export VECLIB_MAXIMUM_THREADS=24
|
57 |
+
# Set the number of threads for Intel Math Kernel Library (MKL)
|
58 |
+
# MKL is used for optimized mathematical operations, especially in NumPy
|
59 |
+
# This can significantly impact performance of linear algebra operations
|
60 |
export MKL_NUM_THREADS=24
|
61 |
+
# Set the number of threads for OpenMP
|
62 |
+
# OpenMP is used for parallel programming in C, C++, and Fortran
|
63 |
+
# This affects the performance of libraries and applications using OpenMP
|
64 |
export OMP_NUM_THREADS=24
|
65 |
|
66 |
+
# Disable parallelism for the Hugging Face Tokenizers library
|
67 |
+
# This can help prevent potential deadlocks or race conditions in multi-threaded environments
|
68 |
+
# It's particularly useful when using tokenizers in conjunction with DataLoader in PyTorch
|
69 |
+
# Setting this to false ensures more predictable behavior, especially in production environments
|
70 |
+
# However, it may slightly reduce performance in some scenarios where parallel tokenization is beneficial
|
71 |
export TOKENIZERS_PARALLELISM=false
|
72 |
|
73 |
+
# Source the Oh My Zsh script
|
74 |
+
# This line loads Oh My Zsh, a popular framework for managing Zsh configuration
|
75 |
+
# It sets up various features like themes, plugins, and custom functions
|
76 |
+
# The $ZSH variable should be set to the installation directory of Oh My Zsh
|
77 |
+
# This is typically done earlier in the .zshrc file, often as: export ZSH="$HOME/.oh-my-zsh"
|
78 |
+
# After sourcing, all Oh My Zsh functionality becomes available in your shell session
|
79 |
source $ZSH/oh-my-zsh.sh
|
80 |
|
81 |
+
# Extend the system PATH to include various directories:
|
82 |
+
# - Custom dataset tools in the user's repository
|
83 |
+
# - Rust's Cargo binary directory
|
84 |
+
# - Miniconda3 binary directory
|
85 |
+
# - User's toolkit directory
|
86 |
+
# - Redis and PostgreSQL binary directories
|
87 |
+
# - User's local bin directory
|
88 |
+
# - CUDA binary directory
|
89 |
export PATH=$PATH:$HOME/source/repos/dataset-tools/target/x86_64-unknown-linux-gnu/release:$HOME/.cargo/bin:$HOME/miniconda3/bin:$HOME/toolkit:$HOME/db/redis-stable/src:$HOME/db/postgresql/bin:$HOME/.local/bin:/opt/cuda/bin
|
90 |
+
|
91 |
+
# Extend the LD_LIBRARY_PATH to include:
|
92 |
+
# - Conda environment's library directory
|
93 |
+
# - CUDA library directory for x86_64 Linux
|
94 |
+
# This ensures that dynamically linked libraries in these locations can be found at runtime
|
95 |
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CONDA_PREFIX/lib:/opt/cuda/targets/x86_64-linux/lib
|
96 |
+
|
97 |
+
# Set the path for ComfyUI models
|
98 |
+
# This environment variable likely tells ComfyUI where to look for AI models
|
99 |
export COMFYUI_MODEL_PATH=/home/kade/ComfyUI/models
|
100 |
|
101 |
+
# Use the UPX executable compression tool from the local bin directory
|
102 |
alias upx='/home/kade/.local/bin/upx'
|
103 |
+
# Always display colorized output for the 'ls' command
|
104 |
alias ls='ls --color=always'
|
105 |
+
# List all files in long format, including hidden files, with human-readable sizes and colorized output
|
106 |
alias ll="ls -lah --color=always"
|
107 |
+
# Use the 'reflink' option for 'cp' to enable copy-on-write when possible, improving efficiency
|
108 |
alias cp='cp --reflink=auto'
|
109 |
+
# Launch TensorBoard with the log directory set to the user's output_dir/logs
|
110 |
alias t="tensorboard --logdir=$HOME/output_dir/logs"
|
111 |
+
# Edit tmux configuration, display a message, and reload the tmux configuration
|
112 |
alias rt="vim ~/.tmux.conf && echo \"Reloading tmux config\" && tmux source ~/.tmux.conf"
|
113 |
+
# Edit zsh configuration, display a message, and reload the zsh configuration
|
114 |
alias zr="vim ~/.zshrc && echo \"Reloading zsh config\" && source ~/.zshrc"
|
115 |
|
116 |
# The kanji 接 (せつ) [setsu] means "touch," "contact," "adjoin," or "piece together."
|
|
|
121 |
# 2. Type "setsu" (せつ) in hiragana.
|
122 |
# 3. Press the spacebar to convert it to the kanji 接.
|
123 |
alias 接="tmux attach"
|
124 |
+
# Alias for attaching to an existing tmux session
|
125 |
+
# 'ta' is a shorthand for 'tmux attach'
|
126 |
alias ta="tmux attach"
|
127 |
|
128 |
+
# Alias for adding all changes, committing with a signed verbose message, and pushing to remote
|
129 |
alias ga="git add . && git commit -avs && git push"
|
130 |
+
# Alias for checking the current status of the git repository
|
131 |
alias gs="git status"
|
132 |
+
# Alias for displaying word-level differences in git, using a custom regex and the patience algorithm
|
133 |
alias wd="git diff --word-diff-regex='[^,]+' --patience"
|
134 |
+
# Alias for using Neovim instead of Vim
|
135 |
alias vim="nvim"
|
136 |
+
# Another alias for using Neovim instead of Vim
|
137 |
alias vi="nvim"
|
138 |
+
# Short alias for quickly opening Neovim
|
139 |
alias v="nvim"
|
140 |
+
# Alias for resetting the git repository to the last commit, discarding all changes
|
141 |
alias grh='git reset --hard'
|
142 |
+
# Alias for cloning a git repository including all its submodules
|
143 |
alias gcs='git clone --recurse-submodules'
|
144 |
+
# Alias for running the Grabber-cli command
|
145 |
alias grabber="Grabber-cli"
|
146 |
|
147 |
# 'pie' is a shortcut for installing a Python package in editable mode
|
148 |
# using the pip command with the --use-pep517 option.
|
149 |
alias pie='pip install -e . --use-pep517'
|
150 |
+
# Alias for creating a signed, verbose git commit
|
151 |
alias gc="git commit -avs --verbose"
|
152 |
+
# Alias for displaying directory contents with colorized output
|
153 |
alias dir="dir --color=always"
|
154 |
|
155 |
+
# Alias for quickly editing and reloading the zsh configuration file
|
156 |
alias ezc="nvim ~/.zshrc && source ~/.zshrc"
|
157 |
|
158 |
+
# Source the broot launcher script for enhanced file navigation
|
159 |
source /home/kade/.config/broot/launcher/bash/br
|
160 |
|
161 |
+
# Source the fzf (Fuzzy Finder) configuration for zsh if it exists
|
162 |
+
# This enables fzf functionality in the shell, including keybindings and auto-completion
|
163 |
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
|
164 |
|
165 |
# >>> conda initialize >>>
|
|
|
192 |
}
|
193 |
display_git_help
|
194 |
|
195 |
+
# Function: re (Resize SDXL LoRA)
|
196 |
+
# Description:
|
197 |
+
# This function resizes an SDXL LoRA (Low-Rank Adaptation) model using the resize_lora.py script.
|
198 |
+
# It applies a specific resizing recipe to reduce the size of the LoRA while maintaining its effectiveness.
|
199 |
+
#
|
200 |
+
# Usage:
|
201 |
+
# re <target_file>
|
202 |
+
#
|
203 |
+
# Parameters:
|
204 |
+
# $1 (target_file): Path to the input LoRA safetensors file to be resized.
|
205 |
+
#
|
206 |
+
# Actions:
|
207 |
+
# 1. Calls the resize_lora.py script with the following arguments:
|
208 |
+
# - Verbose output (-vv)
|
209 |
+
# - Custom resizing recipe (-r fro_ckpt=1,thr=-3.55)
|
210 |
+
# - Path to the SDXL checkpoint file (ponyDiffusionV6XL_v6StartWithThisOne.safetensors)
|
211 |
+
# - Path to the input LoRA file
|
212 |
+
#
|
213 |
+
# Recipe Explanation:
|
214 |
+
# - fro_ckpt=1: Uses the Frobenius norm of the checkpoint layer as the score metric
|
215 |
+
# - thr=-3.55: Sets a threshold for singular values at 10^-3.55 ≈ 0.000282 times the reference
|
216 |
+
#
|
217 |
+
# Notes:
|
218 |
+
# - This function assumes the resize_lora.py script is located at ~/source/repos/resize_lora/
|
219 |
+
# - The SDXL checkpoint file is expected to be in ~/ComfyUI/models/checkpoints/
|
220 |
+
# - Output will be verbose (-vv) for detailed information during the resizing process
|
221 |
+
# - The resized LoRA will be saved in the same directory as the script by default
|
222 |
+
function re() {
|
223 |
+
target_file="$1"
|
224 |
+
python ~/source/repos/resize_lora/resize_lora.py -vv -r fro_ckpt=1,thr=-3.55 ~/ComfyUI/models/checkpoints/ponyDiffusionV6XL_v6StartWithThisOne.safetensors "$target_file"
|
225 |
+
}
|
226 |
+
|
227 |
+
# This function takes two arguments: a tag and a directory.
|
228 |
# It searches for all .txt files in the specified directory and its subdirectories.
|
229 |
# If a file contains the specified tag, the function removes the tag from its original position
|
230 |
# and prepends it to the beginning of the file.
|
|
|
276 |
echo "All environments have been upgraded."
|
277 |
}
|
278 |
|
279 |
+
# Function: list_word_freqs
|
280 |
+
# Description:
|
281 |
+
# This function analyzes text files in a specified directory and lists the most frequent words (tags).
|
282 |
+
#
|
283 |
+
# Usage:
|
284 |
+
# list_word_freqs <target_directory>
|
285 |
+
#
|
286 |
+
# Parameters:
|
287 |
+
# - target_directory: The directory containing the text files to analyze.
|
288 |
+
#
|
289 |
+
# Functionality:
|
290 |
+
# 1. Combines all .txt files in the target directory into a single temporary file.
|
291 |
+
# 2. Uses awk to process the combined file:
|
292 |
+
# - Ignores common words like "a", "the", "and", etc.
|
293 |
+
# - Converts all words to lowercase and removes non-alphabetic characters.
|
294 |
+
# - Counts the frequency of each word.
|
295 |
+
# 3. Sorts the words by frequency in descending order.
|
296 |
+
# 4. Displays the top 40 most frequent words along with their occurrence count.
|
297 |
+
#
|
298 |
+
# Output:
|
299 |
+
# Prints a list of the 40 most frequent words in the format: <frequency> <word>
|
300 |
+
#
|
301 |
+
# Note:
|
302 |
+
# - This function is useful for analyzing tag frequencies in image caption files or similar text-based datasets.
|
303 |
+
# - The list of ignored words can be modified to suit specific needs.
|
304 |
+
# - The function creates a temporary file which is automatically removed after processing.
|
305 |
list_word_freqs() {
|
306 |
local target_dir=$1
|
307 |
if [[ -z "$target_dir" ]]; then
|
|
|
350 |
rm "$combined_file"
|
351 |
}
|
352 |
|
353 |
+
# Function: sample_prompts
|
354 |
+
# Description:
|
355 |
+
# This function takes a sample of the tags (or captions) in a target training directory.
|
356 |
+
# It reads and displays the contents of all .txt files in the specified directory,
|
357 |
+
# providing a quick overview of the tags or captions used for training.
|
358 |
+
#
|
359 |
+
# Usage: sample_prompts <target_directory>
|
360 |
+
#
|
361 |
+
# Parameters:
|
362 |
+
# - target_directory: The directory containing the .txt files with tags or captions.
|
363 |
+
#
|
364 |
+
# Output:
|
365 |
+
# Prints the contents of each .txt file in the target directory, separated by newlines.
|
366 |
sample_prompts() {
|
367 |
local target_directory="$1"
|
368 |
|
|
|
407 |
done
|
408 |
}
|
409 |
|
410 |
+
# Function: display_custom_help
|
411 |
+
# Description:
|
412 |
+
# This function displays a custom help menu with various commands, environment information,
|
413 |
+
# and useful instructions for different tools and services. It provides a quick reference
|
414 |
+
# for commonly used commands, LLM setups, taggers, captioners, and database configurations.
|
415 |
display_custom_help() {
|
416 |
echo "----------------------------------------------------------------------------------------------------------------------"
|
417 |
printf "%s\n" "$(conda env list)"
|
|
|
579 |
open-webui serve --port 6969
|
580 |
}
|
581 |
|
|
|
|
|
|
|
|
|
582 |
# Function to copy matching .caption files
|
583 |
copy_matching_caption_files() {
|
584 |
# Define the target directory
|
|
|
687 |
fi
|
688 |
}
|
689 |
|
690 |
+
# Function: chop_lora
|
691 |
+
# Description:
|
692 |
+
# This function processes a LoRA (Low-Rank Adaptation) model file by selectively
|
693 |
+
# keeping or removing specific layers based on predefined presets. It uses the
|
694 |
+
# chop_blocks.py script to perform the actual layer manipulation.
|
695 |
+
#
|
696 |
+
# Usage:
|
697 |
+
# chop_lora <input_file>
|
698 |
+
#
|
699 |
+
# Parameters:
|
700 |
+
# $1 - The input LoRA model file (typically a .safetensors file)
|
701 |
+
#
|
702 |
+
# Presets:
|
703 |
+
# The function defines several presets, each represented by a 21-digit binary string:
|
704 |
+
# - ringdingding: This vector string was used for the Stoat LoRA.
|
705 |
+
# - squeaker: I really have no idea what this is.
|
706 |
+
# - heavylifter: Keeps only one specific layer that seems to learn the most.
|
707 |
+
# - style1 and style2: Different configurations for style transfer
|
708 |
+
# - beeg: A configuration that keeps only the largest layers.
|
709 |
+
# - all: Keeps all layers
|
710 |
+
# - allin: Keeps only the input layers
|
711 |
+
# - allmid: Keeps only the middle layers
|
712 |
+
# - allout: Keeps only the output layers
|
713 |
+
#
|
714 |
+
# Actions:
|
715 |
+
# 1. Extracts the base name of the input file (without extension)
|
716 |
+
# 2. Iterates through each preset
|
717 |
+
# 3. For each preset, generates an output filename and runs the chop_blocks.py script
|
718 |
+
# 4. The script creates a new LoRA file with only the specified layers retained
|
719 |
+
#
|
720 |
+
# Output:
|
721 |
+
# Creates multiple output files, one for each preset, named as:
|
722 |
+
# "<base_name>-<preset_name>.safetensors"
|
723 |
+
#
|
724 |
+
# Notes:
|
725 |
+
# - Requires the chop_blocks.py script to be located at ~/source/repos/resize_lora/chop_blocks.py
|
726 |
+
# - The binary strings represent which layers to keep (1) or remove (0)
|
727 |
+
# - This function allows for quick generation of multiple variants of a LoRA model,
|
728 |
+
# each emphasizing different aspects or effects
|
729 |
chop_lora() {
|
730 |
local input_file="$1"
|
731 |
local base_name="${input_file:r}" # Remove extension
|
|
|
752 |
done
|
753 |
}
|
754 |
|
755 |
+
# Function: swch (Switch Git Branch)
|
756 |
+
# Description:
|
757 |
+
# This function facilitates switching between Git branches while ensuring a clean working directory.
|
758 |
+
#
|
759 |
+
# Usage:
|
760 |
+
# swch <branch_name>
|
761 |
+
#
|
762 |
+
# Parameters:
|
763 |
+
# $1 - The name of the branch to switch to.
|
764 |
+
#
|
765 |
+
# Actions:
|
766 |
+
# 1. Checks if a branch name is provided.
|
767 |
+
# 2. Cleans the working directory, removing untracked files and directories.
|
768 |
+
# 3. Pulls the latest changes from the remote repository.
|
769 |
+
# 4. Checks out the specified branch.
|
770 |
+
#
|
771 |
+
# Notes:
|
772 |
+
# - Use with caution as 'git clean -fxd' will remove all untracked files and directories.
|
773 |
+
# - Ensure all important changes are committed or stashed before using this function.
|
774 |
swch() {
|
775 |
if [ -z "$1" ]; then
|
776 |
echo "Please provide a branch name."
|
|
|
780 |
git clean -fxd && git pull && git checkout $branchname
|
781 |
}
|
782 |
|
783 |
+
# Function: extract_iframes
|
784 |
+
# Description:
|
785 |
+
# This function extracts I-frames from a video file using ffmpeg.
|
786 |
+
#
|
787 |
+
# Usage:
|
788 |
+
# extract_iframes <input_file> [<scene_change_fraction>]
|
789 |
+
#
|
790 |
+
# Parameters:
|
791 |
+
# $1 - The input video file (required)
|
792 |
+
# $2 - The scene change fraction threshold (optional, default: 0.1)
|
793 |
+
#
|
794 |
+
# Actions:
|
795 |
+
# 1. Assigns input arguments to variables
|
796 |
+
# 2. Extracts the base filename without extension
|
797 |
+
# 3. Runs ffmpeg to extract I-frames based on the scene change threshold
|
798 |
+
# 4. Saves extracted frames as PNG files with sequential numbering
|
799 |
+
#
|
800 |
+
# Notes:
|
801 |
+
# - Requires ffmpeg to be installed and accessible via /usr/bin/ffmpeg
|
802 |
+
# - Output files will be named as "<base_name>-XXXXXX.png" in the current directory
|
803 |
extract_iframes() {
|
804 |
# Assign input arguments
|
805 |
input_file="$1"
|
|
|
812 |
/usr/bin/ffmpeg -i "$input_file" -f image2 -vf "select=eq(pict_type\,PICT_TYPE_I)*gt(scene\,$scene_change_fraction),showinfo" -fps_mode vfr "${base_name}-%06d.png"
|
813 |
}
|
814 |
|
815 |
+
# Function: seed
|
816 |
+
# Description:
|
817 |
+
# This function extracts the seed value from a LoRA (Low-Rank Adaptation) model's metadata.
|
818 |
+
#
|
819 |
+
# Usage:
|
820 |
+
# seed <file_path>
|
821 |
+
#
|
822 |
+
# Parameters:
|
823 |
+
# $1 - The path to the LoRA model file (usually a .safetensors file)
|
824 |
+
#
|
825 |
+
# Actions:
|
826 |
+
# 1. Takes the file path as an argument
|
827 |
+
# 2. Uses Python to read the safetensors file
|
828 |
+
# 3. Extracts the metadata from the file
|
829 |
+
# 4. Attempts to retrieve the 'ss_seed' value from the metadata
|
830 |
+
# 5. Prints the seed value if found, or 'Not found' if not present
|
831 |
+
#
|
832 |
+
# Notes:
|
833 |
+
# - Requires Python 3 with the 'safetensors' module installed
|
834 |
+
# - The seed is typically used to reproduce the exact training conditions of the LoRA
|
835 |
+
# - If the seed is not found, it may indicate the LoRA was created without recording this information
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
836 |
seed() {
|
837 |
local filePath="$1"
|
838 |
python3 -c "
|
|
|
840 |
filePath = '$filePath'
|
841 |
print(json.loads(safetensors.safe_open(filePath, 'np').metadata().get('ss_seed', 'Not found')))"
|
842 |
}
|
843 |
+
# Function: png2mp4
|
844 |
+
# Description:
|
845 |
+
# This function converts a series of PNG images into an MP4 video file using ffmpeg.
|
846 |
+
#
|
847 |
+
# Usage:
|
848 |
+
# png2mp4
|
849 |
+
#
|
850 |
+
# Parameters:
|
851 |
+
# None (uses all PNG files in the current directory)
|
852 |
+
#
|
853 |
+
# Actions:
|
854 |
+
# 1. Sets the frame rate to 8 fps
|
855 |
+
# 2. Uses glob pattern to include all PNG files in the current directory
|
856 |
+
# 3. Scales the output video to 1024x1024 resolution
|
857 |
+
# 4. Sets the Constant Rate Factor (CRF) to 28 for good compression
|
858 |
+
# 5. Uses the libx264 codec for H.264 encoding
|
859 |
+
# 6. Sets the pixel format to yuv420p for compatibility
|
860 |
+
# 7. Outputs the result as 'out.mp4' in the current directory
|
861 |
+
#
|
862 |
+
# Notes:
|
863 |
+
# - Requires ffmpeg to be installed and accessible in the system path
|
864 |
+
# - PNG files should be in the current directory
|
865 |
+
# - Output video will be named 'out.mp4' and placed in the current directory
|
866 |
+
# - Adjust the framerate, scale, or CRF value as needed for different results
|
867 |
png2mp4() {
|
868 |
+
ffmpeg -framerate 8 -pattern_type glob -i '*.png' -vf scale=1024x1024 -crf 28 \
|
869 |
-c:v libx264 -pix_fmt yuv420p out.mp4
|
870 |
}
|
871 |
|
872 |
+
# Function: c
|
873 |
+
# Description:
|
874 |
+
# This function launches ComfyUI with specific settings tailored to the user's preferences.
|
875 |
+
#
|
876 |
+
# Usage:
|
877 |
+
# c
|
878 |
+
#
|
879 |
+
# Actions:
|
880 |
+
# 1. Changes directory to ~/ComfyUI
|
881 |
+
# 2. Activates the 'comfyui' conda environment
|
882 |
+
# 3. Launches ComfyUI with the following settings:
|
883 |
+
# - Listens on all network interfaces (0.0.0.0)
|
884 |
+
# - Uses 'taesd' as the preview method
|
885 |
+
# - Enables PyTorch cross-attention
|
886 |
+
# - Disables xformers
|
887 |
+
# - Uses the latest version of Comfy-Org/ComfyUI_frontend
|
888 |
+
# - Enables fast mode
|
889 |
+
#
|
890 |
+
# Parameters:
|
891 |
+
# None
|
892 |
+
#
|
893 |
+
# Notes:
|
894 |
+
# - Requires ComfyUI to be installed in ~/ComfyUI
|
895 |
+
# - Requires a conda environment named 'comfyui' with necessary dependencies
|
896 |
+
# - The --listen 0.0.0.0 option allows access from other devices on the network
|
897 |
+
# - --preview-method taesd provides better previews
|
898 |
+
# - --use-pytorch-cross-attention and --disable-xformers affect performance and compatibility
|
899 |
+
# - --front-end-version ensures the latest UI is used
|
900 |
+
# - --fast option may improve overall performance
|
901 |
c() {
|
902 |
cd ~/ComfyUI &&
|
903 |
conda activate comfyui
|
904 |
python main.py --listen 0.0.0.0 --preview-method taesd --use-pytorch-cross-attention --disable-xformers --front-end-version Comfy-Org/ComfyUI_frontend@latest --fast
|
905 |
}
|
906 |
|
907 |
+
# Function: conda_prompt_info
|
908 |
+
# Description:
|
909 |
+
# This function displays information about the currently active Conda environment.
|
910 |
+
#
|
911 |
+
# Usage:
|
912 |
+
# conda_prompt_info
|
913 |
+
#
|
914 |
+
# Returns:
|
915 |
+
# A string containing the name of the active Conda environment, enclosed in parentheses.
|
916 |
+
# If no Conda environment is active, it returns an empty string.
|
917 |
+
#
|
918 |
+
# Details:
|
919 |
+
# 1. Checks if the CONDA_DEFAULT_ENV environment variable is set and non-empty.
|
920 |
+
# 2. If CONDA_DEFAULT_ENV is set, it echoes the environment name in parentheses.
|
921 |
+
# 3. If CONDA_DEFAULT_ENV is not set or empty, the function returns silently.
|
922 |
+
#
|
923 |
+
# Example output:
|
924 |
+
# If CONDA_DEFAULT_ENV is set to "myenv", the function will output: (myenv)
|
925 |
+
#
|
926 |
+
# Notes:
|
927 |
+
# - This function is typically used in command prompts or shell scripts to
|
928 |
+
# visually indicate the active Conda environment to the user.
|
929 |
+
# - It can be incorporated into PS1 or other prompt variables to automatically
|
930 |
+
# display the Conda environment in the shell prompt.
|
931 |
conda_prompt_info() {
|
932 |
if [[ -n "$CONDA_DEFAULT_ENV" ]]; then
|
933 |
echo "(${CONDA_DEFAULT_ENV})"
|
934 |
fi
|
935 |
}
|
936 |
|
937 |
+
# Function: display_custom_help
|
938 |
+
# Description:
|
939 |
+
# This function displays custom help information for user-defined functions and aliases.
|
940 |
+
# It provides a quick reference for commonly used commands and their descriptions.
|
941 |
+
#
|
942 |
+
# Usage:
|
943 |
+
# display_custom_help
|
944 |
+
#
|
945 |
+
# Output:
|
946 |
+
# Prints a formatted list of custom commands and their brief descriptions.
|
947 |
+
#
|
948 |
+
# Note:
|
949 |
+
# Add or modify entries in this function to keep your personal command reference up-to-date.
|
950 |
display_custom_help
|
password
CHANGED
@@ -1,14 +1,26 @@
|
|
1 |
#!/usr/bin/env python
|
2 |
# -*- coding: utf-8 -*-
|
3 |
|
|
|
|
|
|
|
|
|
4 |
import random
|
5 |
import string
|
6 |
|
|
|
7 |
def generate_password(length=16):
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
characters = string.ascii_letters + string.digits + string.punctuation
|
9 |
password = ''.join(random.choice(characters) for _ in range(length))
|
10 |
return password
|
11 |
|
|
|
12 |
# Generate a strong 16-character long password
|
13 |
-
|
14 |
-
print(
|
|
|
1 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Generate a random strong password with 16 characters.
"""

import secrets
import string


def generate_password(length=16):
    """
    Generate a random password with the given length.

    Uses the ``secrets`` module rather than ``random``: ``random`` is a
    deterministic PRNG and is documented as unsuitable for security-sensitive
    values such as passwords.

    :param length: Length of the password to generate (default is 16)
    :return: A randomly generated password string drawn from ASCII letters,
             digits, and punctuation
    """
    characters = string.ascii_letters + string.digits + string.punctuation
    # secrets.choice draws from the OS CSPRNG, unlike random.choice.
    password = ''.join(secrets.choice(characters) for _ in range(length))
    return password


# Generate a strong 16-character long password
STRONG_PASSWORD = generate_password()
print(STRONG_PASSWORD)
|
steal_sdscripts_metadata
ADDED
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This script automates the process of updating a Stable Diffusion training
script with settings extracted from a LoRA model's JSON metadata.

It performs the following main tasks:
1. Reads a JSON file containing LoRA model metadata
2. Parses an existing Stable Diffusion training script
3. Maps metadata keys to corresponding script arguments
4. Updates the script with values from the metadata
5. Handles special cases and complex arguments (e.g., network_args)
6. Writes the updated script to a new file

Usage:
    python steal_sdscripts_metadata <metadata_file> <script_file> <output_file>

This tool is particularly useful for replicating training conditions or
fine-tuning existing models based on successful previous runs.
"""

import argparse
import json
import re

# Mapping between LoRA metadata JSON keys and training-script CLI arguments.
MAPPINGS = {
    'ss_network_dim': '--network_dim',
    'ss_network_alpha': '--network_alpha',
    'ss_learning_rate': '--learning_rate',
    'ss_unet_lr': '--unet_lr',
    'ss_text_encoder_lr': '--text_encoder_lr',
    'ss_max_train_steps': '--max_train_steps',
    'ss_train_batch_size': '--train_batch_size',
    'ss_gradient_accumulation_steps': '--gradient_accumulation_steps',
    'ss_mixed_precision': '--mixed_precision',
    'ss_seed': '--seed',
    'ss_resolution': '--resolution',
    'ss_clip_skip': '--clip_skip',
    'ss_lr_scheduler': '--lr_scheduler',
    'ss_network_module': '--network_module',
}


def _format_value(json_key, value):
    """Render a metadata value as it should appear after '=' in the script."""
    if json_key == 'ss_resolution':
        # NOTE(review): assumes the value looks like "(1024, 1024)" — strip the
        # outer parentheses and quote the rest. TODO confirm against metadata.
        return f'"{value[1:-1]}"'
    if isinstance(value, str):
        return f'"{value}"'
    return value


def update_script_content(metadata, script_content):
    """
    Return a copy of ``script_content`` with argument values replaced (or
    inserted) from ``metadata``.

    :param metadata: dict of LoRA metadata (ss_* keys)
    :param script_content: full text of the training script
    :return: updated script text
    """
    for json_key, script_arg in MAPPINGS.items():
        if json_key not in metadata:
            continue
        value = _format_value(json_key, metadata[json_key])
        # Escape the argument name: it is interpolated into a regex pattern.
        pattern = re.escape(script_arg) + r'=\S+'
        replacement = f'{script_arg}={value}'
        if re.search(pattern, script_content):
            # Use a callable replacement so backslashes in the value are taken
            # literally instead of being parsed as group references by re.sub.
            script_content = re.sub(
                pattern, lambda _m, r=replacement: r, script_content
            )
        else:
            # Argument not present yet: insert it right after the args=( opener.
            script_content = script_content.replace(
                'args=(', f'args=(\n    {replacement}'
            )

    # Handle network_args separately as it's more complex.
    if 'ss_network_args' in metadata:
        network_args = metadata['ss_network_args']
        if isinstance(network_args, str):
            # Metadata sometimes stores this field as a JSON-encoded string
            # rather than a dict; decode it so .items() works either way.
            network_args = json.loads(network_args)
        network_args_str = ' '.join(
            f'"{k}={v}"' for k, v in network_args.items()
        )
        pattern = r'--network_args(\s+".+")+'
        replacement = f'--network_args\n    {network_args_str}'
        script_content = re.sub(
            pattern, lambda _m, r=replacement: r, script_content
        )

    return script_content


def main(argv=None):
    """
    CLI entry point: read metadata and script, write the updated script.

    :param argv: argument list for argparse (defaults to sys.argv[1:])
    """
    parser = argparse.ArgumentParser(
        description='Update training script based on metadata.'
    )
    parser.add_argument(
        'metadata_file', type=str, help='Path to the metadata JSON file'
    )
    parser.add_argument(
        'script_file', type=str, help='Path to the training script file'
    )
    parser.add_argument(
        'output_file', type=str, help='Path to save the updated training script'
    )
    args = parser.parse_args(argv)

    # Read the metadata JSON file.
    with open(args.metadata_file, 'r', encoding='utf-8') as f:
        metadata = json.load(f)

    # Read the training script.
    with open(args.script_file, 'r', encoding='utf-8') as f:
        script_content = f.read()

    # Write the updated script.
    with open(args.output_file, 'w', encoding='utf-8') as f:
        f.write(update_script_content(metadata, script_content))

    print(f"Updated training script has been saved as '{args.output_file}'")


if __name__ == '__main__':
    main()
|