K00B404 commited on
Commit
92b02bc
·
verified ·
1 Parent(s): e3d5bbb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +75 -169
app.py CHANGED
@@ -6,209 +6,109 @@ from huggingface_hub import AsyncInferenceClient
6
  from translatepy import Translator
7
  from gradio_client import Client, handle_file
8
  from PIL import Image
9
- from lora_saver import main as backup_loras
10
-
11
- os.system('quent_models.py')
12
-
13
- from loras import loras
14
- from huggingface_hub import login
15
- from themes import IndonesiaTheme # Import custom IndonesiaTheme
16
- from lorify import Lorify
17
- from css import css2
18
MAX_SEED = np.iinfo(np.int32).max  # upper bound for randomly drawn seeds (2**31 - 1)

# API tokens read from the environment; the upscaler deliberately reuses
# the same HF_TOKEN variable.
HF_TOKEN = os.getenv('HF_TOKEN')
HF_TOKEN_UPSCALER = os.getenv('HF_TOKEN')

# Client for the K00B404/HugChatWrap Space.
# NOTE(review): qwen_client is created here but never used in the visible
# code — confirm it is needed before keeping it.
qwen_client = Client("K00B404/HugChatWrap",hf_token=HF_TOKEN)
loaded_loras=[]  # only populated by the commented-out loader below, so currently always empty
26
- import os
27
- from pathlib import Path
28
-
29
-
30
def load_local_loras(lora_directory="lora_models"):
    """Scan *lora_directory* for ``*.safetensors`` files.

    Returns one config dict per weight file with keys ``name`` (file
    stem), ``path`` (absolute path) and ``type`` (always ``"local"``).
    A missing directory yields an empty list after printing a warning.
    """
    if not os.path.exists(lora_directory):
        print(f"[-] Warning: Lora directory {lora_directory} not found")
        return []

    # One entry per safetensors file found directly in the directory.
    return [
        {
            "name": weight_file.stem,
            "path": str(weight_file.absolute()),
            "type": "local",
        }
        for weight_file in Path(lora_directory).glob("*.safetensors")
    ]
61
 
62
-
63
# NOTE(review): commented-out legacy code kept as a triple-quoted string;
# it appended repo ids from `loras` and used a simpler enable_lora().
# Consider deleting it outright rather than keeping dead code.
'''for lora in loras:
    print(lora.get('repo'))
    loaded_loras.append(lora.get('repo'))

# Function to enable LoRA if selected
def enable_lora(lora_add, basemodel):
    print(f"[-] Determining model: LoRA {'enabled' if lora_add else 'disabled'}, base model: {basemodel}")
    return basemodel if not lora_add else lora_add
'''

# Build the LoRA catalogue from local safetensors files.
loras_list_custom = load_local_loras()

# Dropdown listing the absolute path of every discovered local LoRA.
# NOTE(review): created at module level, outside the gr.Blocks() context
# used later — confirm Gradio attaches this component as intended.
lora_model_choice = gr.Dropdown(
    label="🎨 Select LoRA",
    choices=[lora["path"] for lora in loras_list_custom],
    value=loras_list_custom[0]["path"] if loras_list_custom else None
)
83
-
84
# Update the enable_lora function to handle local files
def enable_lora(lora_path, basemodel):
    """Pick the model spec for generation.

    With a falsy *lora_path* the plain base-model name is returned;
    otherwise a dict bundling the base model with the local safetensors
    file at a fixed scale of 1.0.
    """
    print(f"[-] Determining model: LoRA {'enabled' if lora_path else 'disabled'}, base model: {basemodel}")
    if not lora_path:
        return basemodel
    # Bundle base model and the local safetensor weights together.
    lora_spec = {"path": lora_path, "scale": 1.0}
    return {"base_model": basemodel, "lora": lora_spec}
98
 
99
-
100
-
101
-
102
-
103
-
104
-
105
# Function to generate image
async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
    """Translate the prompt to English and run text-to-image inference.

    Returns (image, seed_used) on success or (None, None) on any failure.
    """
    try:
        # -1 means "random": draw a fresh seed within the 32-bit range.
        if seed == -1:
            seed = random.randint(0, MAX_SEED)
        seed = int(seed)

        # Log message is Indonesian: "Translating prompt".
        print(f"[-] Menerjemahkan prompt: {prompt}")
        # Prompts may arrive in any language; translatepy converts them to
        # English, then the LoRA trigger word is appended.
        text = str(Translator().translate(prompt, 'English')) + "," + lora_word

        print(f"[-] Generating image with prompt: {text}, model: {model}")
        # NOTE(review): no token is passed here, so the inference request
        # runs unauthenticated — confirm whether HF_TOKEN should be used.
        client = AsyncInferenceClient()
        image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
        return image, seed
    except Exception as e:
        # Broad catch: any failure is logged and reported as (None, None)
        # so the caller can degrade gracefully.
        print(f"[-] Error generating image: {e}")
        return None, None
122
 
123
# Function to upscale image
def get_upscale_finegrain(prompt, img_path, upscale_factor):
    """Upscale *img_path* through the finegrain image-enhancer Space.

    Returns the path of the upscaled image, or None on any failure.
    """
    try:
        # Log: "Starting upscaling with factor ... for image ..." (Indonesian).
        print(f"[-] Memulai proses upscaling dengan faktor {upscale_factor} untuk gambar {img_path}")
        client = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN)
        result = client.predict(
            input_image=handle_file(img_path),
            prompt=prompt,
            negative_prompt="worst quality, low quality, normal quality",
            upscale_factor=upscale_factor,
            controlnet_scale=0.6,
            controlnet_decay=1,
            condition_scale=6,
            denoise_strength=0.35,
            num_inference_steps=18,
            solver="DDIM",
            api_name="/process"
        )
        # Log: "Upscaling succeeded." (Indonesian).
        print(f"[-] Proses upscaling berhasil.")
        return result[1] # Return upscale image path
    except Exception as e:
        # Best-effort: the caller treats None as "upscaling unavailable".
        print(f"[-] Error scaling image: {e}")
        return None
146
-
147
# Main function to generate images and optionally upscale
async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
    """Orchestrate a full request: optional LoRA, generation, optional upscale.

    Returns a list of image paths for the gallery — [generated] or
    [generated, upscaled] — or an empty list when generation fails.
    """
    # Log: "Starting image generation with prompt" (Indonesian).
    print(f"[-] Memulai generasi gambar dengan prompt: {prompt}")

    # Swap in the LoRA-augmented model spec only when the checkbox is set.
    model = enable_lora(lora_model, basemodel) if process_lora else basemodel
    print(f"[-] Menggunakan model: {model}")

    image, seed = await generate_image(prompt, model, "", width, height, scales, steps, seed)

    if image is None:
        print("[-] Image generation failed.")
        return []

    # Stage the result to a fixed temp file so the upscaler can read it.
    # NOTE(review): a fixed filename is race-prone under concurrent requests.
    image_path = "temp_image.jpg"
    print(f"[-] Menyimpan gambar sementara di: {image_path}")
    image.save(image_path, format="JPEG")

    upscale_image_path = None
    if process_upscale:
        print(f"[-] Memproses upscaling dengan faktor: {upscale_factor}")
        upscale_image_path = get_upscale_finegrain(prompt, image_path, upscale_factor)
        # Only trust the upscale result if the file actually exists on disk.
        if upscale_image_path is not None and os.path.exists(upscale_image_path):
            print(f"[-] Proses upscaling selesai. Gambar tersimpan di: {upscale_image_path}")
            return [image_path, upscale_image_path] # Return both images
        else:
            print("[-] Upscaling gagal, jalur gambar upscale tidak ditemukan.")

    return [image_path]
175
-
176
# Base diffusion checkpoints selectable in the UI.
base_models=[
    "black-forest-labs/FLUX.1-schnell",
    "black-forest-labs/FLUX.1-DEV",
    "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro",
    #"city96/FLUX.1-dev-gguf"
]

# NOTE(review): this reassignment discards the local-file catalogue built
# earlier by load_local_loras() and replaces it with hard-coded repo ids —
# confirm which source the LoRA dropdown is actually meant to use.
loras_list_custom=[
    "Keltezaa/anal-riding-missionary",
    "Keltezaa/Fingering",
    "Keltezaa/Spreading",
    "Keltezaa/flux-prone-ass-spread-hd",
    "Keltezaa/Flux_P",
    "Shakker-Labs/FLUX.1-dev-LoRA-add-details",
    "XLabs-AI/flux-RealismLora",
]# + loaded_loras # add loras loaded from file

# Persist the LoRA list via lora_saver.main (imported as backup_loras).
backup_loras(loras_list_custom)
194
 
195
  # Creating Gradio interface
196
  with gr.Blocks(css=css2, theme=IndonesiaTheme()) as WallpaperFluxMaker:
197
- # Displaying the application title
198
  gr.HTML('<div id="banner">✨ Flux MultiMode Generator + Upscaler ✨</div>')
199
 
200
  with gr.Column(elem_id="col-container"):
201
- # Output section (replacing ImageSlider with gr.Gallery)
202
  with gr.Row():
203
- output_res = gr.Gallery(label="⚡ Flux / Upscaled Image ⚡", elem_id="output-res", columns=2, height="auto")
 
 
 
 
 
204
 
205
- # User input section split into two columns
206
  with gr.Row():
207
- # Column 1: Input prompt, LoRA, and base model
208
  with gr.Column(scale=1, elem_id="col-left"):
209
  prompt = gr.Textbox(
210
  label="📜 Description",
211
- placeholder="Tuliskan prompt Anda dalam bahasa apapun, yang akan langsung diterjemahkan ke bahasa Inggris.",
212
  elem_id="textbox-prompt"
213
  )
214
 
@@ -218,32 +118,38 @@ with gr.Blocks(css=css2, theme=IndonesiaTheme()) as WallpaperFluxMaker:
218
  value=base_models[0]
219
  )
220
 
 
221
  lora_model_choice = gr.Dropdown(
222
- label="🎨 Select LoRA",
223
- choices=loras_list_custom,
224
- value=loras_list_custom[0]
225
  )
226
 
227
- process_lora = gr.Checkbox(label="🎨 Aktifkan LoRA")
228
- process_upscale = gr.Checkbox(label="🔍 Aktifkan Peningkatan Resolusi")
229
- upscale_factor = gr.Radio(label="🔍 Faktor Peningkatan Resolusi", choices=[2, 4, 8], value=2)
 
 
 
 
230
 
231
- # Column 2: Advanced options (always open)
232
  with gr.Column(scale=1, elem_id="col-right"):
233
- with gr.Accordion(label="⚙️ Opsi Lanjutan", open=True):
234
- width = gr.Slider(label="Lebar", minimum=512, maximum=1280, step=8, value=1280)
235
- height = gr.Slider(label="Tinggi", minimum=512, maximum=1280, step=8, value=768)
236
- scales = gr.Slider(label="Skala", minimum=1, maximum=20, step=1, value=8)
237
- steps = gr.Slider(label="Langkah", minimum=1, maximum=100, step=1, value=8)
238
  seed = gr.Number(label="Seed", value=-1)
239
 
240
- # Button to generate image
241
- btn = gr.Button("🚀 Buat Gambar", elem_id="generate-btn")
242
-
243
- # Running the `gen` function when "Generate" button is pressed
244
- btn.click(fn=gen, inputs=[
245
- prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora
246
- ], outputs=output_res)
 
 
 
247
 
248
- # Launching the Gradio app
249
  WallpaperFluxMaker.queue(api_open=True).launch(show_api=True)
 
6
  from translatepy import Translator
7
  from gradio_client import Client, handle_file
8
  from PIL import Image
9
+ from pathlib import Path
 
 
 
 
 
 
 
 
10
MAX_SEED = np.iinfo(np.int32).max  # upper bound for randomly drawn seeds (2**31 - 1)

# Tokens read from the environment; the upscaler token intentionally
# reuses the same HF_TOKEN variable.
HF_TOKEN = os.getenv('HF_TOKEN')
HF_TOKEN_UPSCALER = os.getenv('HF_TOKEN')
14
 
 
 
 
 
 
 
15
def load_local_loras(lora_directory="lora_models"):
    """Discover LoRA weight files stored locally.

    Scans *lora_directory* for ``*.safetensors`` files and returns one
    config dict per file with keys:

    * ``name`` -- file stem, used as the display name.
    * ``path`` -- absolute path to the safetensors file.

    Returns an empty list (after printing a warning) when the directory
    does not exist.
    """
    loras_list_custom = []

    if not os.path.exists(lora_directory):
        print(f"[-] Warning: Lora directory {lora_directory} not found")
        return []

    # sorted() makes the dropdown ordering deterministic across runs;
    # Path.glob() order is filesystem-dependent.
    lora_files = sorted(Path(lora_directory).glob("*.safetensors"))

    for lora_file in lora_files:
        loras_list_custom.append({
            "name": lora_file.stem,
            "path": str(lora_file.absolute()),
        })

    print(f"[-] Loaded {len(loras_list_custom)} local loras")
    return loras_list_custom
35
 
 
 
 
 
 
36
# Function to enable LoRA if selected
def enable_lora(lora_path, basemodel):
    """Return *basemodel* unchanged when no LoRA is chosen, otherwise a
    config dict (model + lora_weights + lora_scale) consumed downstream."""
    print(f"[-] Determining model: LoRA {'enabled' if lora_path else 'disabled'}, base model: {basemodel}")
    if not lora_path:
        return basemodel

    # Assemble the LoRA-augmented model description.
    config = {"model": basemodel}
    config["lora_weights"] = lora_path
    config["lora_scale"] = 0.75  # fixed default strength
    return config
48
 
 
 
 
 
 
 
49
# Function to generate image
async def generate_image(prompt, model_config, lora_word, width, height, scales, steps, seed):
    """Translate the prompt to English and run text-to-image inference.

    *model_config* is either a plain model id string or the dict built by
    enable_lora(). Returns (image, seed_used), or (None, None) on failure.
    """
    try:
        # -1 means "random": draw a fresh seed within the 32-bit range.
        if seed == -1:
            seed = random.randint(0, MAX_SEED)
        seed = int(seed)

        print(f"[-] Translating prompt: {prompt}")
        text = str(Translator().translate(prompt, 'English')) + "," + lora_word

        print(f"[-] Generating image with prompt: {text}")
        client = AsyncInferenceClient(token=HF_TOKEN)

        # Shared generation parameters; the model/LoRA fields are filled in
        # per-branch below.
        call_kwargs = dict(
            prompt=text,
            height=height,
            width=width,
            guidance_scale=scales,
            num_inference_steps=steps,
        )

        # Handle both simple model string and lora config
        if isinstance(model_config, dict):
            print(f"[-] Using model with LoRA: {model_config}")
            # NOTE(review): lora_weights/lora_scale are forwarded as-is —
            # confirm the inference client actually accepts these kwargs.
            call_kwargs["model"] = model_config["model"]
            call_kwargs["lora_weights"] = model_config["lora_weights"]
            call_kwargs["lora_scale"] = model_config["lora_scale"]
        else:
            print(f"[-] Using base model: {model_config}")
            call_kwargs["model"] = model_config

        image = await client.text_to_image(**call_kwargs)
        return image, seed
    except Exception as e:
        # Broad catch keeps the UI alive; the caller treats (None, None)
        # as a failed generation.
        print(f"[-] Error generating image: {e}")
        return None, None
90
 
91
# Discover local LoRA files once at import time; the UI dropdown is
# populated from this list.
local_loras = load_local_loras()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
 
94
  # Creating Gradio interface
95
  with gr.Blocks(css=css2, theme=IndonesiaTheme()) as WallpaperFluxMaker:
 
96
  gr.HTML('<div id="banner">✨ Flux MultiMode Generator + Upscaler ✨</div>')
97
 
98
  with gr.Column(elem_id="col-container"):
 
99
  with gr.Row():
100
+ output_res = gr.Gallery(
101
+ label="⚡ Flux / Upscaled Image ⚡",
102
+ elem_id="output-res",
103
+ columns=2,
104
+ height="auto"
105
+ )
106
 
 
107
  with gr.Row():
 
108
  with gr.Column(scale=1, elem_id="col-left"):
109
  prompt = gr.Textbox(
110
  label="📜 Description",
111
+ placeholder="Write your prompt in any language, it will be translated to English.",
112
  elem_id="textbox-prompt"
113
  )
114
 
 
118
  value=base_models[0]
119
  )
120
 
121
+ # Updated to use local lora paths
122
  lora_model_choice = gr.Dropdown(
123
+ label="🎨 Select LoRA",
124
+ choices=[lora["path"] for lora in local_loras],
125
+ value=local_loras[0]["path"] if local_loras else None
126
  )
127
 
128
+ process_lora = gr.Checkbox(label="🎨 Enable LoRA")
129
+ process_upscale = gr.Checkbox(label="🔍 Enable Upscaling")
130
+ upscale_factor = gr.Radio(
131
+ label="🔍 Upscale Factor",
132
+ choices=[2, 4, 8],
133
+ value=2
134
+ )
135
 
 
136
  with gr.Column(scale=1, elem_id="col-right"):
137
+ with gr.Accordion(label="⚙️ Advanced Options", open=True):
138
+ width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=1280)
139
+ height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=768)
140
+ scales = gr.Slider(label="Scale", minimum=1, maximum=20, step=1, value=8)
141
+ steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=8)
142
  seed = gr.Number(label="Seed", value=-1)
143
 
144
+ btn = gr.Button("🚀 Generate Image", elem_id="generate-btn")
145
+
146
+ btn.click(
147
+ fn=gen,
148
+ inputs=[
149
+ prompt, basemodel_choice, width, height, scales, steps, seed,
150
+ upscale_factor, process_upscale, lora_model_choice, process_lora
151
+ ],
152
+ outputs=output_res
153
+ )
154
 
 
155
  WallpaperFluxMaker.queue(api_open=True).launch(show_api=True)