Update app.py
app.py
CHANGED
@@ -13,6 +13,7 @@ MAX_SEED = np.iinfo(np.int32).max
 
 HF_TOKEN = os.getenv('HF_TOKEN_UPSCALER')
 HF_TOKEN_UPSCALER = os.getenv('HF_TOKEN_UPSCALER')
+qwen_client = Client("K00B404/HugChatWrap", hf_token=HF_TOKEN)
 loaded_loras=[]
 for lora in loras:
     print(lora.get('repo'))
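This hunk creates qwen_client, a gradio_client Client handle for the K00B404/HugChatWrap Space, authenticated with the HF_TOKEN read from HF_TOKEN_UPSCALER. The diff does not show an import for Client, so it is presumably already imported near the top of app.py; if not, a minimal hedged setup would look like this (the gradio_client import is an assumption, not shown in this commit):

# Sketch only: constructing the Space client used by this commit.
import os
from gradio_client import Client  # assumption: import not visible in this diff

HF_TOKEN = os.getenv('HF_TOKEN_UPSCALER')
qwen_client = Client("K00B404/HugChatWrap", hf_token=HF_TOKEN)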
@@ -23,6 +24,34 @@ def enable_lora(lora_add, basemodel):
     print(f"[-] Menentukan model: LoRA {'diaktifkan' if lora_add else 'tidak diaktifkan'}, model dasar: {basemodel}")
     return basemodel if not lora_add else lora_add
 
+def generate_character_description(character_prompt):
+    """Generate detailed character description using Qwen"""
+    system_message = """You are a character description generator. Create detailed, vivid descriptions
+    of characters including their physical appearance, personality, and notable features. Keep the
+    description focused on visual elements that could be used for image generation."""
+
+    try:
+
+        result = qwen_client.predict(
+            message=character_prompt,
+            param_2=system_message,
+            param_3=128,
+            param_4=0.7,
+            param_5=0.95,
+            api_name="/chat"
+        )
+        '''result = qwen_client.predict(
+            message=character_prompt,
+            system_message=system_message,
+            max_tokens=512,
+            temperature=0.7,
+            top_p=0.95,
+            api_name="/chat"
+        )'''
+        return result
+    except Exception as e:
+        return f"Error generating description: {str(e)}"
+
 # Function to generate image
 async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
     try:
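The new generate_character_description helper calls the Space's /chat endpoint through qwen_client. The keyword names param_2 through param_5 are the generic names gradio_client typically assigns to unlabeled endpoint parameters; judging from the commented-out variant kept in the same hunk, they most likely correspond to the system message, max tokens, temperature, and top_p. That mapping is an assumption and can be checked against the Space itself, for example:

# Sketch only: inspecting the /chat endpoint to confirm what param_2..param_5 actually are.
import os
from gradio_client import Client

client = Client("K00B404/HugChatWrap", hf_token=os.getenv('HF_TOKEN_UPSCALER'))
client.view_api()  # prints the /chat endpoint with its real parameter names and defaults

# Assumed mapping, mirroring the commented-out call in this commit:
description = client.predict(
    message="a weathered sea captain with a scar over one eye",
    param_2="You are a character description generator.",  # system message (assumed)
    param_3=128,   # max new tokens (assumed)
    param_4=0.7,   # temperature (assumed)
    param_5=0.95,  # top_p (assumed)
    api_name="/chat",
)
print(description)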
@@ -31,7 +60,7 @@ async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
         seed = int(seed)
 
         print(f"[-] Menerjemahkan prompt: {prompt}")
-        text = str(Translator().translate(prompt, 'English')) + "," + lora_word
+        text = generate_character_description(str(Translator().translate(prompt, 'English'))) + "," + lora_word
 
         print(f"[-] Generating image with prompt: {text}, model: {model}")
         client = AsyncInferenceClient()