Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -22,6 +22,24 @@ TMP_DIR = "/tmp/Trellis-demo"
|
|
22 |
|
23 |
os.makedirs(TMP_DIR, exist_ok=True)
|
24 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
25 |
|
26 |
def preprocess_image(image: Image.Image) -> Tuple[str, Image.Image]:
|
27 |
"""
|
@@ -78,7 +96,6 @@ def unpack_state(state: dict) -> Tuple[Gaussian, edict, str]:
|
|
78 |
|
79 |
return gs, mesh, state['trial_id']
|
80 |
|
81 |
-
|
82 |
@spaces.GPU
|
83 |
def text_to_image(prompt: str, seed: int, randomize_seed: bool) -> Image.Image:
|
84 |
"""
|
@@ -87,8 +104,14 @@ def text_to_image(prompt: str, seed: int, randomize_seed: bool) -> Image.Image:
|
|
87 |
if randomize_seed:
|
88 |
seed = np.random.randint(0, MAX_SEED)
|
89 |
|
|
|
|
|
|
|
|
|
|
|
|
|
90 |
generator = torch.Generator(device="cuda").manual_seed(seed)
|
91 |
-
image = text2img_pipeline(
|
92 |
return image
|
93 |
|
94 |
|
|
|
22 |
|
23 |
os.makedirs(TMP_DIR, exist_ok=True)
|
24 |
|
25 |
+
# Added alongside the top-of-file imports
from transformers import pipeline

# Initialize the Korean -> English translation model once at module load
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
|
30 |
+
|
31 |
+
# Korean detection and translation helper
def translate_korean_prompt(prompt: str) -> str:
    """Translate *prompt* to English when it contains Korean text.

    Uses the module-level ``translator`` pipeline
    (Helsinki-NLP/opus-mt-ko-en). Prompts with no Korean characters are
    returned unchanged, so English input passes through untouched.

    Args:
        prompt: Raw user prompt, possibly written in Korean.

    Returns:
        The English translation when Korean is detected, otherwise the
        original prompt.
    """
    def contains_korean(text: str) -> bool:
        # Hangul Syllables block: U+AC00 ('가') .. U+D7A3 ('힣').
        # The scraped original had mojibake two-character literals here,
        # which would make ord() raise TypeError; restored to the intended
        # single-character bounds.
        return any('\uac00' <= ch <= '\ud7a3' for ch in text)

    if contains_korean(prompt):
        # pipeline returns a list of dicts: [{'translation_text': ...}]
        return translator(prompt)[0]['translation_text']
    return prompt
|
43 |
|
44 |
def preprocess_image(image: Image.Image) -> Tuple[str, Image.Image]:
|
45 |
"""
|
|
|
96 |
|
97 |
return gs, mesh, state['trial_id']
|
98 |
|
|
|
99 |
@spaces.GPU
|
100 |
def text_to_image(prompt: str, seed: int, randomize_seed: bool) -> Image.Image:
|
101 |
"""
|
|
|
104 |
if randomize_seed:
|
105 |
seed = np.random.randint(0, MAX_SEED)
|
106 |
|
107 |
+
# Translate a Korean prompt to English
|
108 |
+
english_prompt = translate_korean_prompt(prompt)
|
109 |
+
|
110 |
+
# Enforce the expected prompt format
|
111 |
+
formatted_prompt = f"wbgmsst, 3D, {english_prompt}, white background"
|
112 |
+
|
113 |
generator = torch.Generator(device="cuda").manual_seed(seed)
|
114 |
+
image = text2img_pipeline(formatted_prompt, generator=generator).images[0]
|
115 |
return image
|
116 |
|
117 |
|