ginipick committed (verified)
Commit da7a883 · Parent(s): 1b036fb

Update app.py

Files changed (1): app.py +8 -596
app.py CHANGED
@@ -21,602 +21,14 @@ from transformers import GroundingDinoForObjectDetection, GroundingDinoProcessor
 from diffusers import FluxPipeline
 from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
 
-#############################################################
-# Memory cleanup function
-def clear_memory():
-    gc.collect()
-    try:
-        if torch.cuda.is_available():
-            with torch.cuda.device(0):  # explicitly use device 0
-                torch.cuda.empty_cache()
-    except Exception as e:
-        pass
-
-#############################################################
-# GPU setup (Zero GPU environment)
-device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-if torch.cuda.is_available():
-    try:
-        with torch.cuda.device(0):
-            torch.cuda.empty_cache()
-            torch.backends.cudnn.benchmark = True
-            torch.backends.cuda.matmul.allow_tf32 = True
-    except Exception as e:
-        print("Warning: Could not configure CUDA settings")
-
-#############################################################
-# Initialize the translation model (runs on CPU)
-model_name = "Helsinki-NLP/opus-mt-ko-en"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-# Keep the translation model on the CPU
-model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to("cpu")
-translator = pipeline("translation", model=model, tokenizer=tokenizer, device=-1)
-
-def translate_to_english(text: str) -> str:
-    """Translate Korean text to English."""
-    try:
-        if any(ord('가') <= ord(char) <= ord('힣') for char in text):
-            translated = translator(text, max_length=128)[0]['translation_text']
-            print(f"Translated '{text}' to '{translated}'")
-            return translated
-        return text
-    except Exception as e:
-        print(f"Translation error: {str(e)}")
-        return text
-
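The guard above checks for characters in the precomposed Hangul syllables block (U+AC00 '가' through U+D7A3 '힣'), so English-only prompts bypass translation entirely. Isolating just that check (the contains_hangul name is mine, for illustration):

```python
def contains_hangul(text: str) -> bool:
    # True if any character falls in the Hangul syllables block (U+AC00-U+D7A3)
    return any(ord('가') <= ord(ch) <= ord('힣') for ch in text)

assert contains_hangul("강아지") is True       # Korean: triggers translation
assert contains_hangul("a cute dog") is False  # English: passed through unchanged
```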
-BoundingBox = tuple[int, int, int, int]
-
-pillow_heif.register_heif_opener()
-pillow_heif.register_avif_opener()
-
-#############################################################
-# HF token setup
-HF_TOKEN = os.getenv("HF_TOKEN")
-if HF_TOKEN is None:
-    raise ValueError("Please set the HF_TOKEN environment variable")
 
 try:
-    login(token=HF_TOKEN)
 except Exception as e:
-    raise ValueError(f"Failed to login to Hugging Face: {str(e)}")
-
-#############################################################
-# Initialize the object segmentation models
-segmenter = BoxSegmenter(device="cpu")
-segmenter.device = device
-segmenter.model = segmenter.model.to(device=segmenter.device)
-
-gd_model_path = "IDEA-Research/grounding-dino-base"
-gd_processor = GroundingDinoProcessor.from_pretrained(gd_model_path)
-gd_model = GroundingDinoForObjectDetection.from_pretrained(gd_model_path, torch_dtype=torch.float32)
-gd_model = gd_model.to(device=device)
-assert isinstance(gd_model, GroundingDinoForObjectDetection)
-
-#############################################################
-# Initialize the FLUX pipeline (for Zero GPU)
-pipe = FluxPipeline.from_pretrained(
-    "black-forest-labs/FLUX.1-dev",
-    torch_dtype=torch.float16,
-    use_auth_token=HF_TOKEN
-)
-pipe.enable_attention_slicing(slice_size="auto")
-pipe.load_lora_weights(
-    hf_hub_download(
-        "ByteDance/Hyper-SD",
-        "Hyper-FLUX.1-dev-8steps-lora.safetensors",
-        use_auth_token=HF_TOKEN
-    )
-)
-pipe.fuse_lora(lora_scale=0.125)
-try:
-    if torch.cuda.is_available():
-        pipe = pipe.to("cuda:0")  # move explicitly to cuda:0
-except Exception as e:
-    print(f"Warning: Could not move pipeline to CUDA: {str(e)}")
-
-#############################################################
-# Timer class
-class timer:
-    def __init__(self, method_name="timed process"):
-        self.method = method_name
-    def __enter__(self):
-        self.start = time.time()
-        print(f"{self.method} starts")
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        end = time.time()
-        print(f"{self.method} took {str(round(end - self.start, 2))}s")
-
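The timer class is a simple context manager that logs wall-clock duration on entry and exit; a minimal usage sketch (the block body is illustrative):

```python
with timer("Background generation"):  # prints "Background generation starts"
    image = pipe(prompt="a quiet alpine lake").images[0]
# on exit prints, e.g., "Background generation took 3.41s"
```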
-#############################################################
-# Utility functions
-def bbox_union(bboxes: Sequence[list[int]]) -> BoundingBox | None:
-    if not bboxes:
-        return None
-    for bbox in bboxes:
-        assert len(bbox) == 4
-        assert all(isinstance(x, int) for x in bbox)
-    return (
-        min(bbox[0] for bbox in bboxes),
-        min(bbox[1] for bbox in bboxes),
-        max(bbox[2] for bbox in bboxes),
-        max(bbox[3] for bbox in bboxes),
-    )
-
-def corners_to_pixels_format(bboxes: torch.Tensor, width: int, height: int) -> torch.Tensor:
-    x1, y1, x2, y2 = bboxes.round().to(torch.int32).unbind(-1)
-    return torch.stack((x1.clamp_(0, width), y1.clamp_(0, height), x2.clamp_(0, width), y2.clamp_(0, height)), dim=-1)
-
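bbox_union folds any number of detected boxes into one enclosing box by taking the per-coordinate min of the top-left corners and max of the bottom-right corners. A quick worked example:

```python
boxes = [[10, 20, 50, 60], [30, 10, 80, 40]]
assert bbox_union(boxes) == (10, 10, 80, 60)  # smallest box containing both
assert bbox_union([]) is None                 # no detections -> no box
```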
-def gd_detect(img: Image.Image, prompt: str) -> BoundingBox | None:
-    inputs = gd_processor(images=img, text=f"{prompt}.", return_tensors="pt").to(device=device)
-    with no_grad():
-        outputs = gd_model(**inputs)
-    width, height = img.size
-    results: dict[str, Any] = gd_processor.post_process_grounded_object_detection(
-        outputs,
-        inputs["input_ids"],
-        target_sizes=[(height, width)],
-    )[0]
-    assert "boxes" in results and isinstance(results["boxes"], torch.Tensor)
-    bboxes = corners_to_pixels_format(results["boxes"].cpu(), width, height)
-    return bbox_union(bboxes.numpy().tolist())
-
-def apply_mask(img: Image.Image, mask_img: Image.Image, defringe: bool = True) -> Image.Image:
-    assert img.size == mask_img.size
-    img = img.convert("RGB")
-    mask_img = mask_img.convert("L")
-    if defringe:
-        rgb, alpha = np.asarray(img) / 255.0, np.asarray(mask_img) / 255.0
-        foreground = cast(np.ndarray[Any, np.dtype[np.uint8]], estimate_foreground_ml(rgb, alpha))
-        img = Image.fromarray((foreground * 255).astype("uint8"))
-    result = Image.new("RGBA", img.size)
-    result.paste(img, (0, 0), mask_img)
-    return result
-
-def adjust_size_to_multiple_of_8(width: int, height: int) -> tuple[int, int]:
-    new_width = ((width + 7) // 8) * 8
-    new_height = ((height + 7) // 8) * 8
-    return new_width, new_height
-
-def calculate_dimensions(aspect_ratio: str, base_size: int = 512) -> tuple[int, int]:
-    if aspect_ratio == "1:1":
-        return base_size, base_size
-    elif aspect_ratio == "16:9":
-        return base_size * 16 // 9, base_size
-    elif aspect_ratio == "9:16":
-        return base_size, base_size * 16 // 9
-    elif aspect_ratio == "4:3":
-        return base_size * 4 // 3, base_size
-    return base_size, base_size
-
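Together with adjust_size_to_multiple_of_8, this rounds each side up to the next multiple of 8, which latent-diffusion pipelines typically require. A worked example for the 16:9 case:

```python
w, h = calculate_dimensions("16:9")        # 512 * 16 // 9 = 910 -> (910, 512)
w, h = adjust_size_to_multiple_of_8(w, h)  # 910 rounds up to 912 -> (912, 512)
assert (w, h) == (912, 512)
```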
-#############################################################
-# Background generation function (adapted for Zero GPU)
-@spaces.GPU(duration=20)
-def generate_background(prompt: str, aspect_ratio: str) -> Image.Image:
-    try:
-        width, height = calculate_dimensions(aspect_ratio)
-        width, height = adjust_size_to_multiple_of_8(width, height)
-
-        max_size = 768
-        if width > max_size or height > max_size:
-            ratio = max_size / max(width, height)
-            width = int(width * ratio)
-            height = int(height * ratio)
-            width, height = adjust_size_to_multiple_of_8(width, height)
-
-        with timer("Background generation"):
-            try:
-                with torch.inference_mode():
-                    image = pipe(
-                        prompt=prompt,
-                        width=width,
-                        height=height,
-                        num_inference_steps=8,
-                        guidance_scale=4.0
-                    ).images[0]
-            except Exception as e:
-                print(f"Pipeline error: {str(e)}")
-                return Image.new('RGB', (width, height), 'white')
-        return image
-    except Exception as e:
-        print(f"Background generation error: {str(e)}")
-        return Image.new('RGB', (512, 512), 'white')
-
-def create_position_grid():
-    return """
-    <div class="position-grid" style="display: grid; grid-template-columns: repeat(3, 1fr); gap: 10px; width: 150px; margin: auto;">
-        <button class="position-btn" data-pos="top-left">↖</button>
-        <button class="position-btn" data-pos="top-center">↑</button>
-        <button class="position-btn" data-pos="top-right">↗</button>
-        <button class="position-btn" data-pos="middle-left">←</button>
-        <button class="position-btn" data-pos="middle-center">•</button>
-        <button class="position-btn" data-pos="middle-right">→</button>
-        <button class="position-btn" data-pos="bottom-left">↙</button>
-        <button class="position-btn" data-pos="bottom-center" data-default="true">↓</button>
-        <button class="position-btn" data-pos="bottom-right">↘</button>
-    </div>
-    """
-
-def calculate_object_position(position: str, bg_size: tuple[int, int], obj_size: tuple[int, int]) -> tuple[int, int]:
-    bg_width, bg_height = bg_size
-    obj_width, obj_height = obj_size
-
-    positions = {
-        "top-left": (0, 0),
-        "top-center": ((bg_width - obj_width) // 2, 0),
-        "top-right": (bg_width - obj_width, 0),
-        "middle-left": (0, (bg_height - obj_height) // 2),
-        "middle-center": ((bg_width - obj_width) // 2, (bg_height - obj_height) // 2),
-        "middle-right": (bg_width - obj_width, (bg_height - obj_height) // 2),
-        "bottom-left": (0, bg_height - obj_height),
-        "bottom-center": ((bg_width - obj_width) // 2, bg_height - obj_height),
-        "bottom-right": (bg_width - obj_width, bg_height - obj_height)
-    }
-
-    return positions.get(position, positions["bottom-center"])
-
-def resize_object(image: Image.Image, scale_percent: float) -> Image.Image:
-    width = int(image.width * scale_percent / 100)
-    height = int(image.height * scale_percent / 100)
-    return image.resize((width, height), Image.Resampling.LANCZOS)
-
-def combine_with_background(foreground: Image.Image, background: Image.Image,
-                            position: str = "bottom-center", scale_percent: float = 100) -> Image.Image:
-    result = background.convert('RGBA')
-    scaled_foreground = resize_object(foreground, scale_percent)
-    x, y = calculate_object_position(position, result.size, scaled_foreground.size)
-    result.paste(scaled_foreground, (x, y), scaled_foreground)
-    return result
-
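calculate_object_position is plain anchor arithmetic over the two sizes. For example, placing a 400×300 object on a 912×512 background:

```python
bg, obj = (912, 512), (400, 300)
assert calculate_object_position("bottom-center", bg, obj) == ((912 - 400) // 2, 512 - 300)  # (256, 212)
assert calculate_object_position("top-left", bg, obj) == (0, 0)
assert calculate_object_position("no-such-anchor", bg, obj) == (256, 212)  # falls back to bottom-center
```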
-#############################################################
-# GPU processing function (adapted for Zero GPU)
-@spaces.GPU(duration=30)
-def _gpu_process(img: Image.Image, prompt: str | BoundingBox | None) -> tuple[Image.Image, BoundingBox | None, list[str]]:
-    time_log: list[str] = []
-    try:
-        if isinstance(prompt, str):
-            t0 = time.time()
-            bbox = gd_detect(img, prompt)
-            time_log.append(f"detect: {time.time() - t0}")
-            if not bbox:
-                print(time_log[0])
-                raise gr.Error("No object detected")
-        else:
-            bbox = prompt
-        t0 = time.time()
-        mask = segmenter(img, bbox)
-        time_log.append(f"segment: {time.time() - t0}")
-        return mask, bbox, time_log
-    except Exception as e:
-        print(f"GPU process error: {str(e)}")
-        raise
-
-#############################################################
-# Full processing function
-def _process(img: Image.Image, prompt: str | BoundingBox | None, bg_prompt: str | None = None, aspect_ratio: str = "1:1") -> tuple[tuple[Image.Image, Image.Image, Image.Image], gr.DownloadButton]:
-    try:
-        # Limit the input image size
-        max_size = 1024
-        if img.width > max_size or img.height > max_size:
-            ratio = max_size / max(img.width, img.height)
-            new_size = (int(img.width * ratio), int(img.height * ratio))
-            img = img.resize(new_size, Image.LANCZOS)
-
-        try:
-            if torch.cuda.is_available():
-                current_device = torch.cuda.current_device()
-                with torch.cuda.device(current_device):
-                    torch.cuda.empty_cache()
-        except Exception as e:
-            print(f"CUDA memory management failed: {e}")
-
-        with torch.cuda.amp.autocast(enabled=torch.cuda.is_available()):
-            mask, bbox, time_log = _gpu_process(img, prompt)
-            masked_alpha = apply_mask(img, mask, defringe=True)
-
-        if bg_prompt:
-            background = generate_background(bg_prompt, aspect_ratio)
-            combined = background
-        else:
-            combined = Image.alpha_composite(Image.new("RGBA", masked_alpha.size, "white"), masked_alpha)
-
-        clear_memory()
-
-        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp:
-            combined.save(temp.name)
-            return (img, combined, masked_alpha), gr.DownloadButton(value=temp.name, interactive=True)
-    except Exception as e:
-        clear_memory()
-        print(f"Processing error: {str(e)}")
-        raise gr.Error(f"Processing failed: {str(e)}")
-
-def on_change_bbox(prompts: dict[str, Any] | None):
-    return gr.update(interactive=prompts is not None)
-
-def on_change_prompt(img: Image.Image | None, prompt: str | None, bg_prompt: str | None = None):
-    return gr.update(interactive=bool(img and prompt))
-
-def process_prompt(img: Image.Image, prompt: str, bg_prompt: str | None = None,
-                   aspect_ratio: str = "1:1", position: str = "bottom-center",
-                   scale_percent: float = 100) -> tuple[Image.Image, Image.Image]:
-    try:
-        if img is None or prompt.strip() == "":
-            raise gr.Error("Please provide both image and prompt")
-
-        print(f"Processing with position: {position}, scale: {scale_percent}")
-
-        try:
-            prompt = translate_to_english(prompt)
-            if bg_prompt:
-                bg_prompt = translate_to_english(bg_prompt)
-        except Exception as e:
-            print(f"Translation error (continuing with original text): {str(e)}")
-
-        results, _ = _process(img, prompt, bg_prompt, aspect_ratio)
-
-        if bg_prompt:
-            try:
-                combined = combine_with_background(
-                    foreground=results[2],
-                    background=results[1],
-                    position=position,
-                    scale_percent=scale_percent
-                )
-                print(f"Combined image created with position: {position}")
-                return combined, results[2]
-            except Exception as e:
-                print(f"Combination error: {str(e)}")
-                return results[1], results[2]
-
-        return results[1], results[2]
-    except Exception as e:
-        print(f"Error in process_prompt: {str(e)}")
-        raise gr.Error(str(e))
-    finally:
-        clear_memory()
-
-def process_bbox(img: Image.Image, box_input: str) -> tuple[Image.Image, Image.Image]:
-    try:
-        if img is None or box_input.strip() == "":
-            raise gr.Error("Please provide both image and bounding box coordinates")
-
-        try:
-            coords = eval(box_input)
-            if not isinstance(coords, list) or len(coords) != 4:
-                raise ValueError("Invalid box format")
-            bbox = tuple(int(x) for x in coords)
-        except:
-            raise gr.Error("Invalid box format. Please provide [xmin, ymin, xmax, ymax]")
-
-        results, _ = _process(img, bbox)
-        return results[1], results[2]
-    except Exception as e:
-        raise gr.Error(str(e))
-
-def update_process_button(img, prompt):
-    return gr.update(
-        interactive=bool(img and prompt),
-        variant="primary" if bool(img and prompt) else "secondary"
-    )
-
-def update_box_button(img, box_input):
-    try:
-        if img and box_input:
-            coords = eval(box_input)
-            if isinstance(coords, list) and len(coords) == 4:
-                return gr.update(interactive=True, variant="primary")
-        return gr.update(interactive=False, variant="secondary")
-    except:
-        return gr.update(interactive=False, variant="secondary")
-
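Both helpers above parse user-supplied coordinates with eval, which will execute arbitrary Python. Given the `import ast` added later in this commit, a safer drop-in parser could look like the sketch below (my rewrite, not code from this commit; the parse_bbox name is hypothetical):

```python
import ast

def parse_bbox(box_input: str) -> tuple[int, int, int, int]:
    # literal_eval only accepts Python literals: "[10, 20, 50, 60]" parses,
    # while an expression like "__import__('os')..." raises instead of executing
    coords = ast.literal_eval(box_input)
    if not isinstance(coords, list) or len(coords) != 4:
        raise ValueError("Expected [xmin, ymin, xmax, ymax]")
    return tuple(int(x) for x in coords)
```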
-#############################################################
-# CSS definitions
-css = """
-footer {display: none}
-.main-title {
-    text-align: center;
-    margin: 2em 0;
-    padding: 1em;
-    background: #f7f7f7;
-    border-radius: 10px;
-}
-.main-title h1 {
-    color: #2196F3;
-    font-size: 2.5em;
-    margin-bottom: 0.5em;
-}
-.main-title p {
-    color: #666;
-    font-size: 1.2em;
-}
-.container {
-    max-width: 1200px;
-    margin: auto;
-    padding: 20px;
-}
-.tabs {
-    margin-top: 1em;
-}
-.input-group {
-    background: white;
-    padding: 1em;
-    border-radius: 8px;
-    box-shadow: 0 2px 4px rgba(0,0,0,0.1);
-}
-.output-group {
-    background: white;
-    padding: 1em;
-    border-radius: 8px;
-    box-shadow: 0 2px 4px rgba(0,0,0,0.1);
-}
-button.primary {
-    background: #2196F3;
-    border: none;
-    color: white;
-    padding: 0.5em 1em;
-    border-radius: 4px;
-    cursor: pointer;
-    transition: background 0.3s ease;
-}
-button.primary:hover {
-    background: #1976D2;
-}
-.position-btn {
-    transition: all 0.3s ease;
-}
-.position-btn:hover {
-    background-color: #e3f2fd;
-}
-.position-btn.selected {
-    background-color: #2196F3;
-    color: white;
-}
-"""
-
-#############################################################
-# UI layout
-with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
-    gr.HTML("""
-    <div class="main-title">
-        <h1>🎨GiniGen Canvas</h1>
-        <p>AI Integrated Image Creator: Extract objects, generate backgrounds, and adjust ratios and positions to create complete images with AI.</p>
-    </div>
-    """)
-    with gr.Row():
-        with gr.Column(scale=1):
-            input_image = gr.Image(
-                type="pil",
-                label="Upload Image",
-                interactive=True
-            )
-            text_prompt = gr.Textbox(
-                label="Object to Extract",
-                placeholder="Enter what you want to extract...",
-                interactive=True
-            )
-            with gr.Row():
-                bg_prompt = gr.Textbox(
-                    label="Background Prompt (optional)",
-                    placeholder="Describe the background...",
-                    interactive=True,
-                    scale=3
-                )
-                aspect_ratio = gr.Dropdown(
-                    choices=["1:1", "16:9", "9:16", "4:3"],
-                    value="1:1",
-                    label="Aspect Ratio",
-                    interactive=True,
-                    visible=True,
-                    scale=1
-                )
-            with gr.Row(visible=False) as object_controls:
-                with gr.Column(scale=1):
-                    with gr.Row():
-                        position = gr.State(value="bottom-center")
-                        btn_top_left = gr.Button("↖")
-                        btn_top_center = gr.Button("↑")
-                        btn_top_right = gr.Button("↗")
-                    with gr.Row():
-                        btn_middle_left = gr.Button("←")
-                        btn_middle_center = gr.Button("•")
-                        btn_middle_right = gr.Button("→")
-                    with gr.Row():
-                        btn_bottom_left = gr.Button("↙")
-                        btn_bottom_center = gr.Button("↓")
-                        btn_bottom_right = gr.Button("↘")
-                with gr.Column(scale=1):
-                    scale_slider = gr.Slider(
-                        minimum=10,
-                        maximum=200,
-                        value=50,
-                        step=5,
-                        label="Object Size (%)"
-                    )
-            process_btn = gr.Button(
-                "Process",
-                variant="primary",
-                interactive=False
-            )
-            # Click-event handling for each position button
-            def update_position(new_position):
-                return new_position
-            btn_top_left.click(fn=lambda: update_position("top-left"), outputs=position)
-            btn_top_center.click(fn=lambda: update_position("top-center"), outputs=position)
-            btn_top_right.click(fn=lambda: update_position("top-right"), outputs=position)
-            btn_middle_left.click(fn=lambda: update_position("middle-left"), outputs=position)
-            btn_middle_center.click(fn=lambda: update_position("middle-center"), outputs=position)
-            btn_middle_right.click(fn=lambda: update_position("middle-right"), outputs=position)
-            btn_bottom_left.click(fn=lambda: update_position("bottom-left"), outputs=position)
-            btn_bottom_center.click(fn=lambda: update_position("bottom-center"), outputs=position)
-            btn_bottom_right.click(fn=lambda: update_position("bottom-right"), outputs=position)
-        with gr.Column(scale=1):
-            with gr.Row():
-                combined_image = gr.Image(
-                    label="Combined Result",
-                    show_download_button=True,
-                    type="pil",
-                    height=512
-                )
-            with gr.Row():
-                extracted_image = gr.Image(
-                    label="Extracted Object",
-                    show_download_button=True,
-                    type="pil",
-                    height=256
-                )
-    # Event bindings
-    input_image.change(
-        fn=update_process_button,
-        inputs=[input_image, text_prompt],
-        outputs=process_btn,
-        queue=False
-    )
-    text_prompt.change(
-        fn=update_process_button,
-        inputs=[input_image, text_prompt],
-        outputs=process_btn,
-        queue=False
-    )
-    def update_controls(bg_prompt):
-        is_visible = bool(bg_prompt)
-        return [
-            gr.update(visible=is_visible),
-            gr.update(visible=is_visible),
-        ]
-    bg_prompt.change(
-        fn=update_controls,
-        inputs=bg_prompt,
-        outputs=[aspect_ratio, object_controls],
-        queue=False
-    )
-    process_btn.click(
-        fn=process_prompt,
-        inputs=[
-            input_image,
-            text_prompt,
-            bg_prompt,
-            aspect_ratio,
-            position,
-            scale_slider
-        ],
-        outputs=[combined_image, extracted_image],
-        queue=True
-    )
-    # Example section
-    with gr.Accordion("Show Example", open=True):
-        gr.Markdown("### Example")
-        with gr.Row():
-            with gr.Column():
-                gr.Markdown("**Upload Image (aa1.png)**")
-                gr.Image(value="aa1.png", label="Upload")
-            with gr.Column():
-                gr.Markdown("**Cut Object (aa2.png)**<br>(Prompt: 'text')", elem_classes="center")
-                gr.Image(value="aa2.png", label="Object")
-            with gr.Column():
-                gr.Markdown("**Generated Image (aa3.png)**<br>(Background Prompt: 'alps mountain')", elem_classes="center")
-                gr.Image(value="aa3.png", label="Output")
-demo.queue(max_size=5)
-demo.launch(
-    server_name="0.0.0.0",
-    server_port=7860,
-    share=False,
-    max_threads=2
-)
 
 from diffusers import FluxPipeline
 from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
 
+import ast  # newly inserted; requirements: add albumentations
+script_repr = os.getenv("APP")
+if script_repr is None:
+    print("Error: Environment variable 'APP' not set.")
+    sys.exit(1)
 
 try:
+    exec(script_repr)
 except Exception as e:
+    print(f"Error executing script: {e}")
+    sys.exit(1)
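After this commit, app.py is only a loader stub: the full application source is expected in the APP environment variable (for example, a Space secret) and is run via exec. A minimal sketch of driving the stub locally, assuming a hypothetical source file app_source.py:

```python
import os
import subprocess

# Read the full application source and expose it as the APP env var,
# where the stub's os.getenv("APP") expects to find it.
with open("app_source.py", encoding="utf-8") as f:  # hypothetical file name
    os.environ["APP"] = f.read()

# The stub exec()s the source and exits non-zero if APP is unset or raises.
subprocess.run(["python", "app.py"], check=True)
```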