Arrcttacsrks committed on
Commit
3ed14cc
·
verified ·
1 Parent(s): fc9c002

Upload app(25).py

Browse files
Files changed (1) hide show
  1. app(25).py +117 -0
app(25).py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import cv2
3
+ import torch
4
+ from model import U2NET
5
+ from torch.autograd import Variable
6
+ import numpy as np
7
+ from huggingface_hub import hf_hub_download
8
+ import gradio as gr
9
+
10
class PortraitGenerator:
    """Generate sketch-style portraits with the pretrained U2NET portrait model.

    Loads the model once at construction, offers classic image adjustments
    (brightness, contrast, saturation, ...) and runs U2NET inference to turn
    a photo into a portrait mask image.
    """

    def __init__(self):
        # Load the model once; reused for every inference call.
        self.u2net = self.load_u2net_model()

    def normPRED(self, d):
        """Min-max normalize a tensor to the [0, 1] range.

        Args:
            d: torch.Tensor of predictions.

        Returns:
            Tensor of the same shape rescaled so min -> 0 and max -> 1.
        """
        return (d - torch.min(d)) / (torch.max(d) - torch.min(d))

    def inference(self, input_img):
        """Run U2NET portrait inference on a BGR image array.

        Args:
            input_img: HxWx3 numpy array (BGR channel order, as produced by
                OpenCV / the Gradio pipeline here).

        Returns:
            HxW float numpy array in [0, 1] (inverted, normalized saliency map).
        """
        # Guard against an all-black frame: np.max == 0 would divide by zero.
        max_val = np.max(input_img)
        input_img = input_img / max_val if max_val > 0 else input_img.astype(np.float64)

        tmpImg = np.zeros((input_img.shape[0], input_img.shape[1], 3))
        # NOTE(review): this channel/statistic pairing (BGR channels matched
        # against ImageNet RGB means/stds in reversed order) reproduces the
        # original U2-Net portrait demo preprocessing; kept unchanged so the
        # pretrained weights see the distribution they were trained with.
        tmpImg[:, :, 0] = (input_img[:, :, 2] - 0.406) / 0.225
        tmpImg[:, :, 1] = (input_img[:, :, 1] - 0.456) / 0.224
        tmpImg[:, :, 2] = (input_img[:, :, 0] - 0.485) / 0.229

        # HWC -> NCHW float tensor.
        tmpImg = torch.from_numpy(
            tmpImg.transpose((2, 0, 1))[np.newaxis, :, :, :]
        ).type(torch.FloatTensor)

        # FIX: put the input on the model's own device. The original called
        # tmpImg.cuda() while the module stayed on CPU, crashing on CUDA hosts.
        device = next(self.u2net.parameters()).device
        tmpImg = tmpImg.to(device)

        # Inference only: no autograd graph needed (replaces the deprecated
        # torch.autograd.Variable wrapper).
        with torch.no_grad():
            d1, _, _, _, _, _, _ = self.u2net(tmpImg)

        # Invert so the portrait lines are bright, then normalize to [0, 1].
        pred = self.normPRED(1.0 - d1[:, 0, :, :])
        return pred.cpu().numpy().squeeze()

    def adjust_image(self, img, apply_bw, brightness, contrast, saturation, white_balance, hue, highlights_shadows, sharpness, noise_reduction):
        """Apply the UI's slider adjustments to a BGR uint8 image.

        All sliders use 0-100 with 50 meaning "no change"; branches are
        skipped when a slider sits at its neutral value.

        Returns:
            Adjusted BGR uint8 image of the same shape.
        """
        # Optional black & white conversion (kept 3-channel for later steps).
        if apply_bw:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

        # Brightness / contrast: alpha scales (contrast), beta shifts (brightness).
        img = cv2.convertScaleAbs(img, alpha=contrast / 50.0, beta=brightness - 50)

        # Saturation: scale the S channel in HSV space.
        if saturation != 50:
            hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            hsv_img[:, :, 1] = np.clip(hsv_img[:, :, 1] * (saturation / 50.0), 0, 255)
            img = cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR)

        # White balance: scale the a/b chroma channels in LAB space.
        if white_balance != 50:
            lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
            l, a, b = cv2.split(lab)
            factor = white_balance / 50.0
            # FIX: cv2.merge requires all channels to share one dtype; the
            # original merged uint8 L with float64 a/b and raised at runtime.
            a = np.clip(a.astype(np.float64) * factor, 0, 255).astype(np.uint8)
            b = np.clip(b.astype(np.float64) * factor, 0, 255).astype(np.uint8)
            img = cv2.cvtColor(cv2.merge((l, a, b)), cv2.COLOR_LAB2BGR)

        # Hue: scale the H channel (OpenCV 8-bit hue range is 0..179).
        if hue != 50:
            hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            hsv_img[:, :, 0] = np.clip(hsv_img[:, :, 0] * (hue / 50.0), 0, 179)
            img = cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR)

        # Highlights/shadows: uniform brightness shift (up to +/- 255).
        if highlights_shadows != 50:
            img = cv2.convertScaleAbs(img, alpha=1.0, beta=(highlights_shadows - 50) * 5.1)

        # Sharpness: scaled sharpening kernel convolution.
        if sharpness != 50:
            kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]) * (sharpness / 50.0)
            img = cv2.filter2D(img, -1, kernel)

        # Noise reduction: non-local means denoising, strength from the slider.
        if noise_reduction != 50:
            strength = noise_reduction / 50.0 * 10
            img = cv2.fastNlMeansDenoisingColored(img, None, strength, strength, 7, 21)

        return img

    def process_image(self, img, apply_bw, brightness, contrast, saturation, white_balance, hue, highlights_shadows, sharpness, noise_reduction, apply_adjustments, generate_final):
        """Gradio callback: preview the adjustments or render the final portrait.

        NOTE(review): `apply_adjustments` is accepted (the UI exposes the
        checkbox) but currently unused -- adjustments are always applied.
        Kept in the signature for interface compatibility.

        Returns:
            BGR uint8 preview image, or HxW uint8 portrait when
            `generate_final` is checked.
        """
        adjusted_img = self.adjust_image(img, apply_bw, brightness, contrast, saturation, white_balance, hue, highlights_shadows, sharpness, noise_reduction)
        if not generate_final:
            return adjusted_img

        result = self.inference(adjusted_img)
        return (result * 255).astype(np.uint8)

    def load_u2net_model(self):
        """Download the pretrained portrait weights and build the network.

        Returns:
            U2NET module in eval mode, placed on GPU when available.
        """
        model_path = hf_hub_download(
            repo_id="Arrcttacsrks/U2net",
            filename="u2net_portrait.pth",
            use_auth_token=os.getenv("HF_TOKEN"),
        )
        device = "cuda" if torch.cuda.is_available() else "cpu"
        net = U2NET(3, 1)
        net.load_state_dict(torch.load(model_path, map_location=device))
        # FIX: actually move the module to the target device; map_location
        # only affects where the checkpoint tensors are deserialized.
        net.to(device)
        net.eval()
        return net
89
+
90
def main():
    """Build the Gradio interface around a PortraitGenerator and launch it."""
    generator = PortraitGenerator()

    # One widget per process_image parameter, in positional order.
    controls = [gr.Image(type="numpy", label="Upload your image")]
    controls.append(gr.Checkbox(label="Black & White Image"))
    slider_labels = (
        "Brightness",
        "Contrast",
        "Saturation",
        "White Balance",
        "Hue",
        "Highlights and Shadows",
        "Sharpness",
        "Noise Reduction",
    )
    for label in slider_labels:
        controls.append(gr.Slider(0, 100, value=50, label=label))
    controls.append(gr.Checkbox(label="Apply Adjustments"))
    controls.append(gr.Checkbox(label="Generate Final Portrait"))

    demo = gr.Interface(
        fn=generator.process_image,
        inputs=controls,
        outputs=gr.Image(type="numpy", label="Preview or Portrait Result"),
        title="Portrait Generation with U2NET",
        description="Upload an image to generate its portrait with optional adjustments. Enable 'Generate Final Portrait' for final output.",
    )
    demo.launch()
115
+
116
# Script entry point: construct the app and start the Gradio server.
if __name__ == "__main__":
    main()