|
import spaces |
|
import gradio as gr |
|
import subprocess |
|
from PIL import Image,ImageOps,ImageDraw,ImageFilter |
|
import json |
|
import os |
|
import time |
|
import mp_box |
|
from mp_estimate import ratios_cordinates,estimate_horizontal,estimate_vertical,mean_std_label,normalized_to_pixel,get_feature_angles_cordinate,create_detail_labels,get_feature_ratios_cordinate |
|
from mp_utils import get_pixel_cordinate_list,extract_landmark,get_pixel_cordinate,get_pixel_xyz,get_normalized_landmarks |
|
from glibvision.draw_utils import points_to_box,box_to_xy,plus_point |
|
|
|
|
|
from glibvision.cv2_utils import plot_points,create_color_image,pil_to_bgr_image,set_plot_text,copy_image |
|
from glibvision.numpy_utils import rotate_point_euler,load_data |
|
from gradio_utils import save_image,save_buffer,clear_old_files,read_file
|
|
|
import cv2 |
|
|
|
|
|
import numpy as np |
|
from numpy.typing import NDArray |
|
|
|
'''
Gradio demo: head-pose estimation with MediaPipe Face Landmarker.

Compares MediaPipe's facial transformation matrix with several pre-trained
regression models (HGBR/LGBM/ETR plus a stacking meta-model) and with
simple landmark-ratio heuristics.
'''
|
|
|
def process_images(image,base_image,order, |
|
double_check_offset_center,center_index, |
|
draw_mediapipe_mesh,z_multiply=0.8,draw_mediapipe_angle=False,draw_horizontal_line=False,draw_vertical_line=False,draw_faceratio_line=False,
|
progress=gr.Progress(track_tqdm=True)): |
|
clear_old_files() |
|
""" |
|
image_indices = [4,199,#6,#center of eye |
|
133,362,#inner eye |
|
33,263, #outer eye |
|
61,291]#mouth |
|
""" |
|
|
|
|
|
|
|
|
|
def landmarks_to_model_coordinates(face_landmarks,indices,w,h):
    # map landmark indices to model-space points; z is scaled by the shorter
    # image side times z_multiply (helper currently unused)
    cordinates = []
    z_depth = w if w<h else h
    z_depth *= z_multiply
    for index in indices:
        xyz = get_pixel_xyz(face_landmarks,index,w,h)
        cordinates.append([
            xyz[0],xyz[1],xyz[2]*z_depth
        ])
    return cordinates
|
|
|
if image is None:
    raise gr.Error("Need an input image")
|
cv2_image = pil_to_bgr_image(image) |
|
size = cv2_image.shape |
|
center: tuple[float, float] = (size[1] / 2, size[0] / 2) |
|
|
|
|
|
import math |
|
def calculate_distance(xy, xy2): |
|
return math.sqrt((xy2[0] - xy[0])**2 + (xy2[1] - xy[1])**2) |
|
|
|
mp_image,face_landmarker_result = extract_landmark(cv2_image,"face_landmarker.task",0,0,True) |
|
im = mp_image.numpy_view() |
|
h,w = im.shape[:2] |
|
|
|
first_landmarker_result = None |
|
def get_first_landmarker_result(): |
|
if first_landmarker_result: |
|
return first_landmarker_result |
|
else: |
|
return face_landmarker_result |
|
|
|
first_translation_vector = None |
|
if double_check_offset_center: |
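    # shift the chosen landmark to the image center and detect again;
    # detection near the image center tends to be more accurate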
|
root_cordinate = get_pixel_cordinate(face_landmarker_result.face_landmarks,center_index,w,h) |
|
# integer pixel offsets for the image copy
diff_center_x = int(center[0] - root_cordinate[0])
diff_center_y = int(center[1] - root_cordinate[1])
|
base = np.zeros_like(cv2_image) |
|
copy_image(base,cv2_image,diff_center_x,diff_center_y) |
|
|
|
first_landmarker_result = face_landmarker_result |
|
mp_image,face_landmarker_result = extract_landmark(base,"face_landmarker.task",0,0,True) |
|
im = mp_image.numpy_view() |
|
transformation_matrix=first_landmarker_result.facial_transformation_matrixes[0] |
|
rotation_matrix, first_translation_vector = transformation_matrix[:3, :3],transformation_matrix[:3, 3] |
|
else: |
|
diff_center_x=0 |
|
diff_center_y=0 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if draw_mediapipe_mesh: |
|
result = first_landmarker_result |
|
if result is None:
|
result = face_landmarker_result |
|
image = mp_box.draw_landmarks_on_image(result,image) |
|
cv2_image = pil_to_bgr_image(image) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
z_angles,y_ratios,h_cordinates,_ = estimate_horizontal(get_first_landmarker_result().face_landmarks) |
|
|
|
if draw_horizontal_line:
|
for cordinates in h_cordinates: |
|
|
|
points = normalized_to_pixel(cordinates,w,h) |
|
|
|
plot_points(cv2_image,points[:2],False,5,(255,0,0),3) |
|
|
|
|
|
|
|
_,x_ratios,v_cordinates,_ = estimate_vertical(get_first_landmarker_result().face_landmarks) |
|
if draw_vertical_line: |
|
for cordinates in v_cordinates: |
|
plot_points(cv2_image,normalized_to_pixel(cordinates,w,h),False,5,(0,0,255),3,(255,0,0)) |
|
|
|
|
|
# feature angle/ratio coordinate sets; the last ratio coordinate pair is kept
# as a crude focal-length proxy for the camera matrix below
key_cordinates,angles = get_feature_angles_cordinate(get_first_landmarker_result().face_landmarks)
key_cordinates,angles = get_feature_ratios_cordinate(get_first_landmarker_result().face_landmarks)
cordinates = key_cordinates[-1]
|
|
|
|
|
|
|
z_angle_text = mean_std_label(z_angles,True) |
|
y_ratio_text = mean_std_label(y_ratios) |
|
x_ratio_text = mean_std_label(x_ratios) |
|
|
|
z_angle_detail = create_detail_labels(z_angles,True) |
|
y_ratio_detail = create_detail_labels(y_ratios) |
|
x_ratio_detail = f"forehead-chin = {np.mean(x_ratios)}" |
|
|
|
|
|
# heuristic focal length: distance between the last feature coordinate pair
focal_length: float = calculate_distance(cordinates[0],cordinates[1])
|
|
|
camera_matrix: NDArray = np.array([ |
|
[focal_length, 0, center[0]], |
|
[0, -focal_length, center[1]], |
|
[0, 0, 1] |
|
], dtype="double") |
|
dist_coeffs: NDArray = np.zeros((4, 1)) |
|
|
|
|
|
|
|
image_points: NDArray = np.array(cordinates, dtype="double") |
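# A hedged sketch (kept commented out) of how camera_matrix, dist_coeffs and a
# full set of matching 2-D/3-D correspondences could drive a classic PnP pose
# estimate; `model_points` and `image_points_2d` are hypothetical arrays that
# this script does not build:
#
#   ok, rvec, tvec = cv2.solvePnP(model_points, image_points_2d,
#                                 camera_matrix, dist_coeffs,
#                                 flags=cv2.SOLVEPNP_ITERATIVE)
#   if ok:
#       print(print_euler(rvec, "solvePnP"))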
|
|
|
from scipy.spatial.transform import Rotation as R |
|
def print_euler(rotation_vector,label=""): |
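    # build a y-x-z Euler-angle label from a Rodrigues rotation vector (helper, currently unused)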
|
order = "yxz" |
|
rotation_matrix, _ = cv2.Rodrigues(rotation_vector) |
|
|
|
r = R.from_matrix(rotation_matrix) |
|
euler_angles = r.as_euler(order, degrees=True) |
|
label = f"{label} Euler Angles {order} (degrees): {euler_angles}" |
|
return label |
|
|
|
rotation_vector = None |
|
translation_vector = None |
|
im_with_pose = cv2_image |
|
mediapipe_text = None |
|
|
|
|
|
def face_landmarker_result_to_angle_label(face_landmarker_result,order="yxz"): |
|
if len(face_landmarker_result.facial_transformation_matrixes)>0: |
|
|
|
transformation_matrix=face_landmarker_result.facial_transformation_matrixes[0] |
|
|
|
rotation_matrix, translation_vector = transformation_matrix[:3, :3],transformation_matrix[:3, 3] |
|
|
|
vector_multiply=10 |
|
scaled_translation_vector =(translation_vector[0]*vector_multiply,translation_vector[1]*vector_multiply,translation_vector[2]*vector_multiply) |
|
|
|
|
|
|
|
|
|
r = R.from_matrix(rotation_matrix) |
|
euler_angles = r.as_euler(order, degrees=True) |
|
|
|
label = f"[{order[0]}:{euler_angles[0]:.2f},{order[1]}:{-euler_angles[1]:.2f},{order[2]}:{-euler_angles[2]:.2f}]" |
|
|
|
    return label,rotation_matrix,scaled_translation_vector
# fall back to an identity pose when no transformation matrix was detected
return "",np.eye(3),(0.0,0.0,0.0)
|
|
|
if first_landmarker_result is not None:
|
mediapipe_first_text,_,_ = face_landmarker_result_to_angle_label(first_landmarker_result,order) |
|
else: |
|
mediapipe_first_text = "" |
|
|
|
mediapipe_second_text,rotation_matrix,scaled_translation_vector = face_landmarker_result_to_angle_label(face_landmarker_result,order) |
|
|
|
rotation_vector, _ = cv2.Rodrigues(rotation_matrix) |
|
translation_vector = scaled_translation_vector |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if draw_mediapipe_angle: |
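    # rotate two points on the -z axis by MediaPipe's Euler angles and draw
    # them from the nose root (landmark 4) as a pose-direction line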
|
root_cordinate = get_pixel_xyz(get_first_landmarker_result().face_landmarks,4,w,h) |
|
|
|
r = R.from_matrix(rotation_matrix) |
|
euler_angles = r.as_euler("yxz", degrees=False) |
|
|
|
draw_cordinate1=rotate_point_euler((0,0,-100),[-euler_angles[1],euler_angles[0],euler_angles[2]],"yxz") |
|
draw_cordinate2=rotate_point_euler((0,0,-200),[-euler_angles[1],euler_angles[0],euler_angles[2]],"yxz") |
|
|
|
plot_points(im_with_pose,[root_cordinate[:2]+draw_cordinate1[:2],root_cordinate[:2]+draw_cordinate2[:2],root_cordinate[:2]],False,5,(0,128,0),3,(0,255,0)) |
|
|
|
|
|
landmarks = get_normalized_landmarks(get_first_landmarker_result().face_landmarks) |
|
face_ratio_infos = [] |
|
|
|
|
|
|
|
|
|
|
|
v_cordinates=[ |
|
["philtrum",landmarks[175],landmarks[13],np.mean((landmarks[164],landmarks[2]),axis=0).tolist()], |
|
["straight",landmarks[175],landmarks[94],landmarks[9]], |
|
["face",landmarks[175],landmarks[9],landmarks[127],landmarks[356]], |
|
["r-eyes",landmarks[33],landmarks[190],landmarks[414]], |
|
["r-contour",landmarks[127],landmarks[33],landmarks[190]], |
|
["l-eyes",landmarks[263],landmarks[414],landmarks[190]], |
|
["l-contour",landmarks[356],landmarks[263],landmarks[414]], |
|
["lips",landmarks[17],landmarks[13],np.mean((landmarks[37],landmarks[267]),axis=0).tolist()], |
|
["mouth-eye",landmarks[61],landmarks[291],landmarks[133],landmarks[362]], |
|
] |
|
|
|
for cordinates in v_cordinates: |
|
ratio=ratios_cordinates(cordinates[1:]) |
|
if draw_faceratio_line: |
|
plot_points(cv2_image,normalized_to_pixel(cordinates[1:],w,h),False,5,(0,255,255),3,(255,255,0)) |
|
label = f"{cordinates[0]}:{ratio:.2f}" |
|
face_ratio_infos.append(label) |
|
face_ratio_info=",".join(face_ratio_infos) |
|
return cv2.cvtColor(im_with_pose,cv2.COLOR_BGR2RGB),mediapipe_first_text,mediapipe_second_text,z_angle_text,y_ratio_text,x_ratio_text,z_angle_detail,y_ratio_detail,x_ratio_detail,face_ratio_info |
|
|
|
|
|
|
|
def find_nearest_weighted_euclidean_2d(target_angles_full, all_angles_full, weights): |
|
target_angles = target_angles_full[:5] |
|
all_angles = all_angles_full[:, :5] |
|
|
|
weighted_diff = (all_angles - target_angles) * weights |
|
distances = np.linalg.norm(weighted_diff, axis=1) |
|
nearest_index = np.argmin(distances) |
|
return nearest_index, all_angles_full[nearest_index] |
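# Hedged usage sketch: `angle_table` is a hypothetical (N, 5+) array of stored
# feature rows (e.g. loaded via load_data); only the first five columns are compared.
#
#   weights = np.array([1.0, 1.0, 1.0, 0.5, 0.5])
#   idx, row = find_nearest_weighted_euclidean_2d(target_row, angle_table, weights)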
|
|
|
import math |
|
from mp_estimate import estimate_horizontal_points,estimate_vertical_points,estimate_rotations_v2
|
|
|
import joblib |
|
# meta-model that blends the eight base regressors' outputs
stacking8_model = joblib.load("models/stacking8.joblib")
|
cached_models = {} |
|
def find_angles(image,order): |
|
if image is None: |
|
raise gr.Error("need image") |
|
cv2_image = pil_to_bgr_image(image) |
|
size = cv2_image.shape |
|
mp_image,face_landmarker_result = extract_landmark(cv2_image,"face_landmarker.task",0,0,True) |
|
|
|
|
|
features_text = estimate_rotations_v2(face_landmarker_result) |
|
features_value_origin = [float(value) for value in features_text.split(",")] |
|
features_value = features_value_origin.copy() |
|
print("features x-angle",math.degrees(features_value[3])-90) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
x_ratios = 11 # number of trailing x-ratio features in the feature vector
|
|
|
|
|
# three per-axis feature subsets (note: features_values is rebuilt below
# before the models actually run)
features_values = [
|
[np.add(features_value[-x_ratios:],features_value[0:1])], |
|
[features_value[:-x_ratios]], |
|
[np.hstack([features_value[ 3:5],features_value[ 6:-x_ratios]])] |
|
|
|
] |
|
|
|
|
|
from scipy.spatial.transform import Rotation as R |
|
def flatten_for(lst): |
|
return [round(item, 3) for sublist in lst for item in sublist] |
|
def change_euler_order(ordered_array,from_order,to_order,degrees=True):
    r = R.from_euler(from_order,ordered_array,degrees=degrees)
    result = r.as_euler(to_order,degrees=degrees)
    return np.round(result,2).tolist()
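# Hedged usage sketch (illustrative values only): re-express angles given in
# "yxz" order as "xyz" order, e.g.
#   change_euler_order([30.0, 10.0, 5.0], "yxz", "xyz")
# returns the same rotation as [x, y, z] angles, rounded to 2 decimals.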
|
|
|
def load_joblib(path): |
|
if path in cached_models: |
|
return cached_models[path] |
|
else: |
|
model = joblib.load(path) |
|
cached_models[path] = model |
|
return model |
|
|
|
def estimate(model_path,scaler_path,features_values,multi=True): |
|
|
|
scalers = load_joblib("models/"+scaler_path) |
|
if not isinstance(scalers,list): |
|
scalers=(scalers,scalers,scalers) |
|
for i,scaler in enumerate(scalers): |
|
|
|
features_values[i] = scaler.transform(features_values[i].copy()) |
|
|
|
|
|
result_preds=[] |
|
models = load_joblib("models/"+model_path) |
|
|
|
if multi: |
|
for i,model in enumerate(models): |
|
y_pred = model.predict(features_values[i]) |
|
result_preds.append(y_pred.round(2)) |
|
result_preds=flatten_for(result_preds) |
|
yxz =[result_preds[1],result_preds[0],result_preds[2]] # reordered copy (currently unused)
|
else: |
|
result_preds=models.predict(features_values[0]) |
|
result_preds=flatten_for(result_preds) |
|
|
|
|
|
|
|
|
|
|
|
return result_preds |
|
|
|
|
|
|
|
|
|
def estimate2(model_key,features_values): |
|
model_path=f"models/{model_key}.joblib" |
|
scaler_path=f"models/{model_key}_scaler.joblib" |
|
polynomial_path=f"models/{model_key}_polynomial_features.joblib" |
|
selectkbest_path=f"models/{model_key}_selectkbest.joblib" |
|
|
|
model = load_joblib(model_path) |
|
scaler = load_joblib(scaler_path) |
|
polynomial = load_joblib(polynomial_path) |
|
selectkbest = load_joblib(selectkbest_path) |
|
|
|
result_preds=[]
for i in range(3):
    # per-axis pipeline: polynomial features -> select-k-best -> scaler -> model
    x = polynomial[i].transform(features_values[i].copy())
    x = selectkbest[i].transform(x)
    x = scaler[i].transform(x)
    y_pred = model[i].predict(x)
    result_preds.append(y_pred.round(2))
return result_preds
|
|
|
""" |
|
import onnxruntime as ort |
|
def estimate3(model_key,features_values): |
|
model_path=f"models/{model_key}.onnx" |
|
ort_session = ort.InferenceSession(model_path) |
|
|
|
#result_preds=[] |
|
#result_preds=models.predict(features_values[0]) |
|
#result_preds=flatten_for(result_preds) |
|
input_name = ort_session.get_inputs()[0].name |
|
input_data = features_values.astype(np.float32) |
|
result_preds = ort_session.run(None, {input_name: input_data}) |
|
#print((result_preds)) |
|
return result_preds[0] # yxz-ordered x,y,z
|
""" |
|
|
|
|
|
|
|
|
|
features_value = features_value_origin.copy() |
|
features_values = [ |
|
[features_value],[features_value],[features_value] |
|
] |
|
|
|
short_result = estimate2('hyper-hgr-random15',features_values.copy()) |
|
|
|
|
|
|
|
|
|
|
|
middle_result = estimate2('hyper-hgr-random45',features_values.copy()) |
|
|
|
long_result = estimate2('hyper-hgr-random90',features_values.copy()) |
|
|
|
|
|
e1_key="lgbm-optimizer_15dart_random" |
|
short_result2a = estimate(f'{e1_key}.joblib',f'{e1_key}_scaler.joblib',features_values.copy()) |
|
e1_key="lgbm-optimizer_15_random" |
|
short_result2 = estimate(f'{e1_key}.joblib',f'{e1_key}_scaler.joblib',features_values.copy()) |
|
|
|
e1_key="lgbm-optimizer_45_random" |
|
|
|
middle_result2 = estimate(f'{e1_key}.joblib',f'{e1_key}_scaler.joblib',features_values.copy()) |
|
e1_key="lgbm-optimizer_90_random" |
|
long_result2 = estimate(f'{e1_key}.joblib',f'{e1_key}_scaler.joblib',features_values.copy()) |
|
|
|
e1_key="etr_90" |
|
long_result3 = estimate(f'{e1_key}.joblib',f'{e1_key}_scaler.joblib',features_values.copy(),False) |
|
|
|
|
|
|
|
def average(values):
    # element-wise mean across several flattened model outputs (currently unused)
    flat_values=[]
    for value in values:
        flat_values += [flatten_for(value)]
    return np.mean(flat_values,axis=0).tolist()
|
# per-model predictions, kept for inspection (currently unused)
data={
|
"hgbr-15":flatten_for(short_result), |
|
"hgbr-45":flatten_for(middle_result), |
|
"hgbr-90":flatten_for(long_result), |
|
"lgbm-15dart":(short_result2a), |
|
"lgbm-15":(short_result2), |
|
"lgbm-45":(middle_result2), |
|
"lgbm-90":(long_result2), |
|
} |
|
|
|
|
|
# concatenate all base-model outputs; the order must match the stacking
# model's training layout
stack_x = short_result2a+short_result2+middle_result2+long_result2+flatten_for(short_result)+flatten_for(middle_result)+flatten_for(long_result)+long_result3
|
|
|
|
|
average_data=stacking8_model.predict(np.array([stack_x])) |
|
|
|
|
|
|
|
|
|
def yxz_xyz_to_yxz(euler):
    # swap the first two components: [x, y, z] -> [y, x, z]
    return [euler[1],euler[0],euler[2]]
|
|
|
average_data = change_euler_order(yxz_xyz_to_yxz(flatten_for(average_data)),"yxz",order) |
|
short_result = change_euler_order(yxz_xyz_to_yxz(flatten_for(short_result)),"yxz",order) |
|
middle_result = change_euler_order(yxz_xyz_to_yxz(flatten_for(middle_result)),"yxz",order) |
|
long_result = change_euler_order(yxz_xyz_to_yxz(flatten_for(long_result)),"yxz",order) |
|
short_result2a = change_euler_order(yxz_xyz_to_yxz(short_result2a),"yxz",order) |
|
short_result2 = change_euler_order(yxz_xyz_to_yxz(short_result2),"yxz",order) |
|
middle_result2 = change_euler_order(yxz_xyz_to_yxz(middle_result2),"yxz",order) |
|
long_result2 = change_euler_order(yxz_xyz_to_yxz(long_result2),"yxz",order) |
|
long_result3 = change_euler_order(yxz_xyz_to_yxz(long_result3),"yxz",order) |
|
|
|
|
|
|
|
|
|
|
|
return average_data,short_result,middle_result,long_result,(short_result2a),(short_result2),(middle_result2),(long_result2),long_result3 |
|
|
|
|
|
css=""" |
|
#col-left { |
|
margin: 0 auto; |
|
max-width: 640px; |
|
} |
|
#col-right { |
|
margin: 0 auto; |
|
max-width: 640px; |
|
} |
|
.grid-container { |
|
display: flex; |
|
align-items: center; |
|
justify-content: center; |
|
gap:10px |
|
} |
|
|
|
.image { |
|
width: 128px; |
|
height: 128px; |
|
object-fit: cover; |
|
} |
|
|
|
.text { |
|
font-size: 16px; |
|
} |
|
""" |
|
|
|
|
|
|
|
|
|
|
|
with gr.Blocks(css=css, elem_id="demo-container") as demo: |
|
with gr.Column(): |
|
gr.HTML(read_file("demo_header.html")) |
|
gr.HTML(read_file("demo_tools.html")) |
|
with gr.Row(): |
|
with gr.Column(): |
|
image = gr.Image(height=800,sources=['upload','clipboard'],image_mode='RGB',elem_id="image_upload", type="pil", label="Image") |
|
|
|
with gr.Row(elem_id="prompt-container", equal_height=False): |
|
with gr.Row(): |
|
btn = gr.Button("Head-Pose Estimate", elem_id="run_button",variant="primary") |
|
order = gr.Dropdown(label="Order",value="xyz",choices=["xyz","xzy","yxz","yzx","zxy","zyx"],info="returened array order is same as label") |
|
|
|
|
|
|
|
with gr.Accordion(label="Advanced Settings", open=True): |
|
|
|
base_image = gr.Image(sources=['upload','clipboard'],image_mode='RGB',elem_id="base_image_upload", type="pil", label="Base Image",visible=False)
|
|
|
with gr.Row( equal_height=True): |
|
|
|
double_check = gr.Checkbox(label="Double Check",value=True,info="move the center-index landmark to the image center and detect again (usually more accurate); 195 is recommended")
|
center_index = gr.Slider(info="center-index", |
|
label="Center-index", |
|
minimum=0, |
|
maximum=467, |
|
step=1, |
|
value=195) |
|
z_multiply = gr.Slider(info="nose height", |
|
label="Depth-Multiply", |
|
minimum=0.1, |
|
maximum=1.5, |
|
step=0.01, |
|
value=0.8) |
|
|
|
with gr.Row( equal_height=True): |
|
draw_mediapipe_mesh = gr.Checkbox(label="Draw mediapipe mesh",value=True) |
|
draw_mediapipe_angle = gr.Checkbox(label="Draw mediapipe angle(green)",value=True) |
|
with gr.Row( equal_height=True): |
|
draw_horizontal_line = gr.Checkbox(label="Draw horizontal line(red)",value=True)
|
draw_vertical_line = gr.Checkbox(label="Draw vertical line(blue)",value=True) |
|
draw_faceratio_line = gr.Checkbox(label="Draw Face-Ratio line(yellow)",value=False)
|
|
|
with gr.Column(): |
|
result_image = gr.Image(height=760,label="Result", elem_id="output-animation",image_mode='RGB') |
|
with gr.Row( equal_height=True): |
|
mediapipe_last_text = gr.Textbox(label="2nd or last MediaPipe result")

mediapipe_first_text = gr.Textbox(label="first MediaPipe result")
|
|
|
with gr.Row( equal_height=True): |
|
z_angle_text = gr.Textbox(label="Z angle by horizontal-line",info="starts at 0; this is exactly the Z angle")

y_ratio_text = gr.Textbox(label="Y left-right length ratio",info="starts around 0.49-0.51")

x_ratio_text = gr.Textbox(label="X up-down length ratio",info="starts near 0.49; check the nostril shape")
|
with gr.Accordion(label="Angle Ratio Details", open=False): |
|
with gr.Row( equal_height=True): |
|
z_angle_detail_text = gr.TextArea(label="Z-angle detail") |
|
y_ratio_detail = gr.TextArea(label="Y-ratio detail") |
|
x_ratio_detail = gr.TextArea(label="X-ratio detail",value="") |
|
with gr.Row( equal_height=True): |
|
face_ratio_info = gr.Text(label="Face Ratio",info="Average philtrum:1.82(std 0.13),straight:0.82(std 0.04),face:0.91(std 0.02),r-eyes:0.86(std 0.03),r-contour:0.77(std 0.05),l-eyes:0.86(std 0.03),l-contour:0.75(std 0.05),lips:1.43(std 0.16),mouth-eye:1.21(std 0.07)") |
|
gr.HTML("<h5>For Rotation sometime differenct to mediapipe's result</h5>") |
|
with gr.Row( equal_height=True): |
|
bt_test = gr.Button("Estimate by Models") |
|
average_result = gr.Text(label="stacking (blend of 8 results with trained weights)")
|
gr.HTML("<p>number is max training angle,usually stacking is works well.slow because of etr</p>") |
|
with gr.Row( equal_height=True): |
|
short_result = gr.Text(label="hgbr-15") |
|
middle_result = gr.Text(label="hgbr-45") |
|
long_result = gr.Text(label="hgbr-90") |
|
long_result3 = gr.Text(label="etr-90") |
|
with gr.Row( equal_height=True): |
|
short_result2a = gr.Text(label="lgbm-15dart") |
|
short_result2 = gr.Text(label="lgbm-15") |
|
middle_result2 = gr.Text(label="lgbm-45") |
|
long_result2 = gr.Text(label="lgbm-90") |
|
|
|
bt_test.click(fn=find_angles,inputs=[image,order],outputs=[average_result,short_result,middle_result,long_result,short_result2a,short_result2,middle_result2,long_result2,long_result3]) |
|
|
|
btn.click(fn=process_images, inputs=[image,base_image,order, |
|
double_check,center_index, |
|
draw_mediapipe_mesh,z_multiply,draw_mediapipe_angle,draw_horizontal_line,draw_vertical_line,draw_faceratio_line,
|
],outputs=[result_image,mediapipe_first_text,mediapipe_last_text,z_angle_text,y_ratio_text,x_ratio_text,z_angle_detail_text,y_ratio_detail,x_ratio_detail,face_ratio_info] ,api_name='infer') |
|
|
|
example_images = [ |
|
["examples/02316230.jpg"], |
|
["examples/00003245_00.jpg"], |
|
["examples/00827009.jpg"], |
|
["examples/00002062.jpg"], |
|
["examples/00824008.jpg"], |
|
["examples/00825000.jpg"], |
|
["examples/00826007.jpg"], |
|
["examples/00824006.jpg"], |
|
["examples/00828003.jpg"], |
|
["examples/00002200.jpg"], |
|
["examples/00005259.jpg"], |
|
["examples/00018022.jpg"], |
|
["examples/img-above.jpg"], |
|
["examples/00100265.jpg"], |
|
["examples/00039259.jpg"], |
|
|
|
] |
|
example1=gr.Examples( |
|
examples = example_images,label="Image", |
|
inputs=[image],examples_per_page=8 |
|
) |
|
gr.HTML(read_file("demo_footer.html")) |
|
|
|
if __name__ == "__main__": |
|
demo.launch() |
|
|