# This file is provided by DOF Studio on huggingface.co
# with model animeGender-dvgg-0.7.
# Link: https://huggingface.co/DOFOFFICIAL/animeGender-dvgg-0.7

import cv2
import torch
import torchvision
import numpy as np
from torchvision import transforms
from PIL import Image

num_cls = 2
classes = ['female', 'male']

#############################
# model struct
def model_struct():
    # Start from ImageNet-pretrained VGG16; these weights are overwritten
    # when a fine-tuned checkpoint is loaded via modelload() below.
    model = torchvision.models.vgg16(weights=torchvision.models.VGG16_Weights.IMAGENET1K_V1)

    # Replace the final 4096 -> 1000 layer with a 2-class head.
    last_dim = len(model.classifier) - 1
    num_fc = model.classifier[last_dim].in_features
    model.classifier[last_dim] = torch.nn.Linear(num_fc, num_cls)

    # Append an explicit Softmax so the model outputs class probabilities.
    model.classifier.append(torch.nn.Softmax(dim=1))

    # Freeze the pretrained backbone; only the new head stays trainable.
    for param in model.parameters():
        param.requires_grad = False

    for param in model.classifier[last_dim].parameters():
        param.requires_grad = True
    for param in model.classifier[last_dim + 1].parameters():
        param.requires_grad = True

    return model
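
# A minimal sanity check for model_struct() (helper name is illustrative):
# the classifier should end in a 4096 -> 2 Linear head followed by Softmax.
def _sanity_check_struct():
    m = model_struct()
    head = m.classifier[-2]
    assert isinstance(head, torch.nn.Linear) and head.out_features == num_cls
    assert isinstance(m.classifier[-1], torch.nn.Softmax)
    return m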


#############################
# graphic lib
def dim(imgpath):
    img = cv2.imread(imgpath, 1)
    height, width, channels = img.shape
    return height, width, channels

def crop(imgfrom, imgto, x = 0, w = 64, y = 0, h = 64):
    img = cv2.imread(imgfrom, 1)
    img2 = img[y:y+h, x:x+w]
    return cv2.imwrite(imgto, img2)

def resize(imgfrom, imgto, width, height):
    img = cv2.imread(imgfrom, 1)
    img2 = cv2.resize(img, (width, height))
    return cv2.imwrite(imgto, img2)

def rgb32to24(imgfrom, imgto):
    # Read with any alpha channel preserved, then drop it if present;
    # imread(..., 1) would already strip alpha, making BGRA2BGR fail.
    img = cv2.imread(imgfrom, cv2.IMREAD_UNCHANGED)
    if img.ndim == 3 and img.shape[2] == 4:
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
    return cv2.imwrite(imgto, img)
    
def cmpgraph_64x64(imgfrom, imgto):
    # Scale the shorter side to 64 px, then keep the top-left 64x64 crop.
    height, width, channels = dim(imgfrom)
    img = cv2.imread(imgfrom, 1)  # flag 1 always yields 3-channel BGR
    if height > width:
        hnew = int(np.round(64 / width * height))
        wnew = 64
        img2 = cv2.resize(img, (wnew, hnew))
        img2 = img2[0:64, 0:64]
    elif width > height:
        wnew = int(np.round(64 / height * width))
        hnew = 64
        img2 = cv2.resize(img, (wnew, hnew))
        img2 = img2[0:64, 0:64]
    else:
        img2 = cv2.resize(img, (64, 64))
    # imread(..., 1) already returns BGR, so no BGRA conversion is needed.
    return cv2.imwrite(imgto, img2)
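
# Usage sketch for the graphics helpers above (file names are placeholders):
#
#   rgb32to24("with_alpha.png", "no_alpha.png")        # 32-bit RGBA -> 24-bit
#   cmpgraph_64x64("character.jpg", "character64.png")
#   dim("character64.png")                             # -> (64, 64, 3)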
    

#############################
# model usage
def predict_class(img_path, model):
    # Force 3-channel RGB so grayscale or RGBA inputs do not break VGG.
    img = Image.open(img_path).convert('RGB')
    transform = transforms.Compose([transforms.ToTensor()])
    img = transform(img)
    img = torch.unsqueeze(img, dim=0)
    with torch.no_grad():
        out = model(img)
    # out already holds probabilities thanks to the appended Softmax.
    conf, idx = torch.max(out, 1)
    cls = classes[idx.item()]
    print('This is ' + cls + ' with confidence of ' + str(np.round(conf.item(), 3)))

def modelload(modelpath):
    model = model_struct()
    save = torch.load(modelpath, map_location='cpu')
    model.load_state_dict(save)
    model.eval()  # disable dropout in the VGG classifier for inference
    return model
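
# Note: modelload() expects a raw state_dict checkpoint, i.e. a file saved
# along the lines of:
#
#   torch.save(model.state_dict(), "model_animeGender-dvgg-0.7.pth")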

def predictmain(model, filepath):
    img = filepath
    predict_class(img, model)
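
# Convenience sketch combining the two steps in __main__ below (helper name
# and temp-file path are illustrative): normalize any image, then classify.
def classify_image(model, imgpath, tmppath="_normalized_64x64.png"):
    cmpgraph_64x64(imgpath, tmppath)  # rescale/crop to the expected 64x64 BGR
    predict_class(tmppath, model)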


if __name__ == '__main__':
    # local usage
    model = modelload("model_animeGender-dvgg-0.7.pth")
    
    # preprocess your picture and run inference
    cmpgraph_64x64("path.png", "path(1).png")
    predictmain(model, "path(1).png")