|
|
|
import os |
|
import pandas as pd |
|
import cv2 |
|
import boto3
|
|
|
dic = {"images": [], "gender": [], "age": []} |
|
|
|
|
|
def highlightFace(net, frame, conf_threshold=0.7): |
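    """Run the OpenCV DNN face detector on `frame`.

    Returns a copy of the frame with detection rectangles drawn on it and a
    list of [x1, y1, x2, y2] boxes whose confidence exceeds `conf_threshold`.
    """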
|
frameOpencvDnn = frame.copy() |
|
frameHeight = frameOpencvDnn.shape[0] |
|
frameWidth = frameOpencvDnn.shape[1] |
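    # Build a 300x300, BGR->RGB blob with the detector's training means subtracted.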
|
blob = cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False) |
|
|
|
net.setInput(blob) |
|
detections = net.forward() |
|
faceBoxes = [] |
|
for i in range(detections.shape[2]): |
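        # Each detection row is [image_id, label, confidence, x1, y1, x2, y2],
        # with box coordinates normalized to [0, 1].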
|
confidence = detections[0, 0, i, 2] |
|
if confidence > conf_threshold: |
|
x1 = int(detections[0, 0, i, 3] * frameWidth) |
|
y1 = int(detections[0, 0, i, 4] * frameHeight) |
|
x2 = int(detections[0, 0, i, 5] * frameWidth) |
|
y2 = int(detections[0, 0, i, 6] * frameHeight) |
|
faceBoxes.append([x1, y1, x2, y2]) |
|
cv2.rectangle(frameOpencvDnn, (x1, y1), (x2, y2), (0, 255, 0), int(round(frameHeight / 150)), 8) |
|
return frameOpencvDnn, faceBoxes |
|
|
|
|
|
def process_image(image): |
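    """Detect faces in the image at path `image`, predict gender and age for
    each face, and append the results to the module-level `dic`."""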
|
|
|
|
|
|
|
|
|
|
|
faceProto = "opencv_face_detector.pbtxt" |
|
faceModel = "opencv_face_detector_uint8.pb" |
|
ageProto = "age_deploy.prototxt" |
|
ageModel = "age_net.caffemodel" |
|
genderProto = "gender_deploy.prototxt" |
|
genderModel = "gender_net.caffemodel" |
|
|
|
MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746) |
|
ageList = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)'] |
|
genderList = ['Male', 'Female'] |
|
|
|
faceNet = cv2.dnn.readNet(faceModel, faceProto) |
|
ageNet = cv2.dnn.readNet(ageModel, ageProto) |
|
genderNet = cv2.dnn.readNet(genderModel, genderProto) |
|
|
|
    # cv2.VideoCapture can decode a single image file as a one-frame "video";
    # the loop below exits once that frame has been consumed.
    video = cv2.VideoCapture(image)
    padding = 20
    while True:
        try:
            hasFrame, frame = video.read()
            if not hasFrame:
                break
|
|
|
resultImg, faceBoxes = highlightFace(faceNet, frame) |
|
            if not faceBoxes:
                print(f"No face detected in {image}")
|
|
|
for faceBox in faceBoxes: |
|
                face = frame[max(0, faceBox[1] - padding):min(faceBox[3] + padding, frame.shape[0] - 1),
                             max(0, faceBox[0] - padding):min(faceBox[2] + padding, frame.shape[1] - 1)]
|
|
|
blob = cv2.dnn.blobFromImage(face, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False) |
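                # The same 227x227 mean-subtracted blob is fed to both the gender and the age network.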
|
genderNet.setInput(blob) |
|
genderPreds = genderNet.forward() |
|
gender = genderList[genderPreds[0].argmax()] |
|
print(f'Gender: {gender}') |
|
|
|
ageNet.setInput(blob) |
|
agePreds = ageNet.forward() |
|
age = ageList[agePreds[0].argmax()] |
|
|
|
print(f'Age: {age[1:-1]} years') |
|
dic['images'].append(image) |
|
dic['gender'].append(gender) |
|
dic['age'].append(age[1:-1]) |
|
|
|
|
|
|
|
        except Exception as e:
            # A face crop can be empty or otherwise unusable; report it and move on.
            print(f"Skipping a frame from {image}: {e}")
            continue
    video.release()
|
|
|
|
|
|
|
# Credentials are resolved by boto3's standard chain (environment variables,
# ~/.aws/credentials, or an attached IAM role) rather than being hard-coded here.
s3 = boto3.resource(
    service_name='s3',
    region_name='ap-south-1',
)
|
def download_s3_folder(bucket, folder, local_dir='./images'): |
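    """Download every object under `folder` in `bucket` into `local_dir`,
    preserving the relative key structure."""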
|
bucket = s3.Bucket(bucket) |
|
for obj in bucket.objects.filter(Prefix=folder): |
|
target = obj.key if local_dir is None \ |
|
else os.path.join(local_dir, os.path.relpath(obj.key, folder)) |
|
        # exist_ok avoids failing when the directory already exists; `or '.'`
        # handles keys that resolve to the current directory.
        os.makedirs(os.path.dirname(target) or '.', exist_ok=True)
|
if obj.key[-1] == '/': |
|
continue |
|
bucket.download_file(obj.key, target) |
|
|
|
|
|
def predict_age_gender(): |
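    """Run age/gender prediction over everything in ./images and write the
    accumulated results to an Excel file under ./output."""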
|
    images = os.listdir('images')

    for img in images:
        process_image(os.path.join('./images', img))
|
    print(dic)

    df = pd.DataFrame.from_dict(dic, orient='index').transpose()
    print(df.head())
    os.makedirs('./output', exist_ok=True)
    # Write .xlsx (openpyxl); recent pandas releases no longer write legacy .xls files.
    df.to_excel("./output/result_s3.xlsx")
|
|
|
# Pull the input images down from S3, run the models, then push the results back up.
download_s3_folder('genderagedata', 'input_images')
predict_age_gender()
|
|
|
s3.Bucket('genderagedata').upload_file(Filename='./output/result_s3.xlsx', Key='output_images/result.xlsx')
|
|