# serialize_features.py
# Detect faces in a folder of JPEGs, compute 128D dlib face descriptors,
# save face chips, and store everything in an SQLite database.
- import sys
- import os
- import glob
- import dlib
- import pickle
- import argparse
- import sqlite3
- import shutil
- from scipy.misc import imread
- from face_models import Face, Embedding, mod_sqlite3
- from time import perf_counter
def main(args):
    """Detect faces in every JPEG under ``args.input``, compute a 128D dlib
    descriptor per face, save each aligned face chip to
    ``args.all_faces_output``, and persist (origin, chip, position, embedding)
    rows into the SQLite database ``args.data_base``.

    With ``args.append`` the existing database and chip folder are kept and
    face numbering continues after the rows already stored; otherwise both
    are wiped and recreated.
    """
    # Teach sqlite3 how to rebuild our custom column types when reading back.
    sqlite3.register_converter("face_pos_t", Face.convert)
    sqlite3.register_converter("emb_t", Embedding.convert)

    # Load all the models we need: a detector to find the faces, a shape
    # predictor to precisely localize each face, and the recognition model
    # that turns an aligned face into a 128D descriptor.
    detector = dlib.cnn_face_detection_model_v1(args.detector)
    sp = dlib.shape_predictor(args.predictor)
    facerec = dlib.face_recognition_model_v1(args.recognizer)

    descriptors = []        # Embedding per accepted face
    positions = []          # Face (bounding box) per accepted face
    files_with_faces = []   # source image path per accepted face
    chips = []              # saved chip path per accepted face
    face_count = 0          # running index used to name chip files

    if not args.append:
        # Fresh run: wipe any previous chip folder and database.
        if os.path.isdir(args.all_faces_output):
            shutil.rmtree(args.all_faces_output)
        if os.path.exists(args.data_base):
            os.remove(args.data_base)
        os.makedirs(args.all_faces_output)
    else:
        # Appending: continue chip numbering after the rows already stored,
        # and make sure the chip folder exists (it may have been removed).
        con = mod_sqlite3.connect(args.data_base,
                                  detect_types=sqlite3.PARSE_DECLTYPES)
        try:
            face_count = con.execute('SELECT Count(*) FROM face').fetchone()[0]
        finally:
            con.close()
        os.makedirs(args.all_faces_output, exist_ok=True)

    # BUG FIX: sort BEFORE truncating with -n, so the prefix taken is a
    # deterministic (alphabetical) one rather than filesystem order.
    files = sorted(glob.glob(os.path.join(args.input, "*.jpg")))
    if args.num_of_faces:
        files = files[:args.num_of_faces]

    # Start timing the whole detection/description pass.
    start = perf_counter()
    next_milestone = 10  # next 10%-progress mark to report

    for idx, f in enumerate(files, start=1):
        # NOTE(review): scipy.misc.imread was removed in SciPy 1.2 —
        # consider migrating to imageio.imread.
        img = imread(f, mode='RGB')
        # Ask the detector for face bounding boxes; the 1 means "upsample the
        # image once", which enlarges everything and finds smaller faces.
        mmdets = detector(img, 1)
        dets = dlib.rectangles()
        dets.extend([d.rect for d in mmdets])

        # Process each detected face.
        for k, d in enumerate(dets):
            # Landmarks for the face in box d, used to align the chip.
            shape = sp(img, d)
            file_path = os.path.join(args.all_faces_output,
                                     'face_' + str(face_count))
            dlib.save_face_chip(img, shape, file_path)
            try:
                # 128D vector describing the face identified by `shape`.
                face_descriptor = facerec.compute_face_descriptor(img, shape)
            except Exception:
                # Best-effort: log the failing image and keep going.
                print("Failed with image: ", f)
                print("Image Shape: ", img.shape)
                continue
            descriptors.append(Embedding(list(face_descriptor)))
            positions.append(Face(mmdets[k].rect))
            chips.append(file_path + '.jpg')
            files_with_faces.append(f)
            face_count += 1

        # BUG FIX: progress was computed from face_count with true division
        # (a float almost never in range(10,101,10)) and printed a literal
        # "%%". Report each 10% milestone based on files processed instead.
        percent = 100 * idx // len(files)
        while next_milestone <= percent:
            print(next_milestone, "% completed")
            next_milestone += 10

    print('Total time:', perf_counter() - start)
    print('Processed {} files in total'.format(len(files)))

    # Persist everything; close the connection even if an insert fails.
    con = mod_sqlite3.connect(args.data_base,
                              detect_types=sqlite3.PARSE_DECLTYPES)
    try:
        if not args.append:
            con.execute('create table face(origin, chip, position face_pos_t, embedding emb_t, cluster_id, cluster_id2)')
        con.executemany(
            'insert into face(origin, chip, position, embedding) values(?,?,?,?)',
            zip(files_with_faces, chips, positions, descriptors))
        con.commit()
    finally:
        con.close()
if __name__ == '__main__':
    # Command-line entry point: every model path, the input folder, the
    # database file, and the chip output folder are overridable flags.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '-a', '--append', action="store_true",
        help='Whether to append new data or rewrite existing data')
    arg_parser.add_argument(
        '-n', '--num_of_faces', type=int,
        help='The amount of faces to process')
    arg_parser.add_argument(
        '-p', '--predictor',
        default='./dats/shape_predictor_5_face_landmarks.dat',
        help='Path to the landmark predictor model')
    arg_parser.add_argument(
        '-r', '--recognizer',
        default='./dats/dlib_face_recognition_resnet_model_v1.dat',
        help='Path to the face recognition model')
    arg_parser.add_argument(
        '-i', '--input', default='./fotis/meri',
        help='Input folder containing images')
    arg_parser.add_argument(
        '-b', '--data_base', default='./face_db.db',
        help='The SQLite database file to store information')
    arg_parser.add_argument(
        '-d', '--detector', default='./dats/mmod_human_face_detector.dat',
        help='Path to the face detector model')
    arg_parser.add_argument(
        '-o', '--all_faces_output', default='./found_faces',
        help='Path to new folder where to save all face chips')
    main(arg_parser.parse_args())