diff --git a/.gitattributes b/.gitattributes
index c7d9f3332a950355d5a77d85000f05e6f45435ea..152e3af6d9fead045d4a11ef7bfc92d9a28aab3d 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -32,3 +32,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+pretrained_models/angle_model.hdf5 filter=lfs diff=lfs merge=lfs -text
+pretrained_models/length_model.hdf5 filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
index 0c642d6f1d146e6f661c2c730c2fb6faf69f5d23..f2631885e86ba3ee31bc0fe148fed72346ad618f 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,34 @@
----
-title: Deep Blind Motion Deblurring
-emoji: 🐠
-colorFrom: blue
-colorTo: green
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
-license: apache-2.0
---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# Blind Motion Deblurring for Legible License Plates using Deep Learning
+
+This project uses deep learning to estimate the length and angle parameters of the point-spread function (PSF) that caused the motion blur in an image. The estimate comes from a deep CNN trained on the fast Fourier transform (FFT) of blurred images. Given enough randomly motion-blurred training examples, the model learns to estimate any kind of motion blur (up to a certain degree of blur), making this a truly blind motion-deblurring approach. Once the model has estimated the blur length and angle, the image can be deblurred with Wiener deconvolution. The technique has many possible applications; we used it specifically to deblur license plates and make them legible. The images below demonstrate the model in action: although some artifacts are introduced, the model deblurs the images to a point where the license plates are legible.
+
+
+
+
+
+
+
+## Package Requirements:-
+1. Python3
+2. Numpy
+3. OpenCV 4
+4. Tensorflow 2
+5. H5py
+6. Imutils
+7. Progressbar
+8. Scikit-Learn
+## How to Run Code:-
+
+### Training the length and angle models:-
+
+1. Download a dataset of images from [here](https://cocodataset.org/#download). Download at least 20,000 images to train the models optimally. (We used the COCO dataset, but any other dataset of general images will also work.)
+2. Use create_blurred.py to generate the motion-blurred dataset: ```python create_blurred.py -i <input_dir> -o <output_dir> [-m <max_images>]```. The output directory to store the images must already exist. The script blurs each image with a random blur length and angle; the ranges can be changed on lines 38-39. It also writes JSON files with the blur-length and blur-angle labels. Note that blur angles of 180 degrees and above wrap around (for example, 240 becomes 240-180=60), since this does not affect the PSF and it significantly reduces the number of classes.
+3. Use create_fft.py to generate the fast Fourier transform images of the blurred images, which are used for training. Run the script as ```python create_fft.py -i <input_dir> -o <output_dir>```. The input directory is the folder where the blurred images are stored; the output directory must be created manually.
+4. Use build_dataset.py to generate the HDF5 datasets used for training. This avoids the bottleneck of holding a large number of images in memory. Run the script as ```python build_dataset.py -m <angle|length> -i <fft_image_dir> -to <train_hdf5> -vo <val_hdf5> -l <label_json>```. We resize the images to (224x224) to facilitate training; if you plan to use a different size, change lines 51 and 64. Before running this script, delete any previously generated .hdf5 files.
+5. Use the angle_model_train.py script to train the model that estimates the angle parameter of the blur. Change the paths to the train and val hdf5 files on lines 17 and 18 and run the script as ```python angle_model_train.py -o <output_dir> [-m <checkpointed_model>] [-e <starting_epoch>]```.
+6. Similarly, length_model_train.py can be used to train the length model.
+7. Remember to properly modify all variables in the training scripts.
+
+### Testing the models to deblur images
+
+Run the deblur_img.py script as ```python deblur_img.py -i <blurred_image> -a <angle_model> -l <length_model>```. The final deblurred image is saved as result.jpg in the same directory as the script.
diff --git a/angle_model_train.py b/angle_model_train.py
new file mode 100644
index 0000000000000000000000000000000000000000..00d699e0805126981aa52156fae2dbd83a26d99e
--- /dev/null
+++ b/angle_model_train.py
@@ -0,0 +1,55 @@
+from sidekick.nn.conv.angle_model import MiniVgg
+from sidekick.io.hdf5datagen import Hdf5DataGen
+from sidekick.callbs.manualcheckpoint import ManualCheckpoint
+from sidekick.callbs.trainmonitor import TrainMonitor
+from sidekick.prepro.process import Process
+from sidekick.prepro.imgtoarrayprepro import ImgtoArrPrePro
+from tensorflow.keras.optimizers import SGD
+from tensorflow.keras.models import load_model
+import argparse
+
+ap= argparse.ArgumentParser()
+ap.add_argument('-o','--output', type=str, required=True ,help="Path to output directory to store metrics")
+ap.add_argument('-m', '--model', help='Path to checkpointed model')
+ap.add_argument('-e','--epoch', type=int, default=0, help="Starting epoch of training")
+args= vars(ap.parse_args())
+
+hdf5_train_path= "train.hdf5"
+hdf5_val_path= "val.hdf5"
+epochs= 50
+lr= 1e-2
+batch_size= 32
+num_classes= 180
+fig_path= args['output']+"train_plot.jpg"
+json_path= args['output']+"train_values.json"
+
+print('[NOTE]:- Building Dataset...\n')
+pro= Process(224, 224)
+i2a= ImgtoArrPrePro()
+
+train_gen= Hdf5DataGen(hdf5_train_path, batch_size, num_classes, preprocessors=[pro, i2a])
+val_gen= Hdf5DataGen(hdf5_val_path, batch_size, num_classes, preprocessors=[pro, i2a])
+
+
+if args['model'] is None:
+    print("[NOTE]:- Building model from scratch...")
+    model= MiniVgg.build(224, 224, 1, num_classes)
+    opt= SGD(learning_rate=lr, momentum=0.9, nesterov=True)
+    model.compile(loss="categorical_crossentropy", metrics=['accuracy'], optimizer=opt)
+else:
+    print("[NOTE]:- Building model {}\n".format(args['model']))
+    model= load_model(args['model'])
+
+callbacks= [ManualCheckpoint(args['output'], save_at=1, start_from=args['epoch']),
+            TrainMonitor(figPath= fig_path, jsonPath= json_path, startAt=args['epoch'])]
+
+print("[NOTE]:- Training model...\n")
+
+model.fit_generator(train_gen.generator(),
+                    steps_per_epoch=train_gen.data_length//batch_size,
+                    validation_data= val_gen.generator(),
+                    validation_steps= val_gen.data_length//batch_size,
+                    epochs=epochs,
+                    max_queue_size=10,
+                    callbacks= callbacks,
+                    initial_epoch=args['epoch'])
diff --git a/build_dataset.py b/build_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..370187ee619d2c38e131b76d2c23729a0d10e9ef
--- /dev/null
+++ b/build_dataset.py
@@ -0,0 +1,73 @@
+import numpy as np
+from sklearn.preprocessing import LabelEncoder, LabelBinarizer
+from sklearn.model_selection import train_test_split
+from sidekick.io.hdf5_writer import Hdf5Writer
+from imutils import paths
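+# build_dataset.py packs the FFT images and their blur labels into HDF5 files
+# (one train file, one validation file) so training can stream batches from disk
+# instead of holding a large number of decoded images in memory. Images are stored
+# as 224x224 grayscale arrays scaled to [0, 1]; angle labels are label-encoded,
+# while length labels are written as raw integer values.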
+import cv2 +import os +import progressbar +import json +import argparse + +ap= argparse.ArgumentParser() +ap.add_argument('--model_training', '-m', required=True, help='Flag to determine which model is trained. Choose from "angle" and "length".') +ap.add_argument('--input_dir', '-i', required=True, help='Path to input dir for images') +ap.add_argument('--train_output_file', '-to', required=True, help='Path to train output file. Must not exist by default.') +ap.add_argument('--val_output_file', '-vo', required=True, help='Path to val output file. Must not exist by default.') +ap.add_argument('--label_file', '-l', required=True, help='Path to input training labels.') + +args= vars(ap.parse_args()) + +model_flag= args['model_training'] +data_path= args['input_dir'] +hdf5_train= args['train_output_file'] +hdf5_test= args['val_output_file'] +label_file= args['label_file'] + +class_to_use= [] +f= open(label_file, 'r') +label_dict= json.loads(f.read()) + + +train_paths= list(paths.list_images(data_path)) +train_labels= [label_dict[t.split(os.path.sep)[-1]] for t in train_paths] + +if model_flag=='angle': + le= LabelEncoder() + train_labels= le.fit_transform(train_labels) + print(le.classes_) + print("Number of classes are: {}".format(len(le.classes_))) + +train_paths, test_paths, train_labels, test_labels= train_test_split(train_paths,train_labels, + test_size=0.2) + +print(train_paths[10], train_labels[10], test_paths[10], test_labels[10]) + +files= [('train', train_paths, train_labels, hdf5_train), + ('val', test_paths, test_labels, hdf5_test)] + +for optype, paths, labels, output_path in files: + + dat_writer= Hdf5Writer((len(paths), 224, 224), output_path) + + # Initializing the progress bar display + display=["Building Dataset: ", progressbar.Percentage(), " ", + progressbar.Bar(), " ", progressbar.ETA()] + + # Start the progress bar + progress= progressbar.ProgressBar(maxval=len(paths), widgets=display).start() + + # Iterate through each img path + for (i, (p, l)) in enumerate(zip(paths,labels)): + img= cv2.imread(p) + img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + img = cv2.resize(img, (224, 224)) + img= img.astype('float') / 255.0 + + + dat_writer.add([img], [l]) + progress.update(i) + + # Finish the progress for one type + progress.finish() + dat_writer.close() \ No newline at end of file diff --git a/create_blurred.py b/create_blurred.py new file mode 100644 index 0000000000000000000000000000000000000000..f1eaf459990d1539b42e8055052527cc511bdd34 --- /dev/null +++ b/create_blurred.py @@ -0,0 +1,59 @@ +import numpy as np +import os +import cv2 +import random +import json +import argparse + +ap= argparse.ArgumentParser() +ap.add_argument('--input_dir', '-i', required=True, help='Path to input dir for images') +ap.add_argument('--output_dir', '-o', required=True, help='Path to output dir to store files. 
Must be created') +ap.add_argument('--max_imgs', '-m', default=20000, type=int, help='Max number of images to generate') + +args= vars(ap.parse_args()) + +def apply_motion_blur(image, size, angle): + k = np.zeros((size, size), dtype=np.float32) + k[ (size-1)// 2 , :] = np.ones(size, dtype=np.float32) + k = cv2.warpAffine(k, cv2.getRotationMatrix2D( (size / 2 -0.5 , size / 2 -0.5 ) , angle, 1.0), (size, size) ) + k = k * ( 1.0 / np.sum(k) ) + return cv2.filter2D(image, -1, k) + + +folder = args['input_dir'] +folder_save = args['output_dir'] +max_images = args['max_imgs'] + +print(max_images) + + +labels_angle = {} +labels_length= {} +images_done = 0 +for filename in os.listdir(folder): + img = cv2.imread(os.path.join(folder,filename)) + if img is not None and img.shape[1] > img.shape[0]: + img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + img_resized = cv2.resize(img_gray, (640,480), interpolation = cv2.INTER_AREA) + length = random.randint(20,40) + angle = random.randint(0,359) + blurred = apply_motion_blur(img_resized, length, angle) + cv2.imwrite(os.path.join(folder_save,filename), blurred) + if angle>=180: + angle_a= angle - 180 + else: + angle_a= angle + labels_angle[filename] = angle_a + labels_length[filename]= length + images_done += 1 + print("%s done"%images_done) + if(images_done == max_images): + print('Done!!!') + break + +with open('angle_labels.json', 'w') as file: + json.dump(labels_angle, file) +with open('length_labels.json', 'w') as file: + json.dump(labels_length, file) + + diff --git a/create_fft.py b/create_fft.py new file mode 100644 index 0000000000000000000000000000000000000000..cdb82425cabf442d8858b94eb392ac80e1b73a03 --- /dev/null +++ b/create_fft.py @@ -0,0 +1,31 @@ +import numpy as np +import os +import cv2 +import argparse + +ap = argparse.ArgumentParser() +ap.add_argument('--input_dir', '-i', required=True, help='Path to input dir for images') +ap.add_argument('--output_dir', '-o', required=True, help='Path to output dir to store files. 
Must be created') + +args= vars(ap.parse_args()) + + +folder = args['input_dir'] +folder_save = args['output_dir'] + + +labels = {} +images_done = 0 +for filename in os.listdir(folder): + img = cv2.imread(os.path.join(folder,filename)) + if img is not None: + img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + img_gray = np.float32(img_gray) / 255.0 + f = np.fft.fft2(img_gray) + fshift = np.fft.fftshift(f) + mag_spec = 20 * np.log(np.abs(fshift)) + mag_spec = np.asarray(mag_spec, dtype=np.uint8) + cv2.imwrite(os.path.join(folder_save,filename), mag_spec) + images_done += 1 + print("%s done"%images_done) + diff --git a/deblur_img.py b/deblur_img.py new file mode 100644 index 0000000000000000000000000000000000000000..ef495f7c57314f81092f2070d31dacae17e03d29 --- /dev/null +++ b/deblur_img.py @@ -0,0 +1,85 @@ +import cv2 +import numpy as np +from tensorflow.keras.models import load_model +from tensorflow.keras.preprocessing.image import img_to_array +import argparse + +ap= argparse.ArgumentParser() +ap.add_argument('--image', '-i', required=True, help='Path to input blurred image') +ap.add_argument('--angle_model', '-a', required=True, help='Path to trained angle model') +ap.add_argument('--length_model', '-l', required=True, help='Path to trained length model') +args= vars(ap.parse_args()) + +def process(ip_image, length, deblur_angle): + noise = 0.01 + size = 200 + length= int(length) + angle = (deblur_angle*np.pi) /180 + + psf = np.ones((1, length), np.float32) #base image for psf + costerm, sinterm = np.cos(angle), np.sin(angle) + Ang = np.float32([[-costerm, sinterm, 0], [sinterm, costerm, 0]]) + size2 = size // 2 + Ang[:,2] = (size2, size2) - np.dot(Ang[:,:2], ((length-1)*0.5, 0)) + psf = cv2.warpAffine(psf, Ang, (size, size), flags=cv2.INTER_CUBIC) #Warp affine to get the desired psf +# cv2.imshow("PSF",psf) +# cv2.waitKey(0) +# cv2.destroyAllWindows() + + gray = ip_image + gray = np.float32(gray) / 255.0 + gray_dft = cv2.dft(gray, flags=cv2.DFT_COMPLEX_OUTPUT) #DFT of the image + psf /= psf.sum() #Dividing by the sum + psf_mat = np.zeros_like(gray) + psf_mat[:size, :size] = psf + psf_dft = cv2.dft(psf_mat, flags=cv2.DFT_COMPLEX_OUTPUT) #DFT of the psf + PSFsq = (psf_dft**2).sum(-1) + imgPSF = psf_dft / (PSFsq + noise)[...,np.newaxis] #H in the equation for wiener deconvolution + gray_op = cv2.mulSpectrums(gray_dft, imgPSF, 0) + gray_res = cv2.idft(gray_op,flags = cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT) #Inverse DFT + gray_res = np.roll(gray_res, -size//2,0) + gray_res = np.roll(gray_res, -size//2,1) + + return gray_res + + +# Function to visualize the Fast Fourier Transform of the blurred images. +def create_fft(img): + img = np.float32(img) / 255.0 + f = np.fft.fft2(img) + fshift = np.fft.fftshift(f) + mag_spec = 20 * np.log(np.abs(fshift)) + mag_spec = np.asarray(mag_spec, dtype=np.uint8) + + return mag_spec + +# Change this variable with the name of the trained models. +angle_model_name= args['angle_model'] +length_model_name= args['length_model'] +model1= load_model(angle_model_name) +model2= load_model(length_model_name) + +# read blurred image +ip_image = cv2.imread(args['image']) +ip_image= cv2.cvtColor(ip_image, cv2.COLOR_BGR2GRAY) +ip_image= cv2.resize(ip_image, (640, 480)) +# FFT visualization of the blurred image +fft_img= create_fft(ip_image) + +# Predicting the psf parameters of length and angle. 
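+# The FFT of the blurred image is resized to 224x224 (the input size the models
+# were trained on) and scaled to [0, 1] before prediction. The angle model outputs
+# a 180-way softmax, and the predicted angle is taken as the mean of the indices of
+# the three highest-scoring classes; the length model is a single-output regressor,
+# so its prediction is used directly.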
+img= cv2.resize(create_fft(ip_image), (224,224)) +img= np.expand_dims(img_to_array(img), axis=0)/ 255.0 +preds= model1.predict(img) +# angle_value= np.sum(np.multiply(np.arange(0, 180), preds[0])) +angle_value = np.mean(np.argsort(preds[0])[-3:]) + +print("Predicted Blur Angle: ", angle_value) +length_value= model2.predict(img)[0][0] +print("Predicted Blur Length: ",length_value) + +op_image = process(ip_image, length_value, angle_value) +op_image = (op_image*255).astype(np.uint8) +op_image = (255/(np.max(op_image)-np.min(op_image))) * (op_image-np.min(op_image)) + +cv2.imwrite("result.jpg", op_image) + diff --git a/length_model_train.py b/length_model_train.py new file mode 100644 index 0000000000000000000000000000000000000000..bfff43a11fbcb7dd139488564f4dabab05769207 --- /dev/null +++ b/length_model_train.py @@ -0,0 +1,52 @@ +from sidekick.nn.conv.length_model import MiniVgg +from sidekick.io.hdf5datagen import Hdf5DataGen +from sidekick.callbs.manualcheckpoint import ManualCheckpoint +from tensorflow.keras.models import load_model +from sidekick.prepro.process import Process +from sidekick.prepro.imgtoarrayprepro import ImgtoArrPrePro +from tensorflow.keras.optimizers import SGD +import argparse + +ap= argparse.ArgumentParser() +ap.add_argument('-o','--output', type=str, required=True ,help="Path to output directory") +ap.add_argument('-m', '--model', help='Path to checkpointed model') +ap.add_argument('-e','--epoch', type=int, default=0, help="Starting epoch of training") +args= vars(ap.parse_args()) + +hdf5_train_path= "train.hdf5" +hdf5_val_path= "val.hdf5" +epochs= 50 +lr= 1e-2 +batch_size= 32 +num_classes= 1 +fig_path= args['output']+"train_plot.jpg" +json_path= args['output']+"train_values.json" + +print('[NOTE]:- Building Dataset...\n') +pro= Process(224, 224) +i2a= ImgtoArrPrePro() + +train_gen= Hdf5DataGen(hdf5_train_path, batch_size, num_classes, encode=False, preprocessors=[pro, i2a]) +val_gen= Hdf5DataGen(hdf5_val_path, batch_size, num_classes, encode=False, preprocessors=[pro, i2a]) + +if args['model'] is None: + print("[NOTE]:- Building model from scratch...") + model= MiniVgg.build(224, 224, 1, num_classes) + opt= SGD(learning_rate=lr, momentum=0.9, nesterov=True) + model.compile(loss="mean_absolute_percentage_error", optimizer=opt) +else: + print("[NOTE]:- Building model {}\n".format(args['model'])) + model= load_model(args['model']) + +callbacks= [ManualCheckpoint(args['output'], save_at=1, start_from=args['epoch'])] + +print("[NOTE]:- Training model...\n") + +model.fit_generator(train_gen.generator(), + steps_per_epoch=train_gen.data_length//batch_size, + validation_data= val_gen.generator(), + validation_steps= val_gen.data_length//batch_size, + epochs=epochs, + max_queue_size=10, + callbacks=callbacks, + initial_epoch=args['epoch']) diff --git a/pretrained_models/angle_model.hdf5 b/pretrained_models/angle_model.hdf5 new file mode 100644 index 0000000000000000000000000000000000000000..0971da570547aadb7c939af8f6e19d974d8b1160 --- /dev/null +++ b/pretrained_models/angle_model.hdf5 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fbe443169ad14a6799704313d16f9066ad8643be0b90806df89311d916c0746 +size 49559272 diff --git a/pretrained_models/length_model.hdf5 b/pretrained_models/length_model.hdf5 new file mode 100644 index 0000000000000000000000000000000000000000..97e2a5bb8de2f579af6eff87e2b6b5fb0080e797 --- /dev/null +++ b/pretrained_models/length_model.hdf5 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:4d6fd68204c720d0ed2643d8ef8f6dc68c677307c9c1a877edef427c5f466ac7 +size 48092168 diff --git a/readme_imgs/img1.jpg b/readme_imgs/img1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..acb5cfa266f8311b3c50f1c871f83a3df4aef0d2 Binary files /dev/null and b/readme_imgs/img1.jpg differ diff --git a/readme_imgs/img1_result.jpg b/readme_imgs/img1_result.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce398a5b225840b43d3fde62c58c33a7ed482203 Binary files /dev/null and b/readme_imgs/img1_result.jpg differ diff --git a/readme_imgs/img2.jpg b/readme_imgs/img2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4cfee774667f8f66fb0e99e753cc9e6e4721abe4 Binary files /dev/null and b/readme_imgs/img2.jpg differ diff --git a/readme_imgs/img2_result.jpg b/readme_imgs/img2_result.jpg new file mode 100644 index 0000000000000000000000000000000000000000..716d31921fd1c4c69000084abb1f1871ea542b20 Binary files /dev/null and b/readme_imgs/img2_result.jpg differ diff --git a/readme_imgs/img3.jpg b/readme_imgs/img3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3b8333199dc320fdb4c283e91333a140b625722d Binary files /dev/null and b/readme_imgs/img3.jpg differ diff --git a/readme_imgs/img3_result.jpg b/readme_imgs/img3_result.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f599477299463b006390dd28e8f77d465f240d1 Binary files /dev/null and b/readme_imgs/img3_result.jpg differ diff --git a/sidekick/__init__.py b/sidekick/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/sidekick/__pycache__/__init__.cpython-36.pyc b/sidekick/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..230db984b97e9b6d6afd28a0490e354c01d53aad Binary files /dev/null and b/sidekick/__pycache__/__init__.cpython-36.pyc differ diff --git a/sidekick/__pycache__/__init__.cpython-37.pyc b/sidekick/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6f8a41d927a897a472be1b9e1cb0f47ea2a50fb Binary files /dev/null and b/sidekick/__pycache__/__init__.cpython-37.pyc differ diff --git a/sidekick/callbs/__init__.py b/sidekick/callbs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..47004369ea2452c1349caeb62986f257f7cb979e --- /dev/null +++ b/sidekick/callbs/__init__.py @@ -0,0 +1,2 @@ +from .trainmonitor import TrainMonitor +from .manualcheckpoint import ManualCheckpoint \ No newline at end of file diff --git a/sidekick/callbs/__pycache__/__init__.cpython-36.pyc b/sidekick/callbs/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2cc8f0e54f82814bbf55b7ed752b48ed9603194 Binary files /dev/null and b/sidekick/callbs/__pycache__/__init__.cpython-36.pyc differ diff --git a/sidekick/callbs/__pycache__/__init__.cpython-37.pyc b/sidekick/callbs/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72b9d9eff3fe0541dca1eca24d0f5df3fc2a12fc Binary files /dev/null and b/sidekick/callbs/__pycache__/__init__.cpython-37.pyc differ diff --git a/sidekick/callbs/__pycache__/manualcheckpoint.cpython-36.pyc b/sidekick/callbs/__pycache__/manualcheckpoint.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22a6caa57874e704df721e2ff50ca925bd768be7 Binary files /dev/null and 
b/sidekick/callbs/__pycache__/manualcheckpoint.cpython-36.pyc differ diff --git a/sidekick/callbs/__pycache__/manualcheckpoint.cpython-37.pyc b/sidekick/callbs/__pycache__/manualcheckpoint.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd8732e6e8431e9cbac7d4c5cffaf9b5293a8bda Binary files /dev/null and b/sidekick/callbs/__pycache__/manualcheckpoint.cpython-37.pyc differ diff --git a/sidekick/callbs/__pycache__/trainmonitor.cpython-36.pyc b/sidekick/callbs/__pycache__/trainmonitor.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdffa8221c747700c365d086fdd883e71d8572c5 Binary files /dev/null and b/sidekick/callbs/__pycache__/trainmonitor.cpython-36.pyc differ diff --git a/sidekick/callbs/__pycache__/trainmonitor.cpython-37.pyc b/sidekick/callbs/__pycache__/trainmonitor.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a35c04fb32c47dced8e29365d602b73152510fc Binary files /dev/null and b/sidekick/callbs/__pycache__/trainmonitor.cpython-37.pyc differ diff --git a/sidekick/callbs/manualcheckpoint.py b/sidekick/callbs/manualcheckpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..dbc869104795abc5eda111f3af46311f0b05ecbb --- /dev/null +++ b/sidekick/callbs/manualcheckpoint.py @@ -0,0 +1,19 @@ +from tensorflow.keras.callbacks import Callback +import os + +class ManualCheckpoint(Callback): + def __init__(self, output, save_at=3, start_from=0): + super(Callback, self).__init__() + + self.output= output + self.save_at= save_at + self.initial_epoch= start_from + + def on_epoch_end(self, epoch, logs={}): + if (self.initial_epoch+1) % self.save_at==0: + save_path= os.path.sep.join([self.output, + "weights-epoch {}.hdf5".format(self.initial_epoch+1)]) + + self.model.save(save_path, overwrite=True) + + self.initial_epoch+=1 diff --git a/sidekick/callbs/trainmonitor.py b/sidekick/callbs/trainmonitor.py new file mode 100644 index 0000000000000000000000000000000000000000..9132ebf4f93495aebbd10e3da4a8662ce1dbca5d --- /dev/null +++ b/sidekick/callbs/trainmonitor.py @@ -0,0 +1,52 @@ +from tensorflow.keras import callbacks +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import numpy as np +import json +import os + +class TrainMonitor(callbacks.BaseLogger): + def __init__(self, figPath, jsonPath=None, startAt=0): + super(TrainMonitor, self).__init__() + + self.figPath= figPath + self.jsonPath= jsonPath + self.startAt= startAt + + def on_train_begin(self, logs={}): + self.H={} + + if self.jsonPath is not None: + if os.path.exists(self.jsonPath): + self.H = json.loads(open(self.jsonPath).read()) + + if self.startAt > 0: + for k in self.H.keys(): + self.H[k] = self.H[k][:self.startAt] + + def on_epoch_end(self, epoch, logs={}): + for keys, values in logs.items(): + l= self.H.get(keys, []) + l.append(float(values)) + self.H[keys] = l + + if self.jsonPath is not None: + with open(self.jsonPath, 'w') as f: + f.write(json.dumps(self.H)) + f.close() + + if len(self.H["loss"]) > 1: + N = np.arange(0, len(self.H["loss"]), 1) + plt.style.use("ggplot") + plt.figure() + plt.plot(N, self.H["loss"], label="train_loss") + plt.plot(N, self.H["val_loss"], label="val_loss") + plt.plot(N, self.H["accuracy"], label="train_acc") + plt.plot(N, self.H["val_accuracy"], label="val_acc") + plt.title("Training Loss and Accuracy [Epoch {}]".format(len(self.H["loss"]))) + plt.xlabel("Epoch #") + plt.ylabel("Loss/Accuracy") + plt.legend() + plt.savefig(self.figPath) + plt.close() \ No 
newline at end of file diff --git a/sidekick/datah/__init__.py b/sidekick/datah/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f6d584392958cb43c77c94468c1d5feb053fe60a --- /dev/null +++ b/sidekick/datah/__init__.py @@ -0,0 +1 @@ +from .loader import Loader diff --git a/sidekick/datah/__pycache__/__init__.cpython-36.pyc b/sidekick/datah/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90183a22b982e080c5f97cad45cd06e8ccf395c4 Binary files /dev/null and b/sidekick/datah/__pycache__/__init__.cpython-36.pyc differ diff --git a/sidekick/datah/__pycache__/__init__.cpython-37.pyc b/sidekick/datah/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49538a3019b2702e6daabf7502c115f37533b5c0 Binary files /dev/null and b/sidekick/datah/__pycache__/__init__.cpython-37.pyc differ diff --git a/sidekick/datah/__pycache__/loader.cpython-36.pyc b/sidekick/datah/__pycache__/loader.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad2b3a984dfc8d9b830a2fabd787faf77aa5ef3e Binary files /dev/null and b/sidekick/datah/__pycache__/loader.cpython-36.pyc differ diff --git a/sidekick/datah/__pycache__/loader.cpython-37.pyc b/sidekick/datah/__pycache__/loader.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e7b28edfcaef17ddc859064b2095f4c2974ebfd Binary files /dev/null and b/sidekick/datah/__pycache__/loader.cpython-37.pyc differ diff --git a/sidekick/datah/loader.py b/sidekick/datah/loader.py new file mode 100644 index 0000000000000000000000000000000000000000..3df19304f4d5c8c9fc83f7d19d722f7aa784e530 --- /dev/null +++ b/sidekick/datah/loader.py @@ -0,0 +1,30 @@ +import cv2 +import numpy as np +import os + +class Loader: + def __init__(self,preprocessors=None): + self.preprocessors=preprocessors + if self.preprocessors is None: + self.preprocessors=[] + + def load(self,imgpaths,verbose=-1): + data=[] + labels=[] + + for (i,imgpath) in enumerate(imgpaths): + image=cv2.imread(imgpath) + label= imgpath.split(os.path.sep)[-2] + + if self.preprocessors is not None: + for p in self.preprocessors: + image=p.preprocess(image) + + data.append(image) + labels.append(label) + + if verbose > 0 and i >0 and (i+1)% verbose==0: + print("processed:{}/{}".format(i+1,len(imgpaths))) + + print("Done!") + return (np.array(data),np.array(labels)) diff --git a/sidekick/eval/__init__.py b/sidekick/eval/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5737db4f171e0b21ee733cb0d420e0147f3f2190 --- /dev/null +++ b/sidekick/eval/__init__.py @@ -0,0 +1 @@ +from .calc_ranked import calculate_ranked \ No newline at end of file diff --git a/sidekick/eval/__pycache__/__init__.cpython-36.pyc b/sidekick/eval/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa4f585683879d33f376f8adb4ae5da735745d78 Binary files /dev/null and b/sidekick/eval/__pycache__/__init__.cpython-36.pyc differ diff --git a/sidekick/eval/__pycache__/calc_ranked.cpython-36.pyc b/sidekick/eval/__pycache__/calc_ranked.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aee9297c5d14522c363752f2b621873caedb40ea Binary files /dev/null and b/sidekick/eval/__pycache__/calc_ranked.cpython-36.pyc differ diff --git a/sidekick/eval/calc_ranked.py b/sidekick/eval/calc_ranked.py new file mode 100644 index 
0000000000000000000000000000000000000000..3eec2d8f40d6191aa03adba9252b044b68ba279f --- /dev/null +++ b/sidekick/eval/calc_ranked.py @@ -0,0 +1,23 @@ +import numpy as np + +def calculate_ranked(preds, labels): + rank1=0 + rank5=0 + + for p,l in zip(preds, labels): + #sort preds in descending order of their confidence and return the indices of these + p= np.argsort(p)[::-1] + + # checking for rank5 + if l in p[:5]: + rank5+=1 + # checking rank1 + if l==p[0]: + rank1+=1 + + + # Final accuracies + rank1= rank1/len(labels) + rank5= rank5/len(labels) + + return rank1,rank5 \ No newline at end of file diff --git a/sidekick/io/__init__.py b/sidekick/io/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f5abcc8ba83d045cb5bc392cc0e3a5992b6f9b63 --- /dev/null +++ b/sidekick/io/__init__.py @@ -0,0 +1 @@ +from .hdf5_writer import Hdf5Writer \ No newline at end of file diff --git a/sidekick/io/__pycache__/__init__.cpython-36.pyc b/sidekick/io/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..381933676cfa0af208ca4f27e1f1522a74168ebf Binary files /dev/null and b/sidekick/io/__pycache__/__init__.cpython-36.pyc differ diff --git a/sidekick/io/__pycache__/__init__.cpython-37.pyc b/sidekick/io/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6f8cdf9c5b7408533c267c20668893067b1e67c Binary files /dev/null and b/sidekick/io/__pycache__/__init__.cpython-37.pyc differ diff --git a/sidekick/io/__pycache__/hdf5_writer.cpython-36.pyc b/sidekick/io/__pycache__/hdf5_writer.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddbaf33c56de6778168ab5eb52b9767fef416dc2 Binary files /dev/null and b/sidekick/io/__pycache__/hdf5_writer.cpython-36.pyc differ diff --git a/sidekick/io/__pycache__/hdf5_writer.cpython-37.pyc b/sidekick/io/__pycache__/hdf5_writer.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a039ca22c5c1f2fe816f555e58643a2e081b855b Binary files /dev/null and b/sidekick/io/__pycache__/hdf5_writer.cpython-37.pyc differ diff --git a/sidekick/io/__pycache__/hdf5datagen.cpython-36.pyc b/sidekick/io/__pycache__/hdf5datagen.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2dac3afc7b5dc0c67d848a70511e28eb45fc1155 Binary files /dev/null and b/sidekick/io/__pycache__/hdf5datagen.cpython-36.pyc differ diff --git a/sidekick/io/__pycache__/hdf5datagen.cpython-37.pyc b/sidekick/io/__pycache__/hdf5datagen.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f29b9e2d4c9df5b309b77334242bb3964b229b76 Binary files /dev/null and b/sidekick/io/__pycache__/hdf5datagen.cpython-37.pyc differ diff --git a/sidekick/io/hdf5_writer.py b/sidekick/io/hdf5_writer.py new file mode 100644 index 0000000000000000000000000000000000000000..bf2277d0336d647b5457c03cd647707da6bf1e09 --- /dev/null +++ b/sidekick/io/hdf5_writer.py @@ -0,0 +1,53 @@ +import h5py +import os + +class Hdf5Writer: + def __init__(self, dims, outputPath, dbName='Images', buffSize= 1000): + + # throw an error if the file already exists + if os.path.exists(outputPath): + raise ValueError("PATH ALREADY PRESENT. 
PLEASE DELETE FILES" + "BEFORE PROCEEDING.") + + # database to store data + self.db= h5py.File(outputPath, 'w') + # define dataset containers to store data and labels + self.data= self.db.create_dataset(dbName, dims, dtype='float') + self.labels= self.db.create_dataset('Labels', shape=(dims[0],), dtype='int') + + # defining a buffer and index variable for the buffer + self.buffSize= buffSize + self.buffer= {"data": [], "labels": []} + self.idx= 0 + + def add(self, values, labels): + self.buffer['data'].extend(values) + self.buffer['labels'].extend(labels) + + if len(self.buffer['data'])>=self.buffSize: + self.flush() + + def flush(self): + # When buffer size is reached flush data to dataset container + temp_idx= self.idx + len(self.buffer['data']) + # index from prev_idx to new_idx + self.data[self.idx:temp_idx]= self.buffer['data'] + self.labels[self.idx:temp_idx]= self.buffer['labels'] + # update new_idx + self.idx=temp_idx + # reinitialize the buffer + self.buffer={'data': [], 'labels': []} + + def flushClassNames(self, classNames): + + # Creating a special + labelNames= self.db.create_dataset('Label_Names', (len(classNames),), + dtype=h5py.special_dtype(vlen=unicode)) + labelNames[:]= classNames + + def close(self): + + if len(self.buffer['data'])>0: + self.flush() + + self.db.close() \ No newline at end of file diff --git a/sidekick/io/hdf5datagen.py b/sidekick/io/hdf5datagen.py new file mode 100644 index 0000000000000000000000000000000000000000..a68ecdfe40e76319c3dc7dcf534bddcfc11b80ed --- /dev/null +++ b/sidekick/io/hdf5datagen.py @@ -0,0 +1,50 @@ +from tensorflow.keras.utils import to_categorical +import h5py +import numpy as np + +class Hdf5DataGen: + def __init__(self, dbPath, batchSize, classes, encode=True, aug=None, preprocessors=None): + + self.db= h5py.File(dbPath, 'r') + self.batchSize= batchSize + self.num_classes= classes + self.encode= encode + self.aug= aug + self.preprocessors= preprocessors + + self.data_length= self.db['Images'].shape[0] + + def generator(self, counter=np.inf): + start=0 + + while start< counter: + for i in np.arange(0, self.data_length, self.batchSize): + data = self.db['Images'][i:i+self.batchSize] + labels = self.db['Labels'][i:i + self.batchSize] + + if self.encode: + labels= to_categorical(labels, self.num_classes) + + if self.preprocessors is not None: + processed_data=[] + + for d in data: + for p in self.preprocessors: + d= p.preprocess(d) + + processed_data.append(d) + + data= np.array(processed_data) + + if self.aug is not None: + # Notice the next to get next value from generator + data, labels= next(self.aug.flow( + data, labels, batch_size= self.batchSize + )) + + yield (data, labels) + + start+=1 + + def close(self): + self.db.close() \ No newline at end of file diff --git a/sidekick/nn/conv/__pycache__/__init__.cpython-37.pyc b/sidekick/nn/conv/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52c9de9f53795ec0dc6f572922f8ce1e83a2c5d4 Binary files /dev/null and b/sidekick/nn/conv/__pycache__/__init__.cpython-37.pyc differ diff --git a/sidekick/nn/conv/__pycache__/angle_model.cpython-37.pyc b/sidekick/nn/conv/__pycache__/angle_model.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f4b90b1db4beaeabf8a1747f23b46df74e170f0 Binary files /dev/null and b/sidekick/nn/conv/__pycache__/angle_model.cpython-37.pyc differ diff --git a/sidekick/nn/conv/__pycache__/length_model.cpython-37.pyc b/sidekick/nn/conv/__pycache__/length_model.cpython-37.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..4abdd58765b3f0bd39271beb070e7826e62085ea Binary files /dev/null and b/sidekick/nn/conv/__pycache__/length_model.cpython-37.pyc differ diff --git a/sidekick/nn/conv/angle_model.py b/sidekick/nn/conv/angle_model.py new file mode 100644 index 0000000000000000000000000000000000000000..77eea234799d8947ff7072c4b1ed2c0f4d443be3 --- /dev/null +++ b/sidekick/nn/conv/angle_model.py @@ -0,0 +1,81 @@ +from tensorflow.keras.models import Sequential +from tensorflow.keras.layers import Conv2D, Activation, BatchNormalization, Dropout, MaxPool2D +from tensorflow.keras.layers import Flatten, Dense +from tensorflow import nn as tfn +import tensorflow.keras.backend as K + +class MiniVgg: + @staticmethod + def build(width,height,depth,classes): + model=Sequential() + inputShape=(height,width,depth) + chanDim=-1 + + if K.image_data_format()=="channel_first": + inputShape=(depth,height,width) + chanDim=1 + + model.add(Conv2D(32,(5,5),input_shape=inputShape)) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(Conv2D(32, (5, 5))) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(Conv2D(32, (5, 5))) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(MaxPool2D(pool_size=(2,2))) + model.add(Dropout(0.25)) +#-----------------------------------# + model.add(Conv2D(32, (5, 5), input_shape=inputShape)) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(Conv2D(32, (5, 5))) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(Conv2D(64, (5, 5))) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(MaxPool2D(pool_size=(2, 2))) + model.add(Dropout(0.25)) + +#-----------------------------# + + model.add(Conv2D(64, (5, 5), input_shape=inputShape)) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(Conv2D(64, (5, 5))) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(Conv2D(64, (5, 5))) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(MaxPool2D(pool_size=(2, 2))) + model.add(Dropout(0.25)) + #-----------------------------# + + model.add(Conv2D(64, (5, 5))) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + model.add(Conv2D(64, (5, 5))) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(Flatten()) + model.add(Dense(1024)) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization()) + model.add(Dropout(0.5)) + model.add(Dense(classes)) + model.add(Activation(tfn.softmax)) + + return model diff --git a/sidekick/nn/conv/length_model.py b/sidekick/nn/conv/length_model.py new file mode 100644 index 0000000000000000000000000000000000000000..2fc4f43f82fef3bdb33e874c2431240c6ea0944a --- /dev/null +++ b/sidekick/nn/conv/length_model.py @@ -0,0 +1,81 @@ +from tensorflow.keras.models import Sequential +from tensorflow.keras.layers import Conv2D, Activation, BatchNormalization, Dropout, MaxPool2D +from tensorflow.keras.layers import Flatten, Dense +from tensorflow import nn as tfn +import tensorflow.keras.backend as K + +class MiniVgg: + @staticmethod + def build(width,height,depth,classes): + model=Sequential() + inputShape=(height,width,depth) + chanDim=-1 + + if K.image_data_format()=="channel_first": + inputShape=(depth,height,width) + chanDim=1 + + 
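+        # Same VGG-style convolutional stack as the angle model; only the head differs:
+        # a single Dense unit with ReLU activation that regresses the blur length,
+        # instead of the angle model's 180-way softmax over blur angles.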
model.add(Conv2D(32,(5,5),input_shape=inputShape)) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(Conv2D(32, (5, 5))) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(Conv2D(32, (5, 5))) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(MaxPool2D(pool_size=(2,2))) + model.add(Dropout(0.25)) +#-----------------------------------# + model.add(Conv2D(32, (5, 5), input_shape=inputShape)) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(Conv2D(32, (5, 5))) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(Conv2D(64, (5, 5))) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(MaxPool2D(pool_size=(2, 2))) + model.add(Dropout(0.25)) + +#-----------------------------# + + model.add(Conv2D(64, (5, 5), input_shape=inputShape)) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(Conv2D(64, (5, 5))) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(Conv2D(64, (5, 5))) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(MaxPool2D(pool_size=(2, 2))) + model.add(Dropout(0.25)) + #-----------------------------# + + model.add(Conv2D(64, (5, 5))) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + model.add(Conv2D(64, (5, 5))) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization(chanDim)) + + model.add(Flatten()) + model.add(Dense(1024)) + model.add(Activation(tfn.relu)) + model.add(BatchNormalization()) + model.add(Dropout(0.5)) + model.add(Dense(classes)) + model.add(Activation(tfn.relu)) + + return model diff --git a/sidekick/plot/__init__.py b/sidekick/plot/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..988f72cf31f66ac601d1ba622bcd8d7a0c3bd689 --- /dev/null +++ b/sidekick/plot/__init__.py @@ -0,0 +1 @@ +from .plot_graph import plot_graph \ No newline at end of file diff --git a/sidekick/plot/__pycache__/__init__.cpython-36.pyc b/sidekick/plot/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa5961c340b2a248491232514617770b58ed02eb Binary files /dev/null and b/sidekick/plot/__pycache__/__init__.cpython-36.pyc differ diff --git a/sidekick/plot/__pycache__/plot_graph.cpython-36.pyc b/sidekick/plot/__pycache__/plot_graph.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebcec9b5712c6f41a3660f308364109f6b01bcf6 Binary files /dev/null and b/sidekick/plot/__pycache__/plot_graph.cpython-36.pyc differ diff --git a/sidekick/plot/__pycache__/plot_model.cpython-36.pyc b/sidekick/plot/__pycache__/plot_model.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da1996bd9c39e61258a687b1879df69eaf90fe9b Binary files /dev/null and b/sidekick/plot/__pycache__/plot_model.cpython-36.pyc differ diff --git a/sidekick/plot/plot_graph.py b/sidekick/plot/plot_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..c14928f6ba2c1b2804f38ddec96699bb2c0fd11a --- /dev/null +++ b/sidekick/plot/plot_graph.py @@ -0,0 +1,17 @@ +import matplotlib.pyplot as plt +import numpy as np + +def plot_graph(epochs, H, save=False): + plt.style.use('ggplot') + plt.figure() + plt.plot(np.arange(0, epochs, 1), H.history['loss'], label='train_loss') + plt.plot(np.arange(0, epochs, 1), 
H.history['val_loss'], label='val_loss') + plt.plot(np.arange(0, epochs, 1), H.history['accuracy'], label='train_acc') + plt.plot(np.arange(0, epochs, 1), H.history['val_accuracy'], label='val_acc') + plt.title('Training Loss & Accuracy') + plt.xlabel('# Epochs') + plt.ylabel('Metric Values') + plt.legend() + if save==True: + plt.savefig(fname= "./train_plot.jpg") + plt.show() \ No newline at end of file diff --git a/sidekick/plot/plot_model.py b/sidekick/plot/plot_model.py new file mode 100644 index 0000000000000000000000000000000000000000..076362c19c66b9444437b24e142d0909fe9b5d95 --- /dev/null +++ b/sidekick/plot/plot_model.py @@ -0,0 +1,4 @@ +from tensorflow.keras.utils import plot_model + +def visualize_model(model, filename): + plot_model(model, to_file=filename, show_shapes=True) \ No newline at end of file diff --git a/sidekick/prepro/__init__.py b/sidekick/prepro/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b1755a5e42616aa194b7f9f111cf0a0e113767f5 --- /dev/null +++ b/sidekick/prepro/__init__.py @@ -0,0 +1,4 @@ +from .process import Process +from .meanprocess import MeanProcess +from .aspectprocess import AspectProcess +from .imgtoarrayprepro import ImgtoArrPrePro diff --git a/sidekick/prepro/__pycache__/__init__.cpython-36.pyc b/sidekick/prepro/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4a6763a3b26ea084e308d4eca068395478302dc Binary files /dev/null and b/sidekick/prepro/__pycache__/__init__.cpython-36.pyc differ diff --git a/sidekick/prepro/__pycache__/__init__.cpython-37.pyc b/sidekick/prepro/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d26170d127a952aa9a63c383f19816a2bd69e831 Binary files /dev/null and b/sidekick/prepro/__pycache__/__init__.cpython-37.pyc differ diff --git a/sidekick/prepro/__pycache__/aspectprocess.cpython-36.pyc b/sidekick/prepro/__pycache__/aspectprocess.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54474bb91212e0d1e2655ef656dad85a5855b000 Binary files /dev/null and b/sidekick/prepro/__pycache__/aspectprocess.cpython-36.pyc differ diff --git a/sidekick/prepro/__pycache__/aspectprocess.cpython-37.pyc b/sidekick/prepro/__pycache__/aspectprocess.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6c1ebd299e63ee7de50c6578c9ad3e4ad4d9ec3 Binary files /dev/null and b/sidekick/prepro/__pycache__/aspectprocess.cpython-37.pyc differ diff --git a/sidekick/prepro/__pycache__/imgtoarrayprepro.cpython-36.pyc b/sidekick/prepro/__pycache__/imgtoarrayprepro.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb5c7b7770cfebe5487a1c1bf95457066e1042e8 Binary files /dev/null and b/sidekick/prepro/__pycache__/imgtoarrayprepro.cpython-36.pyc differ diff --git a/sidekick/prepro/__pycache__/imgtoarrayprepro.cpython-37.pyc b/sidekick/prepro/__pycache__/imgtoarrayprepro.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ef9d74d9fbde5f61d481d17316a36091dd58118 Binary files /dev/null and b/sidekick/prepro/__pycache__/imgtoarrayprepro.cpython-37.pyc differ diff --git a/sidekick/prepro/__pycache__/meanprocess.cpython-36.pyc b/sidekick/prepro/__pycache__/meanprocess.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dd6a7c39d150265368d9385e296cd4f32bfc57a Binary files /dev/null and b/sidekick/prepro/__pycache__/meanprocess.cpython-36.pyc differ diff --git 
a/sidekick/prepro/__pycache__/meanprocess.cpython-37.pyc b/sidekick/prepro/__pycache__/meanprocess.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4742b05b4a2f28ad854e097bdebb821f0eb1bf11 Binary files /dev/null and b/sidekick/prepro/__pycache__/meanprocess.cpython-37.pyc differ diff --git a/sidekick/prepro/__pycache__/process.cpython-36.pyc b/sidekick/prepro/__pycache__/process.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..751a74b0636fbce30a16bab199b2b3af40d8484c Binary files /dev/null and b/sidekick/prepro/__pycache__/process.cpython-36.pyc differ diff --git a/sidekick/prepro/__pycache__/process.cpython-37.pyc b/sidekick/prepro/__pycache__/process.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad62c01368e875fff1995be2ea645162c1469ce1 Binary files /dev/null and b/sidekick/prepro/__pycache__/process.cpython-37.pyc differ diff --git a/sidekick/prepro/aspectprocess.py b/sidekick/prepro/aspectprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..6b04f9f49ed529a6ba07d462d49a37f672d5529d --- /dev/null +++ b/sidekick/prepro/aspectprocess.py @@ -0,0 +1,28 @@ +import imutils +import cv2 + +class AspectProcess: + def __init__(self, width, height, inter=cv2.INTER_AREA): + self.width= width + self.height= height + self.inter= inter + + def preprocess(self, image): + h,w= image.shape[0:2] + dh=0 + dw=0 + + # Resize the smaller dimension and calculate the offset of the other dim + # The offset is chosen such that a centre crop is formed + if w