diff --git a/assets/Megamind.avi b/assets/Megamind.avi
new file mode 100644
index 0000000..86351eb
Binary files /dev/null and b/assets/Megamind.avi differ
diff --git a/assets/cat_damaged.png b/assets/cat_damaged.png
new file mode 100644
index 0000000..5af2762
Binary files /dev/null and b/assets/cat_damaged.png differ
diff --git a/assets/cat_mask.png b/assets/cat_mask.png
new file mode 100644
index 0000000..3465a8a
Binary files /dev/null and b/assets/cat_mask.png differ
diff --git a/outputs/cat_inpainted.png b/outputs/cat_inpainted.png
new file mode 100644
index 0000000..dc76e56
Binary files /dev/null and b/outputs/cat_inpainted.png differ
diff --git a/tutorial10.py b/tutorial10.py
new file mode 100644
index 0000000..1817a6f
--- /dev/null
+++ b/tutorial10.py
@@ -0,0 +1,37 @@
+import cv2
+import numpy as np
+import os
+
+# Playing video from file:
+cap = cv2.VideoCapture('assets/Megamind.avi')
+
+try:
+    if not os.path.exists('data-single'):
+        os.makedirs('data-single')
+except OSError:
+    print('Error: creating directory data-single')
+
+currentFrame = 0
+while True:
+    # Capture frame-by-frame
+    ret, frame = cap.read()
+
+    # Stop once the video has no more frames
+    if not ret:
+        break
+
+    # Save the current frame as a jpg file
+    name = './data-single/frame' + str(currentFrame) + '.jpg'
+    print('Creating...' + name)
+    cv2.imwrite(name, frame)
+
+    # To stop duplicate images
+    currentFrame += 1
+
+# When everything done, release the capture
+cap.release()
+cv2.destroyAllWindows()
+
+
+#lucciffer
+
diff --git a/tutorial11.py b/tutorial11.py
new file mode 100644
index 0000000..eb770a5
--- /dev/null
+++ b/tutorial11.py
@@ -0,0 +1,58 @@
+import cv2
+import numpy as np
+
+
+def cv2_imshow(img, title='image'):
+    # Local stand-in for Colab's cv2_imshow so the script runs outside a notebook.
+    cv2.imshow(title, img)
+    cv2.waitKey(0)
+    cv2.destroyAllWindows()
+
+def read_file(filename):
+    img = cv2.imread(filename)
+    cv2_imshow(img)
+    return img
+
+def color_quantization(img, k):
+    # Transform the image into a flat array of float32 pixels
+    data = np.float32(img).reshape((-1, 3))
+
+    # Determine criteria
+    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 0.001)
+
+    # Implementing K-Means
+    ret, label, center = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
+    center = np.uint8(center)
+    result = center[label.flatten()]
+    result = result.reshape(img.shape)
+    return result
+
+def edge_mask(img, line_size, blur_value):
+    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    gray_blur = cv2.medianBlur(gray, blur_value)
+    edges = cv2.adaptiveThreshold(gray_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, line_size, blur_value)
+    return edges
+
+
+file_path = 'assets/cat_damaged.png'  # input image path; any image in assets/ will do
+img = read_file(file_path)
+
+
+line_size = 5
+blur_value = 13
+
+edges = edge_mask(img, line_size, blur_value)
+cv2_imshow(edges)
+
+total_color = 7
+
+img = color_quantization(img, total_color)
+cv2_imshow(img)
+cv2.imwrite('./output_minus1.png', img)
+
+blurred = cv2.bilateralFilter(img, d=7, sigmaColor=200, sigmaSpace=200)
+cv2_imshow(blurred)
+
+cartoon = cv2.bitwise_and(blurred, blurred, mask=edges)
+cv2.imwrite('./output.png', cartoon)
+cv2_imshow(cartoon)
diff --git a/tutorial9.py b/tutorial9.py
new file mode 100644
index 0000000..add7a49
--- /dev/null
+++ b/tutorial9.py
@@ -0,0 +1,14 @@
+import numpy as np
+import cv2
+
+# Open the image.
+img = cv2.imread('assets/cat_damaged.png')
+
+# Load the mask.
+mask = cv2.imread('assets/cat_mask.png', 0)
+
+# Inpaint.
+dst = cv2.inpaint(img, mask, 3, cv2.INPAINT_NS)
+
+# Write the output.
+cv2.imwrite('outputs/cat_inpainted.png', dst)