Commit ea606880 authored by GustavAls, committed by GitHub

Merge pull request #2 from GustavAls/Prepocessing

Prepocessing
parents 3eebd6ed edc55ac2
import numpy as np
import cv2
def fill_border(image, border_width):
    dimension = 1
    if len(image.shape) == 2:
        y_height, x_height = image.shape
        out_image = np.zeros((y_height + border_width * 2, x_height + border_width * 2, dimension))
        y_height -= 1
        x_height -= 1
    else:
        y_height, x_height, dimension = image.shape
        out_image = np.zeros((y_height + border_width * 2, x_height + border_width * 2, dimension))
        y_height -= 1
        x_height -= 1
    # border_width -= 1
    border_mat = np.ones((border_width, border_width))
    for i in range(dimension):
        # Setting entire corners equal to corner values in image
        out_image[:border_width, :border_width, i] = border_mat * image[0, 0, i]
        out_image[border_width + y_height + 1:2 * border_width + y_height + 1, :border_width, i] = border_mat * image[y_height, 0, i]
        out_image[:border_width, border_width + x_height + 1:2 * border_width + x_height + 1, i] = border_mat * image[0, x_height, i]
        out_image[border_width + y_height + 1:2 * border_width + y_height + 1, border_width + x_height + 1:2 * border_width + x_height + 1, i] = border_mat * image[y_height, x_height, i]
        # Setting the inner values equal to original image
        out_image[border_width:border_width + y_height + 1, border_width:border_width + x_height + 1, i] = image[:, :, i]
        # Copying and extending the values of the outer rows and columns of the original image
        out_image[:border_width, border_width:border_width + x_height + 1, i] = np.tile(image[0, :, i], (border_width, 1))
        out_image[border_width + y_height + 1:2 * border_width + y_height + 1, border_width:border_width + x_height + 1, i] = np.tile(image[y_height, :, i], (border_width, 1))
        out_image[border_width:border_width + y_height + 1, :border_width, i] = np.transpose(np.tile(image[:, 0, i], (border_width, 1)))
        out_image[border_width:border_width + y_height + 1, border_width + x_height + 1:2 * border_width + x_height + 1, i] = np.transpose(np.tile(image[:, x_height, i], (border_width, 1)))
    return out_image
test_matrix = np.random.normal(0,1,(10,10,1))
out_test = fill_border(test_matrix,3)
print(out_test)
print(out_test[:,-1])
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import cv2
from scipy import signal
from scipy import ndimage
plt.close('all')
def dilation33(image):
    # Makes a 3-by-3 grey-value dilation of a 2D image; crashes if the input is not 2D
    y_height, x_height = image.shape
    out_image = np.zeros((y_height, x_height, 3))
    out_image[:, :, 0] = np.row_stack((image[1:, :], image[-1, :]))
    out_image[:, :, 1] = image
    out_image[:, :, 2] = np.row_stack((image[0, :], image[:(y_height - 1), :]))
    out_image2 = np.max(out_image, axis=2)
    out_image[:, :, 0] = np.column_stack((out_image2[:, 1:], out_image2[:, -1]))
    out_image[:, :, 1] = out_image2
    out_image[:, :, 2] = np.column_stack((out_image2[:, 0], out_image2[:, 0:(x_height - 1)]))
    out_image = np.max(out_image, axis=2)
    return out_image
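# Hedged sanity check (added, not part of the original file): on the interior this should agree
# with a plain 3x3 grey dilation, and the edge handling above replicates the nearest row/column,
# which corresponds to scipy's mode='nearest'. `small` below is a hypothetical test input.
# small = np.arange(16, dtype=float).reshape(4, 4)
# assert np.allclose(dilation33(small), ndimage.grey_dilation(small, size=(3, 3), mode='nearest'))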
def fill_border(image, border_width):
    dimension = 1
    if len(image.shape) == 2:
        y_height, x_height = image.shape
        out_image = np.zeros((y_height + border_width * 2, x_height + border_width * 2))
    else:
        y_height, x_height, dimension = image.shape
        out_image = np.zeros((y_height + border_width * 2, x_height + border_width * 2, dimension))
    border_mat = np.ones((border_width, border_width))
    if dimension == 1:
        out_image[:border_width, :border_width] = border_mat * image[0, 0]
        out_image[border_width + y_height:2 * border_width + y_height, :border_width] = border_mat * image[y_height - 1, 0]
        out_image[:border_width, border_width + x_height:2 * border_width + x_height] = border_mat * image[0, x_height - 1]
        out_image[border_width + y_height:2 * border_width + y_height, border_width + x_height:2 * border_width + x_height] = border_mat * image[y_height - 1, x_height - 1]
        # Setting the inner values equal to original image
        out_image[border_width:border_width + y_height, border_width:border_width + x_height] = image[:, :]
        # Copying and extending the values of the outer rows and columns of the original image
        out_image[:border_width, border_width:border_width + x_height] = np.tile(image[0, :], (border_width, 1))
        out_image[border_width + y_height:2 * border_width + y_height, border_width:border_width + x_height] = np.tile(image[y_height - 1, :], (border_width, 1))
        out_image[border_width:border_width + y_height, :border_width] = np.transpose(np.tile(image[:, 0], (border_width, 1)))
        out_image[border_width:border_width + y_height, border_width + x_height:2 * border_width + x_height] = np.transpose(np.tile(image[:, x_height - 1], (border_width, 1)))
    else:
        for i in range(dimension):
            # Setting entire corners equal to corner values in image
            out_image[:border_width, :border_width, i] = border_mat * image[0, 0, i]
            out_image[border_width + y_height:2 * border_width + y_height, :border_width, i] = border_mat * image[y_height - 1, 0, i]
            out_image[:border_width, border_width + x_height:2 * border_width + x_height, i] = border_mat * image[0, x_height - 1, i]
            out_image[border_width + y_height:2 * border_width + y_height, border_width + x_height:2 * border_width + x_height, i] = border_mat * image[y_height - 1, x_height - 1, i]
            # Setting the inner values equal to original image
            out_image[border_width:border_width + y_height, border_width:border_width + x_height, i] = image[:, :, i]
            # Copying and extending the values of the outer rows and columns of the original image
            out_image[:border_width, border_width:border_width + x_height, i] = np.tile(image[0, :, i], (border_width, 1))
            out_image[border_width + y_height:2 * border_width + y_height, border_width:border_width + x_height, i] = np.tile(image[y_height - 1, :, i], (border_width, 1))
            out_image[border_width:border_width + y_height, :border_width, i] = np.transpose(np.tile(image[:, 0, i], (border_width, 1)))
            out_image[border_width:border_width + y_height, border_width + x_height:2 * border_width + x_height, i] = np.transpose(np.tile(image[:, x_height - 1, i], (border_width, 1)))
    return out_image
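# Hedged note (added, not part of the original file): for a 2D input this border replication is
# the same as numpy's edge padding, so a quick check could be, with `img2d` a hypothetical 2D array:
# assert np.allclose(fill_border(img2d, 5), np.pad(img2d, 5, mode='edge'))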
def gaussian_derivative(image, sigma, i_order, j_order, build_in=True):
    # Calculates the Gaussian derivative of the i'th order along the first axis and of the
    # j'th order along the second axis
    maximum_sigma = float(3)
    filter_size = int(maximum_sigma * sigma + 0.5)  # truncate the filter support at roughly 3*sigma
    x = np.asarray([i for i in range(-filter_size, filter_size + 1)])
    gaussian_distribution = 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp((x ** 2) / (-2 * sigma ** 2))
    if not build_in:
        # first making the gaussian convolution in the x direction
        image = fill_border(image, filter_size)
        if i_order == 0:
            gaussian = gaussian_distribution / np.sum(gaussian_distribution)
        elif i_order == 1:
            gaussian = -(x / sigma ** 2) * gaussian_distribution
            gaussian = gaussian / (np.sum(x * gaussian))
        elif i_order == 2:
            gaussian = (x ** 2 / sigma ** 4 - 1 / sigma ** 2) * gaussian_distribution
            gaussian = gaussian - np.sum(gaussian) / len(x)  # shape of x may also be used but has only one dimension
            gaussian = gaussian / np.sum(0.5 * x * x * gaussian)
        gaussian = gaussian.reshape(gaussian.shape + (1,))
        out_image = signal.convolve2d(image, gaussian, mode='valid')
        # subsequently in the y direction
        if j_order == 0:
            gaussian = gaussian_distribution / np.sum(gaussian_distribution)
        elif j_order == 1:
            gaussian = -(x / sigma ** 2) * gaussian_distribution
            gaussian = gaussian / (np.sum(x * gaussian))
        elif j_order == 2:
            gaussian = (x ** 2 / sigma ** 4 - 1 / sigma ** 2) * gaussian_distribution
            gaussian = gaussian - np.sum(gaussian) / len(x)  # shape of x may also be used but has only one dimension
            gaussian = gaussian / np.sum(0.5 * x * x * gaussian)
        gaussian = gaussian.reshape(gaussian.shape + (1,))
        out_image = signal.convolve2d(out_image, gaussian.T, mode='valid')
    else:
        if i_order == 0:
            out_image = ndimage.gaussian_filter1d(image, sigma, axis=0, mode='reflect')
        if i_order == 1:
            out_image = ndimage.gaussian_filter1d(image, sigma, axis=0, order=1, mode='reflect')
        if i_order == 2:
            out_image = ndimage.gaussian_filter1d(image, sigma, axis=0, order=2, mode='reflect')
        if j_order == 0:
            out_image = ndimage.gaussian_filter1d(out_image, sigma, axis=1, mode='reflect')
        if j_order == 1:
            out_image = ndimage.gaussian_filter1d(out_image, sigma, axis=1, order=1, mode='reflect')
        if j_order == 2:
            out_image = ndimage.gaussian_filter1d(out_image, sigma, axis=1, order=2, mode='reflect')
    return out_image
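# Hedged usage sketch (added, not part of the original file): the first-order derivative of a
# linear ramp along the second axis should be close to 1 away from the border. `ramp` is a
# hypothetical test input.
# ramp = np.tile(np.arange(100, dtype=float), (100, 1))
# d_ramp = gaussian_derivative(ramp, sigma=2, i_order=0, j_order=1)
# print(d_ramp[50, 50])  # expected to be approximately 1.0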
def norm_derivative(image, sigma, order=1, build_ind=True):
    R = image[:, :, 0]
    G = image[:, :, 1]
    B = image[:, :, 2]
    if order == 1:
        Rx = gaussian_derivative(R, sigma, order, 0, build_ind)
        Ry = gaussian_derivative(R, sigma, 0, order, build_ind)
        Rw = np.sqrt(Rx ** 2 + Ry ** 2)
        Gx = gaussian_derivative(G, sigma, order, 0, build_ind)
        Gy = gaussian_derivative(G, sigma, 0, order, build_ind)
        Gw = np.sqrt(Gx ** 2 + Gy ** 2)
        Bx = gaussian_derivative(B, sigma, order, 0, build_ind)
        By = gaussian_derivative(B, sigma, 0, order, build_ind)
        Bw = np.sqrt(Bx ** 2 + By ** 2)
    elif order == 2:
        Rx = gaussian_derivative(R, sigma, order, 0, build_ind)
        Ry = gaussian_derivative(R, sigma, 0, order, build_ind)
        Rxy = gaussian_derivative(R, sigma, order // 2, order // 2, build_ind)
        Rw = np.sqrt(Rx ** 2 + Ry ** 2 + 4 * Rxy ** 2)
        Gx = gaussian_derivative(G, sigma, order, 0, build_ind)
        Gy = gaussian_derivative(G, sigma, 0, order, build_ind)
        Gxy = gaussian_derivative(G, sigma, order // 2, order // 2, build_ind)
        Gw = np.sqrt(Gx ** 2 + Gy ** 2 + 4 * Gxy ** 2)
        Bx = gaussian_derivative(B, sigma, order, 0, build_ind)
        By = gaussian_derivative(B, sigma, 0, order, build_ind)
        Bxy = gaussian_derivative(B, sigma, order // 2, order // 2, build_ind)
        Bw = np.sqrt(Bx ** 2 + By ** 2 + 4 * Bxy ** 2)
    return Rw, Gw, Bw
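# Hedged note (added, not part of the original file): per channel this returns the first-order
# gradient magnitude sqrt(Ix^2 + Iy^2) for order=1, or the second-order magnitude
# sqrt(Ixx^2 + Iyy^2 + 4*Ixy^2) for order=2, as used by Grey-Edge style color constancy.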
def set_border(image, width, method=0):
    y_height, x_height = image.shape
    temp = np.ones((y_height, x_height))
    y, x = np.meshgrid(np.arange(1, y_height + 1), np.arange(1, x_height + 1), indexing='ij')
    temp = temp * ((x < (x_height - width + 1)) * (x > width))
    temp = temp * ((y < (y_height - width + 1)) * (y > width))
    out = temp * image
    if method == 1:
        out = out + (np.sum(out) / np.sum(temp)) * (np.ones((y_height, x_height)) - temp)
    return out
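# Hedged sketch (added, not part of the original file): set_border zeroes a frame of `width`
# pixels around the image; with method=1 that frame is filled with the interior mean instead.
# Hypothetical example: set_border(np.ones((6, 6)), 2) keeps only the central 2x2 block at 1.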
def general_color_constancy(image, gaussian_differentiation=0, minkowski_norm=5, sigma=1, mask_image=0):
    y_height, x_height, dimension = image.shape
    if mask_image == 0:
        mask_image = np.zeros((y_height, x_height))
    # Removing saturated points
    saturation_threshold = 255
    mask_image2 = mask_image + (dilation33(np.max(image, axis=2)) >= saturation_threshold)
    mask_image2 = (mask_image2 == 0)
    mask_image2 = set_border(mask_image2, sigma + 1)
    image_copy = np.ndarray.copy(image).astype(int)
    if gaussian_differentiation == 0:
        if sigma != 0:
            image_copy = gaussian_derivative(image_copy, sigma, 0, 0)
    elif gaussian_differentiation > 0:
        Rx, Gx, Bx = norm_derivative(image_copy, sigma, gaussian_differentiation, build_ind=False)
        image_copy[:, :, 0] = Rx
        image_copy[:, :, 1] = Gx
        image_copy[:, :, 2] = Bx
    image_copy = np.fabs(image_copy)  # the differentiated image may contain negative values
    if minkowski_norm != -1:  # finite Minkowski norm, p in [1, inf)
        kleur = np.float_power(image_copy, minkowski_norm)
        white_R = np.float_power(np.sum(kleur[:, :, 0] * mask_image2), (1 / minkowski_norm))
        white_G = np.float_power(np.sum(kleur[:, :, 1] * mask_image2), (1 / minkowski_norm))
        white_B = np.float_power(np.sum(kleur[:, :, 2] * mask_image2), (1 / minkowski_norm))
        som = np.sqrt(white_R ** 2.0 + white_G ** 2.0 + white_B ** 2.0)
        white_R = white_R / som
        white_G = white_G / som
        white_B = white_B / som
    else:  # Minkowski norm is infinite, hence the max algorithm is applied
        R = image_copy[:, :, 0]
        G = image_copy[:, :, 1]
        B = image_copy[:, :, 2]
        white_R = np.max(R * mask_image2)
        white_G = np.max(G * mask_image2)
        white_B = np.max(B * mask_image2)
        som = np.sqrt(white_R ** 2 + white_G ** 2 + white_B ** 2)
        white_R = white_R / som
        white_G = white_G / som
        white_B = white_B / som
    out_image = np.ndarray.copy(image).astype(int)
    out_image[:, :, 0] = image[:, :, 0] / (white_R * np.sqrt(3.0))
    out_image[:, :, 1] = image[:, :, 1] / (white_G * np.sqrt(3.0))
    out_image[:, :, 2] = image[:, :, 2] / (white_B * np.sqrt(3.0))
    # Clipping makes sure there is no overflow when casting back to 8 bits
    out_image[out_image >= 255] = 255
    return white_R, white_G, white_B, out_image
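# Hedged note (added, not part of the original file): with this parameterisation the classic
# estimators appear as special cases, e.g. Grey-World ~ (gaussian_differentiation=0,
# minkowski_norm=1, sigma=0), max-RGB / White-Patch ~ (0, -1, 0) and Grey-Edge ~ (1, p, sigma);
# the call used in the preprocessing scripts below, (0, 6, 0), is a Shades-of-Grey estimator
# with p = 6.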
#
# test_img = cv2.imread(r'C:\Users\Bruger\Pictures\building1.jpg', 1)
# # test_img = cv2.imread(r'C:\Users\ptrkm\OneDrive\Dokumenter\TestFolder\ISIC_0000001.jpg', 1)
# im_rgb = cv2.cvtColor(test_img, cv2.COLOR_BGR2RGB)
# # imtest = np.random.normal(100,10, (250,250,3))
#
# R, G, B, test_img1 = general_color_constancy(im_rgb, gaussian_differentiation=1, minkowski_norm=5, sigma=2)
#
# fig = plt.figure(figsize=(9,12))
# fig.add_subplot(1,2,1)
# plt.imshow(im_rgb)
#
# fig.add_subplot(1,2,2)
# plt.imshow(test_img1)
#
# plt.show()
#
import numpy as np
import argparse
import cv2
def max_rgb_filter(image):
    # split the image into its BGR components
    (B, G, R) = cv2.split(image)
    # find the maximum pixel intensity values for each
    # (x, y)-coordinate, then set all pixel values less
    # than M to zero
    M = np.maximum(np.maximum(R, G), B)
    R[R < M] = 0
    G[G < M] = 0
    B[B < M] = 0
    # merge the channels back together and return the image
    return cv2.merge([B, G, R])
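# Hedged usage sketch (added, not part of the original file): for a single BGR pixel only the
# strongest channel survives.
# px = np.uint8([[[10, 200, 30]]])
# print(max_rgb_filter(px))  # expected: [[[0 200 0]]]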
# load a test image and apply the max-RGB filter
image = cv2.imread(r'C:\Users\ptrkm\Downloads\Billedanalyse\Billedanalyse\Exercises\DTUSign1.jpg', 1)
filtered = max_rgb_filter(image)
cv2.imshow('image1',filtered)
cv2.waitKey(0)
import numpy as np
import matplotlib.pyplot as plt
import cv2
from PIL import Image
from skimage.filters import threshold_otsu
from skimage import measure
from scipy import ndimage, signal
import heapq
import color_constancy as cc
import os
import time
import pandas as pd
plt.close('all')
time_zero = time.time()
width = 600
height = 450
preserve_size = 600
paths = [r'C:\Users\ptrkm\OneDrive\Dokumenter\Bachelor deep learning\Data ISIC\ISIC_2019_Training_Input\\']
return_folder = r'C:\Users\ptrkm\OneDrive\Dokumenter\TestFolder\return\\'
# paths = [r'C:\Users\Bruger\OneDrive\DTU - General engineering\6. Semester\Bachelor\ISBI2016_ISIC_Part2B_Training_Data\TestRunImages\\']
# return_folder = r'C:\Users\Bruger\OneDrive\DTU - General engineering\6. Semester\Bachelor\ISBI2016_ISIC_Part2B_Training_Data\TestRunImagesOutput\\'
standard_size = np.asarray([height, width])
preserve_ratio = True
margin = 0.1
crop_black = True
k = 200
threshold = 0.7
resize = True
use_color_constancy = True
write_to_png = False
write = True
ind = 1
all_heights = 0
all_width = 0
use_cropping = False
errors = []
area_threshold = 0.80
for i, j in enumerate(os.listdir(paths[0])):
    # if j == 'ISIC_0000031_downsampled.jpg':
    if i > 2900:
        if i == 2901:
            t2 = time.time()
            print("i have started " + str(t2 - time_zero))
        try:
            image = cv2.imread(paths[0] + j)
            if image is None:  # cv2.imread returns None instead of raising on failure
                raise IOError
        except:
            print("File " + j + " could not be read :(")
            errors.append(j)
            continue
        if crop_black:
            gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            gray_image = gray_image ** 1.5
            threshold_level = threshold_otsu(gray_image)
            gray_image = ndimage.gaussian_filter(gray_image, sigma=np.sqrt(2))
            binary_image = gray_image < threshold_level
            n, m, _ = image.shape
            mean_left = np.mean(image[n // 2 - k // 2:n // 2 + k // 2, :])
            mean_right = np.mean(image[n // 2 - k // 2:n // 2 + k // 2, m - k:])
            mean_top = np.mean(image[:, m // 2 - m // 2:m // 2 + k // 2])
            mean_bottom = np.mean(image[n - k:, m // 2 - m // 2:m // 2 + k // 2])
            mean_middle = np.mean(image[n // 2 - k:n // 2 + k, m // 2 - k:m // 2 + k])
            if mean_middle > np.max([mean_left, mean_top]):
                binary_image = gray_image > threshold_level
            # We now find features in the binarised blobs
            blob_labels = measure.label(binary_image)
            blob_features = measure.regionprops(blob_labels)
            if blob_features:
                largest_blob_idx = np.argmax(np.asarray([blob_features[idx].area for idx in range(len(blob_features))]))
                largest_blob = blob_features[largest_blob_idx]
                radius = np.mean([largest_blob.major_axis_length, largest_blob.minor_axis_length]) / 2
                equivalent_diameter = largest_blob.equivalent_diameter
                x_min = (largest_blob.centroid[1] - radius + margin * radius).astype(int)
                x_max = (largest_blob.centroid[1] + radius - margin * radius).astype(int)
                y_min = (largest_blob.centroid[0] - radius + margin * radius).astype(int)
                y_max = (largest_blob.centroid[0] + radius - margin * radius).astype(int)
                use_cropping = True
            else:
                use_cropping = False
            if x_min < 0 or x_max > image.shape[1] or y_min < 0 or y_max > image.shape[0]:
                x_center = largest_blob.centroid[1]
                y_center = largest_blob.centroid[0]
                radii = np.arange(0, radius, radius / 20)
                passed = False
                for rad in radii:
                    rad = rad.astype(int)
                    x_min = (largest_blob.centroid[1] - rad + margin * rad).astype(int)
                    x_max = (largest_blob.centroid[1] + rad - margin * rad).astype(int)
                    y_min = (largest_blob.centroid[0] - rad + margin * rad).astype(int)
                    y_max = (largest_blob.centroid[0] + rad - margin * rad).astype(int)
                    if x_min < 0 or x_max > image.shape[1] or y_min < 0 or y_max > image.shape[0]:
                        break
                    area_coefficient = np.sum(binary_image[(y_center - rad).astype(int):(y_center + rad).astype(int),
                                              (x_center - rad).astype(int):(x_center + rad).astype(int)]) / largest_blob.area
                    if area_coefficient >= area_threshold:
                        passed = True
                        radius = rad
                        x_min = (largest_blob.centroid[1] - radius + margin * radius).astype(int)
                        x_max = (largest_blob.centroid[1] + radius - margin * radius).astype(int)
                        y_min = (largest_blob.centroid[0] - radius + margin * radius).astype(int)
                        y_max = (largest_blob.centroid[0] + radius - margin * radius).astype(int)
                        use_cropping = True
                if len(blob_features) > 1 and not passed:
                    indices = np.where(np.arange(len(blob_features)) != largest_blob_idx)[0].astype(int)
                    without_largest = [blob_features[idx] for idx in indices]
                    second_largest_idx = np.argmax(
                        np.asarray([without_largest[idx].area for idx in range(len(without_largest))]))
                    second_largest = without_largest[second_largest_idx]
                    radius = np.mean([second_largest.major_axis_length, second_largest.minor_axis_length]) / 2
                    x_min = (second_largest.centroid[1] - radius + margin * radius).astype(int)
                    x_max = (second_largest.centroid[1] + radius - margin * radius).astype(int)
                    y_min = (second_largest.centroid[0] - radius + margin * radius).astype(int)
                    y_max = (second_largest.centroid[0] + radius - margin * radius).astype(int)
                    if x_min < 0 or x_max > image.shape[1] or y_min < 0 or y_max > image.shape[0]:
                        use_cropping = False
                    else:
                        use_cropping = True
            if use_cropping:
                mean_inside = np.mean(image[y_min:y_max, x_min:x_max, :])
                exclude_x = np.ones(image.shape[1], dtype=int)
                exclude_y = np.ones(image.shape[0], dtype=int)
                mean_outside = (np.mean(image[:y_min, :, :]) + np.mean(image[y_min:y_max, :x_min, :]) +
                                np.mean(image[y_max:, :, :]) + np.mean(image[y_min:y_max, x_max:, :])) / 4
            if np.sum(binary_image) / (n * m) < 0.05 or np.sum(binary_image) / (n * m) > 0.95:
                use_cropping = False
            if use_cropping:
                image = image[y_min:y_max, x_min:x_max, :]
        if image.shape[0] > 0 and image.shape[1] > 0 and image.shape[2] > 0:
            if resize:
                if preserve_ratio:
                    if image.shape[0] > image.shape[1]:
                        image = np.moveaxis(image, [0, 1, 2], [1, 0, 2])
                    if image.shape[1] != preserve_size:
                        ratio = preserve_size / image.shape[1]
                        try:
                            image = cv2.resize(image, dsize=(round(image.shape[0] * ratio), preserve_size))
                        except:
                            print("resize problem on image " + j)
                            errors.append(j)
                            continue
                else:
                    if image.shape[0] > image.shape[1]:
                        image = np.moveaxis(image, [0, 1, 2], [1, 0, 2])
                    if image.shape[0] != standard_size[0] or image.shape[1] != standard_size[1]:
                        image = cv2.resize(image, dsize=(standard_size[0], standard_size[1]))
            if use_color_constancy:
                try:
                    R, G, B, new_image = cc.general_color_constancy(image, 0, 6, 0)
                    new_image = np.uint8(new_image)
                except:
                    print("color constancy problem on image " + j)
                    errors.append(j)
                    continue
            else:
                new_image = image
            if write:
                if write_to_png:
                    im = Image.fromarray(new_image.astype('uint8')).convert('RGB')
                    im.save(return_folder + j.replace('.jpg', '.png'))
                else:
                    im = Image.fromarray(new_image.astype('uint8')).convert('RGB')
                    im.save(return_folder + j)
        else:
            errors.append(j)
    if i % 100 == 0: print(i)
time_one = time.time()
errors_total = pd.DataFrame()
errors_total['all_errors'] = errors
errors_total.to_excel(r'C:\Users\ptrkm\OneDrive\Dokumenter\TestFolder\return\errors.xlsx')
print(time_one-time_zero)
import numpy as np
import matplotlib.pyplot as plt
import cv2
from PIL import Image
from skimage.filters import threshold_otsu
from skimage import measure
from scipy import ndimage, signal
import heapq
import color_constancy as cc
import os
import time
import pandas as pd
width = 600
height = 450
preserve_size = 600
paths = [r'C:\Users\ptrkm\OneDrive\Dokumenter\Bachelor deep learning\Data ISIC\ISIC_2019_Training_Input\\']
return_folder = r'C:\Users\ptrkm\OneDrive\Dokumenter\TestFolder\return\\'
# paths = [r'C:\Users\Bruger\OneDrive\DTU - General engineering\6. Semester\Bachelor\ISBI2016_ISIC_Part2B_Training_Data\TestRunImages\\']
# return_folder = r'C:\Users\Bruger\OneDrive\DTU - General engineering\6. Semester\Bachelor\ISBI2016_ISIC_Part2B_Training_Data\TestRunImagesOutput\\'
standard_size = np.asarray([height, width])
preserve_ratio = True
margin = 0.1
crop_black = True
k = 200
threshold = 0.7
resize = True
use_color_constancy = True
write_to_png = False
write = True
ind = 1
all_heights = 0
all_width = 0
use_cropping = False
errors = []
area_threshold = 0.80
full_data = os.listdir(paths[0])
cropped_data = os.listdir(return_folder)
unused_data = list(set(full_data)-set(cropped_data))
for i, images in enumerate(cropped_data):
    try:
        image = cv2.imread(return_folder + images)
        if image.shape[0] < 50 or image.shape[1] < 50:
            image = cv2.imread(paths[0] + images)
            if resize:
                if preserve_ratio:
                    if image.shape[0] > image.shape[1]:
                        image = np.moveaxis(image, [0, 1, 2], [1, 0, 2])
                    if image.shape[1] != preserve_size:
                        ratio = preserve_size / image.shape[1]
                        try:
                            image = cv2.resize(image, dsize=(round(image.shape[0] * ratio), preserve_size))
                        except:
                            print("resize problem on image " + images)
                            errors.append(images)
                            continue
            R, G, B, new_image = cc.general_color_constancy(image, 0, 6, 0)
            new_image = np.uint8(new_image)
            im = Image.fromarray(new_image.astype('uint8')).convert('RGB')
            im.save(return_folder + images)
    except:
        print(images)
        continue
    if i % 100 == 0: print(i)