# NPRC24 / IVL / blocks.py
import cv2
import numpy as np
import skimage.color as cl
from Grayness_Index import GPconstancy_GI
from scipy.ndimage import gaussian_filter
from skimage import exposure
from skimage.restoration import denoise_nl_means, estimate_sigma
_RGB_TO_YCBCR = np.array([[0.257, 0.504, 0.098],
[-0.148, -0.291, 0.439],
[0.439, -0.368, -0.071]])
_YCBCR_OFF = np.array([0.063, 0.502, 0.502])
def _mul(coeffs, image):
    """Apply a 3x3 color matrix to an HxWx3 image (per-pixel matrix-vector product)."""
    r = image[:, :, 0]
g = image[:, :, 1]
b = image[:, :, 2]
r0 = np.repeat(r[:, :, np.newaxis], 3, 2) * coeffs[:, 0]
r1 = np.repeat(g[:, :, np.newaxis], 3, 2) * coeffs[:, 1]
r2 = np.repeat(b[:, :, np.newaxis], 3, 2) * coeffs[:, 2]
return r0 + r1 + r2
def rgb2ycbcr(rgb, clip_rgb=False):
    """sRGB to YCbCr conversion."""
    if clip_rgb:
        rgb = np.clip(rgb, 0, 1)
    return _mul(_RGB_TO_YCBCR, rgb) + _YCBCR_OFF
def ycbcr2rgb(ycbcr, clip_rgb=False):
    """YCbCr to sRGB conversion."""
    rgb = _mul(np.linalg.inv(_RGB_TO_YCBCR), ycbcr - _YCBCR_OFF)
    if clip_rgb:
        rgb = np.clip(rgb, 0, 1)
    return rgb
def normalize(raw_image, black_level, white_level):
if type(black_level) is list and len(black_level) == 1:
black_level = float(black_level[0])
if type(white_level) is list and len(white_level) == 1:
white_level = float(white_level[0])
    black_level_mask = black_level
    if type(black_level) is list and len(black_level) == 4:
        # Per-channel (2x2 Bayer) black levels. DNG metadata may store them as
        # rational types (e.g. fractions.Fraction or exif ratios), so cast each
        # entry to float before building the mask.
        black_level = [float(b) for b in black_level]
        black_level_mask = np.zeros(raw_image.shape)
idx2by2 = [[0, 0], [0, 1], [1, 0], [1, 1]]
step2 = 2
for i, idx in enumerate(idx2by2):
black_level_mask[idx[0]::step2, idx[1]::step2] = black_level[i]
normalized_image = raw_image.astype(np.float32) - black_level_mask
# if some values were smaller than black level
normalized_image[normalized_image < 0] = 0
normalized_image = normalized_image / (white_level - black_level_mask)
return normalized_image
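
# Example (hypothetical values): normalizing a 10-bit RGGB Bayer frame with
# per-channel black levels and a single white level.
#
#   raw = np.random.randint(64, 1024, size=(256, 256)).astype(np.uint16)
#   norm = normalize(raw, black_level=[64, 64, 64, 64], white_level=[1023.0])
#   # `norm` is a float image in [0, 1]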
class LCC():
def __init__(self, sigma=None):
super(LCC, self).__init__()
        if sigma is None:
            # default: ~1% of the diagonal of a 512x512 image
            sigma = np.sqrt(512 ** 2 + 512 ** 2) * 0.01
self.sigma = sigma
def __call__(self, image):
ycbcr = cl.rgb2ycbcr(image)
y = (ycbcr[:, :, 0] - 16) / 219
cb = ycbcr[:, :, 1]
cr = ycbcr[:, :, 2]
blurred_y = gaussian_filter(y, sigma=self.sigma)
mask = 1 - blurred_y
mean_intensity = np.mean(y)
alpha_lower = np.log(mean_intensity) / np.log(0.5)
alpha_upper = np.log(0.5) / np.log(mean_intensity)
        # pick the gamma base depending on whether the image is globally dark or bright
        alpha = np.where(mean_intensity < 0.5, alpha_lower, alpha_upper)
gamma = alpha ** ((0.5 - mask) / 0.5)
new_y = y ** gamma
new_y = new_y * 219 + 16
new_ycbcr = np.stack([new_y, cb, cr], 2)
im_rgb = cl.ycbcr2rgb(new_ycbcr)
# im_rgb = np.clip(im_rgb, 0, 1)
im_out = contrast_saturation_fix(im_rgb, image)
return im_out
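
# Example (sketch, assuming a float RGB image `rgb_image` in [0, 1]): apply local
# contrast correction. The default sigma is ~1% of the diagonal of a 512x512
# image, so pass an image-dependent value for other resolutions.
#
#   h, w = rgb_image.shape[:2]
#   enhanced = LCC(sigma=np.sqrt(h ** 2 + w ** 2) * 0.01)(rgb_image)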
def contrast_saturation_fix(enhanced_image, input_image, mode="LCC", n_bits=8):
im_ycbcr = rgb2ycbcr(enhanced_image)
or_ycbcr = rgb2ycbcr(input_image)
    y_new = im_ycbcr[:, :, 0]
    cb_new = im_ycbcr[:, :, 1]
    cr_new = im_ycbcr[:, :, 2]
    y = or_ycbcr[:, :, 0]
    cb = or_ycbcr[:, :, 1]
    cr = or_ycbcr[:, :, 2]
# dark pixels percentage
mask = np.logical_and(y < (35 / 255), (((cb - 0.5) * 2 +
(cr - 0.5) * 2) / 2) < (20 / 255))
dark_pixels = mask.flatten().sum()
if dark_pixels > 0:
ipixelCount, _ = np.histogram(y.flatten(), 256, range=(0, 1))
cdf = np.cumsum(ipixelCount)
idx = np.argmin(abs(cdf - (dark_pixels * 0.3)))
b_input30 = idx
ipixelCount, _ = np.histogram(y_new.flatten(), 256, range=(0, 1))
cdf = np.cumsum(ipixelCount)
idx = np.argmin(abs(cdf - (dark_pixels * 0.3)))
b_output30 = idx
bstr = (b_output30 - b_input30)
else:
bstr = np.floor(np.quantile(y_new.flatten(), 0.002) * 255)
if bstr > 50:
bstr = 50
dark_bound = bstr / 255
bright_b = np.floor(np.quantile(y_new.flatten(), 1 - 0.002) * 255)
if (255 - bright_b) > 50:
bright_b = 255 - 50
bright_bound = bright_b / 255
# y_new = (y_new - dark_bound) / (bright_bound - dark_bound)
y_new = exposure.rescale_intensity(y_new, in_range=(
y_new.min(), y_new.max()), out_range=(dark_bound, bright_bound))
y_new = y_new.clip(0, 1)
im_ycbcr[:, :, 0] = y_new
im_new = ycbcr2rgb(im_ycbcr)
im_new = im_new.clip(0, 1)
# Saturation
im_tmp = input_image
r = im_tmp[:, :, 0]
g = im_tmp[:, :, 1]
b = im_tmp[:, :, 2]
r_new = 0.5 * (((y_new / (y + 1e-40)) * (r + y)) + r - y)
g_new = 0.5 * (((y_new / (y + 1e-40)) * (g + y)) + g - y)
b_new = 0.5 * (((y_new / (y + 1e-40)) * (b + y)) + b - y)
im_new[:, :, 0] = r_new
im_new[:, :, 1] = g_new
im_new[:, :, 2] = b_new
return im_new
def gamma_correction(img, exp):
    """Element-wise power law: img ** exp (exp < 1 brightens, exp > 1 darkens)."""
    return img ** exp
def black_stretch(img, perc=0.2):
im_hsv = cl.rgb2hsv(img.clip(0, 1))
v = im_hsv[:, :, 2]
dark_bound = np.quantile(v.flatten(), perc, method='closest_observation')
v_new = (v - dark_bound) / (1 - dark_bound)
im_hsv[:, :, 2] = v_new.clip(0, 1)
out = cl.hsv2rgb(im_hsv)
return out.clip(0, 1)
def saturation_scale(img, scale=2.):
img_hsv = cl.rgb2hsv(img.clip(0, 1))
s = img_hsv[:, :, 1]
s *= scale
img_hsv[:, :, 1] = s
return cl.hsv2rgb(np.clip(img_hsv, 0, 1))
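
# Example (sketch): stretch the darkest 20% of values to black, then boost
# saturation in HSV.
#
#   out = black_stretch(rgb_image, perc=0.2)
#   out = saturation_scale(out, scale=1.5)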
def global_mean_contrast(x, beta=0.5):
x_mean = np.mean(np.mean(x, 0), 0)
x_mean = np.expand_dims(np.expand_dims(x_mean, 0), 0)
x_mean = np.repeat(np.repeat(x_mean, x.shape[1], 1), x.shape[0], 0)
    # interpolate each pixel toward the per-channel global mean:
    # beta < 1 lowers contrast, beta > 1 raises it
    out = x_mean + beta * (x - x_mean)
return out
def sharpening(image, sigma=2.0, scale=1):
gaussian = cv2.GaussianBlur(image, (0, 0), sigma)
unsharp_image = image + scale * (image - gaussian)
return unsharp_image.clip(0, 1)
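
# Example (sketch): mild global contrast boost around the per-channel mean,
# followed by unsharp masking.
#
#   out = global_mean_contrast(rgb_image, beta=1.2)
#   out = sharpening(out, sigma=2.0, scale=0.5)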
def illumination_parameters_estimation(current_image, illumination_estimation_option):
ie_method = illumination_estimation_option.lower()
if ie_method == "gw":
ie = np.mean(current_image, axis=(0, 1))
ie /= ie[1]
return ie
elif ie_method == "sog":
sog_p = 4.
ie = np.mean(current_image**sog_p, axis=(0, 1))**(1 / sog_p)
ie /= ie[1]
return ie
elif ie_method == "wp":
ie = np.max(current_image, axis=(0, 1))
ie /= ie[1]
return ie
elif ie_method == "iwp":
samples_count = 20
sample_size = 20
rows, cols = current_image.shape[:2]
data = np.reshape(current_image, (rows * cols, 3))
maxima = np.zeros((samples_count, 3))
for i in range(samples_count):
maxima[i, :] = np.max(data[np.random.randint(
low=0, high=rows * cols, size=(sample_size)), :], axis=0)
ie = np.mean(maxima, axis=0)
ie /= ie[1]
return ie
else:
raise ValueError(
'Bad illumination_estimation_option value! Use the following options: "gw", "wp", "sog", "iwp"')
def wb(demosaic_img, as_shot_neutral):
as_shot_neutral = np.asarray(as_shot_neutral)
# transform vector into matrix
if as_shot_neutral.shape == (3,):
as_shot_neutral = np.diag(1. / as_shot_neutral)
assert as_shot_neutral.shape == (3, 3)
white_balanced_image = np.dot(demosaic_img, as_shot_neutral.T)
white_balanced_image = np.clip(white_balanced_image, 0.0, 1.0)
return white_balanced_image
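
# Example (sketch): estimate the scene illuminant with shades-of-gray and divide
# it out. `wb` accepts either a green-normalized 3-vector or a full 3x3 matrix.
#
#   ie = illumination_parameters_estimation(rgb_image, "sog")
#   balanced = wb(rgb_image, ie)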
def white_balance(img, n=0.1, th=1e-4, denoise_first=False):
uint = False
if np.issubdtype(img.dtype, np.uint8):
uint = True
if uint:
img = img.astype(np.float32) / 255
tot_pixels = img.shape[0] * img.shape[1]
# compute number of gray pixels
num_gray_pixels = int(np.floor(n * tot_pixels / 100))
# denoise if necessary
    if denoise_first:
        # smooth the image before estimating the illuminant
        # (an NL-means sigma estimate was tried here and left disabled)
        img_ = cv2.GaussianBlur(img, (0, 0), 5)
else:
img_ = img
# compute global illuminant values
lumTriplet = GPconstancy_GI(img_, num_gray_pixels, th)
lumTriplet /= lumTriplet.max()
out = wb(img, lumTriplet)
if uint:
return (out * 255).astype(np.uint8)
else:
return out
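
# Example (sketch): gray-pixel (Grayness Index) white balance; `n` is the
# percentage of pixels treated as gray, and uint8 inputs are returned as uint8.
#
#   balanced = white_balance(rgb_uint8, n=0.1, denoise_first=True)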
def scurve(img, alpha=None, lmbd=1 / 1.8, blacks=False):
x = img
if alpha is None:
im_hsv = cl.rgb2hsv(img.clip(0, 1))
v = im_hsv[:, :, 2]
alpha = np.quantile(v.flatten(), 0.02, method='closest_observation')
if not blacks:
out = np.where(x <= alpha,
x, # alpha - alpha * (1 - x / alpha) ** lmbd,
alpha + (1 - alpha) *
((x - alpha) / (1 - alpha)).clip(min=0) ** lmbd
)
else:
out = np.where(x <= alpha,
alpha - alpha * (1 - x / alpha) ** lmbd,
x
)
# out = out.clip(0, 1)
return out
def scurve_central(img, lmbd=1 / 1.4, blacks=False):
x = img
im_hsv = cl.rgb2hsv(img.clip(0, 1))
v = im_hsv[:, :, 2]
alpha1 = np.quantile(v.flatten(), 0.2, method='closest_observation')
alpha2 = np.quantile(v.flatten(), 0.9, method='closest_observation')
out = np.where(x <= alpha1,
x,
np.where(x >= alpha2,
x,
alpha1 + (alpha2 - alpha1) *
((x - alpha1) / (alpha2 - alpha1)).clip(min=0) ** lmbd
)
)
return out
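
# Example (sketch): lift values above the 2nd-percentile knee with `scurve`,
# then apply a power curve only to the mid-tones between the 20th and 90th
# percentiles with `scurve_central`.
#
#   toned = scurve(rgb_image, lmbd=1 / 1.8)
#   toned = scurve_central(toned, lmbd=1 / 1.4)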
def imadjust(img, hi=0.9999, pi=0.0001):
'''
Python version of matlab imadjust
'''
im_hsv = cl.rgb2hsv(img.clip(0, 1))
v = im_hsv[:, :, 2]
hi = np.quantile(v.flatten(), hi, method='closest_observation')
li = np.quantile(v.flatten(), pi, method='closest_observation')
if hi < 0.7:
hi = np.quantile(v.flatten(), 0.995, method='closest_observation')
if hi == 1:
v_tmp = v.flatten()
v_tmp = v_tmp[v_tmp != 1]
hi = np.quantile(v_tmp, 0.9995, method='closest_observation')
if li == 0:
v_tmp = v.flatten()
v_tmp = v_tmp[v_tmp != 0]
li = np.quantile(v_tmp, 0.0001, method='closest_observation')
x = img
    # map intensities in [li, hi] to the output range [lo, ho]
lo = 0
ho = 0.9
gamma = 1
out = ((x - li) / (hi - li)) ** gamma
out = out * (ho - lo) + lo
return out
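
# Example (sketch): MATLAB-style contrast stretch based on robust V-channel
# percentiles; values at the upper bound are mapped to 0.9 rather than 1.0.
#
#   adjusted = imadjust(rgb_image, hi=0.9999, pi=0.0001)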
def denoise_raw(image, l_w=3, ch_w=20):
im_yuv = cl.rgb2yuv(image)
    # Separately process the luma and chroma channels
patch_kw = dict(patch_size=5,
patch_distance=6
)
sigma_est = np.mean(estimate_sigma(im_yuv[:, :, 0]))
den_y = denoise_nl_means(im_yuv[:, :, 0], h=l_w * sigma_est, fast_mode=True,
**patch_kw)
patch_kw = dict(patch_size=5,
patch_distance=6,
channel_axis=-1
)
    sigma_est = np.mean(estimate_sigma(im_yuv[:, :, 1:3], channel_axis=-1))  # both chroma channels
den_uv = denoise_nl_means(im_yuv[:, :, 1:3], h=ch_w * sigma_est, fast_mode=True,
**patch_kw)
out = im_yuv
out[:, :, 0] = den_y
out[:, :, 1:3] = den_uv
del den_y
del den_uv
out = cl.yuv2rgb(out)
return out
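
# Example (sketch): non-local-means denoising in YUV with a stronger weight on
# chroma (`ch_w`) than on luma (`l_w`); expects a float RGB image in [0, 1].
#
#   clean = denoise_raw(rgb_image, l_w=3, ch_w=20)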
def denoise_rgb(image, l_w=3, ch_w=None):
#patch_kw = dict(patch_size=5,
# patch_distance=6
# )
patch_kw = {}
sigma_est = np.mean(estimate_sigma(image, channel_axis=2))
out = denoise_nl_means(image, h=l_w * sigma_est, fast_mode=True,
**patch_kw, channel_axis=2)
return out
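
if __name__ == "__main__":
    # Smoke test on a synthetic image: a sketch of one possible ordering of the
    # blocks above, not the exact NPRC24 submission pipeline.
    rng = np.random.default_rng(0)
    img = rng.random((128, 128, 3)).astype(np.float32)

    ie = illumination_parameters_estimation(img, "gw")   # gray-world illuminant
    img = wb(img, ie)                                    # white balance
    img = gamma_correction(img, 1 / 2.2)                 # display gamma
    img = LCC()(img)                                     # local contrast correction
    img = global_mean_contrast(img, beta=1.1)            # global contrast
    img = sharpening(img, sigma=2.0, scale=0.5)          # unsharp masking
    print("output range:", float(img.min()), float(img.max()))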