import os
import time
import random
import glog as log
import numpy as np
from typing import List

from utils.io import read_image, write_processed_as_jpg, write_illuminant_estimation
import ips

expected_landscape_img_height = 768  # 6144
expected_landscape_img_width = 1024  # 8192


# Flash TMO works better with a=20 and Leap_35 for night images.
# Storm TMO tends to need a higher a value than the default one. Leap is a must.
# The result is not stable across different illuminant settings, so the scale parameter should be made adaptive.
# Luma and color statistics are likely the best option we have for making it adaptive
# (a hypothetical sketch of this idea, estimate_tmo_scale, follows single_run below).
# A higher number of kernels reveals more detail in local areas; however, too many kernels produce flares or make the result look unrealistic.
def single_run(
    base_dir: str,
    img_names: List[str],
    out_dir: str,
    wb_method: str = "iwp",
    tmo_type: str = "nite",
    tv_weight: int = 20
):
    log.info(
        "Parameters:\n"
        f"WB Method: {wb_method}\n"
        f"TMO Type: {tmo_type}\n"
        f"Luma TV weight : {tv_weight}\n"
    )
    os.makedirs("./" + out_dir, exist_ok=True)
    # random.shuffle(img_names)
    infer_times = list()

    for i, img_name in enumerate(img_names):
        p = round(100 * (i+1) / len(img_names), 2)
        log.info(f"({p:.2f}%) Processing {i+1} of {len(img_names)} images, image name: {img_name}")
        path = os.path.join(base_dir, img_name)
        assert os.path.exists(path)

        raw_image, metadata = read_image(path)
        save_ill_est = metadata["wb_estimation"] is None
        metadata["exp_height"] = expected_landscape_img_height
        metadata["exp_width"] = expected_landscape_img_width
        metadata["wb_method"] = wb_method
        metadata["tv_weight"] = tv_weight
        metadata["tmo_type"] = tmo_type
        if tmo_type.lower() in ["flash", "storm"]:
            metadata["tmo_scale"] = 10  # 20 can be also used, 10 better for some images, but 20 for some others depending on the variety of the illuminant source.
        if tmo_type.lower() in ["storm", "nite"]:
            metadata["tmo_kernels"] = (1, 2, 4, 8, 16, 32)  # more than 16, produce flares in dark regions in the case of occlusion.
        metadata["tmo_do_leap"] = True  # Leap is must for Flash, Storm and Nite.
        metadata["global_mc_beta"] = 1.2
        metadata["scc_alpha"] = 0.5
        metadata["scc_lambda"] = 0.9
        
        out_path = os.path.join(out_dir, img_name.replace(".png", ".jpg"))
        if os.path.exists(out_path):
            continue
        start_time = time.time()
        out = ips.process(raw_image=raw_image, metadata=metadata)
        end_time = time.time()
        infer_times.append(end_time - start_time)

        if save_ill_est:
            ill_est_path = os.path.join(out_dir, img_name.replace(".png", "_wb.json"))
            write_illuminant_estimation(metadata["wb_estimation"], ill_est_path)
        write_processed_as_jpg(out, out_path)
    print(f"Average inference time: {np.mean(infer_times)} seconds")

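
# A minimal, hypothetical sketch of the adaptive-scale idea noted in the comments above:
# derive the TMO scale from simple luma statistics of the raw image. The helper name, the
# plain mean statistic, and the [10, 20] range are assumptions and are not wired into the pipeline.
def estimate_tmo_scale(raw_image: np.ndarray, low: float = 10.0, high: float = 20.0) -> float:
    # Normalize to [0, 1] and use the mean as a crude global-luma proxy.
    luma = raw_image.astype(np.float64)
    luma = luma / max(float(luma.max()), 1e-8)
    mean_luma = float(np.mean(luma))
    # Darker scenes get a larger scale; brighter scenes a smaller one.
    return float(np.clip(high - (high - low) * mean_luma, low, high))
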

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='Night Photography Rendering Challenge - Team VGL OzU')
    parser.add_argument('-d', '--data_dir', type=str, default="data/", help="data directory")
    parser.add_argument('-o', '--output_dir', type=str, default="results/", help="output directory")
    parser.add_argument('-s', '--submission_name', type=str, default="vgl-ozu", help='submission name')
    args = parser.parse_args()

    data_dir = args.data_dir
    if not os.path.exists(data_dir) or len(os.listdir(data_dir)) == 0:
        log.info(f"Data does not exist, please put the data from given link into '{data_dir}'...")
        os.makedirs(data_dir, exist_ok=True)
        log.info("After this, please re-run.")
    else:
        base_dir = args.data_dir
        out_dir = args.output_dir
        img_names = os.listdir(base_dir)
        img_names = [img_name for img_name in img_names if img_name.endswith(".png")]
        single_run(base_dir, img_names, out_dir)
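
# Example invocation (the script filename below is an assumption; use the actual file name):
#   python main.py --data_dir data/ --output_dir results/ --submission_name vgl-ozu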