import numpy as np

def get_depth_map(depth_map_raw: np.ndarray) -> np.ndarray:
    """
    Converts a raw RGB-encoded depth map into a single-channel depth map in meters.

    Parameters:
        depth_map_raw (np.ndarray): The raw RGB depth map where each pixel's RGB values encode depth information.
                                    The shape should be (height, width, 3).

    Returns:
        np.ndarray: The decoded depth map where each pixel value is the depth at that point in meters,
                    in the range [0, 1000]. The shape of the returned array is (height, width).

    Description:
        The depth camera encodes the distance of each pixel to the camera (the depth buffer, or z-buffer)
        with 24-bit precision across the three RGB channels, from least to most significant byte: R -> G -> B.

            R         G         B         int24       depth
            00000000  00000000  00000000  0           min (near)
            11111111  11111111  11111111  16777215    max (far)

        The depth is recovered by converting the RGB values to a single floating-point number:
            depth = (R + G * 256 + B * 256^2) / (256^3 - 1) * 1000
        Here R, G, and B are the red, green, and blue channel values respectively, and the formula maps
        the 24-bit integer to a depth in meters between 0 and 1000.

        For more details, please refer to: https://carla.readthedocs.io/en/latest/ref_sensors/#depth-camera
    """
    # Split the image into its color channels as floats.
    R = depth_map_raw[..., 0].astype(np.float32)
    G = depth_map_raw[..., 1].astype(np.float32)
    B = depth_map_raw[..., 2].astype(np.float32)
    # Rebuild the 24-bit integer (R is the least significant byte), normalize
    # it to [0, 1], and scale to meters (the far plane is at 1000 m).
    depth_map = (R + G * 256 + B * 256 * 256) / (256 * 256 * 256 - 1) * 1000
    return depth_map
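

# A minimal usage sketch: decodes a tiny synthetic RGB frame to illustrate the
# formula above. The helper name `_demo_depth_decoding` and the hand-built
# 2x2 input are illustrative assumptions, not CARLA output.
def _demo_depth_decoding() -> None:
    raw = np.zeros((2, 2, 3), dtype=np.uint8)
    raw[0, 0] = (255, 255, 255)   # int24 = 16777215 -> 1000 m (far plane)
    raw[0, 1] = (255, 255, 0)     # int24 = 65535 -> ~3.9 m
    depth = get_depth_map(raw)
    assert depth.shape == (2, 2)
    assert np.isclose(depth[0, 0], 1000.0)
    assert depth[1, 1] == 0.0     # an all-zero pixel decodes to 0 m (near plane)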


def get_segmentation_map(segmentation_map_raw: np.ndarray, colorize=False) -> np.ndarray:
    """
    Extracts a segmentation map from a raw color segmentation image. Depending on the 'colorize' flag,
    this function either returns a single-channel map or a colorized segmentation map where each label
    is mapped to a specific RGB color defined by the label_colors dictionary.

    Parameters:
        segmentation_map_raw (np.ndarray): The raw color segmentation image, with shape (height, width, 3).
                                           The first channel (R) encodes the segmentation tag of each pixel.
        colorize (bool): If True, returns a colorized (RGB) segmentation map. If False, returns the
                         segmentation map's R channel as a float32 array.

    Returns:
        np.ndarray: If colorize is False, returns the extracted single-channel segmentation map,
                    where values are floating-point, with the shape (height, width). If colorize is True,
                    returns a 3-channel RGB image where each segmentation label is mapped to a predefined color.

    Description:
        The semantic segmentation camera classifies every object in the view by displaying it in a different color
        according to the object class. For example, pedestrians appear in a different color than vehicles.
        It provides an image with the tag information encoded in the red channel. A pixel with a red value of x
        displays an object with tag x. When 'colorize' is True, each pixel's label is converted to a specific color
        based on a predefined dictionary mapping labels to colors.

        For more details, please refer to: https://carla.readthedocs.io/en/latest/ref_sensors/#semantic-segmentation-camera
    """
    if colorize:
        # Tag-to-color palette from the CARLA semantic segmentation camera documentation.
        label_colors = {
            0: (0, 0, 0),
            1: (128, 64, 128),
            2: (244, 35, 232),
            3: (70, 70, 70),
            4: (102, 102, 156),
            5: (190, 153, 153),
            6: (153, 153, 153),
            7: (250, 170, 30),
            8: (220, 220, 0),
            9: (107, 142, 35),
            10: (152, 251, 152),
            11: (70, 130, 180),
            12: (220, 20, 60),
            13: (255, 0, 0),
            14: (0, 0, 142),
            15: (0, 0, 70),
            16: (0, 60, 100),
            17: (0, 80, 100),
            18: (0, 0, 230),
            19: (119, 11, 32),
            20: (110, 190, 160),
            21: (170, 120, 50),
            22: (55, 90, 80),
            23: (45, 60, 150),
            24: (157, 234, 50),
            25: (81, 0, 81),
            26: (150, 100, 100),
            27: (230, 150, 140),
            28: (180, 165, 180)
        }
        
        height, width = segmentation_map_raw.shape[:2]
        rgb_image = np.zeros((height, width, 3), dtype=np.uint8)

        # Paint every pixel carrying a known tag with its class color;
        # pixels with unknown tags are left black (0, 0, 0).
        for label, color in label_colors.items():
            rgb_image[segmentation_map_raw[..., 0] == label] = color

        return rgb_image
    else:
        return segmentation_map_raw[..., 0].astype(np.float32)
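

# A minimal usage sketch: tags two pixels in a tiny synthetic frame and checks
# both output modes. The helper name `_demo_segmentation_colorize` and the
# hand-built input are illustrative assumptions, not CARLA output.
def _demo_segmentation_colorize() -> None:
    seg_raw = np.zeros((2, 2, 3), dtype=np.uint8)
    seg_raw[0, 0, 0] = 1    # tag 1 -> (128, 64, 128) in the palette above
    seg_raw[0, 1, 0] = 12   # tag 12 -> (220, 20, 60)
    labels = get_segmentation_map(seg_raw)                 # float32, shape (2, 2)
    colors = get_segmentation_map(seg_raw, colorize=True)
    assert labels.dtype == np.float32 and labels[0, 0] == 1.0
    assert (colors[0, 1] == (220, 20, 60)).all()
    assert (colors[1, 1] == (0, 0, 0)).all()               # untagged pixels stay black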