Drawing contours from label_image

Question:

I have a label_image array and I am deriving the outlines/boundaries of the objects in that array. Currently I do that by getting all unique labels, iterating over them and finding the contours of each object, as in the loop below, where I populate a dict with the labels as keys and the contours as values:

import cv2
import pandas as pd
import numpy as np


def extract_borders(label_image):
    labels = np.unique(label_image[label_image > 0])
    d = {}
    for label in labels:
        y = label_image == label
        y = y * 255
        y = y.astype('uint8')
        contours, hierarchy = cv2.findContours(y, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = np.squeeze(contours)
        d[label] = contours.tolist()
    df = pd.DataFrame([d]).T
    df = df.reset_index()
    df.columns = ['label', 'coords']
    return df


if __name__ == "__main__":
    label_img = np.array([
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 0, 0, 0],
        [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ])

    res = extract_borders(label_img)
    print(res)

When there are thousands of labels this can be a real bottleneck. Is there a more efficient way to do this, please? Maybe there is a function I am not aware of… I want to be able to assign each label to its corresponding contours.

The code above prints:

   label                                             coords
0      1                   [[5, 6], [5, 9], [9, 9], [9, 6]]
1      2  [[3, 3], [3, 12], [11, 12], [11, 10], [5, 10],...
2      3  [[12, 5], [11, 6], [10, 6], [10, 9], [11, 9], ...
3      4  [[12, 3], [12, 4], [14, 4], [15, 5], [15, 10],...
Asked By: Aenaon


Answers:

I had a try with multiprocessing, as follows, and got a 2.5x speedup using 6 CPUs:

#!/usr/bin/env python3

from multiprocessing import Pool, cpu_count, freeze_support
from functools import partial
import pandas as pd
import numpy as np
import cv2
import os

def worker(labels, label_image):
    """One worker started per CPU. Receives the label image once and a list of the labels to look for."""
    pid = os.getpid()
    print(f'Worker pid: {pid}, processing {len(labels)} labels')
    d = {}
    for label in labels:
        y = label_image == label
        y = y * 255
        y = y.astype('uint8')
        contours, _ = cv2.findContours(y, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = np.squeeze(contours)
        d[label] = contours.tolist()
    return d

if __name__ == '__main__':

    freeze_support()

    # Synthesize label image: 2500 labels arranged as 50x50 image, then scaled up to some realistic size
    label_image = np.arange(2500, dtype=np.uint16).reshape((50,50))
    label_image = cv2.resize(label_image, (4000,4000), interpolation=cv2.INTER_NEAREST)

    # Get number of cores and split labels across that many workers
    processes = cpu_count()

    # UNCOMMENT FOLLOWING LINE FOR SINGLE CPU
    # processes = 1
    print(f'Using {processes} processes')

    labels = np.unique(label_image[label_image > 0])
    print(f'Unique labels detected: {labels}')

    # Chunk up the labels across the processes
    chunks = np.array_split(labels, processes)

    # Map the labels across the processes
    with Pool(processes=processes) as pool:
        result = pool.map(partial(worker, label_image=label_image), chunks)

    #print(result)

Sample Output with 12 cores

Using 12 processes
Unique labels detected: [   1    2    3 ... 2497 2498 2499]
Worker pid: 76884, processing 209 labels
Worker pid: 76886, processing 209 labels
Worker pid: 76885, processing 209 labels
Worker pid: 76888, processing 208 labels
Worker pid: 76887, processing 208 labels
Worker pid: 76889, processing 208 labels
Worker pid: 76890, processing 208 labels
Worker pid: 76893, processing 208 labels
Worker pid: 76891, processing 208 labels
Worker pid: 76892, processing 208 labels
Worker pid: 76895, processing 208 labels
Worker pid: 76894, processing 208 labels
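
If you also need the same label/coords DataFrame as in the question, the per-worker dicts in result can be merged afterwards. A minimal sketch (untimed, simply reusing the DataFrame construction from the question):

# Merge the per-worker dicts into one dict, then build the same
# label/coords DataFrame as in the question.
merged = {}
for chunk_result in result:
    merged.update(chunk_result)

df = pd.DataFrame([merged]).T.reset_index()
df.columns = ['label', 'coords']
print(df.head())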
Answered By: Mark Setchell

The DIPlib library has a function to extract the chain code for each object in the image. It does require, however, that each object is connected (the pixels with the same label must form a connected component). Using Mark’s large example image, this brings the computation time from 154.8s down to 0.781s, about 200 times faster. And most of that time, I think, is spent converting the chain code into a polygon, into a numpy array, into a list, and finally into a pandas table. Lots of conversions…

One thing to note: the chain codes returned by dip.GetImageChainCodes are as you’d expect: they trace the outer pixels of each object. Converting these to a polygon, however, does something different: the polygon doesn’t link the outer pixels, but goes around them, following the "crack" between the pixels, cutting pixel corners as it does so. This leads to a polygon that describes the actual object much better: its area is exactly half a pixel smaller than the number of pixels in the object, and its length is much closer to the perimeter of the underlying object (before it was discretized into a set of pixels). This idea comes from Steve Eddins at the MathWorks.

import pandas as pd
import numpy as np
import diplib as dip
import cv2
import time

def extract_borders(label_image):
    labels = np.unique(label_image[label_image > 0])
    d = {}
    for label in labels:
        y = label_image == label
        y = y * 255
        y = y.astype('uint8')
        contours, hierarchy = cv2.findContours(y, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = np.squeeze(contours)
        d[label] = contours.tolist()
    df = pd.DataFrame([d]).T
    df = df.reset_index()
    df.columns = ['label', 'coords']
    return df

def extract_borders_dip(label_image):
    cc = dip.GetImageChainCodes(label_image)  # input must be an unsigned integer type
    d = {}
    for c in cc:
        d[c.objectID] = np.array(c.Polygon()).tolist()
    df = pd.DataFrame([d]).T
    df = df.reset_index()
    df.columns = ['label', 'coords']
    return df

if __name__ == "__main__":
    label_img = np.arange(2500, dtype=np.uint16).reshape((50,50))
    label_img = cv2.resize(label_img, (4000,4000), interpolation=cv2.INTER_NEAREST)
    start = time.process_time()
    res = extract_borders(label_img)
    print('OP code:', time.process_time() - start)
    print(res)
    start = time.process_time()
    res = extract_borders_dip(label_img)
    print('DIPlib code: ', time.process_time() - start)
    print(res)
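
As a quick sanity check of the half-pixel claim above, the polygon area can be compared against the pixel count with the shoelace formula. A minimal sketch (hypothetical helper, using only numpy on the coords returned by extract_borders_dip):

def shoelace_area(coords):
    # Shoelace formula: area of a simple polygon given as an (N, 2) list/array of vertices.
    p = np.asarray(coords)
    x, y = p[:, 0], p[:, 1]
    return 0.5 * abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

# The crack-following polygon should enclose exactly half a pixel less than the pixel count.
label = res['label'].iloc[0]
poly_area = shoelace_area(res['coords'].iloc[0])
pixel_count = np.count_nonzero(label_img == label)
print(label, poly_area, pixel_count)  # expect poly_area == pixel_count - 0.5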
Answered By: Cris Luengo