Alternative, faster method for real-time RGB to self-defined colorspace image conversion in Python

Question:

I have defined my own colorspace (Yellow-Blue) using some loops, and want to convert a standard HD image from RGB to YB in real time, with some post-processing filters, but the method I wrote does the job far too slowly.


Context:

I was wondering what colors would dogs see, and found that they cannot distinguish between green and red:
[image: dog color vision chart]

So I decided to define my own YB colorspace, as shown in this scheme:
[image: scheme of the YB colorspace]

calculating.py

bits = 8
values = 2 ** bits - 1
color_count = values * 6


def hues():
    lst = []
    for i in range(color_count):
        r = g = b = 0

        turn = (i // values) + 1

        if turn == 1:
            r = values
            g = i % values
            b = 0

        elif turn == 2:
            r = values - i % values
            g = values
            b = 0

        elif turn == 3:
            r = 0
            g = values
            b = i % values

        elif turn == 4:
            r = 0
            g = values - i % values
            b = values

        elif turn == 5:
            r = i % values
            g = 0
            b = values

        elif turn == 6:
            r = values
            g = 0
            b = values - i % values

        r = round(r / values * 255)
        g = round(g / values * 255)
        b = round(b / values * 255)

        lst.append((r, g, b))

    return lst


def dues():
    lst = []
    for i in range(color_count):
        r = g = b = 0

        turn = (i // values) + 1

        if turn == 1:
            r = values
            g = values
            b = round((values - i % values) / 2)

        elif turn == 2:
            r = values
            g = values
            b = round((i % values) / 2)

        elif turn == 3:
            if i % values < values / 2:
                r = values
                g = values
                b = round((values / 2 + i % values))
            else:
                r = round((3 / 2 * values - i % values))
                g = round((3 / 2 * values - i % values))
                b = values

        elif turn == 4:
            r = round((values - i % values) / 2)
            g = round((values - i % values) / 2)
            b = values

        elif turn == 5:
            r = round((i % values) / 2)
            g = round((i % values) / 2)
            b = values

        elif turn == 6:
            if i % values < values / 2:
                r = round((values / 2 + i % values))
                g = round((values / 2 + i % values))
                b = values
            else:
                r = values
                g = values
                b = round((3 / 2 * values - i % values))

        r = round(r / values * 255)
        g = round(g / values * 255)
        b = round(b / values * 255)

        lst.append((r, g, b))

    return lst


def rgb_to_hsl(color: tuple):
    r, g, b = color

    r /= 255
    g /= 255
    b /= 255

    cmax = max(r, g, b)
    cmin = min(r, g, b)
    delta = cmax - cmin

    h = 0
    l = (cmax + cmin) / 2

    if delta == 0:
        h = 0
    elif cmax == r:
        h = ((g - b) / delta) % 6
    elif cmax == g:
        h = ((b - r) / delta) + 2
    elif cmax == b:
        h = ((r - g) / delta) + 4

    h *= 60

    if delta == 0:
        s = 0
    else:
        s = delta / (1 - abs(2 * l - 1))

    return h, s, l

def hsl_to_rgb(color: tuple):
    h, s, l = color

    c = (1 - abs(2 * l - 1)) * s
    x = c * (1 - abs((h / 60) % 2 - 1))
    m = l - c / 2

    r = g = b = 0

    if 0 <= h < 60:
        r = c
        g = x
    elif 60 <= h < 120:
        r = x
        g = c
    elif 120 <= h < 180:
        g = c
        b = x
    elif 180 <= h < 240:
        g = x
        b = c
    elif 240 <= h < 300:
        r = x
        b = c
    elif 300 <= h < 360:
        r = c
        b = x

    r = round((r + m) * 255)
    g = round((g + m) * 255)
    b = round((b + m) * 255)

    return r, g, b
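
As a quick sanity check (not part of the original script), the two converters above should invert each other, up to rounding:

from calculating import rgb_to_hsl, hsl_to_rgb

# Round-trip a few colors; each should come back unchanged (up to rounding)
for color in [(255, 0, 0), (0, 255, 0), (0, 0, 255), (128, 200, 64)]:
    assert hsl_to_rgb(rgb_to_hsl(color)) == color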

On saving the list values I obtained the expected Hues:
[image: generated hue and due strips]
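
One possible way to dump the two palettes as strip images for inspection (a sketch with Pillow; not necessarily how the strips above were produced):

from PIL import Image
from calculating import hues, dues

# Save each palette as a thin strip, stretched vertically so it is easy to eyeball
for name, palette in (("hues", hues()), ("dues", dues())):
    strip = Image.new("RGB", (len(palette), 1))
    strip.putdata(palette)
    strip.resize((len(palette), 50), Image.NEAREST).save(f"{name}_strip.png")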


Now the main processing includes pixel-by-pixel conversion of color in this order:

  1. Obtaining RGB
  2. RGB -> HSL
  3. Change value of hue to corresponding value in dues_hsl list
  4. New HSL -> RGB
  5. Set new RGB value at same coordinates in another array

This is repeated for every pixel in the image, and it took about 58 seconds on a test image of 481 x 396 pixels.

Input and output:

[image: input] [image: output]

Code for the same:

defining.py

from PIL import Image
import numpy as np
from calculating import hues, dues
from calculating import rgb_to_hsl as hsl
from calculating import hsl_to_rgb as rgb


hues = hues()
dues = dues()

# Hues = human hues
# Dues = dog hues


hues_hsl = [hsl(i) for i in hues]
dues_hsl = [hsl(i) for i in dues]

img = np.array(Image.open('dog.png').convert('RGB'))

arr_blank = np.zeros(img.shape[0:3])
print(arr_blank.shape)
print(img.shape[0:3])

total = img.shape[0] * img.shape[1]

for i in range(img.shape[0]):
    for j in range(img.shape[1]):
        hsl_val = hsl(tuple(img[i, j]))
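        # find the human hue closest to this pixel's hue, then take the dog hue
        # stored at the same index in dues_hsl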
        h = dues_hsl[hues_hsl.index(min(hues_hsl, key=lambda x: abs(x[0] - hsl_val[0])))][0]
        pixel = np.array(rgb((h, hsl_val[1], hsl_val[2])))
        arr_blank[i, j, :] = pixel

        print(f'{i * img.shape[1] + j} / {total}  ---  {(i * img.shape[1] + j)/total*100} %')

print(arr_blank)
data = Image.fromarray(arr_blank.astype('uint8'), 'RGB')
data.save('dog_color.png')

Conclusion:

After this I want to add a Gaussian blur filter too, post-conversion, in real time, but the conversion alone is already taking this long for a single frame. Is there a way the speed can be improved?
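
For reference, the blur step itself should not be the bottleneck; a minimal sketch with Pillow's built-in filter, applied to the dog_color.png produced above:

from PIL import Image, ImageFilter

# Gaussian blur of the converted image; radius is in pixels and can be tuned
blurred = Image.open('dog_color.png').filter(ImageFilter.GaussianBlur(radius=2))
blurred.save('dog_color_blurred.png')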

Machine info:

If this info is helpful: i7-10750H @ 2.6 GHz, SSD, 16 GB RAM


Thanks!

Asked By: Ishan Jindal


Answers:

I had forgotten Pillow also does HSV just as well, so no need for OpenCV.

This executes in about 0.45 seconds on my machine.

from PIL import Image
import numpy as np

values = 2 ** 8 - 1
color_count = values * 6


def dog_hues():
    # ... from original post, removed for brevity...
    return lst

# Convert the dog_hues() list into a color_count x 1 image, then resize it down to 256 x 1
hue_map_img = Image.new("RGB", (color_count, 1))
hue_map_img.putdata(dog_hues())
hue_map_img = hue_map_img.resize((256, 1), Image.LANCZOS)
# Get the hues out of it
hsv_array = np.array(hue_map_img.convert("HSV"))
hue_map = hsv_array[:, :, 0].flatten()
# Read in the dog, convert it to HSV
img = np.array(Image.open("dog.jpg").convert("HSV"))
# Remap hue
img[:, :, 0] = hue_map[img[:, :, 0]]
# Convert back to RGB and save
img = Image.fromarray(img, "HSV").convert("RGB")
img.save("dog_hsv.jpg")
Answered By: AKX

1st remark: you can't really change colorspace like this, because when you see a color that the human eye (and therefore human RGB image formats) interprets as yellow, like (255, 255, 0), you can't know whether it is made of a yellow frequency (570 nm, for example) that excites both our red and green cones but not the blue ones, or whether it is made of a mixture of red frequencies (690 nm, for example) and green frequencies (530 nm), or any other spectrum that leaves the red and green cones saturated (255, 255) and the blue one untouched (0).

And you need that information to deduce how the two dog cones are impacted.

In other words, there isn't any mapping between human color and dog color. In mathematical terms, there is a projection from the real color space (an infinite-dimensional spectrum) onto the human color space (3D, to simplify: r, g and b), and another projection from the real color space onto the dog colorspace (2D, also to simplify). But those projection axes are not contained one in the other, so there is no projection from the 3D human color space to the 2D dog colorspace. There is no way to know how a dog sees a color knowing only how a human sees it; you need to know the real color. You could do this with a hyperspectral camera (and apply both projections to compute both the human RGB image and the dog YB image). And that assumes the rather naive (but correct to a first approximation) idea that color follows elementary college-level linear algebra, which in reality it doesn't exactly.
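
To make the projection argument concrete, here is a toy numerical illustration (all sensitivity numbers below are invented purely for illustration): two physically different spectra that a 3-cone observer cannot tell apart, but a 2-cone observer can.

import numpy as np

# Invented cone sensitivities: rows = cones, columns = 4 wavelength bands
human = np.array([[0.9, 0.3, 0.0, 0.0],
                  [0.1, 0.8, 0.3, 0.0],
                  [0.0, 0.0, 0.2, 0.9]])
dog = np.array([[0.5, 0.7, 0.1, 0.0],
                [0.0, 0.0, 0.3, 0.8]])

# A direction along which the spectrum can change without the human noticing:
# a vector in the null space of the human sensitivity matrix
null_dir = np.linalg.svd(human)[2][-1]   # human @ null_dir is (numerically) zero

spec_a = np.array([1.0, 0.5, 0.3, 0.2])
spec_b = spec_a + 0.2 * null_dir         # a different physical spectrum

print(human @ spec_a, human @ spec_b)    # (nearly) identical -> same color for us
print(dog @ spec_a, dog @ spec_b)        # different -> different color for the dog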

That being said, PIL- or OpenCV-based solutions are an answer. But more generally speaking, if you don't trust PIL or OpenCV, or any existing library's color model, and really want to reinvent the wheel (I respect that; there is no better way to understand things than to reinvent the wheel), then one rule you must abide by is: never, ever iterate over pixels. If you do that, you have lost the performance battle. Python is very, very slow. The only reason it is still a popular language, and why there are still fast programs written in Python, is that Python coders do whatever it takes so that the computation-heavy loops (in image processing, those are the loops over the pixels) are not actually executed in Python.

So you must rely on numpy to perform your operation on all pixels, and not write the for loops yourself.

For example, here is a rewrite of your rgb_to_hsl doing batch computation with numpy. That is, this rgb_to_hsl is not meant to be called with a single color, but with a whole 2D array of colors, i.e. an image:

import numpy as np

def rgb_to_hsl(image):
    # rgb holds the r,g,b channels between 0 and 1 (as you did for individual
    # r,g,b variables), but it is easier (see below) to keep them as a single
    # array. rgb is not just a triplet (unlike your r,g,b) but a 2d-array of
    # triplets (so a 3d-array)
    rgb = image / 255

    # Likewise, cmax, cmin, delta are not scalars as in your code, but
    # 2d-arrays of such scalars
    cmax = rgb.max(axis=2)  # axis=2 means that axes 0 and 1 are kept, and max
                            # is computed along axis 2, that is along the 3
                            # values of each triplet. So rgb is a HxWx3
                            # 3d-array (axis 0 = y, axis 1 = x, axis 2 = color
                            # channel). cmax is a HxW 2d-array
    cmin = rgb.min(axis=2)  # likewise
    delta = cmax - cmin     # same code, but done on all HxW cmax and cmin at once

    h = np.zeros_like(delta)  # 2d-array of 0
    l = (cmax + cmin) / 2     # 2d-array of (cmax+cmin)/2

    # Here comes the trickier part. We need to separate the cases, and do the
    # computation on each subset of pixels concerned by each case
    case1 = delta == 0
    h[case1] = 0  # In reality we could skip this, since h is already 0 everywhere

    # Excluding case1 from the other masks avoids dividing by zero where delta == 0
    case2 = ~case1 & (cmax == rgb[:, :, 0])  # cmax == r
    h[case2] = (rgb[case2, 1] - rgb[case2, 2]) / delta[case2] % 6

    case3 = ~case1 & (cmax == rgb[:, :, 1])  # cmax == g
    h[case3] = (rgb[case3, 2] - rgb[case3, 0]) / delta[case3] + 2

    case4 = ~case1 & (cmax == rgb[:, :, 2])  # cmax == b
    h[case4] = (rgb[case4, 0] - rgb[case4, 1]) / delta[case4] + 4

    h *= 60  # Same code, applied to all HxW values of h

    s = np.zeros_like(h)
    s[case1] = 0  # same remark: I just mimic your code as much as possible,
                  # but that is already the default value
    s[~case1] = delta[~case1] / (1 - abs(2 * l[~case1] - 1))
    # ~case1 is the opposite of case1, so this is the equivalent of your else

    # returns 3 HxW 2d-arrays for h, s and l
    return h, s, l
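
The remaining per-pixel work in the original loop, the nearest-hue lookup, can be vectorized the same way. A minimal sketch (not part of the rewrite above), assuming hues_hsl and dues_hsl are the lists from calculating.py and h is the HxW hue array returned by this rgb_to_hsl:

import numpy as np

human_hues = np.array([c[0] for c in hues_hsl])
dog_hues = np.array([c[0] for c in dues_hsl])

# Precompute, once, a 361-entry table: for every integer hue 0..360, the dog hue
# of the nearest human hue. This replaces the per-pixel min(..., key=...) search.
grid = np.arange(361)
nearest = np.abs(grid[:, None] - human_hues[None, :]).argmin(axis=1)
hue_lut = dog_hues[nearest]

# Remap every pixel's hue in one shot
new_h = hue_lut[np.rint(h).astype(int)]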
Answered By: chrslg