import numpy as np
import scipy.optimize as optimize

from colour import SpectralDistribution, XYZ_to_Lab, sd_to_XYZ
from colour.difference import delta_E_CIE1976

# The same wavelength grid is used throughout
wvl = np.arange(360, 830, 1)
wvlp = (wvl - 360) / (830 - 360)
# This is the model of spectral reflectance described in the article: a
# quadratic in the normalized wavelength, squashed into (0, 1) by a sigmoid
# so that the result is always a physically plausible reflectance.
def model(wvlp, ccp):
    yy = ccp[0] * wvlp ** 2 + ccp[1] * wvlp + ccp[2]
    return 1 / 2 + yy / (2 * np.sqrt(1 + yy ** 2))
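# Quick illustration (not part of the original script, purely for exposition):
# with all coefficients at zero the polynomial vanishes and the sigmoid
# collapses to a constant 0.5, i.e. a flat mid-grey reflectance.
#
#     >>> np.unique(model(wvlp, (0, 0, 0)))
#     array([0.5])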

# Create a SpectralDistribution using given coefficients
def model_sd(ccp):
    # FIXME: don't hardcode the wavelength grid; there should be a way
    # of creating a SpectralDistribution from the function alone
    return SpectralDistribution(model(wvlp, ccp), wvl, name="Model")

# The goal is to minimize the color difference between a given Lab color
# (the target) and the one computed from the model above.
def error_function(ccp, target, ill_sd, ill_xy):
    # One thing they didn't mention in the text is that their polynomial
    # is nondimensionalized (the wavelength is mapped from 360--830 nm to 0--1).
    sd = model_sd(ccp)
    Lab = XYZ_to_Lab(sd_to_XYZ(sd, illuminant=ill_sd) / 100, ill_xy)
    return delta_E_CIE1976(target, Lab)

# Map primed (dimensionless) coefficients to normal
# FIXME: is this even useful for anything?
def cc_from_primed(ccp):
    return np.array([
        ccp[0] / 220900,
        ccp[1] / 470 - 36 / 11045 * ccp[0],
        ccp[2] - 36 / 47 * ccp[1] + 1296 / 2209 * ccp[0],
    ])
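# Sanity-check sketch for cc_from_primed (an illustrative addition, not part
# of the original script): the dimensional polynomial evaluated at wvl must
# agree with the primed polynomial evaluated at wvlp.
def _check_cc_from_primed(ccp=(100.0, -50.0, 3.0)):
    cc = cc_from_primed(np.asarray(ccp))
    yy_primed = ccp[0] * wvlp ** 2 + ccp[1] * wvlp + ccp[2]
    yy_dimensional = cc[0] * wvl ** 2 + cc[1] * wvl + cc[2]
    assert np.allclose(yy_primed, yy_dimensional)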

# This callback to scipy.optimize.basinhopping tells the solver to stop once
# the error is small enough. The threshold was chosen arbitrarily, as a small
# fraction of the JND (about 2.3 with this metric).
def cb_basinhopping(x, f, accept):
    return f < 0.1

# Finds the parameters for Jakob and Hanika's model
def jakob_hanika(target, ill_sd, ill_xy, ccp0=(0, 0, 0), verbose=True):
    # First, a conventional solver is called. For 'yellow green' this
    # actually fails: it gets stuck at a local minimum that's far away
    # from the global one.
    # FIXME: better parameters, a better x0, a better method?
    # FIXME: stop iterating as soon as delta E is negligible
    opt = optimize.minimize(
        error_function, ccp0, (target, ill_sd, ill_xy),
        bounds=[[-300, 300]] * 3,
        options={"disp": verbose},
    )
    if verbose:
        print(opt)
    error = error_function(opt.x, target, ill_sd, ill_xy)
    if verbose:
        print("Delta E is %g" % error)

    # Basin hopping is far more likely to find the actual minimum we're
    # looking for, but it's extremely slow in comparison.
    if error > 0.1:
        if verbose:
            print("Error too large, trying global optimization")
        opt = optimize.basinhopping(
            lambda cc: error_function(cc, target, ill_sd, ill_xy),
            opt.x,  # continue from the local solver's best estimate
            disp=verbose, callback=cb_basinhopping,
        )
        if verbose:
            print(opt)
        error = error_function(opt.x, target, ill_sd, ill_xy)
        if verbose:
            print("Global delta E is %g" % error)
    return opt.x, error
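
# Minimal usage sketch (an illustrative addition, not part of the original
# script; it assumes a recent colour-science release that exposes
# SDS_ILLUMINANTS, CCS_ILLUMINANTS and sRGB_to_XYZ, and the test colour is
# arbitrary). Fits the model to an sRGB colour viewed under D65.
if __name__ == "__main__":
    from colour import SDS_ILLUMINANTS, CCS_ILLUMINANTS, sRGB_to_XYZ

    ill_sd = SDS_ILLUMINANTS["D65"]
    ill_xy = CCS_ILLUMINANTS["CIE 1931 2 Degree Standard Observer"]["D65"]

    rgb = np.array([0.7, 0.5, 0.3])  # arbitrary test colour
    target = XYZ_to_Lab(sRGB_to_XYZ(rgb), ill_xy)

    ccp, error = jakob_hanika(target, ill_sd, ill_xy)
    print("Fitted (primed) coefficients:", ccp)
    print("Dimensional coefficients:", cc_from_primed(ccp))
    print("Final delta E:", error)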