summaryrefslogtreecommitdiff
path: root/jakob_hanika.py
diff options
context:
space:
mode:
Diffstat (limited to 'jakob_hanika.py')
-rw-r--r--jakob_hanika.py66
1 files changed, 38 insertions, 28 deletions
diff --git a/jakob_hanika.py b/jakob_hanika.py
index f881afd..f9629d4 100644
--- a/jakob_hanika.py
+++ b/jakob_hanika.py
@@ -6,28 +6,40 @@ from colour.difference import *
# A single wavelength grid (360--830 nm, 1 nm steps) is used throughout.
wvl = np.arange(360, 830)
# Dimensionless ("primed") wavelengths: the same grid mapped onto 0--1,
# which is the domain the model polynomial is expressed in.
wvlp = (wvl - 360) / (830 - 360)
# Spectral reflectivity model from the article: a quadratic in the
# dimensionless wavelength, squashed into (0, 1) by a sigmoid.
def model(wvlp, ccp):
    """Evaluate the model at dimensionless wavelength(s) *wvlp* with
    dimensionless coefficients *ccp* (quadratic, linear, constant)."""
    quad = ccp[0] * wvlp ** 2 + ccp[1] * wvlp + ccp[2]
    return 0.5 + quad / (2 * np.sqrt(1 + quad ** 2))
# Create a SpectralDistribution from the given model coefficients.
def model_sd(ccp):
    """Sample the model on the module-level wavelength grid and wrap the
    result in a colour-science SpectralDistribution named "Model"."""
    # FIXME: don't hardcode the wavelength grid; there should be a way
    # of creating a SpectralDistribution from the function alone.
    values = model(wvlp, ccp)
    return SpectralDistribution(values, wvl, name="Model")
# The goal is to minimize the color difference between a given distribution
# and the one computed from the model above.
def error_function(ccp, target, ill_sd, ill_xy):
    """Return the CIE 1976 delta E between *target* (a Lab triple) and the
    model spectrum with coefficients *ccp*, viewed under illuminant
    *ill_sd* with white point *ill_xy*."""
    # Note the coefficients are the nondimensionalized ("primed") ones:
    # the polynomial is expressed over 0--1, not 360--830 nm.
    sd = model_sd(ccp)
    xyz = sd_to_XYZ(sd, illuminant=ill_sd) / 100
    Lab = XYZ_to_Lab(xyz, ill_xy)
    return delta_E_CIE1976(target, Lab)
# Map primed (dimensionless) coefficients back to the dimensional form.
# FIXME: is this even useful for anything?
def cc_from_primed(ccp):
    """Undo the 360--830 nm -> 0--1 wavelength substitution, returning the
    coefficients of the equivalent polynomial in plain nanometers."""
    c0 = ccp[0] / 220900
    c1 = ccp[1] / 470 - 36 / 11045 * ccp[0]
    c2 = ccp[2] - 36 / 47 * ccp[1] + 1296 / 2209 * ccp[0]
    return np.array([c0, c1, c2])
# Callback for scipy.optimize.basinhopping: returning True stops the solver.
def cb_basinhopping(x, f, accept):
    """Stop once the error *f* is small enough.  The threshold was chosen
    arbitrarily, as a small fraction of the JND (about 2.3 with this
    metric)."""
    threshold = 0.1
    return f < threshold
# Finds the parameters for Jakob and Hanika's model.
def jakob_hanika(target, ill_sd, ill_xy, ccp0=(0, 0, 0), verbose=True):
    """Fit the model's dimensionless coefficients to *target* (a Lab triple)
    under illuminant *ill_sd* / white point *ill_xy*, starting from *ccp0*.

    Returns (coefficients, delta_E).
    """
    # First, a conventional solver is called.  For 'yellow green' this
    # actually fails: it gets stuck at a local minimum far away from the
    # global one.
    # FIXME: better parameters, a better starting point, a better method?
    # FIXME: stop iterating as soon as delta E is negligible
    opt = optimize.minimize(
        error_function, ccp0, (target, ill_sd, ill_xy),
        bounds=[[-300, 300]] * 3,
        options={"disp": verbose}
    )
    if verbose:
        print(opt)
    error = error_function(opt.x, target, ill_sd, ill_xy)
    if verbose:
        print("Delta E is %g" % error)

    # Basin hopping is far more likely to find the actual minimum we're
    # looking for, but it's extremely slow in comparison.
    if error > 0.1:
        if verbose:
            print("Error too large, trying global optimization")
        opt = optimize.basinhopping(
            lambda ccp: error_function(ccp, target, ill_sd, ill_xy),
            # BUG FIX: the diff renamed the parameter x0 -> ccp0 but kept
            # passing the now-undefined name `x0` here, which would raise a
            # NameError whenever this branch runs.
            ccp0, disp=verbose, callback=cb_basinhopping
        )
        if verbose:
            print(opt)
        error = error_function(opt.x, target, ill_sd, ill_xy)
        if verbose:
            print("Global delta E is %g" % error)

    return opt.x, error