summaryrefslogtreecommitdiff
path: root/jakob_hanika.py
diff options
context:
space:
mode:
authorPaweł Redman <pawel.redman@gmail.com>2020-06-12 10:13:05 +0200
committerPaweł Redman <pawel.redman@gmail.com>2020-06-12 10:13:05 +0200
commit08b0124c835c9705cccf266fa933dea942ba02f3 (patch)
tree23367b8c7f01e04be21ca9155d13f579c1ef85a0 /jakob_hanika.py
parent552877042007a18128db4313e35221a8faf1b5c0 (diff)
Make sure the solver works for all inputs
This is achieved by using feedback. If the solver fails, it restarts from a color with known coefficients and solves for an intermediate point between that color and the target. The process is repeated, moving closer and closer to the target until the solver finally converges.
Diffstat (limited to 'jakob_hanika.py')
-rw-r--r--jakob_hanika.py103
1 file changed, 56 insertions, 47 deletions
diff --git a/jakob_hanika.py b/jakob_hanika.py
index f9629d4..af55c94 100644
--- a/jakob_hanika.py
+++ b/jakob_hanika.py
@@ -1,16 +1,17 @@
import numpy as np, scipy.optimize as optimize
from colour import *
-from colour.difference import *
+from colour.difference import delta_E_CIE1976
+from colour.colorimetry import *
# The same wavelength grid is used throughout
-wvl = np.arange(360, 830, 1)
+wvl = np.arange(360, 830, 5)
wvlp = (wvl - 360) / (830 - 360)
# This is the model of spectral reflectivity described in the article.
def model(wvlp, ccp):
- yy = ccp[0] * wvlp ** 2 + ccp[1] * wvlp + ccp[2]
+ yy = ccp[0] * wvlp**2 + ccp[1] * wvlp + ccp[2]
return 1 / 2 + yy / (2 * np.sqrt(1 + yy ** 2))
# Create a SpectralDistribution using given coefficients
@@ -30,54 +31,62 @@ def error_function(ccp, target, ill_sd, ill_xy):
-# Map primed (dimensionless) coefficients to normal
-# FIXME: is this even useful for anything?
-def cc_from_primed(ccp):
- return np.array([
- ccp[0] / 220900,
- ccp[1] / 470 - 36/11045 * ccp[0],
- ccp[2] - 36/47 * ccp[1] + 1296/2209 * ccp[0]
- ])
-
+# Finds the parameters for Jakob and Hanika's model
+def jakob_hanika(target_XYZ, ill_sd, ill_xy, ccp0=(0, 0, 0), verbose=True, try_hard=True):
+ def do_optimize(XYZ, ccp0):
+ Lab = XYZ_to_Lab(XYZ, ill_xy)
+ opt = optimize.minimize(
+ error_function, ccp0, (Lab, ill_sd, ill_xy),
+ method="Nelder-Mead", options={"disp": verbose}
+ )
+ if verbose:
+ print(opt)
+ return opt
-# This callback to scipy.optimize.basinhopping tells the solver to stop once
-# the error is small enough. The threshold was chosen arbitrarily, as a small
-# fraction of the JND (about 2.3 with this metric).
-def cb_basinhopping(x, f, accept):
- return f < 0.1
+ # A special case that's hard to solve numerically
+ if np.allclose(target_XYZ, [0, 0, 0]):
+ return np.array([0, 0, -1e+9]), 0 # FIXME: dtype?
-# Finds the parameters for Jakob and Hanika's model
-def jakob_hanika(target, ill_sd, ill_xy, ccp0=(0, 0, 0), verbose=True):
- # First, a conventional solver is called. For 'yellow green' this
- # actually fails: gets stuck at a local minimum that's far away
- # from the global one.
- # FIXME: better parameters, a better x0, a better method?
- # FIXME: stop iterating as soon as delta E is negligible
- opt = optimize.minimize(
- error_function, ccp0, (target, ill_sd, ill_xy),
- bounds=[[-300, 300]] * 3,
- options={"disp": verbose}
- )
if verbose:
- print(opt)
+ print("Trying the target directly, XYZ=%s" % target_XYZ)
+ opt = do_optimize(target_XYZ, ccp0)
+ if opt.fun < 0.1 or not try_hard:
+ return opt.x, opt.fun
- error = error_function(opt.x, target, ill_sd, ill_xy)
- if verbose:
- print("Delta E is %g" % error)
+ good_XYZ = (1/3, 1/3, 1/3)
+ good_ccp = (2.1276356, -1.07293026, -0.29583292) # FIXME: valid only for D65
- # Basin hopping is far more likely to find the actual minimum we're
- # looking for, but it's extremely slow in comparison.
- if error > 0.1:
+ divisions = 3
+ while divisions < 10:
if verbose:
- print("Error too large, trying global optimization")
- opt = optimize.basinhopping(
- lambda cc: error_function(cc, target, ill_sd, ill_xy),
- x0, disp=verbose, callback=cb_basinhopping
- )
- if verbose:
- print(opt)
- error = error_function(opt.x, target, ill_sd, ill_xy)
- if verbose:
- print("Global delta E is %g" % error)
+ print("Trying with %d divisions" % divisions)
+
+ keep_divisions = False
+ ref_XYZ = good_XYZ
+ ref_ccp = good_ccp
+
+ ccp0 = ref_ccp
+ for i in range(1, divisions):
+ intermediate_XYZ = ref_XYZ + (target_XYZ - ref_XYZ) * i / (divisions - 1)
+ if verbose:
+ print("Intermediate step %d/%d, XYZ=%s with ccp0=%s" %
+ (i + 1, divisions, intermediate_XYZ, ccp0))
+ opt = do_optimize(intermediate_XYZ, ccp0)
+ if opt.fun > 0.1:
+ if verbose:
+ print("WARNING: intermediate optimization failed")
+ break
+ else:
+ good_XYZ = intermediate_XYZ
+ good_ccp = opt.x
+ keep_divisions = True
+
+ ccp0 = opt.x
+ else:
+ return opt.x, opt.fun
+
+ if not keep_divisions:
+ divisions += 2
- return opt.x, error
+ raise Exception("optimization failed for target_XYZ=%s, ccp0=%s" \
+ % (target_XYZ, ccp0))