Programming help: How to avoid colour banding in polynomial interpolation background extraction

Hello Everyone,
I have a code that takes some “nodes”, or “samples” in an image. With these nodes I build a synthetic background image using polynomial interpolation.
My problem is that for some images with a small dynamic range, I get colour banding in my result. However, my source does not show any colour banding.

The image source is a 16-bit unsigned short that I convert into 64-bit (double) before the main computation. If I were staying in 16-bit I would understand the issue, but here I don’t understand.

Is there a way to avoid this colour banding?

Cheers,

Here an example of this banding on a monochrome image: https://i.stack.imgur.com/aKlM3.png

//C contains background function
#define C(i) (gsl_vector_get(c,(i)))

/* Evaluate the 4th-order (quartic) bivariate background polynomial at (x, y).
 * Coefficient order matches the columns of the Jacobian built in
 * computeBackground. Terms are accumulated one at a time, left to right,
 * which preserves the exact floating-point summation order. */
static double poly_4(gsl_vector *c, double x, double y) {
	  double acc = C(0) * 1.0;            /* constant term */
	  acc += C(1) * x;
	  acc += C(2) * y;
	  acc += C(3) * x * x;
	  acc += C(4) * y * x;
	  acc += C(5) * y * y;
	  acc += C(6) * x * x * x;
	  acc += C(7) * x * x * y;
	  acc += C(8) * x * y * y;
	  acc += C(9) * y * y * y;
	  acc += C(10) * x * x * x * x;
	  acc += C(11) * x * x * x * y;
	  acc += C(12) * x * x * y * y;
	  acc += C(13) * x * y * y * y;
	  acc += C(14) * y * y * y * y;

	  return acc;
}

/* Evaluate the 3rd-order (cubic) bivariate background polynomial at (x, y).
 * Terms are accumulated one at a time, left to right, preserving the exact
 * floating-point summation order. */
static double poly_3(gsl_vector *c, double x, double y) {
	  double acc = C(0) * 1.0;            /* constant term */
	  acc += C(1) * x;
	  acc += C(2) * y;
	  acc += C(3) * x * x;
	  acc += C(4) * y * x;
	  acc += C(5) * y * y;
	  acc += C(6) * x * x * x;
	  acc += C(7) * x * x * y;
	  acc += C(8) * x * y * y;
	  acc += C(9) * y * y * y;

	  return acc;
}

/* Evaluate the 2nd-order (quadratic) bivariate background polynomial at
 * (x, y), accumulating term by term in the same left-to-right order. */
static double poly_2(gsl_vector *c, double x, double y) {
	  double acc = C(0) * 1.0;            /* constant term */
	  acc += C(1) * x;
	  acc += C(2) * y;
	  acc += C(3) * x * x;
	  acc += C(4) * y * x;
	  acc += C(5) * y * y;

	  return acc;
}

/* Evaluate the 1st-order (planar) bivariate background polynomial at (x, y). */
static double poly_1(gsl_vector *c, double x, double y) {
	  return C(0) * 1.0 + C(1) * x + C(2) * y;
}

/*
 * Fit a 2-D polynomial of the requested order to the background samples in
 * 'list' (weighted linear least squares via GSL), then evaluate the fitted
 * surface over the full width x height grid.
 *
 * Returns a malloc'd array of width*height doubles (row-major: index
 * j + i * width for row i, column j). Ownership transfers to the caller,
 * who must free() it. Returns NULL on empty input, allocation failure, or
 * fit failure.
 *
 * Fixes over the previous version:
 *  - skipping a sample (pixel < 0) no longer loops forever: the list
 *    pointer is advanced before the 'continue';
 *  - all GSL objects are released on every path (the old code leaked
 *    J/y/w/c/cov/work on fit failure, and c/cov even on success);
 *  - malloc and GSL allocations are checked.
 */
static double *computeBackground(GSList *list, size_t width, size_t height, poly_order order) {
	  size_t n, i, j;
	  size_t k = 0;
	  double chisq, pixel;
	  double row, col;
	  gsl_matrix *J = NULL, *cov = NULL;
	  gsl_vector *y = NULL, *w = NULL, *c = NULL;
	  gsl_multifit_linear_workspace *work = NULL;
	  double *background = NULL;

	  n = g_slist_length(list);
	  if (n == 0)
		  return NULL;	/* nothing to fit */

	  int nbParam;
	  switch (order) {
	  case POLY_1:
		  nbParam = NPARAM_POLY1;
		  break;
	  case POLY_2:
		  nbParam = NPARAM_POLY2;
		  break;
	  case POLY_3:
		  nbParam = NPARAM_POLY3;
		  break;
	  case POLY_4:
	  default:
		  nbParam = NPARAM_POLY4;
	  }

	  /* Must turn off the GSL error handler or it aborts on error. Do it
	   * before any GSL allocation so a failed calloc returns NULL instead
	   * of aborting, and we can clean up ourselves. */
	  gsl_set_error_handler_off();

	  /* J is the Jacobian (design matrix); y contains the sample
	   * intensities; w the per-sample weights. */
	  J = gsl_matrix_calloc(n, nbParam);
	  y = gsl_vector_calloc(n);
	  w = gsl_vector_calloc(n);
	  c = gsl_vector_calloc(nbParam);
	  cov = gsl_matrix_calloc(nbParam, nbParam);
	  if (!J || !y || !w || !c || !cov)
		  goto cleanup;

	  while (list) {
		  background_sample *sample = (background_sample *) list->data;

		  /* Advance the iterator NOW: the skip path below uses
		   * 'continue', and advancing at the bottom of the loop would
		   * spin forever on a skipped sample. */
		  list = list->next;

		  col = sample->position.x;
		  row = sample->position.y;
		  pixel = sample->median;
		  /* A box with no usable value (e.g. threshold too low) is
		   * skipped: its row in J/y stays zeroed and its weight stays 0
		   * (calloc), so the weighted fit ignores it. */
		  if (pixel < 0)
			  continue;

		  gsl_matrix_set(J, k, 0, 1.0);
		  gsl_matrix_set(J, k, 1, col);
		  gsl_matrix_set(J, k, 2, row);

		  if (order != POLY_1) {
			  gsl_matrix_set(J, k, 3, col * col);
			  gsl_matrix_set(J, k, 4, col * row);
			  gsl_matrix_set(J, k, 5, row * row);
		  }

		  if (order == POLY_3 || order == POLY_4) {
			  gsl_matrix_set(J, k, 6, col * col * col);
			  gsl_matrix_set(J, k, 7, col * col * row);
			  gsl_matrix_set(J, k, 8, col * row * row);
			  gsl_matrix_set(J, k, 9, row * row * row);
		  }

		  if (order == POLY_4) {
			  gsl_matrix_set(J, k, 10, col * col * col * col);
			  gsl_matrix_set(J, k, 11, col * col * col * row);
			  gsl_matrix_set(J, k, 12, col * col * row * row);
			  gsl_matrix_set(J, k, 13, col * row * row * row);
			  gsl_matrix_set(J, k, 14, row * row * row * row);
		  }

		  gsl_vector_set(y, k, pixel);
		  gsl_vector_set(w, k, 1.0);

		  k++;
	  }

	  work = gsl_multifit_linear_alloc(n, nbParam);
	  if (!work)
		  goto cleanup;

	  int status = gsl_multifit_wlinear(J, w, y, c, cov, &chisq, work);
	  gsl_multifit_linear_free(work);
	  work = NULL;
	  if (status != GSL_SUCCESS) {
		  printf("GSL multifit error: %s\n", gsl_strerror(status));
		  goto cleanup;
	  }

	  /* Evaluate the fitted polynomial on the full image grid; the result
	   * has the same dimensions as the input image. */
	  background = malloc(height * width * sizeof(double));
	  if (!background)
		  goto cleanup;

	  for (i = 0; i < height; i++) {
		  for (j = 0; j < width; j++) {
			  switch (order) {
			  case POLY_1:
				  pixel = poly_1(c, (double) j, (double) i);
				  break;
			  case POLY_2:
				  pixel = poly_2(c, (double) j, (double) i);
				  break;
			  case POLY_3:
				  pixel = poly_3(c, (double) j, (double) i);
				  break;
			  case POLY_4:
			  default:
				  pixel = poly_4(c, (double) j, (double) i);
			  }
			  background[j + i * width] = pixel;
		  }
	  }

cleanup:
	  /* NULL guards: older GSL free functions are not documented as
	   * NULL-safe, unlike free(). */
	  if (J)
		  gsl_matrix_free(J);
	  if (y)
		  gsl_vector_free(y);
	  if (w)
		  gsl_vector_free(w);
	  if (c)
		  gsl_vector_free(c);
	  if (cov)
		  gsl_matrix_free(cov);
	  return background;
}

No time to go deep into the math, but if you have a narrow set of inputs you get a narrow set of outputs, and in images this often leads to banding. A common fix is to add some random noise to the input (aka “dithering”).

mmmh I know dithering but the thing is I will have a more noisy image, and I don’t want it :confused:

With the right amount you won’t get much noise and remove much banding.

@Ofnuts: any idea on how to implement it with the right amount?

Without experimenting, no, but I would start with max noise values about half the difference between consecutive input values.

I’ve started to implement a dither algorithm. Indeed, results are … surprising !!! Thanks a lot.

1 Like