// NOTE: this file was recovered from a scraped package listing; the
// original navigation text ("Sourcecode: albumshaper ... Download package")
// has been removed so the file compiles.


//  copyright            : (C) 2003-2005 by Will Stokes
//  This program is free software; you can redistribute it
//  and/or modify it under the terms of the GNU General
//  Public License as published by the Free Software
//  Foundation; either version 2 of the License, or
//  (at your option) any later version.

//Systemwide includes
#include <qimage.h>
#include <qstring.h>

//Projectwide includes
#include "sharpen.h"
#include "blur.h"
#include "../tools/imageTools.h"

// Inputs:
// -------
// QImage& image - image to blur
// float sigma - how much to blur it
// QPoint offset - offset within edge image we're working on
// QSize fullImageRes - resolution of the full size image
// QImage* edgeImage - an edge image constructing using the full size image
// bool blurEdges    - are we sharpening edges or regions
// Outputs:
// --------
// Nothing returned, we'll modify the image passed by reference in place
// Description:
// ------------
// The common approach to sharpening images is subtract a 
// blurred version of an image using the following equation:
// v' = 2*v - vBlur
// ...where v is the original value (luminance) for a given pixel, 
// vBlur is the blurred value, and v' is the end result.
// While one could apply this blur-subtraction in the individual color channels
// you will likely encounter strange artifacts at color channel boundaries where new
// colors are introduced. Sharpening in the value/luminance domain helps bring out
// image contrast without introducing color artifacts.
// Unfortunately, sharpening using this approach will magnify all image contrast, both
// somewhat strong edges and low level noise. We'd like to be able to aggressively sharpen
// images without magnifying CCD/film grain noise, but how?
// A somewhat popular solution to this problem is to use an edge image. Constructing edge images
// can be difficult, but when provided such information can tell us when to sharpen and when not to, or
// used more wisely, how to blend the sharpened data with the unsharpened original image data seamlessly.
// Grayscale edge images can be used in this way by first blurring slightly, then dividing the 
// value component of a pixel by 255 to get an alpha value. Near edges the value will be closer to 
// 255 and the resulting alpha will be closer to 1. In between regions where we don't want to 
// enhance noise by sharpening alpha values will be close to 0, preventing aggressively 
// sharpened values from being used.
// Algorithm:
// ----------
// The algorithm works as follows:
// 1.) The input image is blurred using the sigma value. The larger the sigma value 
//     the more the input image is blurred and the more pronounced edges will become.
// 2.) We iterate over each image pixel, fetching the color values of the original and blurred forms
//     of the image, as well as the color of the given pixel within the edge image.
//     An alpha value is computed using the edge image pixel color, which in turn is used
//     to compute the blended pixel value after sharpening:
//     alpha = edgeColor / 255
//     v' = alpha* min( max( 2*v - vBlur, 0 ), 1) + (1-alpha)*v;
//     Finally, we convert the pixel color back to RGB space and write back
//     to the resulting sharpened image.
// This algorithm works and was initially tested without the use of an edge image. When
// no edge image is provided alpha is simply set to 1 and full sharpening is applied
// to every image pixel.
// Future work:
// ------------
// Further work needs to be done regarding bluring/sharpening edges and region independently.
// The "blurEdges" param allows the algorithm to concentrate sharpening on regions instead of
// object boundaries when an edge image is provided; however, such usage is not well understood
// or used at this time.

void sharpenImage( QImage &image, float sigma,
                   QPoint offset, QSize fullImageRes,
                   QImage* edgeImage, bool blurEdges)
{
  //construct blurred copy of the input; its luminance is subtracted
  //from the original's during sharpening (v' = 2v - vBlur)
  QImage blurredImage = image.copy();
  blurImage( blurredImage, sigma );

  //iterate over each pixel and adjust its luminance value
  int x, y;
  QRgb *origRgb, *blurredRgb, *edgeRgb;
  uchar *origScanline;
  uchar *blurredScanline;
  uchar *edgesScanline = NULL;
  for(y=0; y<image.height(); y++)
  {
    origScanline = image.scanLine(y);
    blurredScanline = blurredImage.scanLine(y);

    //map this row onto the corresponding row of the edge image, which
    //was built from the full-size image; offset accounts for this image
    //being a sub-region of the full image
    if( edgeImage != NULL )
    {
      int edgeY = ((edgeImage->height()-1) * (y+offset.y())) / (fullImageRes.height()-1);
      edgesScanline = edgeImage->scanLine(edgeY);
    }

    for(x=0; x<image.width(); x++)
    {
      //get rgb triplets of the original and blurred pixel, normalized to [0,1]
      origRgb = ((QRgb*)origScanline+x);
      double r1 = ((double)qRed(*origRgb)   )/255.0;
      double g1 = ((double)qGreen(*origRgb) )/255.0;
      double b1 = ((double)qBlue(*origRgb)  )/255.0;
      blurredRgb = ((QRgb*)blurredScanline+x);
      double r2 = ((double)qRed(*blurredRgb)   )/255.0;
      double g2 = ((double)qGreen(*blurredRgb) )/255.0;
      double b2 = ((double)qBlue(*blurredRgb)  )/255.0;

      //determine the blending alpha: 1.0 = apply full sharpening,
      //0.0 = keep the original pixel untouched
      float alpha;
      if( edgeImage == NULL )
      {
        //no edge image provided, sharpen every pixel fully
        alpha = 1.0f;
      }
      else
      {
        //sample the edge image (grayscale, so red channel suffices)
        int edgeX = ((edgeImage->width()-1) * (x+offset.x())) / (fullImageRes.width()-1);
        edgeRgb = ((QRgb*)edgesScanline+edgeX);
        alpha = ((float) qRed( *edgeRgb )) / 255.0f;

        //blur regions, not edges: invert alpha so sharpening is
        //concentrated away from object boundaries
        //NOTE(review): condition reconstructed from damaged source -- verify polarity
        if( blurEdges )
          alpha = 1.0f - alpha;
      }

      //convert both pixels to hsv so only luminance (v) is adjusted,
      //avoiding color artifacts at channel boundaries
      double h1,s1,v1;
      RGBtoHSV( r1,g1,b1, &h1,&s1,&v1 );
      double h2,s2,v2;
      RGBtoHSV( r2,g2,b2, &h2,&s2,&v2 );

      //sharpen luminance: v' = alpha*clamp(2v - vBlur, 0, 1) + (1-alpha)*v
      v1  = (alpha * QMIN( QMAX(2*v1 - v2, 0), 1.0 )) + (1-alpha)*v1;

      //convert adjusted color back to rgb colorspace and clamp
      HSVtoRGB( &r1,&g1,&b1, h1,s1,v1);         
      int rp = (int) QMIN( QMAX((r1*255), 0), 255 );
      int gp = (int) QMIN( QMAX((g1*255), 0), 255 );
      int bp = (int) QMIN( QMAX((b1*255), 0), 255 );

      //set adjusted color value in place
      *origRgb = qRgb(rp,gp,bp);
    } //x
  } //y
}


// (scraped-page footer removed: "Generated by Doxygen 1.6.0")