Change splitting/merging channels

main
Haohua-Lyu 3 years ago
parent 8300993446
commit 26cfbb1fa6

@@ -107,8 +107,6 @@ class merge
float lambda_shot, \
float lambda_read);
std::vector<ushort> getChannels(cv::Mat input_image); //helper function
//temporal denoise
std::vector<cv::Mat> temporal_denoise(std::vector<cv::Mat> reference_tiles, std::vector<cv::Mat> reference_tiles_DFT, std::vector<float> noise_varaince);
std::vector<cv::Mat> spatial_denoise(std::vector<cv::Mat> reference_tiles, std::vector<cv::Mat> reference_tiles_DFT, std::vector<float> noise_varaince);

@@ -232,5 +232,59 @@ void print_img( const cv::Mat& img, int img_height = -1, int img_width = -1 )
printf("\n");
}
/**
* @brief Extract the R, G1, G2, and B channels separately from a Bayer image
*
* @tparam T data type of the Bayer image.
* @param bayer_img input Bayer mosaic (RGGB pattern, R at (0, 0)) with even width and height.
* @param red_img, green_img1, green_img2, blue_img output channel planes, each half the Bayer resolution.
* OpenCV internally maintains reference counts, so this step won't create deep-copy overhead.
*
* @example extract_rgb_fmom_bayer<uint16_t>( bayer_img, red_img, green_img1, green_img2, blue_img );
*/
template <typename T>
void extract_rgb_fmom_bayer( const cv::Mat& bayer_img, \
cv::Mat& red_img, cv::Mat& green_img1, cv::Mat& green_img2, cv::Mat& blue_img )
{
const T* bayer_img_ptr = (const T*)bayer_img.data;
int bayer_width = bayer_img.size().width;
int bayer_height = bayer_img.size().height;
int bayer_step = bayer_img.step1();
if ( bayer_width % 2 != 0 || bayer_height % 2 != 0 )
{
throw std::runtime_error("Bayer image size incorrect: width and height must be multiples of 2\n");
}
// RGB image is half the size of bayer image
int rgb_width = bayer_width / 2;
int rgb_height = bayer_height / 2;
red_img.create( rgb_height, rgb_width, bayer_img.type() );
green_img1.create( rgb_height, rgb_width, bayer_img.type() );
green_img2.create( rgb_height, rgb_width, bayer_img.type() );
blue_img.create( rgb_height, rgb_width, bayer_img.type() );
int rgb_step = red_img.step1();
T* r_img_ptr = (T*)red_img.data;
T* g1_img_ptr = (T*)green_img1.data;
T* g2_img_ptr = (T*)green_img2.data;
T* b_img_ptr = (T*)blue_img.data;
for ( int rgb_row_i = 0; rgb_row_i < rgb_height; rgb_row_i++ )
{
int rgb_row_i_offset = rgb_row_i * rgb_step;
// Every RGB row corresponds to two Bayer image rows
int bayer_row_i_offset1 = ( rgb_row_i * 2 + 0 ) * bayer_step; // For RG
int bayer_row_i_offset2 = ( rgb_row_i * 2 + 1 ) * bayer_step; // For GB
for ( int rgb_col_j = 0; rgb_col_j < rgb_width; rgb_col_j++ )
{
r_img_ptr[ rgb_row_i_offset + rgb_col_j ] = bayer_img_ptr[ bayer_row_i_offset1 + ( rgb_col_j * 2 + 0 ) ];
g1_img_ptr[ rgb_row_i_offset + rgb_col_j ] = bayer_img_ptr[ bayer_row_i_offset1 + ( rgb_col_j * 2 + 1 ) ];
g2_img_ptr[ rgb_row_i_offset + rgb_col_j ] = bayer_img_ptr[ bayer_row_i_offset2 + ( rgb_col_j * 2 + 0 ) ];
b_img_ptr[ rgb_row_i_offset + rgb_col_j ] = bayer_img_ptr[ bayer_row_i_offset2 + ( rgb_col_j * 2 + 1 ) ];
}
}
}
} // namespace hdrplus

@@ -3,6 +3,7 @@
#include <utility>
#include "hdrplus/merge.h"
#include "hdrplus/burst.h"
#include "hdrplus/utility.h"
namespace hdrplus
{
@@ -19,41 +20,18 @@ namespace hdrplus
// Get padded bayer image
cv::Mat reference_image = burst_images.bayer_images_pad[burst_images.reference_image_idx];
// cv::imwrite("ref.jpg", reference_image);
cv::imwrite("ref.jpg", reference_image);
// Get raw channels
std::vector<ushort> channels[4];
std::vector<cv::Mat> channels(4, cv::Mat::zeros(reference_image.rows / 2, reference_image.cols / 2, CV_16U));
hdrplus::extract_rgb_fmom_bayer<uint16_t>(reference_image, channels[0], channels[1], channels[2], channels[3]);
for (int y = 0; y < reference_image.rows; ++y) {
for (int x = 0; x < reference_image.cols; ++x) {
if (y % 2 == 0) {
if (x % 2 == 0) {
channels[0].push_back(reference_image.at<ushort>(y, x));
}
else {
channels[1].push_back(reference_image.at<ushort>(y, x));
}
}
else {
if (x % 2 == 0) {
channels[2].push_back(reference_image.at<ushort>(y, x));
}
else {
channels[3].push_back(reference_image.at<ushort>(y, x));
}
}
}
}
/////
// For each channel, perform denoising and merge
for (int i = 0; i < 4; ++i) {
// Get channel mat
cv::Mat channel_i(reference_image.rows / 2, reference_image.cols / 2, CV_16U, channels[i].data());
cv::Mat channel_i = channels[i];
// cv::imwrite("ref" + std::to_string(i) + ".jpg", channel_i);
// Apply merging on the channel
//We should gather the individual alternate channels in the same place where we call processChannel with the reference channel in its arguments.
//Possibly add another argument to processChannel for the alternate images' channel_i, using a loop to cover all the other images.
@@ -64,52 +42,56 @@ namespace hdrplus
//get alternate image
cv::Mat alt_image = burst_images.bayer_images_pad[j];
std::vector<ushort> alt_img_channel = getChannels(alt_image); //get channel array from alternate image
cv::Mat alt_channel_i(alt_image.rows / 2, alt_image.cols / 2, CV_16U, alt_img_channel[i].data());
std::vector<cv::Mat> alt_channels(4, cv::Mat::zeros(reference_image.rows / 2, reference_image.cols / 2, CV_16U));
hdrplus::extract_rgb_fmom_bayer<uint16_t>(alt_image, alt_channels[0], alt_channels[1], alt_channels[2], alt_channels[3]);
alternate_channel_i_list.push_back(alt_channel_i)
alternate_channel_i_list.push_back(alt_channels[i]);
}
}
/////
//cv::Mat merged_channel = processChannel(burst_images, alignments, channel_i, lambda_shot, lambda_read);
// Apply merging on the channel
cv::Mat merged_channel = processChannel(burst_images, alignments, channel_i, alternate_channel_i_list, lambda_shot, lambda_read);
// cv::imwrite("merged" + std::to_string(i) + ".jpg", merged_channel);
// Put channel raw data back to channels
channels[i] = merged_channel.reshape(1, merged_channel.total());
merged_channel.convertTo(channels[i], CV_16U);
}
// Write all channels back to a bayer mat
std::vector<ushort> merged_raw;
for (int y = 0; y < reference_image.rows; ++y) {
for (int x = 0; x < reference_image.cols; ++x) {
if (y % 2 == 0) {
if (x % 2 == 0) {
merged_raw.push_back(channels[0][(y / 2) * (reference_image.cols / 2) + (x / 2)]);
}
else {
merged_raw.push_back(channels[1][(y / 2) * (reference_image.cols / 2) + (x / 2)]);
}
cv::Mat merged(reference_image.rows, reference_image.cols, CV_16U);
int x, y;
for (y = 0; y < reference_image.rows; ++y){
uint16_t* row = merged.ptr<uint16_t>(y);
if (y % 2 == 0){
uint16_t* i0 = channels[0].ptr<uint16_t>(y / 2);
uint16_t* i1 = channels[1].ptr<uint16_t>(y / 2);
for (x = 0; x < reference_image.cols;){
//R
row[x] = i0[x / 2];
x++;
//G1
row[x] = i1[x / 2];
x++;
}
else {
if (x % 2 == 0) {
merged_raw.push_back(channels[2][(y / 2) * (reference_image.cols / 2) + (x / 2)]);
}
else {
merged_raw.push_back(channels[3][(y / 2) * (reference_image.cols / 2) + (x / 2)]);
}
}
else {
uint16_t* i2 = channels[2].ptr<uint16_t>(y / 2);
uint16_t* i3 = channels[3].ptr<uint16_t>(y / 2);
for(x = 0; x < reference_image.cols;){
//G2
row[x] = i2[x / 2];
x++;
//B
row[x] = i3[x / 2];
x++;
}
}
}
// Create merged mat
cv::Mat merged(reference_image.rows, reference_image.cols, CV_16U, merged_raw.data());
// cv::imwrite("merged.jpg", merged);
// Remove padding
std::vector<int> padding = burst_images.padding_info_bayer;
cv::Range horizontal = cv::Range(padding[2], reference_image.cols - padding[3]);
@@ -203,14 +185,14 @@ namespace hdrplus
}
// TODO: 4.2 Temporal Denoising
std::vector<cv::Mat> temporal_denoised_tiles = temporal_denoise(reference_tiles, reference_tiles_DFT, noise_varaince)
//std::vector<cv::Mat> temporal_denoised_tiles = temporal_denoise(reference_tiles, reference_tiles_DFT, noise_varaince)
// TODO: 4.3 Spatial Denoising
////adding after here
std::vector<cv::Mat> spatial_denoised_tiles = spatial_denoise( reference_tiles, reference_tiles_DFT, noise_varaince)
//std::vector<cv::Mat> spatial_denoised_tiles = spatial_denoise( reference_tiles, reference_tiles_DFT, noise_varaince)
//apply the cosineWindow2D over the merged_channel_tiles_spatial and reconstruct the image
//reference_tiles = spatial_denoised_tiles; //now reference tiles are temporally and spatially denoised
////
@@ -225,10 +207,6 @@ namespace hdrplus
}
reference_tiles = denoised_tiles;
// 4.4 Cosine Window Merging
// Process tiles through 2D cosine window
std::vector<cv::Mat> windowed_tiles;
@@ -240,136 +218,104 @@ namespace hdrplus
return mergeTiles(windowed_tiles, channel_image.rows, channel_image.cols);
}
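For reference, since the windowing loop itself falls outside this hunk, here is a minimal sketch of the 2D cosine windowing step mentioned above. It uses OpenCV's Hann (raised-cosine) window as a stand-in for the project's cosineWindow2D; the helper name, signature, and exact window shape are assumptions, not the project's actual implementation.
// Sketch only: taper each tile with a 2D raised-cosine (Hann) window before
// overlap-add merging. cv::createHanningWindow stands in for the project's
// own cosine window; names and the tile_size parameter are illustrative.
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

std::vector<cv::Mat> apply_cosine_window( const std::vector<cv::Mat>& tiles, int tile_size )
{
    cv::Mat window;
    cv::createHanningWindow( window, cv::Size( tile_size, tile_size ), CV_32F );

    std::vector<cv::Mat> windowed_tiles;
    windowed_tiles.reserve( tiles.size() );
    for ( const cv::Mat& tile : tiles )
    {
        cv::Mat tile_f, windowed;
        tile.convertTo( tile_f, CV_32F );
        cv::multiply( tile_f, window, windowed ); // element-wise taper toward tile borders
        windowed_tiles.push_back( windowed );
    }
    return windowed_tiles;
}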
//Helper function to get the channels from the input image
std::vector<ushort> getChannels(cv::Mat input_image){
std::vector<ushort> channels[4];
for (int y = 0; y < input_image.rows; ++y) {
for (int x = 0; x < input_image.cols; ++x) {
if (y % 2 == 0) {
if (x % 2 == 0) {
channels[0].push_back(input_image.at<ushort>(y, x));
}
else {
channels[1].push_back(input_image.at<ushort>(y, x));
}
}
else {
if (x % 2 == 0) {
channels[2].push_back(input_image.at<ushort>(y, x));
}
else {
channels[3].push_back(input_image.at<ushort>(y, x));
}
}
}
}
return channels;
}
//We should gather the individual alternate channels in the same place where we call processChannel with the reference channel in its arguments.
std::vector<cv::Mat> temporal_denoise(std::vector<cv::Mat> reference_tiles, std::vector<cv::Mat> reference_tiles_DFT, std::vector<float> noise_varaince) {
//goal: temporally denoise using the Wiener filter
//input:
//1. array of 2D DFT tiles of the reference image
//2. array of 2D DFT tiles of the aligned alternate image
//3. estimated noise variance
//4. temporal factor
//return: merged image patches dft
// std::vector<cv::Mat> temporal_denoise(std::vector<cv::Mat> reference_tiles, std::vector<cv::Mat> reference_tiles_DFT, std::vector<float> noise_varaince) {
// //goal: temporally denoise using the Wiener filter
// //input:
// //1. array of 2D DFT tiles of the reference image
// //2. array of 2D DFT tiles of the aligned alternate image
// //3. estimated noise variance
// //4. temporal factor
// //return: merged image patches dft
//tile_size = TILE_SIZE;
// //tile_size = TILE_SIZE;
double temporal_factor = 8.0 //8 by default
// double temporal_factor = 8.0 //8 by default
double temporal_noise_scaling = (pow(TILE_SIZE,2) * (1.0/16*2))*temporal_factor;
// double temporal_noise_scaling = (pow(TILE_SIZE,2) * (1.0/16*2))*temporal_factor;
//start calculating the merged image tiles fft
// //start calculating the merged image tiles fft
//get the tiles of the alternate image as a list
// //get the tiles of the alternate image as a list
std::vector<std::vector<cv::Mat>> alternate_channel_i_tile_list; //list of alt channel tiles
std::vector<std::vector<cv::Mat>> alternate_tiles_DFT_list; //list of alt channel tiles
// std::vector<std::vector<cv::Mat>> alternate_channel_i_tile_list; //list of alt channel tiles
// std::vector<std::vector<cv::Mat>> alternate_tiles_DFT_list; //list of alt channel tiles
for (auto alt_img_channel : alternate_channel_i_list) {
std::vector<ushort> alt_img_channel_tile = getReferenceTiles(alt_img_channel); //get tiles from alt image
alternate_channel_i_tile_list.push_back(alt_img_channel_tile)
// for (auto alt_img_channel : alternate_channel_i_list) {
// std::vector<uint16_t> alt_img_channel_tile = getReferenceTiles(alt_img_channel); //get tiles from alt image
// alternate_channel_i_tile_list.push_back(alt_img_channel_tile)
std::vector<cv::Mat> alternate_tiles_DFT_list;
for (auto alt_tile : alt_img_channel_tile) {
cv::Mat alt_tile_DFT;
alt_tile.convertTo(alt_tile_DFT, CV_32F);
cv::dft(alt_tile_DFT, alt_tile_DFT, cv::DFT_SCALE | cv::DFT_COMPLEX_OUTPUT);
alternate_tiles_DFT_list.push_back(alt_tile_DFT);
}
alternate_tiles_DFT_list.push_back(alternate_tiles_DFT);
}
// std::vector<cv::Mat> alternate_tiles_DFT_list;
// for (auto alt_tile : alt_img_channel_tile) {
// cv::Mat alt_tile_DFT;
// alt_tile.convertTo(alt_tile_DFT, CV_32F);
// cv::dft(alt_tile_DFT, alt_tile_DFT, cv::DFT_SCALE | cv::DFT_COMPLEX_OUTPUT);
// alternate_tiles_DFT_list.push_back(alt_tile_DFT);
// }
// alternate_tiles_DFT_list.push_back(alternate_tiles_DFT);
// }
//get the dft of the alternate image
//std::vector<cv::Mat> alternate_tiles_DFT;
// //get the dft of the alternate image
// //std::vector<cv::Mat> alternate_tiles_DFT;
//std::vector<cv::Mat> tile_differences = reference_tiles_DFT - alternate_tiles_DFT_list;
// //std::vector<cv::Mat> tile_differences = reference_tiles_DFT - alternate_tiles_DFT_list;
//find reference_tiles_DFT - alternate_tiles_DFT_list
std::vector<std::vector<cv::Mat>> tile_difference_list; //list of tile differences
for (auto individual_alternate_tile_DFT : alternate_tiles_DFT_list) {
std::vector<cv::Mat> single_tile_difference = reference_tiles_DFT - individual_alternate_tile_DFT;
tile_difference_list.push_back(single_tile_difference);
}
// //find reference_tiles_DFT - alternate_tiles_DFT_list
// std::vector<std::vector<cv::Mat>> tile_difference_list; //list of tile differences
// for (auto individual_alternate_tile_DFT : alternate_tiles_DFT_list) {
// std::vector<cv::Mat> single_tile_difference = reference_tiles_DFT - individual_alternate_tile_DFT;
// tile_difference_list.push_back(single_tile_difference);
// }
// std::vector<cv::Mat> tile_sq_asolute_diff = tile_differences; //squared absolute difference is tile_differences.real**2 + tile_differnce.imag**2; //also tile_dist
// // std::vector<cv::Mat> tile_sq_asolute_diff = tile_differences; //squared absolute difference is tile_differences.real**2 + tile_differnce.imag**2; //also tile_dist
std::vector<cv::Mat> tile_sq_asolute_diff = tile_differences; //squared absolute difference is tile_differences.real**2 + tile_differnce.imag**2; //also tile_dist
// std::vector<cv::Mat> tile_sq_asolute_diff = tile_differences; //squared absolute difference is tile_differences.real**2 + tile_differnce.imag**2; //also tile_dist
//get the real and imaginary components
/*
std::vector<std::vector<cv::Mat>> absolute_difference_list;
for (auto individual_difference : tile_difference_list) {
for (int i =0; i < individual_difference.rows; i++ ) {
std::complex<double>* row_ptr = tile_sq_asolute_diff.ptr<std::complex<double>>(i);
for (int j = 0; j< individual_difference.cols*individual_difference.channels(); j++) {
row_ptr = math.pow(individual_difference.at<std::complex<double>>(i,j).real(),2)+math.pow(individual_difference.at<std::complex<double>>(i,j).imag(),2); //.real and .imag
}
}
// //get the real and imaginary components
// /*
// std::vector<std::vector<cv::Mat>> absolute_difference_list;
// for (auto individual_difference : tile_difference_list) {
// for (int i =0; i < individual_difference.rows; i++ ) {
// std::complex<double>* row_ptr = tile_sq_asolute_diff.ptr<std::complex<double>>(i);
// for (int j = 0; j< individual_difference.cols*individual_difference.channels(); j++) {
// row_ptr = math.pow(individual_difference.at<std::complex<double>>(i,j).real(),2)+math.pow(individual_difference.at<std::complex<double>>(i,j).imag(),2); //.real and .imag
// }
// }
//std::vector<cv::Mat> single_tile_difference = individual_difference.at<std::complex<double>>(0,0).real(); //.real and .imag
absolute_difference_list.push_back(single_tile_difference);
}
*/
// //std::vector<cv::Mat> single_tile_difference = individual_difference.at<std::complex<double>>(0,0).real(); //.real and .imag
// absolute_difference_list.push_back(single_tile_difference);
// }
// */
//find the squared absolute difference across all the tiles
// //find the squared absolute difference across all the tiles
std::vector<cv::Mat> A = tile_sq_asolute_diff/(tile_sq_asolute_diff+noise_variance)
// std::vector<cv::Mat> A = tile_sq_asolute_diff/(tile_sq_asolute_diff+noise_variance)
std::vector<cv::Mat> merged_image_tiles_fft = alternate_tiles_DFT_list + A * tile_differences;
// std::vector<cv::Mat> merged_image_tiles_fft = alternate_tiles_DFT_list + A * tile_differences;
return merged_image_tiles_fft
// return merged_image_tiles_fft
}
// }
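Since the temporal path above is currently commented out, here is a minimal per-tile sketch of the temporal Wiener merge it describes: merged = alt + A * (ref - alt), with A = |ref - alt|^2 / (|ref - alt|^2 + scaled noise variance). The function name is illustrative, and the noise scaling shown in the comments above is assumed to be precomputed by the caller.
// Sketch only: temporal Wiener merge of one aligned alternate tile DFT into
// the reference tile DFT. Both inputs are CV_32FC2 (complex) tiles of equal size.
#include <opencv2/core.hpp>
#include <vector>

cv::Mat temporal_wiener_merge( const cv::Mat& ref_dft, const cv::Mat& alt_dft, float scaled_noise_variance )
{
    cv::Mat diff = ref_dft - alt_dft; // complex difference per frequency bin

    // Squared magnitude of the difference: real^2 + imag^2
    std::vector<cv::Mat> planes;
    cv::split( diff, planes );
    cv::Mat sq_abs_diff = planes[0].mul( planes[0] ) + planes[1].mul( planes[1] );

    // Shrinkage factor A in [0, 1): close to 1 where the frames disagree strongly,
    // so the merge falls back to the reference and avoids ghosting
    cv::Mat denom = sq_abs_diff + scaled_noise_variance;
    cv::Mat A;
    cv::divide( sq_abs_diff, denom, A );

    // Apply the real-valued A to both complex planes of the difference
    cv::Mat A_complex;
    cv::merge( std::vector<cv::Mat>{ A, A }, A_complex );
    return alt_dft + A_complex.mul( diff );
}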
std::vector<cv::Mat> spatial_denoise(std::vector<cv::Mat> reference_tiles, std::vector<cv::Mat> reference_tiles_DFT, std::vector<float> noise_varaince) {
// std::vector<cv::Mat> spatial_denoise(std::vector<cv::Mat> reference_tiles, std::vector<cv::Mat> reference_tiles_DFT, std::vector<float> noise_varaince) {
double spatial_factor = 1; //to be added
double spatial_noise_scaling = (pow(TILE_SIZE,2) * (1.0/16*2))*spatial_factor;
// double spatial_factor = 1; //to be added
// double spatial_noise_scaling = (pow(TILE_SIZE,2) * (1.0/16*2))*spatial_factor;
//calculate the spatial denoising
spatial_tile_dist = reference_tiles.real**2 + reference_tiles.imag**2;
std::vector<cv::Mat> WienerCoeff = denoised_tiles*spatial_noise_scaling*noise_variance;
// //calculate the spatial denoising
// spatial_tile_dist = reference_tiles.real**2 + reference_tiles.imag**2;
// std::vector<cv::Mat> WienerCoeff = denoised_tiles*spatial_noise_scaling*noise_variance;
merged_channel_tiles_spatial = reference_tiles*spatial_tile_dist/(spatial_tile_dist+WienerCoeff)
// merged_channel_tiles_spatial = reference_tiles*spatial_tile_dist/(spatial_tile_dist+WienerCoeff)
}
// }
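Similarly, a minimal per-tile sketch of the spatial Wiener shrinkage described in the commented block above: each DFT coefficient X is scaled by |X|^2 / (|X|^2 + scaled noise variance). As before, the name is illustrative and the spatial noise scaling is assumed to be precomputed by the caller.
// Sketch only: spatial Wiener shrinkage of a single CV_32FC2 (complex) tile DFT.
#include <opencv2/core.hpp>
#include <vector>

cv::Mat spatial_wiener_shrink( const cv::Mat& tile_dft, float scaled_noise_variance )
{
    // Squared magnitude per frequency bin: real^2 + imag^2
    std::vector<cv::Mat> planes;
    cv::split( tile_dft, planes );
    cv::Mat sq_mag = planes[0].mul( planes[0] ) + planes[1].mul( planes[1] );

    // Wiener gain in [0, 1): small where the coefficient energy is near the noise floor
    cv::Mat denom = sq_mag + scaled_noise_variance;
    cv::Mat gain;
    cv::divide( sq_mag, denom, gain );

    // Apply the real-valued gain to both complex planes
    cv::Mat gain_complex;
    cv::merge( std::vector<cv::Mat>{ gain, gain }, gain_complex );
    return gain_complex.mul( tile_dft );
}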
std::pair<double, double> merge::getNoiseParams( int ISO, \
