|
|
|
|
#include <jni.h>

#include <fcntl.h>
#include <unistd.h>

#include <cerrno>
#include <cstdint>
#include <string>
#include <vector>

#include <omp.h>

#include <android/bitmap.h>
#include <android/hardware_buffer.h>
#include <android/imagedecoder.h>
#include <android/log.h>
#include <media/NdkImage.h>

#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// #include "ncnn/yolov5ncnn.h"
|
|
|
|
|
|
|
|
|
|
#define HDR_TAG "HDR"
|
|
|
|
|
#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, HDR_TAG,__VA_ARGS__)
|
|
|
|
|
#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, HDR_TAG,__VA_ARGS__)
|
|
|
|
|
#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, HDR_TAG, __VA_ARGS__)
|
|
|
|
|
#define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, HDR_TAG, __VA_ARGS__)
|
|
|
|
|
#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, HDR_TAG,__VA_ARGS__)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
namespace cv2
|
|
|
|
|
{
|
|
|
|
|
using namespace cv;
|
|
|
|
|
|
|
|
|
|
// Builds an identity (linear) inverse camera response curve: response[i] == i
// for every channel, over the LDR_SIZE (256) possible 8-bit pixel values.
//
// @param channels number of channels of the images being merged
// @return LDR_SIZE x 1 CV_32F matrix with `channels` channels
//
// Fix: the original always wrote a Vec3f (3 floats) per element; for
// channels != 3 that overruns the element (and the matrix tail). The fill is
// now done per-channel, which is byte-identical for the 3-channel case.
Mat linearResponseNew(int channels)
{
    Mat response = Mat(LDR_SIZE, 1, CV_MAKETYPE(CV_32F, channels));
    for(int i = 0; i < LDR_SIZE; i++) {
        float* row = response.ptr<float>(i);
        for(int c = 0; c < channels; c++) {
            row[c] = static_cast<float>(i);
        }
    }
    return response;
}
|
|
|
|
|
|
|
|
|
|
// Builds the triangle ("hat") weighting curve over all LDR_SIZE 8-bit pixel
// values: weights rise from 1 to LDR_SIZE/2 over the lower half and fall
// symmetrically over the upper half, so mid-range pixels dominate the merge.
//
// @return LDR_SIZE x 1 CV_32F weight lookup table
Mat triangleWeightsNew()
{
    Mat hat(LDR_SIZE, 1, CV_32F);
    const int mid = LDR_SIZE / 2;
    for(int v = 0; v < LDR_SIZE; ++v) {
        hat.at<float>(v) = (v < mid) ? static_cast<float>(v + 1)
                                     : static_cast<float>(LDR_SIZE - v);
    }
    return hat;
}
|
|
|
|
|
|
|
|
|
|
// Abstract base for HDR exposure-merging algorithms. Local clone of
// cv::MergeExposures kept in this file so the implementation can be
// instrumented/patched without rebuilding OpenCV.
class CV_EXPORTS_W MergeExposuresNew
{
public:
    virtual ~MergeExposuresNew() {}
    /** @brief Merges images.

    @param src vector of input images
    @param dst result image
    @param times vector of exposure time values for each image
    @param response 256x1 matrix with inverse camera response function for each pixel value, it should
    have the same number of channels as images.
    */
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
                                 InputArray times, InputArray response) = 0;
};
|
|
|
|
|
|
|
|
|
|
// Abstract interface for the Debevec merge (clone of cv::MergeDebevec).
// Adds an overload without a response curve; implementations then fall back
// to a linear camera response.
class CV_EXPORTS_W MergeDebevecNew : public MergeExposuresNew
{
public:
    // Merge using an explicit inverse camera response function.
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
                                 InputArray times, InputArray response) CV_OVERRIDE = 0;
    // Merge assuming a linear camera response.
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
};
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Concrete Debevec HDR merge — a local clone of OpenCV's MergeDebevecImpl
// with ALOGD progress markers added so on-device crashes can be bracketed.
//
// Algorithm: for each exposure, pixels are weighted with a triangle ("hat")
// function, mapped through the log inverse camera response, shifted by the
// log exposure time and accumulated; the weighted average is exponentiated
// to produce the linear radiance map.
class MergeDebevecImplNew CV_FINAL : public MergeDebevecNew
{
public:
    MergeDebevecImplNew() :
        name("MergeDebevecNew"),
        weights(triangleWeightsNew()) // 256x1 hat-function LUT, computed once
    {
    }

    // Merge 8-bit LDR exposures `src` into a CV_32F radiance map `dst`.
    // `input_response` may be empty, in which case a linear response is used.
    void process(InputArrayOfArrays src, OutputArray dst, InputArray _times, InputArray input_response) CV_OVERRIDE
    {
        // CV_INSTRUMENT_REGION();

        ALOGD("HDR Merge 1");
        std::vector<Mat> images;
        src.getMatVector(images);
        Mat times = _times.getMat();

        // Input validation disabled in this fork (checkImageDimensions is an
        // OpenCV-internal helper not available here).
#if 0
        CV_Assert(images.size() == times.total());
        checkImageDimensions(images);
        CV_Assert(images[0].depth() == CV_8U);
#endif

        int channels = images[0].channels();
        Size size = images[0].size();
        int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

        ALOGD("HDR Merge 2");

        dst.create(images[0].size(), CV_32FCC);
        Mat result = dst.getMat();

        Mat response = input_response.getMat();

        if(response.empty()) {
            response = linearResponseNew(channels);
            // Avoid log(0) at intensity 0 by reusing the value for intensity 1.
            // NOTE(review): Vec3f assumes 3 channels — confirm for other inputs.
            response.at<Vec3f>(0) = response.at<Vec3f>(1);
        }

        ALOGD("HDR Merge 3");

        // Log of the inverse response; used as a LUT below.
        Mat log_response;
        log(response, log_response);
        CV_Assert(log_response.rows == LDR_SIZE && log_response.cols == 1 &&
                  log_response.channels() == channels);

        // Log exposure times (seconds).
        Mat exp_values(times.clone());
        log(exp_values, exp_values);

        ALOGD("HDR Merge 4");
        result = Mat::zeros(size, CV_32FCC);
        std::vector<Mat> result_split;      // per-channel log-radiance accumulators
        split(result, result_split);
        Mat weight_sum = Mat::zeros(size, CV_32F); // per-pixel total weight

        ALOGD("HDR Merge 5");

        for(size_t i = 0; i < images.size(); i++) {
            std::vector<Mat> splitted;
            split(images[i], splitted);

            ALOGD("HDR Merge 5 - 1");
            // Per-pixel weight = mean of the per-channel hat-function weights.
            Mat w = Mat::zeros(size, CV_32F);
            for(int c = 0; c < channels; c++) {
                LUT(splitted[c], weights, splitted[c]);
                w += splitted[c];
            }
            w /= channels;

            ALOGD("HDR Merge 5 - 2");
            // Map pixel values through the log inverse response.
            Mat response_img;
            LUT(images[i], log_response, response_img);
            split(response_img, splitted);

            // #pragma omp parallel for num_threads(channels)
            for(int c = 0; c < channels; c++) {
                // crashes here (original author's note, translated)
                result_split[c] += w.mul(splitted[c] - exp_values.at<float>((int)i));
            }
            weight_sum += w;
            ALOGD("HDR Merge 5 - 3");
        }

        ALOGD("HDR Merge 6");
        // Normalize by total weight. NOTE(review): a pixel whose weight sum is
        // zero yields inf here — confirm inputs always hit non-zero weights.
        weight_sum = 1.0f / weight_sum;
        for(int c = 0; c < channels; c++) {
            result_split[c] = result_split[c].mul(weight_sum);
        }
        ALOGD("HDR Merge 7");
        merge(result_split, result);
        // Back from log domain to linear radiance.
        exp(result, result);

        ALOGD("HDR Merge 8");
    }

    // Convenience overload: merge assuming a linear camera response.
    void process(InputArrayOfArrays src, OutputArray dst, InputArray times) CV_OVERRIDE
    {
        // CV_INSTRUMENT_REGION();

        process(src, dst, times, Mat());
    }

protected:
    String name;   // algorithm name (mirrors OpenCV's serialization field)
    Mat weights;   // 256x1 CV_32F hat-function LUT
};
|
|
|
|
|
|
|
|
|
|
// Factory: returns a new Debevec merger behind the abstract interface.
Ptr<MergeDebevecNew> createMergeDebevecNew()
{
    Ptr<MergeDebevecNew> merger = makePtr<MergeDebevecImplNew>();
    return merger;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Abstract interface for the Reinhard global tonemap operator — a local
// clone of cv::TonemapReinhard so the implementation can be instrumented.
class CV_EXPORTS_W TonemapReinhardNew
{
public:
    // Tonemaps the 32-bit float HDR image `src` into `dst`.
    CV_WRAP virtual void process(InputArray src, OutputArray dst) = 0;

    virtual ~TonemapReinhardNew() {}

    // Gamma applied by the final linear tonemap pass.
    CV_WRAP virtual float getGamma() const = 0;
    CV_WRAP virtual void setGamma(float gamma) = 0;

    // Result brightness; the implementation uses exp(-intensity) as a scale.
    CV_WRAP virtual float getIntensity() const = 0;
    CV_WRAP virtual void setIntensity(float intensity) = 0;

    // Blend between per-pixel (1.0) and global-mean (0.0) adaptation.
    CV_WRAP virtual float getLightAdaptation() const = 0;
    CV_WRAP virtual void setLightAdaptation(float light_adapt) = 0;

    // Blend between per-channel (1.0) and gray/luminance (0.0) adaptation.
    CV_WRAP virtual float getColorAdaptation() const = 0;
    CV_WRAP virtual void setColorAdaptation(float color_adapt) = 0;
};
|
|
|
|
|
|
|
|
|
|
inline void log_(const Mat& src, Mat& dst)
|
|
|
|
|
{
|
|
|
|
|
max(src, Scalar::all(1e-4), dst);
|
|
|
|
|
log(dst, dst);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Reinhard tonemap implementation — local clone of OpenCV's
// TonemapReinhardImpl with ALOGD progress markers and an OpenMP-parallelized
// per-channel loop added.
class TonemapReinhardImpl CV_FINAL : public TonemapReinhardNew
{
public:
    TonemapReinhardImpl(float _gamma, float _intensity, float _light_adapt, float _color_adapt) :
        name("TonemapReinhardNew"),
        gamma(_gamma),
        intensity(_intensity),
        light_adapt(_light_adapt),
        color_adapt(_color_adapt)
    {
    }

    // Tonemaps the HDR input `_src` into a CV_32FC3 `_dst` in [0,1].
    void process(InputArray _src, OutputArray _dst) CV_OVERRIDE
    {
        ALOGD("HDR 1 ");

        Mat src = _src.getMat();
        CV_Assert(!src.empty());
        _dst.create(src.size(), CV_32FC3);
        Mat img = _dst.getMat();
        // First pass: linear normalization of the HDR range into [0,1].
        Ptr<Tonemap> linear = createTonemap(1.0f);
        linear->process(src, img);

        ALOGD("HDR 2 ");

        // Luminance statistics drive the adaptive key below.
        Mat gray_img;
        cvtColor(img, gray_img, COLOR_RGB2GRAY);
        Mat log_img;
        log_(gray_img, log_img);

        float log_mean = static_cast<float>(sum(log_img)[0] / log_img.total());
        double log_min, log_max;
        minMaxLoc(log_img, &log_min, &log_max);
        log_img.release();

        ALOGD("HDR 3 ");

        double key = static_cast<float>((log_max - log_mean) / (log_max - log_min));
        float map_key = 0.3f + 0.7f * pow(static_cast<float>(key), 1.4f);
        // NOTE(review): this overwrites the member, so a second process() call
        // runs with a different effective intensity (same quirk as upstream
        // OpenCV) — confirm callers only call process() once per instance.
        intensity = exp(-intensity);
        Scalar chan_mean = mean(img);
        float gray_mean = static_cast<float>(mean(gray_img)[0]);

        std::vector<Mat> channels(3);
        split(img, channels);

        ALOGD("HDR 4 ");

        // Per-channel adaptation; iterations are independent, one thread each.
#pragma omp parallel for num_threads(3)
        for (int i = 0; i < 3; i++) {
            // Blend channel mean vs. gray mean (color adaptation), then local
            // pixel value vs. that global value (light adaptation).
            float global = color_adapt * static_cast<float>(chan_mean[i]) + (1.0f - color_adapt) * gray_mean;
            Mat adapt = color_adapt * channels[i] + (1.0f - color_adapt) * gray_img;
            adapt = light_adapt * adapt + (1.0f - light_adapt) * global;
            pow(intensity * adapt, map_key, adapt);
            // crashes here (original author's note, translated)
            channels[i] = channels[i].mul(1.0f/(adapt + channels[i]));
        }
        gray_img.release();
        merge(channels, img);

        ALOGD("HDR 5 ");
        // Final pass: re-normalize and apply the configured gamma.
        linear->setGamma(gamma);
        linear->process(img, img);

        ALOGD("HDR 6 ");
    }

    float getGamma() const CV_OVERRIDE { return gamma; }
    void setGamma(float val) CV_OVERRIDE { gamma = val; }

    float getIntensity() const CV_OVERRIDE { return intensity; }
    void setIntensity(float val) CV_OVERRIDE { intensity = val; }

    float getLightAdaptation() const CV_OVERRIDE { return light_adapt; }
    void setLightAdaptation(float val) CV_OVERRIDE { light_adapt = val; }

    float getColorAdaptation() const CV_OVERRIDE { return color_adapt; }
    void setColorAdaptation(float val) CV_OVERRIDE { color_adapt = val; }

    // Serialization stubs — disabled because writeFormat() is OpenCV-internal.
    void write(FileStorage& fs) const
    {
#if 0
        writeFormat(fs);
        fs << "name" << name
           << "gamma" << gamma
           << "intensity" << intensity
           << "light_adapt" << light_adapt
           << "color_adapt" << color_adapt;
#endif
    }

    void read(const FileNode& fn)
    {
#if 0
        FileNode n = fn["name"];
        CV_Assert(n.isString() && String(n) == name);
        gamma = fn["gamma"];
        intensity = fn["intensity"];
        light_adapt = fn["light_adapt"];
        color_adapt = fn["color_adapt"];
#endif
    }

protected:
    String name; // algorithm name (mirrors OpenCV's serialization field)
    float gamma, intensity, light_adapt, color_adapt;
};
|
|
|
|
|
|
|
|
|
|
// Factory for the Reinhard tonemapper.
// NOTE(review): despite their names, `contrast`, `sigma_color` and
// `sigma_space` are forwarded as intensity, light adaptation and color
// adaptation respectively (see TonemapReinhardImpl's constructor) — the
// parameter names look copied from a different tonemap factory; confirm
// against callers before renaming.
Ptr<TonemapReinhardNew> createTonemapReinhardNew(float gamma, float contrast, float sigma_color, float sigma_space)
{
    return makePtr<TonemapReinhardImpl>(gamma, contrast, sigma_color, sigma_space);
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// AndroidBitmap_compress() sink that streams compressed bytes to a file
// descriptor smuggled through `userContext`.
//
// @param userContext  the open fd, cast to void* by the caller
// @param data         chunk of compressed bytes to persist
// @param size         number of bytes in `data`
// @return true when the whole chunk was written
//
// Fixes over the original: write(2) may return a short count (signals,
// pipes, non-blocking fds), so loop until everything is written; keep the
// ssize_t return instead of truncating to int and comparing signed vs.
// unsigned; retry on EINTR.
bool AndroidBitmap_CompressWriteFile(void *userContext, const void *data, size_t size)
{
    int file = (int)((size_t)userContext);
    const uint8_t* bytes = (const uint8_t*)data;
    size_t total = 0;
    while (total < size)
    {
        ssize_t written = write(file, bytes + total, size - total);
        if (written < 0)
        {
            if (errno == EINTR)
                continue; // interrupted by a signal — retry
            return false;
        }
        total += (size_t)written;
    }
    return true;
}
|
|
|
|
|
|
|
|
|
|
// AndroidBitmap_compress() sink that appends each compressed chunk to the
// std::vector<uint8_t> passed through `userContext`. Always succeeds.
bool AndroidBitmap_CompressWriteBuffer(void *userContext, const void *data, size_t size)
{
    std::vector<uint8_t>& sink = *static_cast<std::vector<uint8_t>*>(userContext);
    const uint8_t* first = static_cast<const uint8_t*>(data);
    const uint8_t* last = first + size;
    sink.insert(sink.end(), first, last);
    return true;
}
|
|
|
|
|
|
|
|
|
|
void ConvertDngToPng(const uint8_t* buffer, size_t bufferLength, std::vector<uint8_t>& pngData)
|
|
|
|
|
{
|
|
|
|
|
AImageDecoder* imageDecoder = NULL;
|
|
|
|
|
AImageDecoder_createFromBuffer(buffer, bufferLength, &imageDecoder);
|
|
|
|
|
|
|
|
|
|
// int fd = open("/sdcard/com.xypower.mpapp/tmp/4.dng", O_RDONLY);
|
|
|
|
|
// AImageDecoder_createFromFd(fd, &imageDecoder);
|
|
|
|
|
|
|
|
|
|
const AImageDecoderHeaderInfo* headerInfo = AImageDecoder_getHeaderInfo(imageDecoder);
|
|
|
|
|
const char *mimeType = AImageDecoderHeaderInfo_getMimeType(headerInfo);
|
|
|
|
|
AndroidBitmapInfo bmpInfo = { 0 };
|
|
|
|
|
bmpInfo.flags = AImageDecoderHeaderInfo_getAlphaFlags(headerInfo);
|
|
|
|
|
bmpInfo.width = AImageDecoderHeaderInfo_getWidth(headerInfo);
|
|
|
|
|
bmpInfo.height = AImageDecoderHeaderInfo_getHeight(headerInfo);
|
|
|
|
|
bmpInfo.format = AImageDecoderHeaderInfo_getAndroidBitmapFormat(headerInfo);
|
|
|
|
|
bmpInfo.stride = AImageDecoder_getMinimumStride(imageDecoder); // Image decoder does not
|
|
|
|
|
// use padding by default
|
|
|
|
|
int32_t fmt = ANDROID_BITMAP_FORMAT_RGBA_8888;
|
|
|
|
|
size_t stride = bmpInfo.width * 4;
|
|
|
|
|
size_t size = stride * bmpInfo.height;
|
|
|
|
|
size = bmpInfo.stride * bmpInfo.height;
|
|
|
|
|
|
|
|
|
|
int32_t dataSpace = AImageDecoderHeaderInfo_getDataSpace(headerInfo);
|
|
|
|
|
|
|
|
|
|
std::vector<uint8_t> frame;
|
|
|
|
|
frame.resize(size);
|
|
|
|
|
|
|
|
|
|
// AImageDecoder_setTargetSize(imageDecoder, 5376, 3024);
|
|
|
|
|
int result = AImageDecoder_decodeImage(imageDecoder, (void *)(&frame[0]), bmpInfo.stride, size);
|
|
|
|
|
|
|
|
|
|
// close(fd);
|
|
|
|
|
|
|
|
|
|
if (result == ANDROID_IMAGE_DECODER_SUCCESS)
|
|
|
|
|
{
|
|
|
|
|
// std::string imagePath = "/data/data/com.xypower.mppreview/files/test.png";
|
|
|
|
|
// int file = open(imagePath.c_str(), O_CREAT | O_RDWR | O_TRUNC, S_IRUSR | S_IWUSR);
|
|
|
|
|
// if (file == -1) {}
|
|
|
|
|
pngData.clear();
|
|
|
|
|
AndroidBitmap_compress(&bmpInfo, dataSpace, &frame[0], ANDROID_BITMAP_COMPRESS_FORMAT_PNG, 100, (void*)&pngData, AndroidBitmap_CompressWriteBuffer);
|
|
|
|
|
// close(file);
|
|
|
|
|
std::vector<uint8_t> empty;
|
|
|
|
|
empty.swap(frame);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
AImageDecoder_delete(imageDecoder);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void ConvertDngToPng(const uint8_t* buffer, size_t bufferLength, cv::Mat& rgb)
|
|
|
|
|
{
|
|
|
|
|
AImageDecoder* imageDecoder = NULL;
|
|
|
|
|
AImageDecoder_createFromBuffer(buffer, bufferLength, &imageDecoder);
|
|
|
|
|
|
|
|
|
|
// int fd = open("/sdcard/com.xypower.mpapp/tmp/4.dng", O_RDONLY);
|
|
|
|
|
// AImageDecoder_createFromFd(fd, &imageDecoder);
|
|
|
|
|
|
|
|
|
|
const AImageDecoderHeaderInfo* headerInfo = AImageDecoder_getHeaderInfo(imageDecoder);
|
|
|
|
|
const char *mimeType = AImageDecoderHeaderInfo_getMimeType(headerInfo);
|
|
|
|
|
AndroidBitmapInfo bmpInfo = { 0 };
|
|
|
|
|
bmpInfo.flags = AImageDecoderHeaderInfo_getAlphaFlags(headerInfo);
|
|
|
|
|
bmpInfo.width = AImageDecoderHeaderInfo_getWidth(headerInfo);
|
|
|
|
|
bmpInfo.height = AImageDecoderHeaderInfo_getHeight(headerInfo);
|
|
|
|
|
bmpInfo.format = AImageDecoderHeaderInfo_getAndroidBitmapFormat(headerInfo);
|
|
|
|
|
bmpInfo.stride = AImageDecoder_getMinimumStride(imageDecoder); // Image decoder does not
|
|
|
|
|
// use padding by default
|
|
|
|
|
int32_t fmt = ANDROID_BITMAP_FORMAT_RGBA_8888;
|
|
|
|
|
size_t stride = bmpInfo.width * 4;
|
|
|
|
|
size_t size = stride * bmpInfo.height;
|
|
|
|
|
size = bmpInfo.stride * bmpInfo.height;
|
|
|
|
|
|
|
|
|
|
int32_t dataSpace = AImageDecoderHeaderInfo_getDataSpace(headerInfo);
|
|
|
|
|
|
|
|
|
|
std::vector<uint8_t> frame;
|
|
|
|
|
frame.resize(size);
|
|
|
|
|
|
|
|
|
|
// AImageDecoder_setTargetSize(imageDecoder, 5376, 3024);
|
|
|
|
|
int result = AImageDecoder_decodeImage(imageDecoder, (void *)(&frame[0]), bmpInfo.stride, size);
|
|
|
|
|
|
|
|
|
|
// close(fd);
|
|
|
|
|
|
|
|
|
|
if (result == ANDROID_IMAGE_DECODER_SUCCESS)
|
|
|
|
|
{
|
|
|
|
|
cv::Mat tmp(bmpInfo.height, bmpInfo.width, CV_8UC4, &frame[0]);
|
|
|
|
|
tmp.copyTo(rgb);
|
|
|
|
|
|
|
|
|
|
//convert RGB to BGR
|
|
|
|
|
cv::cvtColor(rgb, rgb, cv::COLOR_RGB2BGR);
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
// std::string imagePath = "/data/data/com.xypower.mppreview/files/test.png";
|
|
|
|
|
// int file = open(imagePath.c_str(), O_CREAT | O_RDWR | O_TRUNC, S_IRUSR | S_IWUSR);
|
|
|
|
|
// if (file == -1) {}
|
|
|
|
|
std::vector<uint8_t> pngData;
|
|
|
|
|
result = AndroidBitmap_compress(&bmpInfo, dataSpace, &frame[0], ANDROID_BITMAP_COMPRESS_FORMAT_PNG, 100, (void*)&pngData, AndroidBitmap_CompressWriteBuffer);
|
|
|
|
|
{
|
|
|
|
|
std::vector<uint8_t> empty;
|
|
|
|
|
empty.swap(frame);
|
|
|
|
|
}
|
|
|
|
|
// close(file);
|
|
|
|
|
if (ANDROID_BITMAP_RESULT_SUCCESS == result)
|
|
|
|
|
{
|
|
|
|
|
rgb = cv::imdecode(pngData, cv::IMREAD_COLOR);
|
|
|
|
|
}
|
|
|
|
|
*/
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
AImageDecoder_delete(imageDecoder);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
inline std::string jstring2string(JNIEnv *env, jstring jStr)
|
|
|
|
|
{
|
|
|
|
|
if (!jStr)
|
|
|
|
|
return "";
|
|
|
|
|
const jclass stringClass = env->GetObjectClass(jStr);
|
|
|
|
|
const jmethodID getBytes = env->GetMethodID(stringClass, "getBytes", "(Ljava/lang/String;)[B");
|
|
|
|
|
const jbyteArray stringJbytes = (jbyteArray) env->CallObjectMethod(jStr, getBytes, env->NewStringUTF("UTF-8"));
|
|
|
|
|
size_t length = (size_t) env->GetArrayLength(stringJbytes);
|
|
|
|
|
jbyte* pBytes = env->GetByteArrayElements(stringJbytes, NULL);
|
|
|
|
|
std::string ret = std::string((char *)pBytes, length);
|
|
|
|
|
env->ReleaseByteArrayElements(stringJbytes, pBytes, JNI_ABORT);
|
|
|
|
|
env->DeleteLocalRef(stringJbytes);
|
|
|
|
|
env->DeleteLocalRef(stringClass);
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bool makeHdr(std::vector<float>& times, std::vector<std::string>& paths, cv::Mat& rgb)
|
|
|
|
|
{
|
|
|
|
|
// Read images and exposure times
|
|
|
|
|
std::vector<cv::Mat> images;
|
|
|
|
|
images.resize(paths.size());
|
|
|
|
|
|
|
|
|
|
#pragma omp parallel for
|
|
|
|
|
for (int idx = 0; idx < paths.size(); idx++)
|
|
|
|
|
{
|
|
|
|
|
images[idx] = cv::imread(paths[idx].c_str());
|
|
|
|
|
}
|
|
|
|
|
// Align input images
|
|
|
|
|
// cout << "Aligning images ... " << endl;
|
|
|
|
|
cv::Ptr<cv::AlignMTB> alignMTB = cv::createAlignMTB();
|
|
|
|
|
#if 0
|
|
|
|
|
alignMTB->process(images, images);
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
// Obtain Camera Response Function (CRF)
|
|
|
|
|
// cout << "Calculating Camera Response Function (CRF) ... " << endl;
|
|
|
|
|
cv::Mat responseDebevec;
|
|
|
|
|
cv::Ptr<cv::CalibrateDebevec> calibrateDebevec = cv::createCalibrateDebevec();
|
|
|
|
|
calibrateDebevec->process(images, responseDebevec, times);
|
|
|
|
|
|
|
|
|
|
// Merge images into an HDR linear image
|
|
|
|
|
// cout << "Merging images into one HDR image ... ";
|
|
|
|
|
cv::Mat hdrDebevec;
|
|
|
|
|
cv::Ptr<cv::MergeDebevec> mergeDebevec = cv::createMergeDebevec();
|
|
|
|
|
mergeDebevec->process(images, hdrDebevec, times, responseDebevec);
|
|
|
|
|
// Save HDR image.
|
|
|
|
|
// imwrite((OUTPUT_DIR "hdrDebevec.hdr"), hdrDebevec);
|
|
|
|
|
// cout << "saved hdrDebevec.hdr " << endl;
|
|
|
|
|
|
|
|
|
|
{
|
|
|
|
|
std::vector<cv::Mat> empty;
|
|
|
|
|
empty.swap(images);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Tonemap using Reinhard's method to obtain 24-bit color image
|
|
|
|
|
// cout << "Tonemaping using Reinhard's method ... ";
|
|
|
|
|
cv::Mat ldrReinhard;
|
|
|
|
|
cv::Ptr<cv::TonemapReinhard> tonemapReinhard = cv::createTonemapReinhard(1.5, 0, 0, 0);
|
|
|
|
|
tonemapReinhard->process(hdrDebevec, ldrReinhard);
|
|
|
|
|
hdrDebevec.release();
|
|
|
|
|
|
|
|
|
|
int type = ldrReinhard.type();
|
|
|
|
|
ldrReinhard = ldrReinhard * 255;
|
|
|
|
|
|
|
|
|
|
ldrReinhard.convertTo(rgb, CV_8U);
|
|
|
|
|
ldrReinhard.release();
|
|
|
|
|
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Builds a tonemapped HDR image from already-decoded, differently-exposed
// frames using the Debevec pipeline (CRF calibration -> merge -> Reinhard).
//
// @param times  exposure time (seconds) for each entry in `images`
// @param images the exposure bracket; NOTE: consumed — the vector is emptied
//               after merging to release memory early
// @param rgb    receives the tonemapped 8-bit result
// @return true on success; false on empty/mismatched inputs
bool makeHdr(std::vector<float>& times, std::vector<cv::Mat>& images, cv::Mat& rgb)
{
    if (images.empty() || times.size() != images.size())
    {
        ALOGE("makeHdr: %d images vs %d exposure times", (int)images.size(), (int)times.size());
        return false;
    }

    // Align input images (disabled — kept for reference).
    cv::Ptr<cv::AlignMTB> alignMTB = cv::createAlignMTB();
#if 0
    alignMTB->process(images, images);
#endif

    // Obtain the Camera Response Function (CRF).
    ALOGI("Calculating Camera Response Function (CRF) ... ");
    cv::Mat responseDebevec;
    cv::Ptr<cv::CalibrateDebevec> calibrateDebevec = cv::createCalibrateDebevec();
    calibrateDebevec->process(images, responseDebevec, times);

    // Merge the bracket into one linear HDR image.
    ALOGI("Merging images into one HDR image ... ");
    cv::Mat hdrDebevec;
    cv::Ptr<cv::MergeDebevec> mergeDebevec = cv::createMergeDebevec();
    mergeDebevec->process(images, hdrDebevec, times, responseDebevec);

    // Free the input frames now that they have been merged.
    {
        std::vector<cv::Mat> empty;
        empty.swap(images);
    }

    // Tonemap with Reinhard's operator down to a displayable range.
    ALOGI("Tonemaping using Reinhard's method ... ");
    cv::Mat ldrReinhard;
    cv::Ptr<cv::TonemapReinhard> tonemapReinhard = cv::createTonemapReinhard(1.5, 0, 0, 0);
    tonemapReinhard->process(hdrDebevec, ldrReinhard);
    hdrDebevec.release();

    // Scale [0,1] float output to [0,255] and convert to 8-bit for saving.
    ldrReinhard = ldrReinhard * 255;
    ldrReinhard.convertTo(rgb, CV_8U);
    ldrReinhard.release();

    return true;
}
|
|
|
|
|
|
|
|
|
|
extern "C"
|
|
|
|
|
JNIEXPORT jboolean JNICALL
|
|
|
|
|
Java_com_xypower_mppreview_Camera2RawFragment_makeHdr(
|
|
|
|
|
JNIEnv *env, jobject thiz, jlong exposureTime1, jstring path1, jlong exposureTime2,
|
|
|
|
|
jstring path2, jstring outputPath) {
|
|
|
|
|
|
|
|
|
|
cv::setNumThreads(4);
|
|
|
|
|
std::vector<float> times;
|
|
|
|
|
std::vector<std::string> paths;
|
|
|
|
|
|
|
|
|
|
times.push_back((double)(exposureTime1) / 1000000000.0);
|
|
|
|
|
times.push_back((double)(exposureTime2) / 1000000000.0);
|
|
|
|
|
|
|
|
|
|
paths.push_back(jstring2string(env, path1));
|
|
|
|
|
paths.push_back(jstring2string(env, path2));
|
|
|
|
|
|
|
|
|
|
cv::Mat rgb;
|
|
|
|
|
if (makeHdr(times, paths, rgb))
|
|
|
|
|
{
|
|
|
|
|
std::string fileName = jstring2string(env, outputPath);
|
|
|
|
|
if (cv::imwrite(fileName.c_str(), rgb))
|
|
|
|
|
{
|
|
|
|
|
return JNI_TRUE;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return JNI_FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
extern "C"
|
|
|
|
|
JNIEXPORT jboolean JNICALL
|
|
|
|
|
Java_com_xypower_mppreview_Camera2RawFragment_makeHdr2(
|
|
|
|
|
JNIEnv *env, jobject thiz, jlong exposureTime1, jstring path1, jlong exposureTime2,
|
|
|
|
|
jstring path2, jlong exposureTime3, jstring path3, jstring outputPath) {
|
|
|
|
|
|
|
|
|
|
cv::setNumThreads(4);
|
|
|
|
|
std::vector<float> times;
|
|
|
|
|
std::vector<std::string> paths;
|
|
|
|
|
|
|
|
|
|
times.push_back((float)(exposureTime1 / 1000) / 1000000.0);
|
|
|
|
|
times.push_back((float)(exposureTime2 / 1000) / 1000000.0);
|
|
|
|
|
times.push_back((float)(exposureTime3 / 1000) / 1000000.0);
|
|
|
|
|
|
|
|
|
|
paths.push_back(jstring2string(env, path1));
|
|
|
|
|
paths.push_back(jstring2string(env, path2));
|
|
|
|
|
paths.push_back(jstring2string(env, path3));
|
|
|
|
|
|
|
|
|
|
cv::Mat rgb;
|
|
|
|
|
if (makeHdr(times, paths, rgb))
|
|
|
|
|
{
|
|
|
|
|
std::string fileName = jstring2string(env, outputPath);
|
|
|
|
|
if (cv::imwrite(fileName.c_str(), rgb))
|
|
|
|
|
{
|
|
|
|
|
return JNI_TRUE;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return JNI_FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//extern "C"
|
|
|
|
|
//JNIEXPORT void JNICALL
|
|
|
|
|
//Java_com_xypower_mppreview_Camera2RawFragment_makeHdr3(JNIEnv *env,
|
|
|
|
|
// jobject thiz,
|
|
|
|
|
// jlong img1_obj,
|
|
|
|
|
// jlong img2_obj,
|
|
|
|
|
// jlong img3_obj,
|
|
|
|
|
// jfloat time1,
|
|
|
|
|
// jfloat time2,
|
|
|
|
|
// jfloat time3,
|
|
|
|
|
// jlong hdrImg) {
|
|
|
|
|
// cv::Mat &mImg1 = *(cv::Mat *) img1_obj;
|
|
|
|
|
// cv::Mat &mImg2 = *(cv::Mat *) img2_obj;
|
|
|
|
|
// cv::Mat &mImg3 = *(cv::Mat *) img3_obj;
|
|
|
|
|
//
|
|
|
|
|
// vector<cv::Mat> images;
|
|
|
|
|
// vector<float> times;
|
|
|
|
|
// images.push_back(mImg1);
|
|
|
|
|
// images.push_back(mImg2);
|
|
|
|
|
// images.push_back(mImg3);
|
|
|
|
|
//
|
|
|
|
|
// //曝光时间列表
|
|
|
|
|
// static const float timesArray[] = { time1,time2,time3};
|
|
|
|
|
// times.assign(timesArray, timesArray + 3);
|
|
|
|
|
//
|
|
|
|
|
// //利用中值阈值图(MTB)进行对齐
|
|
|
|
|
// /*Ptr<AlignMTB> alignMTB = createAlignMTB();
|
|
|
|
|
// alignMTB->process(images, images);
|
|
|
|
|
//
|
|
|
|
|
// //恢复相机响应函数
|
|
|
|
|
// Mat responce;
|
|
|
|
|
// Ptr<CalibrateDebevec> calibratedebevec = createCalibrateDebevec();
|
|
|
|
|
// calibratedebevec->process(images, responce, times);*/
|
|
|
|
|
//
|
|
|
|
|
// //融合
|
|
|
|
|
// cv::Mat hdrMat;
|
|
|
|
|
// Ptr<MergeDebevec> mergedebevec = createMergeDebevec();
|
|
|
|
|
// mergedebevec->process(images, hdrMat, times);
|
|
|
|
|
//
|
|
|
|
|
// cv::Mat sdr;
|
|
|
|
|
// float gamma = 1.0f;
|
|
|
|
|
// Ptr<Tonemap> tonemap = createTonemap(gamma);
|
|
|
|
|
// tonemap->process(hdrMat, hdrMat);
|
|
|
|
|
//
|
|
|
|
|
// hdrMat = hdrMat * 255;
|
|
|
|
|
// hdrMat.convertTo(*(Mat *)hdrImg, CV_8UC3);
|
|
|
|
|
//
|
|
|
|
|
// //hdrImg = cv::normalize(images, None, 0, 255, cv::NORM_MINMAX, cv::CV_8UC3)
|
|
|
|
|
//
|
|
|
|
|
// /*Mat fusion;
|
|
|
|
|
// Ptr<MergeMertens> merge_mertens = createMergeMertens();
|
|
|
|
|
// merge_mertens->process(images, *(Mat *)hdrImg);*/
|
|
|
|
|
//}
|
|
|
|
|
// Intentionally empty JNI stub; kept so MainActivity.test() resolves at link
// time while the native side is unimplemented.
extern "C"
JNIEXPORT void JNICALL
Java_com_xypower_mppreview_MainActivity_test(JNIEnv *env, jobject thiz) {
    // TODO: implement test()
}
|
|
|
|
|
extern "C"
|
|
|
|
|
JNIEXPORT jboolean JNICALL
|
|
|
|
|
Java_com_xypower_mppreview_Camera2RawFragment_makeHdr3(JNIEnv *env, jclass clazz,
|
|
|
|
|
jlong exposureTime1, jobject img1, jint length1,
|
|
|
|
|
jlong exposureTime2, jobject img2, jint length2,
|
|
|
|
|
jstring outputPath) {
|
|
|
|
|
|
|
|
|
|
ALOGI("Start HDR3");
|
|
|
|
|
|
|
|
|
|
std::vector<cv::Mat> images;
|
|
|
|
|
images.resize(2);
|
|
|
|
|
|
|
|
|
|
std::vector<jobject> bitmaps;
|
|
|
|
|
bitmaps.push_back(img1);
|
|
|
|
|
bitmaps.push_back(img2);
|
|
|
|
|
|
|
|
|
|
ALOGI("Start Decode");
|
|
|
|
|
// omp_set_num_threads(2);
|
|
|
|
|
// #pragma omp parallel for num_threads(2)
|
|
|
|
|
for (int idx = 0; idx < 2; idx++)
|
|
|
|
|
{
|
|
|
|
|
AndroidBitmapInfo bmpInfo = { 0 };
|
|
|
|
|
int result = AndroidBitmap_getInfo(env, bitmaps[idx], &bmpInfo);
|
|
|
|
|
|
|
|
|
|
if ((ANDROID_BITMAP_FLAGS_IS_HARDWARE & bmpInfo.flags) == ANDROID_BITMAP_FLAGS_IS_HARDWARE)
|
|
|
|
|
{
|
|
|
|
|
AHardwareBuffer* hardwareBuffer = NULL;
|
|
|
|
|
result = AndroidBitmap_getHardwareBuffer(env, bitmaps[idx], &hardwareBuffer);
|
|
|
|
|
|
|
|
|
|
void* outVirtualAddress = NULL;
|
|
|
|
|
int32_t fence = -1;
|
|
|
|
|
result = AHardwareBuffer_lock(hardwareBuffer, AHARDWAREBUFFER_USAGE_CPU_READ_RARELY, fence, NULL, &outVirtualAddress);
|
|
|
|
|
cv::Mat tmp(bmpInfo.height, bmpInfo.width, CV_8UC4, outVirtualAddress);
|
|
|
|
|
AHardwareBuffer_unlock(hardwareBuffer, &fence);
|
|
|
|
|
tmp.copyTo(images[idx]);
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
void* outAddress = NULL;
|
|
|
|
|
result = AndroidBitmap_lockPixels(env, bitmaps[idx], &outAddress);
|
|
|
|
|
cv::Mat tmp(bmpInfo.height, bmpInfo.width, CV_8UC4, outAddress);
|
|
|
|
|
AndroidBitmap_unlockPixels(env, bitmaps[idx]);
|
|
|
|
|
tmp.copyTo(images[idx]);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
//convert RGB to BGR
|
|
|
|
|
cv::cvtColor(images[idx], images[idx], cv::COLOR_RGB2BGR);
|
|
|
|
|
|
|
|
|
|
// ConvertDngToPng(pngDatas[idx], pngLengths[idx], images[idx]);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ALOGI("End Decode");
|
|
|
|
|
cv::Mat rgb;
|
|
|
|
|
|
|
|
|
|
std::vector<float> times;
|
|
|
|
|
times.push_back((double)(exposureTime1) / 1000000000.0);
|
|
|
|
|
times.push_back((double)(exposureTime2) / 1000000000.0);
|
|
|
|
|
|
|
|
|
|
ALOGI("Start MakeHDR3");
|
|
|
|
|
makeHdr(times, images, rgb);
|
|
|
|
|
ALOGI("End MakeHDR3");
|
|
|
|
|
|
|
|
|
|
std::string fileName = jstring2string(env, outputPath);
|
|
|
|
|
std::vector<int> params;
|
|
|
|
|
params.push_back(cv::IMWRITE_JPEG_QUALITY);
|
|
|
|
|
params.push_back(100);
|
|
|
|
|
if (cv::imwrite(fileName.c_str(), rgb, params))
|
|
|
|
|
{
|
|
|
|
|
ALOGI("End HDR3");
|
|
|
|
|
return JNI_TRUE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// env->DeleteGlobalRef(img1);
|
|
|
|
|
// env->DeleteGlobalRef(img2);
|
|
|
|
|
|
|
|
|
|
return JNI_FALSE;
|
|
|
|
|
}
|
|
|
|
|
extern "C"
|
|
|
|
|
JNIEXPORT jboolean JNICALL
|
|
|
|
|
Java_com_xypower_mppreview_Camera2RawFragment_decodeDng(JNIEnv *env, jclass clazz,
|
|
|
|
|
jobject byte_buffer, jstring output_path) {
|
|
|
|
|
// TODO: implement decodeDng()
|
|
|
|
|
}
|