// MpPreview/app/src/main/cpp/MpPreview.cpp

#include <jni.h>
#include <string>
#include <vector>
// #include "ncnn/yolov5ncnn.h"
#include <fcntl.h>
#include <unistd.h>
#include <omp.h>
#include <android/imagedecoder.h>
#include <android/log.h>
#include <media/NdkImage.h>
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include "hdr.h"
#include "BmpLoader.h"
#include "vulkan_hdr_generator.h"
namespace cv2
{
using namespace cv;
// Builds a linear inverse camera-response curve: an LDR_SIZE x 1 matrix whose
// row i holds the value i in every channel. Used as the fallback CRF when the
// caller supplies no calibrated response.
//
// BUGFIX: the original wrote through at<Vec3f>() unconditionally, which stores
// 3 floats per row and writes out of bounds whenever channels != 3. This
// version fills exactly `channels` floats per row.
Mat linearResponseNew(int channels)
{
    Mat response(LDR_SIZE, 1, CV_MAKETYPE(CV_32F, channels));
    for (int i = 0; i < LDR_SIZE; i++) {
        float* row = response.ptr<float>(i);
        for (int c = 0; c < channels; c++) {
            row[c] = static_cast<float>(i);
        }
    }
    return response;
}
// Builds the triangle ("hat") weighting function over the LDR range:
// weights ramp up from 1 at intensity 0 to the midpoint, then back down to 1.
Mat triangleWeightsNew()
{
    Mat hat(LDR_SIZE, 1, CV_32F);
    const int mid = LDR_SIZE / 2;
    float* p = hat.ptr<float>();
    for (int v = 0; v < LDR_SIZE; ++v) {
        p[v] = (v < mid) ? static_cast<float>(v + 1) : static_cast<float>(LDR_SIZE - v);
    }
    return hat;
}
// Abstract interface for exposure-merging algorithms. Mirrors
// cv::MergeExposures, duplicated locally so the implementation can be
// patched and instrumented with logging.
class CV_EXPORTS_W MergeExposuresNew
{
public:
virtual ~MergeExposuresNew() {}
/** @brief Merges images.
@param src vector of input images
@param dst result image
@param times vector of exposure time values for each image
@param response 256x1 matrix with inverse camera response function for each pixel value, it should
have the same number of channels as images.
*/
CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
InputArray times, InputArray response) = 0;
};
// Abstract interface for Debevec-style exposure merging (mirrors
// cv::MergeDebevec).
class CV_EXPORTS_W MergeDebevecNew : public MergeExposuresNew
{
public:
// Merge with an explicit inverse camera response function.
CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
InputArray times, InputArray response) CV_OVERRIDE = 0;
// Convenience overload: merge assuming a linear camera response.
CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
};
// Debevec exposure fusion (local copy of cv::MergeDebevecImpl with extra
// logging). Combines several LDR exposures into one 32-bit radiance map:
// each pixel is weighted with the triangle ("hat") function and the exposure
// times are divided out in the log domain.
class MergeDebevecImplNew CV_FINAL : public MergeDebevecNew
{
public:
MergeDebevecImplNew() :
name("MergeDebevecNew"),
weights(triangleWeightsNew())
{
}
// Merges `src` (vector of 8-bit images) into `dst` (CV_32F with the same
// channel count). `_times` holds one exposure time per image;
// `input_response` is the LDR_SIZE x 1 inverse CRF, or empty to fall back
// to a linear response.
void process(InputArrayOfArrays src, OutputArray dst, InputArray _times, InputArray input_response) CV_OVERRIDE
{
// CV_INSTRUMENT_REGION();
ALOGD("HDR Merge 1");
std::vector<Mat> images;
src.getMatVector(images);
Mat times = _times.getMat();
// Upstream OpenCV input validation, currently disabled in this copy.
#if 0
CV_Assert(images.size() == times.total());
checkImageDimensions(images);
CV_Assert(images[0].depth() == CV_8U);
#endif
int channels = images[0].channels();
Size size = images[0].size();
int CV_32FCC = CV_MAKETYPE(CV_32F, channels);
ALOGD("HDR Merge 2");
dst.create(images[0].size(), CV_32FCC);
Mat result = dst.getMat();
Mat response = input_response.getMat();
if(response.empty()) {
response = linearResponseNew(channels);
// Avoid log(0) at intensity 0 by reusing the value for intensity 1.
// NOTE(review): Vec3f access assumes a 3-channel response — confirm for
// non-3-channel inputs.
response.at<Vec3f>(0) = response.at<Vec3f>(1);
}
ALOGD("HDR Merge 3");
Mat log_response;
log(response, log_response);
CV_Assert(log_response.rows == LDR_SIZE && log_response.cols == 1 &&
log_response.channels() == channels);
// Work with log exposure times so the merge becomes a weighted sum.
Mat exp_values(times.clone());
log(exp_values, exp_values);
ALOGD("HDR Merge 4");
result = Mat::zeros(size, CV_32FCC);
std::vector<Mat> result_split;
split(result, result_split);
Mat weight_sum = Mat::zeros(size, CV_32F);
ALOGD("HDR Merge 5");
for(size_t i = 0; i < images.size(); i++) {
std::vector<Mat> splitted;
split(images[i], splitted);
ALOGD("HDR Merge 5 - 1");
// Per-pixel weight = average of the per-channel hat weights.
Mat w = Mat::zeros(size, CV_32F);
for(int c = 0; c < channels; c++) {
LUT(splitted[c], weights, splitted[c]);
w += splitted[c];
}
w /= channels;
ALOGD("HDR Merge 5 - 2");
Mat response_img;
LUT(images[i], log_response, response_img);
split(response_img, splitted);
// OpenMP pragma intentionally left disabled — see crash note below.
// #pragma omp parallel for num_threads(channels)
for(int c = 0; c < channels; c++) {
// NOTE(review): the original (Chinese) comment said "crashes here";
// kept single-threaded for that reason.
result_split[c] += w.mul(splitted[c] - exp_values.at<float>((int)i));
}
weight_sum += w;
ALOGD("HDR Merge 5 - 3");
}
ALOGD("HDR Merge 6");
// Normalize by the accumulated weights, then leave the log domain.
weight_sum = 1.0f / weight_sum;
for(int c = 0; c < channels; c++) {
result_split[c] = result_split[c].mul(weight_sum);
}
ALOGD("HDR Merge 7");
merge(result_split, result);
exp(result, result);
ALOGD("HDR Merge 8");
}
// Convenience overload: merge with an implicit linear camera response.
void process(InputArrayOfArrays src, OutputArray dst, InputArray times) CV_OVERRIDE
{
// CV_INSTRUMENT_REGION();
process(src, dst, times, Mat());
}
protected:
String name;
Mat weights;
};
// Factory for the local Debevec merger, mirroring cv::createMergeDebevec().
Ptr<MergeDebevecNew> createMergeDebevecNew()
{
    Ptr<MergeDebevecNew> merger = makePtr<MergeDebevecImplNew>();
    return merger;
}
// Abstract interface mirroring cv::TonemapReinhard: gamma correction plus
// Reinhard's intensity / light-adaptation / color-adaptation parameters.
class CV_EXPORTS_W TonemapReinhardNew
{
public:
// Tonemaps HDR `src` into a displayable `dst`.
CV_WRAP virtual void process(InputArray src, OutputArray dst) = 0;
virtual ~TonemapReinhardNew() {}
// Gamma correction applied in the final pass.
CV_WRAP virtual float getGamma() const = 0;
CV_WRAP virtual void setGamma(float gamma) = 0;
// Result intensity bias (exponentiated inside process()).
CV_WRAP virtual float getIntensity() const = 0;
CV_WRAP virtual void setIntensity(float intensity) = 0;
// Blend between per-pixel and global adaptation.
CV_WRAP virtual float getLightAdaptation() const = 0;
CV_WRAP virtual void setLightAdaptation(float light_adapt) = 0;
// Blend between per-channel and grayscale adaptation.
CV_WRAP virtual float getColorAdaptation() const = 0;
CV_WRAP virtual void setColorAdaptation(float color_adapt) = 0;
};
// Clamps `src` to a small positive floor (1e-4) and then takes the natural
// log into `dst`, so log() never sees zero or negative values.
inline void log_(const Mat& src, Mat& dst)
{
max(src, Scalar::all(1e-4), dst);
log(dst, dst);
}
// Reinhard tonemap operator (local copy of cv::TonemapReinhardImpl with
// logging). Maps a 32-bit HDR image into displayable range using chromatic
// (color_adapt) and light (light_adapt) adaptation, then applies gamma.
class TonemapReinhardImpl CV_FINAL : public TonemapReinhardNew
{
public:
    TonemapReinhardImpl(float _gamma, float _intensity, float _light_adapt, float _color_adapt) :
        name("TonemapReinhardNew"),
        gamma(_gamma),
        intensity(_intensity),
        light_adapt(_light_adapt),
        color_adapt(_color_adapt)
    {
    }
    // Tonemaps `_src` (CV_32FC3 HDR) into `_dst` (CV_32FC3).
    void process(InputArray _src, OutputArray _dst) CV_OVERRIDE
    {
        ALOGD("HDR 1 ");
        Mat src = _src.getMat();
        CV_Assert(!src.empty());
        _dst.create(src.size(), CV_32FC3);
        Mat img = _dst.getMat();
        // First a linear (gamma = 1) normalization pass into [0,1].
        Ptr<Tonemap> linear = createTonemap(1.0f);
        linear->process(src, img);
        ALOGD("HDR 2 ");
        Mat gray_img;
        cvtColor(img, gray_img, COLOR_RGB2GRAY);
        Mat log_img;
        log_(gray_img, log_img);
        float log_mean = static_cast<float>(sum(log_img)[0] / log_img.total());
        double log_min, log_max;
        minMaxLoc(log_img, &log_min, &log_max);
        log_img.release();
        ALOGD("HDR 3 ");
        // Reinhard's "key" of the scene, from the log-luminance statistics.
        double key = (log_max - log_mean) / (log_max - log_min);
        float map_key = 0.3f + 0.7f * pow(static_cast<float>(key), 1.4f);
        // BUGFIX: the original did `intensity = exp(-intensity);`, mutating the
        // member so a second process() call (and getIntensity()) saw a corrupted
        // value. Use a local so process() is idempotent.
        float intensity_exp = exp(-intensity);
        Scalar chan_mean = mean(img);
        float gray_mean = static_cast<float>(mean(gray_img)[0]);
        std::vector<Mat> channels(3);
        split(img, channels);
        ALOGD("HDR 4 ");
#pragma omp parallel for num_threads(3)
        for (int i = 0; i < 3; i++) {
            // Blend channel mean with the gray mean (color adaptation), then
            // blend per-pixel with the global value (light adaptation).
            float global = color_adapt * static_cast<float>(chan_mean[i]) + (1.0f - color_adapt) * gray_mean;
            Mat adapt = color_adapt * channels[i] + (1.0f - color_adapt) * gray_img;
            adapt = light_adapt * adapt + (1.0f - light_adapt) * global;
            pow(intensity_exp * adapt, map_key, adapt);
            // NOTE(review): the original (Chinese) comment marked this line as a
            // past crash site; kept as an element-wise divide via mul().
            channels[i] = channels[i].mul(1.0f / (adapt + channels[i]));
        }
        gray_img.release();
        merge(channels, img);
        ALOGD("HDR 5 ");
        // Final pass with the user-requested gamma.
        linear->setGamma(gamma);
        linear->process(img, img);
        ALOGD("HDR 6 ");
    }
    float getGamma() const CV_OVERRIDE { return gamma; }
    void setGamma(float val) CV_OVERRIDE { gamma = val; }
    float getIntensity() const CV_OVERRIDE { return intensity; }
    void setIntensity(float val) CV_OVERRIDE { intensity = val; }
    float getLightAdaptation() const CV_OVERRIDE { return light_adapt; }
    void setLightAdaptation(float val) CV_OVERRIDE { light_adapt = val; }
    float getColorAdaptation() const CV_OVERRIDE { return color_adapt; }
    void setColorAdaptation(float val) CV_OVERRIDE { color_adapt = val; }
    void write(FileStorage& fs) const
    {
        // Serialization disabled in this local copy.
#if 0
        writeFormat(fs);
        fs << "name" << name
           << "gamma" << gamma
           << "intensity" << intensity
           << "light_adapt" << light_adapt
           << "color_adapt" << color_adapt;
#endif
    }
    void read(const FileNode& fn)
    {
        // Deserialization disabled in this local copy.
#if 0
        FileNode n = fn["name"];
        CV_Assert(n.isString() && String(n) == name);
        gamma = fn["gamma"];
        intensity = fn["intensity"];
        light_adapt = fn["light_adapt"];
        color_adapt = fn["color_adapt"];
#endif
    }
protected:
    String name;
    float gamma, intensity, light_adapt, color_adapt;
};
// Factory for the local Reinhard tonemapper. The original parameter names
// (contrast/sigma_color/sigma_space) were copied from a different tonemapper
// and did not match what they feed; renamed to match TonemapReinhardImpl.
// Callers are positional in C++, so this is fully backward-compatible.
Ptr<TonemapReinhardNew> createTonemapReinhardNew(float gamma, float intensity, float light_adapt, float color_adapt)
{
    return makePtr<TonemapReinhardImpl>(gamma, intensity, light_adapt, color_adapt);
}
};
// AndroidBitmap_compress() callback that streams encoded bytes to a file
// descriptor packed into userContext. Returns true only if every byte was
// written.
//
// BUGFIX: the original issued a single write() and compared its int result to
// a size_t, so any short write (valid POSIX behaviour) was reported as
// failure. This version loops until the buffer is fully written.
bool AndroidBitmap_CompressWriteFile(void *userContext, const void *data, size_t size)
{
    int file = (int)((size_t)userContext);
    const char* p = (const char*)data;
    size_t remaining = size;
    while (remaining > 0)
    {
        ssize_t bytesWritten = write(file, p, remaining);
        if (bytesWritten <= 0)
        {
            return false; // write error (or no progress)
        }
        p += bytesWritten;
        remaining -= (size_t)bytesWritten;
    }
    return true;
}
// AndroidBitmap_compress() callback that appends encoded bytes to the
// std::vector<uint8_t> passed through userContext. Always succeeds.
bool AndroidBitmap_CompressWriteBuffer(void *userContext, const void *data, size_t size)
{
    std::vector<uint8_t>* sink = static_cast<std::vector<uint8_t>*>(userContext);
    const uint8_t* begin = static_cast<const uint8_t*>(data);
    sink->insert(sink->end(), begin, begin + size);
    return true;
}
// Converts a Java String to a UTF-8 std::string via String.getBytes("UTF-8").
// Returns "" for a null jstring.
//
// BUGFIX: the original never released the temporary "UTF-8" jstring created
// by NewStringUTF, leaking a local reference per call (fatal when called in a
// loop once the local-reference table fills). Also null-checks the returned
// byte array before dereferencing it.
inline std::string jstring2string(JNIEnv *env, jstring jStr)
{
    if (!jStr)
        return "";
    const jclass stringClass = env->GetObjectClass(jStr);
    const jmethodID getBytes = env->GetMethodID(stringClass, "getBytes", "(Ljava/lang/String;)[B");
    jstring charsetName = env->NewStringUTF("UTF-8");
    const jbyteArray stringJbytes = (jbyteArray) env->CallObjectMethod(jStr, getBytes, charsetName);
    env->DeleteLocalRef(charsetName);
    std::string ret;
    if (stringJbytes != NULL)
    {
        size_t length = (size_t) env->GetArrayLength(stringJbytes);
        jbyte* pBytes = env->GetByteArrayElements(stringJbytes, NULL);
        if (pBytes != NULL)
        {
            ret.assign((char *)pBytes, length);
            // JNI_ABORT: we only read the bytes, no copy-back needed.
            env->ReleaseByteArrayElements(stringJbytes, pBytes, JNI_ABORT);
        }
        env->DeleteLocalRef(stringJbytes);
    }
    env->DeleteLocalRef(stringClass);
    return ret;
}
// Loads `paths`, calibrates the camera response (Debevec), merges the
// exposures into an HDR radiance map and tonemaps it (Reinhard) into an
// 8-bit image.
//
// @param times  exposure time in seconds for each image; must match paths.size()
// @param paths  image files readable by cv::imread (decoded as BGR)
// @param rgb    output CV_8U tonemapped image
// @return false if any input image failed to load, true otherwise
bool makeHdr(std::vector<float>& times, std::vector<std::string>& paths, cv::Mat& rgb)
{
    // Decode all inputs in parallel.
    std::vector<cv::Mat> images;
    images.resize(paths.size());
#pragma omp parallel for
    for (int idx = 0; idx < (int)paths.size(); idx++)
    {
        images[idx] = cv::imread(paths[idx].c_str());
    }
    // BUGFIX: fail early on unreadable files instead of crashing deep inside
    // the calibrate/merge pipeline on an empty Mat.
    for (size_t idx = 0; idx < images.size(); idx++)
    {
        if (images[idx].empty())
        {
            return false;
        }
    }
    // Image alignment (AlignMTB) is currently disabled.
    cv::Ptr<cv::AlignMTB> alignMTB = cv::createAlignMTB();
#if 0
    alignMTB->process(images, images);
#endif
    // Obtain the inverse Camera Response Function (CRF).
    cv::Mat responseDebevec;
    cv::Ptr<cv::CalibrateDebevec> calibrateDebevec = cv::createCalibrateDebevec();
    calibrateDebevec->process(images, responseDebevec, times);
    // Merge the exposures into a linear HDR radiance map.
    cv::Mat hdrDebevec;
    cv::Ptr<cv::MergeDebevec> mergeDebevec = cv::createMergeDebevec();
    mergeDebevec->process(images, hdrDebevec, times, responseDebevec);
    // Release the decoded inputs before tonemapping to cap peak memory.
    {
        std::vector<cv::Mat> empty;
        empty.swap(images);
    }
    // Tonemap with Reinhard's operator to obtain a 24-bit image.
    cv::Mat ldrReinhard;
    cv::Ptr<cv::TonemapReinhard> tonemapReinhard = cv::createTonemapReinhard(1.5, 0, 0, 0);
    tonemapReinhard->process(hdrDebevec, ldrReinhard);
    hdrDebevec.release();
    // Scale [0,1] -> [0,255] and convert to 8-bit.
    ldrReinhard = ldrReinhard * 255;
    ldrReinhard.convertTo(rgb, CV_8U);
    ldrReinhard.release();
    return true;
}
extern "C"
JNIEXPORT jboolean JNICALL
Java_com_xypower_mppreview_Camera2RawFragment_makeHdr3(JNIEnv *env, jclass clazz,
                                                       jlong exposureTime1, jobject img1, jint length1,
                                                       jlong exposureTime2, jobject img2, jint length2,
                                                       jstring outputPath) {
    // Merges two software Android bitmaps (RGBA_8888 assumed — TODO confirm)
    // captured at different exposures into one tonemapped JPEG written to
    // outputPath. exposureTime1/2 are nanoseconds; length1/2 are unused here.
    ALOGI("Start HDR3");
    std::vector<cv::Mat> images;
    images.resize(2);
    std::vector<jobject> bitmaps;
    bitmaps.push_back(img1);
    bitmaps.push_back(img2);
    ALOGI("Start Decode");
    for (int idx = 0; idx < 2; idx++)
    {
        AndroidBitmapInfo bmpInfo = { 0 };
        int result = AndroidBitmap_getInfo(env, bitmaps[idx], &bmpInfo);
        // BUGFIX: the original ignored all AndroidBitmap_* result codes and
        // proceeded with garbage dimensions / a null pixel pointer on failure.
        if (result != ANDROID_BITMAP_RESULT_SUCCESS)
        {
            ALOGE("AndroidBitmap_getInfo failed: %d", result);
            return JNI_FALSE;
        }
        if ((ANDROID_BITMAP_FLAGS_IS_HARDWARE & bmpInfo.flags) == ANDROID_BITMAP_FLAGS_IS_HARDWARE)
        {
            // Hardware bitmaps cannot be locked with AndroidBitmap_lockPixels;
            // the AHardwareBuffer path is not enabled yet, so fail explicitly
            // instead of silently leaving images[idx] empty (old behaviour).
#if 0
            AHardwareBuffer* hardwareBuffer = NULL;
            result = AndroidBitmap_getHardwareBuffer(env, bitmaps[idx], &hardwareBuffer);
            void* outVirtualAddress = NULL;
            int32_t fence = -1;
            result = AHardwareBuffer_lock(hardwareBuffer, AHARDWAREBUFFER_USAGE_CPU_READ_RARELY, fence, NULL, &outVirtualAddress);
            cv::Mat tmp(bmpInfo.height, bmpInfo.width, CV_8UC4, outVirtualAddress);
            tmp.copyTo(images[idx]);
            AHardwareBuffer_unlock(hardwareBuffer, &fence);
            AHardwareBuffer_release(hardwareBuffer);
#endif
            ALOGE("Hardware bitmaps are not supported");
            return JNI_FALSE;
        }
        else
        {
            void* outAddress = NULL;
            result = AndroidBitmap_lockPixels(env, bitmaps[idx], &outAddress);
            if (result != ANDROID_BITMAP_RESULT_SUCCESS || outAddress == NULL)
            {
                ALOGE("AndroidBitmap_lockPixels failed: %d", result);
                return JNI_FALSE;
            }
            // Wrap the locked pixels without copying, deep-copy into our Mat,
            // then unlock immediately.
            cv::Mat tmp(bmpInfo.height, bmpInfo.width, CV_8UC4, outAddress);
            tmp.copyTo(images[idx]);
            AndroidBitmap_unlockPixels(env, bitmaps[idx]);
        }
        // Bitmaps are 4-channel RGBA; the OpenCV HDR pipeline needs 3-channel
        // BGR. BUGFIX: was COLOR_RGB2BGR, which keeps 4 channels on RGBA input
        // and breaks the downstream tonemapper (it requires CV_32FC3).
        cv::cvtColor(images[idx], images[idx], cv::COLOR_RGBA2BGR);
    }
    bitmaps.clear();
    env->DeleteLocalRef(img1);
    env->DeleteLocalRef(img2);
    ALOGI("End Decode");
    cv::Mat rgb;
    std::vector<float> times;
    // Exposure times arrive in nanoseconds; the HDR merge wants seconds.
    times.push_back((double)(exposureTime1) / 1000000000.0);
    times.push_back((double)(exposureTime2) / 1000000000.0);
    ALOGI("Start MakeHDR3");
    makeHdr(times, images, rgb);
    ALOGI("End MakeHDR3");
    std::string fileName = jstring2string(env, outputPath);
    std::vector<int> params;
    params.push_back(cv::IMWRITE_JPEG_QUALITY);
    params.push_back(100);
    if (cv::imwrite(fileName.c_str(), rgb, params))
    {
        rgb.release();
        ALOGI("End HDR3");
        return JNI_TRUE;
    }
    return JNI_FALSE;
}
extern "C"
JNIEXPORT jboolean JNICALL
Java_com_xypower_mppreview_Camera2RawFragment_decodeDng(JNIEnv *env, jclass clazz,
                                                        jobject byte_buffer, jstring output_path) {
    // TODO: implement decodeDng()
    // BUGFIX: the original had no return statement at all — undefined
    // behaviour for a function returning jboolean. Report failure until the
    // decoder is implemented.
    return JNI_FALSE;
}
extern "C"
JNIEXPORT jboolean JNICALL
Java_com_xypower_mppreview_Camera2RawFragment_makeHdr5(JNIEnv *env, jclass clazz,
                                                       jstring compFile,
                                                       jlong exposureTime1, jstring img1,
                                                       jlong exposureTime2, jstring img2,
                                                       jstring outputPath) {
    // Vulkan-based HDR merge of two images. compFile is the path to the
    // precompiled hdr_merge compute shader (SPIR-V); exposure times are in
    // nanoseconds. Returns JNI_TRUE on success.
    std::string compFilePath = jstring2string(env, compFile);
    std::string outputFile = jstring2string(env, outputPath);
    std::vector<std::string> inputFiles;
    std::vector<float> exposureValues;
    inputFiles.push_back(jstring2string(env, img1));
    inputFiles.push_back(jstring2string(env, img2));
    // Convert nanoseconds to seconds.
    exposureValues.push_back((double)(exposureTime1) / 1000000000.0);
    exposureValues.push_back((double)(exposureTime2) / 1000000000.0);
    // Fail fast on missing paths (jstring2string returns "" for null jstrings).
    if (compFilePath.empty() || outputFile.empty() ||
        inputFiles[0].empty() || inputFiles[1].empty()) {
        ALOGE("makeHdr5: missing shader/input/output path");
        return JNI_FALSE;
    }
    try {
        // The compute shader is expected to be precompiled to SPIR-V; there is
        // no runtime glslc invocation on Android.
        VulkanHdrGenerator generator(compFilePath);
        // Process with 256x256 tiles to keep memory usage low.
        bool success = generator.generateHdr(inputFiles, outputFile, exposureValues, 256, 256);
        if (!success) {
            ALOGE("Failed to generate HDR image");
        }
        return success ? JNI_TRUE : JNI_FALSE;
    } catch (const std::exception& e)
    {
        ALOGE("Error: %s", e.what());
        return JNI_FALSE;
    }
}