Compare commits


3 Commits

@@ -4,8 +4,8 @@ plugins {
// versionCode = major*100000 + minor*1000 + build
def AppMajorVersion = 1
def AppMinorVersion = 3
def AppBuildNumber = 196
def AppMinorVersion = 1
def AppBuildNumber = 1
def AppVersionName = AppMajorVersion + "." + AppMinorVersion + "." + AppBuildNumber
def AppVersionCode = AppMajorVersion * 100000 + AppMinorVersion * 1000 + AppBuildNumber
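For reference, a quick sketch of what the scheme above yields with the new defaults from this diff (Groovy, runnable as-is):

    def AppMajorVersion = 1
    def AppMinorVersion = 1
    def AppBuildNumber = 1
    // versionName "1.1.1", versionCode 101001
    assert AppMajorVersion + "." + AppMinorVersion + "." + AppBuildNumber == "1.1.1"
    assert AppMajorVersion * 100000 + AppMinorVersion * 1000 + AppBuildNumber == 101001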
@@ -36,11 +36,11 @@ android {
externalNativeBuild {
cmake {
// cppFlags '-std=c++17 -frtti -fexceptions -Wno-error=format-security'
cppFlags '-std=c++17 -fexceptions -Wno-error=format-security -fopenmp'
cppFlags '-std=c++17 -fexceptions -Wno-error=format-security'
// cppFlags '-std=c++17 -Wno-error=format-security'
// arguments "-DANDROID_STL=c++_shared"
arguments "-DNCNN_DISABLE_EXCEPTION=OFF", "-DTERM_CORE_ROOT=" + coreroot, "-DOpenCV_DIR=" + opencvsdk + "/sdk/native/jni", "-DHDRPLUS_ROOT=" + hdrplusroot, "-DNCNN_ROOT=" + ncnnroot, "-DHALIDE_ROOT=" + halideroot
abiFilters 'arm64-v8a', 'armeabi-v7a'
arguments "-DNCNN_DISABLE_EXCEPTION=OFF", "-DTERM_CORE_ROOT=" + coreroot, "-DOpenCV_DIR=" + opencvsdk + "/sdk/native/jni", "-DASIO_ROOT=" + asioroot, "-DEVPP_ROOT=" + evpproot, "-DNCNN_ROOT=" + ncnnroot
// abiFilters 'arm64-v8a', 'armeabi-v7a'
// setAbiFilters(['arm64-v8a'])
}
}
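Each -D argument above arrives in CMakeLists.txt as an ordinary CMake variable. A minimal sketch of how the newly added roots are consumed (mirroring usages later in this diff; EVPP_ROOT is so far only referenced in a commented-out line):

    # Sketch: Gradle's -D arguments become CMake variables of the same name
    include_directories(${ASIO_ROOT}/include)    # standalone ASIO headers
    # SET(EVPP_SRC_DIR ${EVPP_ROOT}/evpp)        # EVPP_ROOT passed but not yet active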
@@ -52,7 +52,6 @@ android {
proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro'
}
debug {
minifyEnabled false
jniDebuggable true
testCoverageEnabled false
}
@@ -82,7 +81,6 @@ android {
enable isReleaseTask
reset()
include "armeabi-v7a", "arm64-v8a"
// include "arm64-v8a"
universalApk false
}
}
@@ -91,14 +89,12 @@ android {
variant.outputs.all { output ->
if (outputFileName.endsWith('.apk')) {
def buildTypeFlag = "dbg"
def prevFileName = "mpapp"
if(variant.buildType.name.equals('release')) {
buildTypeFlag = "rel"
}
def abi = output.getFilter(com.android.build.OutputFile.ABI)
if (abi == null) abi = "all"
if (abi.contains("v7a")) prevFileName = "N938"
def fileName = "${prevFileName}_v${defaultConfig.versionName}_${buildTypeFlag}_${new Date(System.currentTimeMillis()).format("yyyyMMdd")}.apk"
def fileName = "mpapp_v${defaultConfig.versionName}_${buildTypeFlag}_${new Date(System.currentTimeMillis()).format("yyyyMMdd")}_${abi}.apk"
outputFileName = fileName
}
}
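With the new template, each ABI split gets its own dated artifact and the ABI suffix replaces the old N938 prefix switch; for example, builds on an assumed date of 20250101 would emit:

    mpapp_v1.1.1_rel_20250101_arm64-v8a.apk    (release, per-ABI splits enabled)
    mpapp_v1.1.1_dbg_20250101_all.apk          (debug, no split, abi falls back to "all")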
@@ -117,19 +113,15 @@ android {
exclude 'META-INF/LICENSE-notice.md'
exclude 'META-INF/LICENSE.md'
jniLibs {
useLegacyPackaging true
}
}
}
dependencies {
implementation 'androidx.legacy:legacy-support-v4:1.0.0'
implementation 'androidx.legacy:legacy-support-v13:1.0.0'
// implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:$kotlin_version"
implementation 'androidx.appcompat:appcompat:1.0.0'
// implementation "androidx.core:core:1.10.0" // 使
implementation 'androidx.fragment:fragment:1.3.6'
implementation 'androidx.constraintlayout:constraintlayout:2.1.4'
implementation 'com.google.android.material:material:1.8.0'
implementation project(path: ':common')

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

@@ -1,8 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools"
android:sharedUserId="com.xypower.mp"
tools:ignore="Deprecated">
xmlns:tools="http://schemas.android.com/tools">
<uses-permission android:name="android.permission.ACCESS_FINE_LOCATION" />
<uses-permission android:name="android.permission.ACCESS_COARSE_LOCATION" />
@@ -12,10 +10,9 @@
<uses-permission android:name="android.permission.CHANGE_WIFI_STATE" />
<uses-permission android:name="android.permission.RECEIVE_BOOT_COMPLETED" />
<uses-permission android:name="android.permission.RECORD_AUDIO" />
<uses-permission android:name="android.permission.ACCESS_COARSE_LOCATION" />
<uses-permission android:name="android.permission.CAMERA" />
<uses-permission android:name="android.permission.INTERNET" />
<uses-permission android:name="android.permission.MANAGE_NETWORK_POLICY"
tools:ignore="ProtectedPermissions" />
<uses-permission
android:name="android.permission.READ_PRIVILEGED_PHONE_STATE"
tools:ignore="ProtectedPermissions" />
@@ -58,7 +55,6 @@
<uses-permission android:name="android.permission.SYSTEM_ALERT_WINDOW" />
<uses-permission android:name="android.permission.WAKE_LOCK" />
<uses-permission android:name="android.permission.DISABLE_KEYGUARD" />
<uses-permission android:name="android.permission.USB_PERMISSION" />
<uses-permission
android:name="android.permission.DEVICE_POWER"
tools:ignore="ProtectedPermissions" />
@@ -67,23 +63,14 @@
tools:ignore="ProtectedPermissions" />
<uses-permission
android:name="android.permission.START_ACTIVITIES_FROM_BACKGROUND"
tools:ignore="ProtectedPermissions" />
<uses-permission android:name="android.permission.KILL_BACKGROUND_PROCESSES" />
tools:ignore="ProtectedPermissions" /> <!-- WiFi AP startTethering -->
<uses-permission
android:name="android.permission.TETHER_PRIVILEGED"
tools:ignore="ProtectedPermissions" />
<uses-permission android:name="android.permission.CONNECTIVITY_INTERNAL"
tools:ignore="ProtectedPermissions" />
<uses-feature android:name="android.hardware.camera" />
<uses-feature android:name="com.mediatek.camera.feature.mfnr" />
<uses-permission android:name="android.hardware.usb.accessory" />
<uses-feature android:name="android.hardware.usb.host" />
<uses-feature
android:name="android.hardware.telephony"
android:required="false" />
<queries>
<provider
@@ -92,17 +79,6 @@
android:enabled="true"
android:exported="false"
android:grantUriPermissions="true" />
<intent>
<action android:name="android.media.action.IMAGE_CAPTURE" />
</intent>
<intent>
<action android:name="android.media.action.STILL_IMAGE_CAMERA" />
</intent>
<intent>
<action android:name="android.intent.action.TIME_CHANGED" />
</intent>
<package android:name="com.xypower.mplive" />
</queries>
<application
@@ -116,14 +92,6 @@
android:supportsRtl="true"
android:theme="@style/Theme.MicroPhoto"
tools:targetApi="28">
<activity
android:name=".LogActivity"
android:exported="false"
android:screenOrientation="landscape" />
<activity
android:name=".video.RawActivity"
android:exported="false"
android:screenOrientation="landscape" />
<activity
android:name=".StreamActivity"
android:exported="false"
@@ -174,10 +142,11 @@
<category android:name="android.intent.category.DEFAULT" />
</intent-filter>
</service>
<service android:name=".FloatingWindow" />
<receiver
android:name=".MicroPhotoService$AlarmReceiver"
android:exported="true" >
</receiver>
android:exported="true" />
<receiver
android:name=".BootBroadcastReceiver"
android:enabled="true"
@@ -191,7 +160,17 @@
</intent-filter>
</receiver>
<receiver android:name=".NetworkChangedReceiver" />
<receiver
android:name=".ScreenActionReceiver"
android:exported="true">
<intent-filter android:priority="90000">
<action android:name="android.intent.action.USER_PRESENT" />
<action android:name="android.intent.action.BOOT_COMPLETED" />
<action android:name="android.intent.action.SCREEN_ON" />
<action android:name="android.intent.action.USER_PRESENT" />
<action android:name="android.intent.action.USER_UNLOCKED" />
</intent-filter>
</receiver>
<receiver
android:name="com.xypower.common.UpdateReceiver"
android:enabled="true"
@@ -204,17 +183,11 @@
<data android:scheme="package" />
</intent-filter>
</receiver>
<receiver
android:name=".HeartBeatResponseReceiver"
android:enabled="true"
android:exported="true">
<intent-filter >
<action android:name="com.systemui.ACTION_HEARTBEAT_RESPONSE" />
</intent-filter>
</receiver>
<activity
android:name=".MainActivity"
android:exported="true"
android:launchMode="singleTop"
android:screenOrientation="landscape">
<intent-filter>
<action android:name="android.intent.action.MAIN" />

@@ -1,227 +0,0 @@
#!/system/bin/sh
# ==============================================
# Configuration parameters - modify as needed
# ==============================================
ETH_IP="192.168.68.91" # Ethernet IP address
ETH_NETMASK="24" # Subnet mask (CIDR format)
ETH_NETWORK="192.168.68.0" # Network address
ETH_BROADCAST="192.168.68.255" # Broadcast address
ETH_GATEWAY="192.168.68.1" # Default gateway
ROUTE_TABLE="20" # Routing table number
MAX_INIT_WAIT=150 # Maximum wait iterations for ethernet interface (0.1s each)
MAX_UP_WAIT=10 # Maximum wait iterations for interface to come UP (0.5s each)
MAX_ROUTE_WAIT=5 # Maximum wait iterations for routing rules (0.5s each)
# For debugging only - comment out in production
# set -x
ANDROID_VERSION=$(getprop ro.build.version.release 2>/dev/null | cut -d '.' -f1)
# Record script start time
SCRIPT_START=$(date +%s)
# Cleanup function - handles unexpected interruptions
cleanup() {
echo "Script interrupted, cleaning up..." >&2
# Add additional cleanup code here if needed
exit 1
}
trap cleanup INT TERM
# Get script directory for finding tools like ethtool
SCRIPT_PATH="$0"
# Ensure path is absolute
case "$SCRIPT_PATH" in
/*) ;; # Already absolute path
*) SCRIPT_PATH="$PWD/$SCRIPT_PATH" ;;
esac
SCRIPT_DIR=$(dirname "$SCRIPT_PATH")
echo "Script directory detected as: $SCRIPT_DIR"
# Only configure rp_filter for eth0 interface
echo 0 > /proc/sys/net/ipv4/conf/eth0/rp_filter 2>/dev/null || true
# Wait for eth0 interface to appear
WAITED=0
while [ $WAITED -lt $MAX_INIT_WAIT ]; do
if [ -d "/sys/class/net/eth0" ]; then
echo "eth0 found after $WAITED seconds"
break
fi
echo "Wait eth0... ($WAITED/$MAX_INIT_WAIT)"
sleep 0.1
WAITED=$((WAITED+1))
done
# Check if eth0 exists
if ! [ -d "/sys/class/net/eth0" ]; then
echo "Error: eth0 not exists" >&2
exit 1
fi
# Check physical connection status
if [ -f "/sys/class/net/eth0/carrier" ]; then
CARRIER=$(cat /sys/class/net/eth0/carrier)
echo "Physical connection status: $CARRIER (1=connected, 0=disconnected)"
if [ "$CARRIER" != "1" ]; then
echo "Warning: Ethernet physical connection may have issues, please check the cable" >&2
fi
fi
# Clear previous configuration
/system/bin/ip link set eth0 down
/system/bin/ip addr flush dev eth0
/system/bin/ip route flush dev eth0
/system/bin/ip route flush table $ROUTE_TABLE
/system/bin/ip rule del to $ETH_NETWORK/$ETH_NETMASK 2>/dev/null || true
# Configure physical layer with ethtool (while interface is DOWN)
if [ -x "$SCRIPT_DIR/ethtool" ]; then
echo "Using ethtool from script directory: $SCRIPT_DIR/ethtool"
"$SCRIPT_DIR/ethtool" -s eth0 speed 10 duplex full autoneg off
# Try alternative path next
elif [ -x "/data/data/com.xypower.mpapp/files/ethtool" ]; then
echo "Configuring eth0 to 10Mbps full duplex..."
/data/data/com.xypower.mpapp/files/ethtool -s eth0 speed 10 duplex full autoneg off
else
echo "Warning: ethtool not found, falling back to sysfs configuration" >&2
# Try sysfs configuration as fallback
if [ -f "/sys/class/net/eth0/speed" ]; then
echo "off" > /sys/class/net/eth0/autoneg 2>/dev/null || true
echo "10" > /sys/class/net/eth0/speed 2>/dev/null || true
echo "full" > /sys/class/net/eth0/duplex 2>/dev/null || true
fi
fi
# ====================================================
# MTK Android 9 IP configuration with loss prevention
# ====================================================
# Configure IP address first while interface is DOWN
echo "Setting IP address while interface is DOWN..."
/system/bin/ip addr add $ETH_IP/$ETH_NETMASK broadcast $ETH_BROADCAST dev eth0
PRE_UP_IP=$(/system/bin/ip addr show eth0 | grep -c "inet $ETH_IP")
echo "IP configuration before UP: $PRE_UP_IP (1=configured, 0=missing)"
# Enable interface and wait for UP
echo "Bringing up interface..."
/system/bin/ip link set eth0 up
if [ "$ANDROID_VERSION" = "9" ]; then
sleep 3
else
# Use standard configuration for other devices
sleep 1
fi
# Check if IP was lost after interface UP (common issue on MTK devices)
POST_UP_IP=$(/system/bin/ip addr show eth0 | grep -c "inet $ETH_IP")
echo "IP configuration after UP: $POST_UP_IP (1=retained, 0=lost)"
# IP address lost detection and recovery
if [ "$PRE_UP_IP" = "1" ] && [ "$POST_UP_IP" = "0" ]; then
echo "Warning: IP address was lost after bringing interface up - MTK issue detected"
echo "Reapplying IP configuration..."
/system/bin/ip addr add $ETH_IP/$ETH_NETMASK broadcast $ETH_BROADCAST dev eth0
# Check if reapplied configuration worked
FIXED_IP=$(/system/bin/ip addr show eth0 | grep -c "inet $ETH_IP")
echo "IP reapplication result: $FIXED_IP (1=success, 0=still missing)"
# If standard method fails, try MTK-specific approaches
if [ "$FIXED_IP" = "0" ]; then
echo "Standard IP configuration failed, trying MTK-specific methods"
# Try ifconfig if available (works better on some MTK devices)
if command -v ifconfig >/dev/null 2>&1; then
echo "Using ifconfig method..."
ifconfig eth0 $ETH_IP netmask 255.255.255.0 up
sleep 1
fi
# Try Android's netd service if available
if [ -x "/system/bin/ndc" ]; then
echo "Using MTK netd service..."
/system/bin/ndc network interface setcfg eth0 $ETH_IP 255.255.255.0 up
sleep 1
fi
fi
fi
# Use loop to wait for interface UP instead of fixed sleep
WAITED=0
while [ $WAITED -lt $MAX_UP_WAIT ]; do
# Check both link status and IP configuration
IF_STATUS=$(/system/bin/ip link show eth0 | grep -c ",UP")
IP_STATUS=$(/system/bin/ip addr show eth0 | grep -c "inet $ETH_IP")
if [ "$IF_STATUS" = "1" ] && [ "$IP_STATUS" = "1" ]; then
echo "Interface is UP with correct IP after $WAITED seconds"
break
fi
echo "Waiting for interface UP with IP... ($WAITED/$MAX_UP_WAIT)"
# If interface is UP but IP is missing, reapply IP
if [ "$IF_STATUS" = "1" ] && [ "$IP_STATUS" = "0" ]; then
echo "Interface UP but IP missing, reapplying IP..."
/system/bin/ip addr add $ETH_IP/$ETH_NETMASK broadcast $ETH_BROADCAST dev eth0
fi
sleep 0.5
WAITED=$((WAITED+1))
done
# Final status check
FINAL_IF_STATUS=$(/system/bin/ip link show eth0 | grep -c ",UP")
FINAL_IP_STATUS=$(/system/bin/ip addr show eth0 | grep -c "inet $ETH_IP")
if [ "$FINAL_IF_STATUS" != "1" ] || [ "$FINAL_IP_STATUS" != "1" ]; then
echo "Warning: Failed to achieve stable interface state with IP" >&2
echo "Final interface status: $FINAL_IF_STATUS (1=UP, 0=DOWN)"
echo "Final IP status: $FINAL_IP_STATUS (1=configured, 0=missing)"
/system/bin/ip addr show eth0
else
echo "Successfully configured eth0 with IP $ETH_IP"
fi
# First add to main routing table
/system/bin/ip route add $ETH_NETWORK/$ETH_NETMASK dev eth0 proto static scope link
# Then add to specified routing table
/system/bin/ip route add $ETH_NETWORK/$ETH_NETMASK dev eth0 proto static scope link table $ROUTE_TABLE
ADD_ROUTE_STATUS=$?
if [ $ADD_ROUTE_STATUS -eq 0 ]; then
echo "Add route successfully"
else
echo "Failed to add route: $ADD_ROUTE_STATUS" >&2
fi
# Only clear ARP and neighbor cache for eth0
/system/bin/ip neigh flush dev eth0
# Add routing rules - only flush cache once after rule is added
/system/bin/ip rule add from all to $ETH_NETWORK/$ETH_NETMASK lookup $ROUTE_TABLE prio 1000
/system/bin/ip route flush cache dev eth0
# Only enable forwarding for eth0 interface
echo 1 > /proc/sys/net/ipv4/conf/eth0/forwarding 2>/dev/null || true
# Wait for routing rules to take effect - using loop check instead of fixed wait
WAITED=0
while [ $WAITED -lt $MAX_ROUTE_WAIT ]; do
if /system/bin/ip rule | grep -q "$ETH_NETWORK/$ETH_NETMASK"; then
echo "Routing rules are now effective after $WAITED seconds"
break
fi
echo "Waiting for routing rules to take effect... ($WAITED/$MAX_ROUTE_WAIT)"
sleep 0.5
WAITED=$((WAITED+1))
done
# Display execution time
SCRIPT_END=$(date +%s)
TOTAL_TIME=$((SCRIPT_END - SCRIPT_START))
echo "Total script execution time: $TOTAL_TIME seconds"
exit 0
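A sketch for sanity-checking the result on-device (values match the configuration block at the top of the script; run as root):

    # Expect: rule "from all to 192.168.68.0/24 lookup 20" at prio 1000
    /system/bin/ip rule show | grep "192.168.68.0/24"
    # Expect: the link-scope route for eth0 in the dedicated table
    /system/bin/ip route show table 20
    # Expect: inet 192.168.68.91/24 on eth0
    /system/bin/ip addr show eth0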

Binary file not shown.

Binary file not shown.

@@ -14,27 +14,6 @@ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -ffunction-sections -fdata-sections -Wformat
set(CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS}")
# SET_TARGET_PROPERTIES(microphoto PROPERTIES LINK_FLAGS "-Wl,-s,--gc-sections")
add_definitions(-DUSING_ETHERNET)
if(ANDROID_ABI STREQUAL "armeabi-v7a")
add_definitions(-DUSING_N938)
elseif(ANDROID_ABI STREQUAL "arm64-v8a")
# add_definitions(-DUSING_N938)
# add_definitions(-DUSING_PTZ)
endif()
# OUTPUT_DBG_INFO:
add_definitions(-DOUTPUT_DBG_INFO)
# OUTPUT_SOCKET_DBG_INFO Depends ON OUTPUT_DBG_INFO
# TerminalService.cpp
# add_definitions(-DOUTPUT_SOCKET_DBG_INFO)
# OUTPUT_DB_DBG_INFO Depends ON OUTPUT_DBG_INFO
# Database.cpp
# add_definitions(-DOUTPUT_DB_DBG_INFO)
add_definitions(-DUSING_FFMPEG)
IF (CMAKE_BUILD_TYPE STREQUAL Debug)
ADD_DEFINITIONS(-D_DEBUG)
ELSE()
@@ -49,22 +28,17 @@ add_definitions(-DASIO_STANDALONE)
add_definitions(-DUSING_XY_EXTENSION)
# add_definitions(-DUSING_BREAK_PAD)
add_definitions(-DSQLITE_THREADSAFE=1)
add_definitions(-DLIBRAW_NO_MEMPOOL_CHECK=1)
# add_definitions(-DHDRPLUS_NO_DETAILED_OUTPUT=1)
add_definitions(-DHAVE_STRING_H) # for memcpy in md5.c
# add_definitions(-DUSING_NRSEC)
# add_definitions(-DUSING_NRSEC_VPN)
add_definitions(-DUSING_NRSEC)
add_definitions(-DUSING_NRSEC_VPN)
# add_definitions(-DUSING_CERT)
# add_definitions(-DUSING_DOWSE)
# OUTPUT_CAMERA_DBG_INFO: CARERA
# add_definitions(-DOUTPUT_CAMERA_DBG_INFO)
add_definitions(-DALIGN_HB_TIMER_TO_PHOTO)
add_definitions(-DENABLE_3V3_ALWAYS)
add_definitions(-DCURL_STATICLIB)
add_definitions(-DUSING_HDRPLUS)
add_definitions(-DUSING_EXEC_HDRP=0)
#set(USING_EXEC_HDRP 1)
#add_definitions(-DUSING_N938)
# include_directories(${OpenCV_DIR}/include)
# add_library( lib_opencv SHARED IMPORTED )
@@ -74,7 +48,7 @@ add_definitions(-DUSING_EXEC_HDRP=0)
project("microphoto")
find_package(OpenCV REQUIRED core imgproc highgui photo)
find_package(OpenCV REQUIRED core imgproc highgui)
# find_package(OpenCV REQUIRED core imgproc)
if(OpenCV_FOUND)
include_directories(${OpenCV_INCLUDE_DIRS})
@@ -94,115 +68,92 @@ endif(OpenCV_FOUND)
set(ncnn_DIR ${NCNN_ROOT}/${ANDROID_ABI}/lib/cmake/ncnn)
find_package(ncnn REQUIRED)
# include(mars/src/CMakeUtils.txt)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/breakpad)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/libcutils/include)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/libutils/include)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/img_utils/include)
SET( IMG_UTILS_SRCS
"img_utils/src/EndianUtils.cpp"
#"img_utils/src/FileInput.cpp"
#"img_utils/src/FileOutput.cpp"
#"img_utils/src/SortedEntryVector.cpp"
"img_utils/src/Input.cpp"
"img_utils/src/Output.cpp"
"img_utils/src/Orderable.cpp"
"img_utils/src/TiffIfd.cpp"
"img_utils/src/TiffWritable.cpp"
"img_utils/src/TiffWriter.cpp"
"img_utils/src/TiffEntry.cpp"
"img_utils/src/TiffEntryImpl.cpp"
"img_utils/src/ByteArrayOutput.cpp"
"img_utils/src/DngUtils.cpp"
"img_utils/src/StripSource.cpp"
libutils/SharedBuffer.cpp
libutils/StrongPointer.cpp
DngCreator.cpp
)
message(WARNING "include_directories ${HDRPLUS_ROOT}/${ANDROID_ABI}/include")
include_directories(${HDRPLUS_ROOT}/${ANDROID_ABI}/include)
link_directories(${HDRPLUS_ROOT}/${ANDROID_ABI}/lib)
# message(WARNING "exiv2_DIR=${HDRPLUS_ROOT}/${ANDROID_ABI}/lib/cmake/exiv2")
# SET(exiv2_DIR ${HDRPLUS_ROOT}/${ANDROID_ABI}/lib/cmake/exiv2)
# list(APPEND CMAKE_PREFIX_PATH ${HDRPLUS_ROOT}/${ANDROID_ABI}/lib/cmake/exiv2)
# find_package(exiv2 REQUIRED CONFIG NAMES exiv2)
# message(STATUS "Found Exiv2 and linked")
SET(YAMC_INC_DIR ${CMAKE_SOURCE_DIR})
# OpenMP
find_package(OpenMP REQUIRED)
# SET(TERM_CORE_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../xymp/Core)
#SET(TERM_CORE_ROOT D:/shxy/xymp/Core)
SET(JSONCPP_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jsoncpp)
SET(JSONCPP_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jsoncpp/include)
# library
include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/hdrplus/include )
SET(SQLITE_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/sqlite)
SET(SQLITE_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/sqlite)
SET(BREAKPAD_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/breakpad)
# include_directories(${HDRPLUS_ROOT}/${ANDROID_ABI}/include)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/hdrplus2)
include_directories(hdrplus2/${ANDROID_ABI})
include_directories(${HALIDE_ROOT}/${ANDROID_ABI}/include)
SET(CAMERA2_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/camera2)
SET(FREETYPE_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/freetype)
SET(ZLMEDIAKIT_LIBS "")
SET(STREAMING_SRCS "")
SET(MQTT_ROOT ${TERM_CORE_ROOT}/Client/mqtt)
SET(SSL_LIBRARY_PATH "${TERM_CORE_ROOT}/Client/mqtt/lib/${ANDROID_ABI}/libssl.a")
SET(CRYPTO_LIBRARY_PATH "${TERM_CORE_ROOT}/Client/mqtt/lib/${ANDROID_ABI}/libcrypto.a")
add_definitions(-DDISABLE_RTTI)
# include_directories( ${HDRPLUS_ROOT}/${ANDROID_ABI}/include/ZLMediaKit )
# include_directories( ${HDRPLUS_ROOT}/${ANDROID_ABI}/include/ZLToolKit/src/ )
# SET(ZLMEDIAKIT_LIBS ${ZLMEDIAKIT_LIBS} zlmediakit zltoolkit)
# SET(EVPP_SRC_DIR ${EVPP_ROOT}/evpp)
SET(STREAMING_SRCS media/RTSPToMP4.cpp media/RTSPRecorder.cpp media/Streaming.cpp )
include_directories(${YAMC_INC_DIR})
include_directories(${BREAKPAD_ROOT} ${BREAKPAD_ROOT}/common/android/include)
include_directories(${ASIO_ROOT}/include)
#SET(HDRPLUS_LIBS raw exiv2 exiv2-xmp expat lcms2 OpenMP::OpenMP_CXX)
add_library( # Sets the name of the library.
sqlite3
#SET(HDRPLUS2_LIBS raw raw_r lcms2 tiff tiffxx jpeg hdrplus_pipeline)
# Sets the library as a shared library.
STATIC
SET(HDRPLUS_SOURCES
hdrplus/src/align.cpp
hdrplus/src/bayer_image.cpp
hdrplus/src/burst.cpp
hdrplus/src/finish.cpp
hdrplus/src/hdrplus_pipeline.cpp
hdrplus/src/merge.cpp
hdrplus/src/params.cpp
# Provides a relative path to your source file(s).
${SQLITE_SRC_DIR}/sqlite3.c
)
SET(HDRPLUS2_SOURCES
hdrplus2/src/HDRPlus.cpp
hdrplus2/src/Burst.cpp
hdrplus2/src/InputSource.cpp
hdrplus2/src/LibRaw2DngConverter.cpp
hdrplus2/${ANDROID_ABI}/hdrplus_pipeline.registration.cpp)
SET(HDRPLUS2_SOURCES )
SET(YAMC_INC_DIR ${CMAKE_SOURCE_DIR})
# SET(TERM_CORE_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../xymp/Core)
#SET(TERM_CORE_ROOT D:/shxy/xymp/Core)
INCLUDE_DIRECTORIES(${SQLITE_INCLUDE_DIR})
file(GLOB BREAKPAD_SOURCES_COMMON
native-lib.cpp
${BREAKPAD_ROOT}/client/linux/crash_generation/crash_generation_client.cc
${BREAKPAD_ROOT}/client/linux/dump_writer_common/thread_info.cc
${BREAKPAD_ROOT}/client/linux/dump_writer_common/ucontext_reader.cc
${BREAKPAD_ROOT}/client/linux/handler/exception_handler.cc
${BREAKPAD_ROOT}/client/linux/handler/minidump_descriptor.cc
${BREAKPAD_ROOT}/client/linux/log/log.cc
${BREAKPAD_ROOT}/client/linux/microdump_writer/microdump_writer.cc
${BREAKPAD_ROOT}/client/linux/minidump_writer/linux_dumper.cc
${BREAKPAD_ROOT}/client/linux/minidump_writer/linux_ptrace_dumper.cc
${BREAKPAD_ROOT}/client/linux/minidump_writer/minidump_writer.cc
${BREAKPAD_ROOT}/client/linux/minidump_writer/pe_file.cc
${BREAKPAD_ROOT}/client/minidump_file_writer.cc
${BREAKPAD_ROOT}/common/convert_UTF.cc
${BREAKPAD_ROOT}/common/md5.cc
${BREAKPAD_ROOT}/common/string_conversion.cc
${BREAKPAD_ROOT}/common/linux/elfutils.cc
${BREAKPAD_ROOT}/common/linux/file_id.cc
${BREAKPAD_ROOT}/common/linux/guid_creator.cc
${BREAKPAD_ROOT}/common/linux/linux_libc_support.cc
${BREAKPAD_ROOT}/common/linux/memory_mapped_file.cc
${BREAKPAD_ROOT}/common/linux/safe_readlink.cc
)
SET(JSONCPP_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jsoncpp)
SET(JSONCPP_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jsoncpp/include)
file(GLOB BREAKPAD_ASM_SOURCE ${BREAKPAD_ROOT}/common/linux/breakpad_getcontext.S)
set_property(SOURCE ${BREAKPAD_ROOT}/common/linux/breakpad_getcontext.S PROPERTY LANGUAGE C)
# set_source_files_properties(${BREAKPAD_ASM_SOURCE} PROPERTIES LANGUAGE C)
SET(CAMERA2_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/camera2)
# Creates and names a library, sets it as either STATIC
# or SHARED, and provides the relative paths to its source code.
# You can define multiple libraries, and CMake builds them for you.
# Gradle automatically packages shared libraries with your APK.
SET(FREETYPE_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/freetype)
add_library( # Sets the name of the library.
breakpad
# SET(EVPP_SRC_DIR ${EVPP_ROOT}/evpp)
# Sets the library as a shared library.
STATIC
include_directories(${YAMC_INC_DIR})
include_directories(${ASIO_ROOT}/include)
# Provides a relative path to your source file(s).
${BREAKPAD_SOURCES_COMMON}
${BREAKPAD_ASM_SOURCE}
)
# SET(SQLITE_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/sqlite)
# SET(SQLITE_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/sqlite)
# add_library(sqlite3 STATIC ${SQLITE_SRC_DIR}/sqlite3.c )
# INCLUDE_DIRECTORIES(${SQLITE_INCLUDE_DIR})
INCLUDE_DIRECTORIES(${JSONCPP_INCLUDE_DIR})
@@ -235,6 +186,63 @@ SET(CAMERA2_SOURCES
# ${CAMERA2_ROOT_DIR}/utils/camera_utils.cpp
${CAMERA2_ROOT_DIR}/ndkcamera.cpp)
SET(MQTT_SOURCES
${MQTT_ROOT}/mosquitto/lib/actions.c
${MQTT_ROOT}/mosquitto/lib/alias_mosq.c
${MQTT_ROOT}/mosquitto/lib/callbacks.c
${MQTT_ROOT}/mosquitto/lib/connect.c
${MQTT_ROOT}/mosquitto/lib/handle_auth.c
${MQTT_ROOT}/mosquitto/lib/handle_connack.c
${MQTT_ROOT}/mosquitto/lib/handle_disconnect.c
${MQTT_ROOT}/mosquitto/lib/handle_ping.c
${MQTT_ROOT}/mosquitto/lib/handle_pubackcomp.c
${MQTT_ROOT}/mosquitto/lib/handle_publish.c
${MQTT_ROOT}/mosquitto/lib/handle_pubrec.c
${MQTT_ROOT}/mosquitto/lib/handle_pubrel.c
${MQTT_ROOT}/mosquitto/lib/handle_suback.c
${MQTT_ROOT}/mosquitto/lib/handle_unsuback.c
${MQTT_ROOT}/mosquitto/lib/helpers.c
${MQTT_ROOT}/mosquitto/lib/logging_mosq.c
${MQTT_ROOT}/mosquitto/lib/loop.c
${MQTT_ROOT}/mosquitto/lib/memory_mosq.c
${MQTT_ROOT}/mosquitto/lib/messages_mosq.c
${MQTT_ROOT}/mosquitto/lib/mosquitto.c
${MQTT_ROOT}/mosquitto/lib/net_mosq_ocsp.c
${MQTT_ROOT}/mosquitto/lib/net_mosq.c
${MQTT_ROOT}/mosquitto/lib/options.c
${MQTT_ROOT}/mosquitto/lib/packet_datatypes.c
${MQTT_ROOT}/mosquitto/lib/packet_mosq.c
${MQTT_ROOT}/mosquitto/lib/property_mosq.c
${MQTT_ROOT}/mosquitto/lib/read_handle.c
${MQTT_ROOT}/mosquitto/lib/send_connect.c
${MQTT_ROOT}/mosquitto/lib/send_disconnect.c
${MQTT_ROOT}/mosquitto/lib/send_mosq.c
${MQTT_ROOT}/mosquitto/lib/send_publish.c
${MQTT_ROOT}/mosquitto/lib/send_subscribe.c
${MQTT_ROOT}/mosquitto/lib/send_unsubscribe.c
${MQTT_ROOT}/mosquitto/lib/socks_mosq.c
${MQTT_ROOT}/mosquitto/lib/srv_mosq.c
${MQTT_ROOT}/mosquitto/lib/thread_mosq.c
${MQTT_ROOT}/mosquitto/lib/time_mosq.c
${MQTT_ROOT}/mosquitto/lib/tls_mosq.c
${MQTT_ROOT}/mosquitto/lib/utf8_mosq.c
${MQTT_ROOT}/mosquitto/lib/util_mosq.c
${MQTT_ROOT}/mosquitto/lib/util_topic.c
${MQTT_ROOT}/mosquitto/lib/will_mosq.c
${MQTT_ROOT}/mosquitto_wrapper.cpp
${MQTT_ROOT}/JNIEnvHandler.cpp
${MQTT_ROOT}/MqttClient.cpp
${MQTT_ROOT}/RespJson.cpp
)
include_directories(
${MQTT_ROOT}
${MQTT_ROOT}/openssl/include
${MQTT_ROOT}/uthash/src
${MQTT_ROOT}/mosquitto
${MQTT_ROOT}/mosquitto/lib)
add_definitions(-DFT2_BUILD_LIBRARY=1)
SET(FREETYPE_SRC_FILES
${FREETYPE_ROOT}/src/autofit/autofit.c
@@ -296,39 +304,42 @@ include_directories(${TERM_CORE_ROOT})
add_library( # Sets the name of the library.
jsoncpp
# Sets the library as a shared library.
STATIC
# Provides a relative path to your source file(s).
${JSONCPP_SOURCES}
)
add_definitions(-DDARWIN_NO_CARBON)
add_definitions(-DFT2_BUILD_LIBRARY)
add_library(
mqtt
STATIC
${MQTT_SOURCES}
)
target_compile_definitions(mqtt PRIVATE
WITH_SOCKS
WITH_EC
WITH_UUID
WITH_SYS_TREE
WITH_MEMORY_TRACKING
WITH_PERSISTENCE
WITH_BRIDGE
WITH_THREADING
WITH_TLS_PSK
WITH_TLS
)
add_library(
freetype
STATIC
${FREETYPE_SRC_FILES}
)
if(USING_EXEC_HDRP)
message(WARNING "HDRP Compiled")
add_executable( libhdrp.so
${HDRPLUS_SOURCES}
hdrplus/bin/hdrplus.cpp )
target_link_libraries( libhdrp.so PUBLIC -fopenmp -static-openmp
android z
${OpenCV_LIBS}
# ${LIBRAW_LIBRARY}
${HDRPLUS_LIBS}
)
else(USING_EXEC_HDRP)
endif()
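Note that if(USING_EXEC_HDRP) tests a CMake variable, which is separate from the -DUSING_EXEC_HDRP=0 compile definition added earlier; with set(USING_EXEC_HDRP 1) left commented out, the executable block above is skipped. A sketch of what enabling it would require (an assumption, not part of this commit):

    set(USING_EXEC_HDRP 1)                 # gates the if() block above
    add_definitions(-DUSING_EXEC_HDRP=1)   # keeps the C++ macro in sync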
SET(HDRPLUS_SOURCES_EMBED ${HDRPLUS2_SOURCES} )
SET(HDRPLUS_LIBS_EMBED ${HDRPLUS2_LIBS} )
add_library( # Sets the name of the library.
microphoto
@@ -338,8 +349,8 @@ add_library( # Sets the name of the library.
# Provides a relative path to your source file(s).
GPIOControl.cpp
MicroPhoto.cpp
TerminalDevice.cpp
PhoneDevice.cpp
PtzController.cpp
# PhoneDevice2.cpp
Camera.cpp
Camera2Reader.cpp
@@ -352,23 +363,12 @@ add_library( # Sets the name of the library.
ncnn/yolov5ncnn.cpp
netcamera/httpclient.cpp
netcamera/VendorCtrl.cpp
netcamera/YuShiCtrl.cpp
netcamera/HangYuCtrl.cpp
netcamera/HikonCtrl.cpp
${STREAMING_SRCS}
#serial/WeatherComm.cpp
# camera2/OpenCVFont.cpp
${HDRPLUS_SOURCES_EMBED}
${CAMERA2_SOURCES}
${IMG_UTILS_SRCS}
${TERM_CORE_ROOT}/Factory.cpp
${TERM_CORE_ROOT}/FilePoster.cpp
${TERM_CORE_ROOT}/LogThread.cpp
@@ -378,11 +378,9 @@ add_library( # Sets the name of the library.
${TERM_CORE_ROOT}/SpecData_I1_JS.cpp
${TERM_CORE_ROOT}/SpecData_I1_HN.cpp
${TERM_CORE_ROOT}/SpecData_I1_HEN.cpp
${TERM_CORE_ROOT}/SpecData_I1_HEN_TY.cpp
${TERM_CORE_ROOT}/SpecData_I1_HENZZ.cpp
${TERM_CORE_ROOT}/SpecData_I1_SHX.cpp
${TERM_CORE_ROOT}/SpecData_I1_NX.cpp
${TERM_CORE_ROOT}/SpecData_I1_SX_ZY.cpp
${TERM_CORE_ROOT}/SpecData_XY.cpp
${TERM_CORE_ROOT}/SpecData_ZJ.cpp
${TERM_CORE_ROOT}/SpecData_NW.cpp
@@ -400,18 +398,14 @@ add_library( # Sets the name of the library.
${TERM_CORE_ROOT}/Client/Terminal_AH.cpp
${TERM_CORE_ROOT}/Client/Terminal_HEN_ZZ.cpp
${TERM_CORE_ROOT}/Client/Terminal_HEN.cpp
${TERM_CORE_ROOT}/Client/Terminal_HEN_TY.cpp
${TERM_CORE_ROOT}/Client/Terminal_SHX.cpp
${TERM_CORE_ROOT}/Client/Terminal_JS.cpp
${TERM_CORE_ROOT}/Client/Terminal_NX.cpp
${TERM_CORE_ROOT}/Client/Terminal_SX_ZY.cpp
${TERM_CORE_ROOT}/Client/Terminal_ZJ.cpp
${TERM_CORE_ROOT}/Client/Terminal_NW.cpp
${TERM_CORE_ROOT}/Client/DataController.cpp
${TERM_CORE_ROOT}/Client/UpgradeReceiver.cpp
${TERM_CORE_ROOT}/Client/Database.cpp
# ${TERM_CORE_ROOT}/Client/SimulatorDevice.cpp
${TERM_CORE_ROOT}/Client/DataController.cpp
${TERM_CORE_ROOT}/Client/SimulatorDevice.cpp
)
@@ -432,17 +426,31 @@ find_library( # Sets the name of the path variable.
# can link multiple libraries, such as libraries you define in this
# build script, prebuilt third-party libraries, or system libraries.
add_library(ssl STATIC IMPORTED)
set_target_properties(ssl PROPERTIES IMPORTED_LOCATION ${SSL_LIBRARY_PATH})
add_library(ssl_crypto STATIC IMPORTED)
set_target_properties(ssl_crypto PROPERTIES IMPORTED_LOCATION ${CRYPTO_LIBRARY_PATH})
target_link_libraries(mqtt PRIVATE ssl)
target_link_libraries(mqtt PRIVATE ssl_crypto)
target_link_libraries( # Specifies the target library.
${PROJECT_NAME}
microphoto
jsoncpp
freetype
breakpad
# breakpad
mqtt
# Links the target library to the log library
# included in the NDK.
avcodec avfilter avformat avutil swresample swscale x264
${log-lib}
android camera2ndk mediandk z curl
ncnn ${OpenCV_LIBS} sqlite3 ${HDRPLUS_LIBS_EMBED} ${ZLMEDIAKIT_LIBS}
android camera2ndk mediandk z
ncnn ${OpenCV_LIBS} sqlite3
)
# set_target_properties(${PROJECT_NAME} PROPERTIES LINK_FLAGS_RELEASE "-strip-all")

@@ -1,3 +1,4 @@
#include "TerminalDevice.h"
/*
* Copyright 2018 The Android Open Source Project
*

@@ -371,7 +371,7 @@ namespace cv {
delete userData;
#if defined(USING_HB)
hb_buffer_destroy(hb_buffer);
#endif // 0
#endif // 0
}
// https://freetype.org/freetype2/docs/tutorial/example2.cpp
@@ -630,7 +630,7 @@ namespace cv {
#if defined(USING_HB)
hb_buffer_destroy(hb_buffer);
#endif // 0
#endif // 0
}
Size FreeType2Impl::getTextSize(

File diff suppressed because it is too large.

@@ -1,332 +0,0 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "DngCreator_JNI"
#include <inttypes.h>
#include <string.h>
#include <algorithm>
#include <array>
#include <memory>
#include <vector>
#include <cmath>
#include <algorithm>
#include <camera/NdkCameraMetadata.h>
#include <img_utils/DngUtils.h>
#include <img_utils/TagDefinitions.h>
#include <img_utils/TiffIfd.h>
#include <img_utils/TiffWriter.h>
#include <img_utils/Output.h>
#include <img_utils/Input.h>
#include <img_utils/StripSource.h>
#include <sys/system_properties.h>
// #include "core_jni_helpers.h"
// #include "android_runtime/AndroidRuntime.h"
// #include "android_runtime/android_hardware_camera2_CameraMetadata.h"
#include <jni.h>
// #include <nativehelper/JNIHelp.h>
using namespace android;
using namespace img_utils;
// using android::base::GetProperty;
/**
* Max width or height dimension for thumbnails.
*/
// max pixel dimension for TIFF/EP
#define MAX_THUMBNAIL_DIMENSION 256
// bytes per sample
#define DEFAULT_PIXEL_STRIDE 2
// bytes per pixel
#define BYTES_PER_RGB_PIX 3
#define GPS_LAT_REF_NORTH "N"
#define GPS_LAT_REF_SOUTH "S"
#define GPS_LONG_REF_EAST "E"
#define GPS_LONG_REF_WEST "W"
#define GPS_DATE_FORMAT_STR "yyyy:MM:dd"
#define TIFF_DATETIME_FORMAT "yyyy:MM:dd kk:mm:ss"
class ByteVectorOutput : public Output {
public:
ByteVectorOutput(std::vector<uint8_t>& buf);
virtual ~ByteVectorOutput();
virtual status_t open();
virtual status_t write(const uint8_t* buf, size_t offset, size_t count);
virtual status_t close();
protected:
std::vector<uint8_t>& m_buf;
};
class ByteVectorInput : public Input {
public:
ByteVectorInput(const std::vector<uint8_t>& buf);
virtual ~ByteVectorInput();
/**
* Open this Input.
*
* Returns OK on success, or a negative error code.
*/
status_t open();
/**
* Read bytes into the given buffer. At most, the number of bytes given in the
* count argument will be read. Bytes will be written into the given buffer starting
* at the index given in the offset argument.
*
* Returns the number of bytes read, or NOT_ENOUGH_DATA if at the end of the file. If an
* error has occurred, this will return a negative error code other than NOT_ENOUGH_DATA.
*/
ssize_t read(uint8_t* buf, size_t offset, size_t count);
/**
* Skips bytes in the input.
*
* Returns the number of bytes skipped, or NOT_ENOUGH_DATA if at the end of the file. If an
* error has occurred, this will return a negative error code other than NOT_ENOUGH_DATA.
*/
ssize_t skip(size_t count);
/**
* Close the Input. It is not valid to call open on a previously closed Input.
*
* Returns OK on success, or a negative error code.
*/
status_t close();
protected:
const std::vector<uint8_t>& m_buf;
size_t m_offset;
};
class ByteBufferInput : public Input {
public:
ByteBufferInput(const uint8_t* buf, size_t len);
virtual ~ByteBufferInput();
/**
* Open this Input.
*
* Returns OK on success, or a negative error code.
*/
status_t open();
/**
* Read bytes into the given buffer. At most, the number of bytes given in the
* count argument will be read. Bytes will be written into the given buffer starting
* at the index given in the offset argument.
*
* Returns the number of bytes read, or NOT_ENOUGH_DATA if at the end of the file. If an
* error has occurred, this will return a negative error code other than NOT_ENOUGH_DATA.
*/
ssize_t read(uint8_t* buf, size_t offset, size_t count);
/**
* Skips bytes in the input.
*
* Returns the number of bytes skipped, or NOT_ENOUGH_DATA if at the end of the file. If an
* error has occurred, this will return a negative error code other than NOT_ENOUGH_DATA.
*/
ssize_t skip(size_t count);
/**
* Close the Input. It is not valid to call open on a previously closed Input.
*
* Returns OK on success, or a negative error code.
*/
status_t close();
protected:
const uint8_t* m_buf;
size_t m_len;
size_t m_offset;
};
struct SIZE
{
int width;
int height;
};
#define BAIL_IF_INVALID_RET_BOOL(expr, jnienv, tagId, writer) \
if ((expr) != OK) { \
return false; \
}
#define BAIL_IF_INVALID_RET_NULL_SP(expr, jnienv, tagId, writer) \
if ((expr) != OK) { \
return nullptr; \
}
#define BAIL_IF_INVALID_R(expr, jnienv, tagId, writer) \
if ((expr) != OK) { \
return -1; \
}
#define BAIL_IF_EMPTY_RET_NULL_SP(entry, jnienv, tagId, writer) \
if ((entry).count == 0) { \
return nullptr; \
}
#define BAIL_IF_EXPR_RET_NULL_SP(expr, jnienv, tagId, writer) \
if (expr) { \
return nullptr; \
}
#define ANDROID_DNGCREATOR_CTX_JNI_ID "mNativeContext"
enum {
BITS_PER_SAMPLE = 16,
BYTES_PER_SAMPLE = 2,
BYTES_PER_RGB_PIXEL = 3,
BITS_PER_RGB_SAMPLE = 8,
BYTES_PER_RGB_SAMPLE = 1,
SAMPLES_PER_RGB_PIXEL = 3,
SAMPLES_PER_RAW_PIXEL = 1,
TIFF_IFD_0 = 0,
TIFF_IFD_SUB1 = 1,
TIFF_IFD_GPSINFO = 2,
};
/**
* POD container class for GPS tag data.
*/
class GpsData {
public:
enum {
GPS_VALUE_LENGTH = 6,
GPS_REF_LENGTH = 2,
GPS_DATE_LENGTH = 11,
};
uint32_t mLatitude[GPS_VALUE_LENGTH];
uint32_t mLongitude[GPS_VALUE_LENGTH];
uint32_t mTimestamp[GPS_VALUE_LENGTH];
uint8_t mLatitudeRef[GPS_REF_LENGTH];
uint8_t mLongitudeRef[GPS_REF_LENGTH];
uint8_t mDate[GPS_DATE_LENGTH];
};
// ----------------------------------------------------------------------------
/**
* Container class for the persistent native context.
*/
class NativeContext : public LightRefBase<NativeContext> {
public:
enum {
DATETIME_COUNT = 20,
};
NativeContext(ACameraMetadata* characteristics, ACameraMetadata* result);
virtual ~NativeContext();
TiffWriter* getWriter();
ACameraMetadata* getCharacteristics() const;
ACameraMetadata* getResult() const;
uint32_t getThumbnailWidth() const;
uint32_t getThumbnailHeight() const;
const uint8_t* getThumbnail() const;
bool hasThumbnail() const;
bool setThumbnail(const std::vector<uint8_t>& buffer, uint32_t width, uint32_t height);
void setOrientation(uint16_t orientation);
uint16_t getOrientation() const;
void setDescription(const std::string& desc);
std::string getDescription() const;
bool hasDescription() const;
void setGpsData(const GpsData& data);
GpsData getGpsData() const;
bool hasGpsData() const;
void setCaptureTime(const std::string& formattedCaptureTime);
std::string getCaptureTime() const;
bool hasCaptureTime() const;
protected:
std::vector<uint8_t> mCurrentThumbnail;
TiffWriter mWriter;
ACameraMetadata* mCharacteristics;
ACameraMetadata* mResult;
uint32_t mThumbnailWidth;
uint32_t mThumbnailHeight;
uint16_t mOrientation;
bool mThumbnailSet;
bool mGpsSet;
bool mDescriptionSet;
bool mCaptureTimeSet;
std::string mDescription;
GpsData mGpsData;
std::string mFormattedCaptureTime;
};
class DngCreator : public NativeContext
{
public:
DngCreator(ACameraMetadata* characteristics, ACameraMetadata* result);
#if 0
void setLocation(Location location);
#endif
void writeInputStream(std::vector<uint8_t>& dngOutput, SIZE size, const std::vector<uint8_t>& pixels, long offset);
void writeByteBuffer(std::vector<uint8_t>& dngOutput, SIZE size, const std::vector<uint8_t>& pixels, long offset);
#if 0
void writeImage(OutputStream& dngOutput, AImage& pixels);
#endif
void close();
// private static final DateFormat sExifGPSDateStamp = new SimpleDateFormat(GPS_DATE_FORMAT_STR);
// private static final DateFormat sDateTimeStampFormat = new SimpleDateFormat(TIFF_DATETIME_FORMAT);
#if 0
static {
sDateTimeStampFormat.setTimeZone(TimeZone.getDefault());
sExifGPSDateStamp.setTimeZone(TimeZone.getTimeZone("UTC"));
}
#endif
/**
* Offset, rowStride, and pixelStride are given in bytes. Height and width are given in pixels.
*/
void writeByteBuffer(int width, int height, const std::vector<uint8_t>& pixels, std::vector<uint8_t>& dngOutput, int pixelStride, int rowStride, long offset);
/**
* Generate a direct RGB {@link ByteBuffer} from a {@link Bitmap}.
*/
/**
* Convert coordinate to EXIF GPS tag format.
*/
void toExifLatLong(double value, int data[6]);
void init(ACameraMetadata* characteristics, ACameraMetadata* result, const std::string& captureTime);
sp<TiffWriter> setup(uint32_t imageWidth, uint32_t imageHeight);
void destroy();
void setGpsTags(const std::vector<int>& latTag, const std::string& latRef, const std::vector<int>& longTag, const std::string& longRef, const std::string& dateTag, const std::vector<int>& timeTag);
void writeImage(std::vector<uint8_t>& out, uint32_t width, uint32_t height, const std::vector<uint8_t>& rawBuffer, int rowStride, int pixStride, uint64_t offset, bool isDirect);
void writeInputStream(std::vector<uint8_t>& out, const std::vector<uint8_t>& rawStream, uint32_t width, uint32_t height, long offset);
void writeInputBuffer(std::vector<uint8_t>& out, const uint8_t* rawBuffer, size_t bufferLen, uint32_t width, uint32_t height, long offset);
};
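A minimal usage sketch of the interfaces declared above (the camera handles and raw buffer are assumed to come from NdkCamera capture code):

    // Sketch: wrap a RAW16 frame and emit a DNG into a byte vector.
    // 'characteristics'/'result' are ACameraMetadata* obtained elsewhere;
    // 'rawBuffer', 'rawLen', 'width', 'height' describe the captured frame.
    std::vector<uint8_t> dng;
    DngCreator creator(characteristics, result);
    creator.writeInputBuffer(dng, rawBuffer, rawLen, width, height, /*offset*/ 0);
    creator.close();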

@@ -11,7 +11,6 @@
#include <sys/mman.h>
#include <unistd.h>
#include <climits>
#include "GPIOControl.h"
@@ -21,143 +20,35 @@
#define IOT_PARAM_WRITE 0xAE
#define IOT_PARAM_READ 0xAF
#define MAX_STRING_LEN 32
std::mutex GpioControl::m_locker;
std::mutex GpioControl::m_gpioLocker;
std::vector<GpioControl::ITEM> GpioControl::m_items;
bool GpioControl::m_cameraPowerStatus = false;
#define ENABLE_GPIO_TRACING
#ifdef ENABLE_GPIO_TRACING
class GpioDebugLogger
{
public:
GpioDebugLogger(int cmd, int value)
{
m_startTime = GetMicroTimeStamp();
m_path = std::string("/sdcard/com.xypower.mpapp/tmp/") + std::to_string(cmd) + std::string("_") + std::to_string(m_startTime) + "_val." + std::to_string(value);
CreateEmptyFile(m_path + ".enter");
}
GpioDebugLogger(int cmd)
{
m_startTime = GetMicroTimeStamp();
m_path = std::string("/sdcard/com.xypower.mpapp/tmp/") + std::to_string(cmd) + std::string("_") + std::to_string(m_startTime) + "_get";
CreateEmptyFile(m_path + ".enter");
}
~GpioDebugLogger()
{
uint64_t ts = (GetMicroTimeStamp() - m_startTime);
if (ts > 1000)
{
CreateEmptyFile(m_path + ".leave." + std::to_string(ts));
}
else
{
std::string path = m_path + ".enter";
std::remove(path.c_str());
}
}
private:
std::string m_path;
uint64_t m_startTime;
};
#endif
size_t GpioControl::turnOnImpl(const IOT_PARAM& param)
typedef struct
{
size_t oldRef = 0;
size_t references = 1;
std::vector<ITEM>::iterator it;
int res = 0;
int fd = -1;
time_t now = time(NULL);
// check res???
for (it = m_items.begin(); it != m_items.end(); ++it)
{
if (it->cmd == param.cmd)
{
oldRef = it->references;
it->references++;
// it->closeTime = 0;
references = it->references;
if(it->openTime == 0)
it->openTime = now;
SetCamerastatus(it->cmd, true);
break;
}
}
if (it == m_items.end())
{
oldRef = 0;
ITEM item = {param.cmd, references, now};
m_items.push_back(item);
SetCamerastatus(param.cmd, true);
}
if (oldRef == 0/* || param.cmd != CMD_SET_3V3_PWR_EN*/)
{
#ifdef ENABLE_GPIO_TRACING
GpioDebugLogger logger(param.cmd, param.value);
#endif
m_gpioLocker.lock();
fd = open(GPIO_NODE_MP, O_RDONLY);
if( fd > 0 )
{
res = ioctl(fd, IOT_PARAM_WRITE, &param);
close(fd);
#ifdef OUTPUT_DBG_INFO
// int realVal = getInt(param.cmd);
// XYLOG(XYLOG_SEVERITY_INFO, "setInt cmd=%d,value=%d,result=%d RealVal=%d",param.cmd, param.value, param.result/*, realVal*/);
XYLOG(XYLOG_SEVERITY_DEBUG, "setInt cmd=%d,value=%d,result=%d",param.cmd, param.value, param.result);
#endif
}
m_gpioLocker.unlock();
#ifdef _DEBUG
ALOGI("PWR TurnOn cmd=%d,result=%d ref=%u\r\n",param.cmd, param.result, (uint32_t)references);
#endif
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
return references;
}
int cmd;
int value;
int result;
long value2;
char str[MAX_STRING_LEN];
}IOT_PARAM;
void GpioControl::setInt(int cmd, int value)
{
IOT_PARAM param = { cmd, value, 0 };
// param.cmd = cmd;
// param.value = value;
#ifdef ENABLE_GPIO_TRACING
GpioDebugLogger logger(cmd, value);
#endif
m_gpioLocker.lock();
int fd = open(GPIO_NODE_MP, O_RDONLY);
if (fd > 0)
IOT_PARAM param;
param.cmd = cmd;
param.value = value;
// LOGE("set_int fd=%d,cmd=%d,value=%d\r\n",fd, cmd, value);
if( fd > 0 )
{
int res = ioctl(fd, IOT_PARAM_WRITE, &param);
// LOGE("set_int22 cmd=%d,value=%d,result=%d\r\n",param.cmd, param.value, param.result);
close(fd);
#ifdef OUTPUT_DBG_INFO
// int realVal = getInt(param.cmd);
// XYLOG(XYLOG_SEVERITY_INFO, "setInt cmd=%d,value=%d,result=%d RealVal=%d",param.cmd, value, param.result/*, realVal*/);
XYLOG(XYLOG_SEVERITY_DEBUG, "setInt cmd=%d,value=%d,result=%d",param.cmd, value, param.result);
#endif
}
m_gpioLocker.unlock();
return;
}
int GpioControl::getInt(int cmd)
{
#ifdef ENABLE_GPIO_TRACING
GpioDebugLogger logger(cmd);
#endif
m_gpioLocker.lock();
int fd = open(GPIO_NODE_MP, O_RDONLY);
// LOGE("get_int fd=%d,cmd=%d\r\n",fd, cmd);
if( fd > 0 )
@@ -166,37 +57,32 @@ int GpioControl::getInt(int cmd)
param.cmd = cmd;
ioctl(fd, IOT_PARAM_READ, &param);
#ifdef _DEBUG
ALOGI("getInt cmd=%d,value=%d,result=%d",param.cmd, param.value, param.result);
ALOGI("getInt cmd=%d,value=%d,result=%d\r\n",param.cmd, param.value, param.result);
#endif
close(fd);
m_gpioLocker.unlock();
return param.value;
}
m_gpioLocker.unlock();
return -1;
}
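The manual lock()/unlock() pairs around each early return above are easy to desynchronize; a minimal RAII sketch of the same read path (same members and macros assumed, behavior unchanged):

    // Sketch: std::lock_guard releases m_gpioLocker on every return path
    int GpioControl::getInt(int cmd)
    {
        std::lock_guard<std::mutex> lock(m_gpioLocker);
        int fd = open(GPIO_NODE_MP, O_RDONLY);
        if (fd < 0)
            return -1;                  // lock released automatically
        IOT_PARAM param = {};
        param.cmd = cmd;
        ioctl(fd, IOT_PARAM_READ, &param);
        close(fd);
        return param.value;
    }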
void GpioControl::setLong(int cmd, long value)
{
int fd = open(GPIO_NODE_MP, O_RDONLY);
IOT_PARAM param;
param.cmd = cmd;
param.value2 = value;
// LOGE("set_long fd=%d,cmd=%d,value2=%ld\r\n",fd, param.cmd, param.value2);
m_gpioLocker.lock();
int fd = open(GPIO_NODE_MP, O_RDONLY);
if( fd > 0 )
{
ioctl(fd, IOT_PARAM_WRITE, &param);
// LOGE("set_long22 cmd=%d,value2=%ld,result=%d\r\n",param.cmd, param.value2, param.result);
close(fd);
}
m_gpioLocker.unlock();
}
long GpioControl::getLong(int cmd)
{
m_gpioLocker.lock();
int fd = open(GPIO_NODE_MP, O_RDONLY);
// LOGE("get_long fd=%d,cmd=%d\r\n",fd, cmd);
if( fd > 0 )
@@ -206,37 +92,32 @@ long GpioControl::getLong(int cmd)
ioctl(fd, IOT_PARAM_READ, &param);
// LOGE("get_long22 cmd=%d,value2=%ld,result=%d\r\n",param.cmd, param.value2, param.result);
close(fd);
m_gpioLocker.unlock();
return param.value2;
}
m_gpioLocker.unlock();
return -1;
}
void GpioControl::setString(int cmd, const std::string& value)
{
IOT_PARAM param;
int fd = open(GPIO_NODE_MP, O_RDONLY);
int len = MAX_STRING_LEN < value.size() ? MAX_STRING_LEN : value.size();
param.cmd = cmd;
memset(param.str, 0, MAX_STRING_LEN);
int len = MAX_STRING_LEN < value.size() ? MAX_STRING_LEN : value.size();
memcpy(param.str, value.c_str(), len);
// LOGE("set_string fd=%d,cmd=%d,str=%s\r\n",fd, param.cmd, param.str);
m_gpioLocker.lock();
int fd = open(GPIO_NODE_MP, O_RDONLY);
if( fd > 0 )
{
ioctl(fd, IOT_PARAM_WRITE, &param);
// LOGE("set_string22 cmd=%d,str=%s,result=%d\r\n",param.cmd, param.str, param.result);
close(fd);
}
m_gpioLocker.unlock();
return;
}
std::string GpioControl::getString(int cmd)
{
m_gpioLocker.lock();
int fd = open(GPIO_NODE_MP, O_RDONLY);
// LOGE("get_string fd=%d,cmd=%d\r\n",fd, cmd);
if( fd > 0 )
@@ -246,273 +127,68 @@ std::string GpioControl::getString(int cmd)
ioctl(fd, IOT_PARAM_READ, &param);
// LOGE("get_string22 cmd=%d,str=%s,result=%d\r\n",param.cmd, param.str, param.result);
close(fd);
m_gpioLocker.unlock();
return std::string(param.str);
}
m_gpioLocker.unlock();
return "";
}
/////////////////////////// Power Control /////////////////////////////////
size_t GpioControl::TurnOn(int cmd)
{
IOT_PARAM param = { cmd, 1, 0 };
// param.cmd = cmd;
// param.value = value;
m_locker.lock();
size_t ref = turnOnImpl(param);
m_locker.unlock();
return ref;
}
size_t GpioControl::TurnOn(const std::vector<int>& cmds)
{
IOT_PARAM param = { 0, 1, 0 };
// param.cmd = cmd;
// param.value = value;
std::vector<int>::const_iterator it;
m_locker.lock();
for (it = cmds.cbegin(); it != cmds.cend(); ++it)
{
if (*it == 0)
{
continue;
}
param.cmd = *it;
turnOnImpl(param);
}
m_locker.unlock();
return 0;
}
size_t GpioControl::TurnOffImmediately(int cmd)
{
time_t ts = time(NULL);
size_t ref = 0;
std::vector<ITEM>::iterator it;
m_locker.lock();
for (it = m_items.begin(); it != m_items.end(); ++it)
{
if (it->cmd == cmd)
{
if (it->references > 0)
{
it->references = 0;
SetCamerastatus(cmd, false);
setInt(it->cmd, 0);
it->openTime = 0;
}
break;
}
}
m_locker.unlock();
#ifdef _DEBUG
ALOGI("PWR TurnOffNow cmd=%d ref=%u", cmd, (uint32_t)ref);
#endif
return 0;
}
size_t GpioControl::TurnOff(int cmd, uint32_t delayedCloseTime/* = 0*/)
{
time_t ts = 0;
if (delayedCloseTime > 0)
{
ts = time(NULL) + delayedCloseTime;
}
size_t ref = 0;
std::vector<ITEM>::iterator it;
if (delayedCloseTime > 0)
{
std::shared_ptr<PowerControl> powerCtrl = std::make_shared<PowerControl>(cmd);
std::thread th([delayedCloseTime, powerCtrl]() mutable {
std::this_thread::sleep_for(std::chrono::seconds(delayedCloseTime));
powerCtrl.reset();
});
th.detach();
}
m_locker.lock();
for (it = m_items.begin(); it != m_items.end(); ++it)
{
if (it->cmd == cmd)
{
if (it->references > 0)
{
it->references--;
if (it->references == 0)
{
SetCamerastatus(cmd, false);
setInt(it->cmd, 0);
it->openTime = 0;
}
}
break;
}
}
m_locker.unlock();
#ifdef _DEBUG
ALOGI("PWR TurnOff cmd=%d ref=%u", cmd, (uint32_t)ref);
#endif
return 0;
}
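A usage sketch of the delayed-off path above: the detached thread captures a shared_ptr<PowerControl>, so the power reference stays alive until the delay elapses (command id taken from the header in this diff):

    // Sketch: release camera power 5 seconds from now
    GpioControl::TurnOff(CMD_SET_PIC1_POWER, 5);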
#ifdef USING_N938
size_t GpioControl::TurnOff(const std::vector<int>& cmds, uint32_t delayedCloseTime/* = 0*/)
bool GpioControl::SetN938Cmd(int cmd, int val)
{
time_t ts = 0;
if (delayedCloseTime > 0)
{
ts = time(NULL) + delayedCloseTime;
}
std::vector<ITEM>::iterator it;
std::vector<int>::const_reverse_iterator itCmd;
if (delayedCloseTime > 0)
{
std::shared_ptr<PowerControl> powerCtrl = std::make_shared<PowerControl>(cmds);
std::thread th([delayedCloseTime, powerCtrl]() mutable {
std::this_thread::sleep_for(std::chrono::seconds(delayedCloseTime));
powerCtrl.reset();
});
th.detach();
}
m_locker.lock();
// turnOnImpl(param);
for (itCmd = cmds.crbegin(); itCmd != cmds.crend(); ++itCmd)
{
for (it = m_items.begin(); it != m_items.end(); ++it)
{
if (it->cmd == *itCmd)
{
if (it->references > 0)
{
it->references--;
if (it->references == 0)
{
SetCamerastatus(it->cmd, false);
setInt(it->cmd, 0);
it->openTime = 0;
}
}
break;
}
}
}
m_locker.unlock();
return 0;
}
char buf[32] = { 0 };
sprintf(buf, "out %d %d", cmd, val);
size_t GpioControl::TurnOff(const std::vector<std::pair<int, uint32_t> >& cmds)
{
for (auto itCmd = cmds.cbegin(); itCmd != cmds.end(); ++itCmd)
{
if (itCmd->second > 0)
{
uint32_t delayedCloseTime = itCmd->second;
std::shared_ptr<PowerControl> powerCtrl = std::make_shared<PowerControl>(itCmd->first);
std::thread th([delayedCloseTime, powerCtrl]() mutable {
std::this_thread::sleep_for(std::chrono::seconds(delayedCloseTime));
powerCtrl.reset();
});
th.detach();
}
}
IOT_PARAM param;
int len = MAX_STRING_LEN < strlen(buf) ? MAX_STRING_LEN : strlen(buf);
std::vector<ITEM>::iterator it;
std::vector<std::pair<int, uint32_t> >::const_iterator itCmd;
m_locker.lock();
for (itCmd = cmds.cbegin(); itCmd != cmds.end(); ++itCmd)
{
for (it = m_items.begin(); it != m_items.end(); ++it)
{
if (it->cmd == itCmd->first)
{
if (it->references > 0)
{
it->references--;
if (it->references == 0)
{
SetCamerastatus(it->cmd, false);
setInt(it->cmd, 0);
it->openTime = 0;
}
}
break;
}
}
}
m_locker.unlock();
param.cmd = cmd;
memset(param.str, 0, MAX_STRING_LEN);
// memcpy(param.str, value.c_str(), len);
memcpy(param.str, buf, len);
return 0;
}
bool GpioControl::SetCamerastatus(int cmd, bool status)
{
#ifdef USING_N938
if(cmd == CMD_SET_PIC1_POWER)
m_cameraPowerStatus = status;
#endif
#ifdef USING_PTZ
if(cmd == CMD_SET_PTZ_PWR_ENABLE)
int fd = open(GPIO_NODE_MP, O_RDONLY);
if( fd > 0 )
{
m_cameraPowerStatus = status;
ioctl(fd, IOT_PARAM_WRITE, &param);
close(fd);
}
#endif
return true;
}
bool GpioControl::GetCamerastatus()
{
return m_cameraPowerStatus;
}
bool GpioControl::GetSelftestStatus(time_t wait_time)
{
int cmd = 0;
#ifdef USING_N938
cmd = CMD_SET_PIC1_POWER;
#endif
#ifdef USING_PTZ
cmd = CMD_SET_PTZ_PWR_ENABLE;
#endif
time_t now = time(NULL);
std::vector<ITEM>::iterator it;
for (it = m_items.begin(); it != m_items.end(); ++it)
{
if (it->cmd == cmd && it->references > 0 && it->openTime!=0 && (now - it->openTime >= wait_time))
{
return true; // self-test completed
}
}
return false;
bool GpioControl::OpenSensors()
{
GpioControl::setCam3V3Enable(true);
GpioControl::setInt(CMD_SET_485_EN_STATE, true ? 1 : 0);
int igpio;
GpioControl::setInt(CMD_SET_WTH_POWER, 1);
GpioControl::setInt(CMD_SET_PULL_POWER, 1);
GpioControl::setInt(CMD_SET_ANGLE_POWER, 1);
GpioControl::setInt(CMD_SET_OTHER_POWER, 1);
GpioControl::setInt(CMD_SET_PIC1_POWER, 1);
igpio = GpioControl::getInt(CMD_SET_WTH_POWER);
igpio = GpioControl::getInt(CMD_SET_PULL_POWER);
igpio = GpioControl::getInt(CMD_SET_ANGLE_POWER);
igpio = GpioControl::getInt(CMD_SET_OTHER_POWER);
igpio = GpioControl::getInt(CMD_SET_PIC1_POWER);
GpioControl::setInt(CMD_SET_SPI_POWER, 1);
GpioControl::setInt(CMD_SET_485_en0, 1);
GpioControl::setInt(CMD_SET_485_en1, 1);
GpioControl::setInt(CMD_SET_485_en2, 1);
GpioControl::setInt(CMD_SET_485_en3, 1);
GpioControl::setInt(CMD_SET_485_en4, 1);
igpio = GpioControl::getInt(CMD_SET_SPI_POWER);
igpio = GpioControl::getInt(CMD_SET_485_en0);
igpio = GpioControl::getInt(CMD_SET_485_en1);
igpio = GpioControl::getInt(CMD_SET_485_en2);
igpio = GpioControl::getInt(CMD_SET_485_en3);
igpio = GpioControl::getInt(CMD_SET_485_en4);
return 0;
}
time_t GpioControl::GetSelfTestRemain(time_t wait_time)
{
int cmd = 0;
#ifdef USING_N938
cmd = CMD_SET_PIC1_POWER;
#endif
#ifdef USING_PTZ
cmd = CMD_SET_PTZ_PWR_ENABLE;
#endif
time_t now = time(NULL);
std::vector<ITEM>::iterator it;
for (it = m_items.begin(); it != m_items.end(); ++it)
{
if (it->cmd == cmd && it->references > 0)
{
time_t remaintime = (now - it->openTime);
remaintime = (wait_time > remaintime) ? (wait_time - remaintime) : 0;
return remaintime; // remaining self-test time
}
}
return 0;
}

@@ -8,16 +8,6 @@
#include <string>
#include <chrono>
#include <thread>
#include <mutex>
#include <vector>
#include <utility>
#include <SemaphoreEx.h>
#include <LogThread.h>
#ifndef USING_N938
#ifndef USING_PTZ // MicroPhoto
#define CMD_GET_LIGHT_ADC 101
#define CMD_SET_LIGHT_ADC 102
@@ -27,188 +17,113 @@
#define CMD_SET_NETWORK_STATE 106
#define CMD_SET_OTG_STATE 107
#define CMD_GET_OTG_STATE 108
//#define CMD_GET_CHARGING_VOL_STATE 110
//#define CMD_GET_CHARGING_SHUNT_VOLTAGE_STATE 111
#define CMD_GET_CHARGING_VOL_STATE 110
#define CMD_GET_CHARGING_SHUNT_VOLTAGE_STATE 111
#define CMD_GET_CHARGING_BUS_VOLTAGE_STATE 112
//#define CMD_GET_CHARGING_POWER_STATE 113
//#define CMD_GET_CHARGING_CURRENT_STATE 114
//#define CMD_GET_BAT_VOL_STATE 115
//#define CMD_GET_BAT_SHUNT_VOLTAGE_STATE 116
#define CMD_GET_CHARGING_POWER_STATE 113
#define CMD_GET_CHARGING_CURRENT_STATE 114
#define CMD_GET_BAT_VOL_STATE 115
#define CMD_GET_BAT_SHUNT_VOLTAGE_STATE 116
#define CMD_GET_BAT_BUS_VOLTAGE_STATE 117
//#define CMD_GET_BAT_POWER_STATE 118
//#define CMD_GET_BAT_CURRENT_STATE 119
#define CMD_GET_BAT_POWER_STATE 118
#define CMD_GET_BAT_CURRENT_STATE 119
#define CMD_SET_485_STATE 121
#define CMD_SET_SPI_MODE 123
#define CMD_SET_SPI_BITS_PER_WORD 124
#define CMD_SET_SPI_MAXSPEEDHZ 125
#define CMD_SET_PWM_BEE_STATE 126 // Removed
#define CMD_SET_ALM_MODE 128 // Removed
#define CMD_SET_SYSTEM_RESET 202
#define CMD_SET_SYSTEM_RESET2 203
#define CMD_SET_PWM_BEE_STATE 126
#define CMD_SET_ALM_MODE 128
#define CMD_SET_SPI_POWER 129
#define CMD_SET_485_EN_STATE 131
#define CMD_SET_CAM_3V3_EN_STATE 132
#define CMD_SET_12V_EN_STATE 133
#if 1
#define CMD_SET_SPI_POWER 129
#define CMD_SET_3V3_PWR_EN 132
#endif
#define CMD_SET_SYSTEM_RESET 202
#define CMD_GET_CAMERA_STATUS 310
#define CMD_SET_MADA_INIT_STATUS 312
#define CMD_SET_MADA_CLOSE_STATUS 313
#define CMD_SET_MADA_REG 314
#define CMD_GET_MADA_REG 315
#define CMD_SET_INIT_STATUS 401
#ifdef USING_N938
#define CMD_SET_5V_PWR_ENABLE 517
#define CMD_SET_NEW_OTG_STATE 507
#else // defined(USING_PTZ)
#define CMD_SET_485_EN_STATE 131
#define CMD_SET_CAM_3V3_EN_STATE 132
#define CMD_SET_12V_EN_STATE 133
#define CMD_SET_485_STATE 121
#define CMD_SET_SPI_MODE 123
#define CMD_SET_SPI_BITS_PER_WORD 124
#define CMD_SET_SPI_MAXSPEEDHZ 125
#define CMD_SET_SPI_POWER 129
#define CMD_SET_WTH_POWER 490
#define CMD_SET_PULL_POWER 491
#define CMD_SET_ANGLE_POWER 492
#define CMD_SET_OTHER_POWER 493
#define CMD_SET_PIC1_POWER 494
#define CMD_SET_GPIO157_POWER 510
#define CMD_SET_GPIO5_POWER 511
#define CMD_SET_PWM_BEE_STATE 126
#define CMD_SET_ALM_MODE 128
#define CMD_SET_485_en0 301
#define CMD_SET_485_en1 302
#define CMD_SET_485_en2 303
#define CMD_SET_485_en3 304
#define CMD_SET_485_en4 305
#define CMD_SET_OTG_STATE 107
#define CMD_GET_OTG_STATE 108
#define CMD_SET_OTG_STATE 107
#define CMD_GET_OTG_STATE 108
#define CMD_SET_SPI_POWER 129
#define CMD_SET_MADA_MOVE_STATUS 311
#if 0
#define CMD_485_0_DE 156 // 485_0 DE signal
#define CMD_485_0_PWR_EN 157 // 485_0 power enable
#define CMD_485_0_1_DE_EN 171 // 485_0 & 485_1 DE level-shifter enable
#define CMD_485_1_DE 172 //
#define CMD_SET_12V_EN_STATE 0 // TO BE ADDED
#define CMD_SET_SYSTEM_RESET 202
#define CMD_SET_SYSTEM_RESET2 203
#define CMD_GET_LIGHT_ADC 101
#define CMD_SET_LIGHT_ADC 102
#define CMD_GET_CHARGING_BUS_VOLTAGE_STATE 112
#define CMD_GET_BAT_BUS_VOLTAGE_STATE 117
#define CMD_SET_SPI_MODE 0 // TO BE ADDED
#define CMD_SET_SPI_BITS_PER_WORD 0 // TO BE ADDED
#define CMD_SET_SPI_MAXSPEEDHZ 0 // TO BE ADDED
#define CMD_SET_485_ENABLE 131
#define CMD_SET_3V3_PWR_EN 132
// #define CMD_SET_5V_PWR_ENABLE 517
#define CMD_SET_SENSOR_ENABLE 504
#define CMD_SET_SENSOR_PWR_ENABLE 505
#define CMD_SET_SENSOR2_ENABLE 506
#define CMD_SET_SENSOR4_ENABLE 510
#define CMD_SET_SENSOR1_PWR_ENABLE 513
#define CMD_SET_SENSOR2_PWR_ENABLE 514
#define CMD_SET_SENSOR3_PWR_ENABLE 509
#define CMD_SET_SENSOR4_PWR_ENABLE 525
#define CMD_SET_PHOTO_IN 520
#define CMD_SET_PHOTO_OUT 515
#define CMD_SET_ADC_ENABLE 500
#define CMD_SET_MIPI_SWITCH 501
#define CMD_SET_CAM_RSTN1 502
#define CMD_SET_CAM_RSTN0 503
#define CMD_SET_SD_DECT 507
#define CMD_SET_PTZ_PWR_ENABLE 508
#define CMD_SET_RTC_ENABLE 511
#define CMD_SET_100M_ENABLE 518
#define CMD_SET_100M_SWITCH_PWR_ENABLE 519
#define CMD_SET_AM_POWER_ENABLE 521
#define CMD_SET_NRSEC_POWER_ENABLE 522
#define CMD_SET_AMP_ENABLE 523
#define CMD_SET_LIGHT1_RESISTOR_ENABLE 524
#define CMD_SET_100M_RESET 526
#define CMD_GET_CAMERA_STATUS 310
#define CMD_SET_MADA_MOVE_STATUS 311
#define CMD_SET_MADA_INIT_STATUS 312
#define CMD_SET_MADA_CLOSE_STATUS 313
#define CMD_SET_MADA_REG 314
#define CMD_GET_MADA_REG 315
#define CMD_SET_INIT_STATUS 401
#endif // USING_PTZ
#else // defined(USING_N938)
#define CMD_SET_CAM_3V3_EN_STATE 72 // Board-wide 3V3 power-on enable
#define CMD_3V3_SWITCH_EN 45 // Board-wide 485 3V3 signal level-shifter power enable
#define CMD_SET_SYSTEM_RESET 202
#define CMD_SET_SYSTEM_RESET2 203
#define CMD_SET_485_EN1 302
#define CMD_SET_3V3_PWR_EN 132
#define CMD_SET_UART0_EN 361
#define CMD_SET_485_EN0 301
#define CMD_SET_NETWORK_POWER_EN 362
#define CMD_SET_485_EN3 304
#define CMD_SET_485_EN2 303
#define CMD_SET_SPI_POWER 129
// #define CMD_SET_5V_EN 363
#define CMD_SDCARD_DETECT_EN 364
#define CMD_SET_PIC1_POWER 494
#define CMD_SET_OTHER_POWER 493
#define CMD_SET_ANGLE_POWER 492
#define CMD_SET_PULL_POWER 491
#define CMD_SET_WTH_POWER 490
#define CMD_SET_485_EN4 305
#define CMD_LED_CTRL 365
#define CMD_BD_EN 366
#define CMD_ADC_EN 367
#define CMD_SPI2SERIAL_POWER_EN 368
#define CMD_RS485_3V3_EN 369
// Others
#define CMD_SET_485_EN_STATE 131
#define CMD_SET_OTG_STATE 107
#define CMD_UART0_EN 73 // Reserved UART0 level-shifter chip enable
#define CMD_485_1_PWR_EN 5 // 485_1 power enable
#define CMD_GET_CHARGING_BUS_VOLTAGE_STATE 112
#define CMD_GET_BAT_BUS_VOLTAGE_STATE 117
#define CMD_485_3_DE 6 // 485_3 DE signal
#define CMD_485_2_DE 7 // 485_2 DE signal
#define CMD_485_4_DE 13 // 485_4 DE signal
#define CMD_NETWORK_PWR_EN 94 // 100M Ethernet power enable
#define CMD_SET_INIT_STATUS 0 // 401
#define CMD_485_2_PWR_EN 92 // 485_2 power enable
#define CMD_485_3_PWR_EN 91 // 485_3 power enable
#define CMD_485_4_PWR_EN 90 // 485_4 power enable
#endif // USING_N938
#define CMD_SEC_EN 27 // Security chip power-on enable
#define CMD_485_2_3_DE_EN 26 // 485_2&3 DE level-shifter chip enable signal
#define GPIO_NODE_MP "/dev/mtkgpioctrl"
#define CMD_5V_PWR_EN 14 // Board-wide 5V0 power-on enable
#define CMD_SD_CARD_DECT 15 // SD card detect
#define CMD_PIC1_EN 16
#define MAX_STRING_LEN 32
typedef struct
{
int cmd;
int value;
int result;
long value2;
char str[MAX_STRING_LEN];
} IOT_PARAM;
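For reference, a minimal sketch of how an IOT_PARAM could be pushed through the GPIO device node. This assumes an ioctl-style driver behind /dev/mtkgpioctrl; the request code and open flags are illustrative assumptions, not taken from this diff:
// Hedged sketch, not the project's actual implementation.
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
static int SetGpioCmd(int cmd, int value)
{
    IOT_PARAM param = { cmd, value, 0 };   // result/value2/str zeroed, filled by the driver
    int fd = open(GPIO_NODE_MP, O_RDWR);   // "/dev/mtkgpioctrl"
    if (fd < 0) return -1;
    int res = ioctl(fd, 0x100 /* hypothetical request code */, &param);
    close(fd);
    return (res == 0) ? param.result : -1;
}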
#define CMD_OTHER_EN 21
#define CMD_ANGLE_EN 22
#define CMD_PULL_EN 23
#define CMD_WEATHER_EN 24
class GpioControl
{
public:
struct ITEM
{
int cmd;
size_t references;
time_t openTime;
};
#define CMD_LED_CTRL 46
#define CMD_BD_EN 47
#define CMD_ADC_EN 44
private:
static std::mutex m_locker;
static std::vector<ITEM> m_items;
static bool m_cameraPowerStatus;
#define CMD_SPI_PWR_EN 43 // SPI-to-UART bridge power enable
static std::mutex m_gpioLocker;
#endif
protected:
static size_t turnOnImpl(const IOT_PARAM& param);
static size_t turnOffImpl(const IOT_PARAM& param);
#endif // USING_N938
#ifdef USING_N938
#define GPIO_NODE_N938 "/sys/devices/platform/1000b000.pinctrl/mt_gpio"
#else
#define GPIO_NODE_MP "/dev/mtkgpioctrl"
#endif // USING_N938
public:
// Power
static size_t TurnOn(int cmd);
static size_t TurnOn(const std::vector<int>& cmds);
static size_t TurnOff(int cmd, uint32_t delayedCloseTime = 0);
static size_t TurnOff(const std::vector<int>& cmds, uint32_t delayedCloseTime = 0);
static size_t TurnOff(const std::vector<std::pair<int, uint32_t> >& cmds);
static size_t TurnOffImmediately(int cmd);
static bool SetCamerastatus(int cmd, bool status);
static bool GetCamerastatus();
static bool GetSelftestStatus(time_t wait_time);
static time_t GetSelfTestRemain(time_t wait_time);
class GpioControl
{
public:
static void setInt(int cmd, int value);
static int getInt(int cmd);
static void setLong(int cmd, long value);
@ -218,94 +133,20 @@ public:
static void setOtgState(bool on)
{
on ? TurnOn(CMD_SET_OTG_STATE) : TurnOff(CMD_SET_OTG_STATE);
setInt(CMD_SET_OTG_STATE, on ? 1 : 0);
}
static bool getOtgState()
{
#ifndef USING_N938
return getInt(CMD_GET_OTG_STATE) != 0;
#else
return false;
#endif
}
static void setCam3V3Enable(bool enabled, uint32_t delayedCloseTime)
{
#ifdef USING_PTZ
enabled ? TurnOn(CMD_SET_3V3_PWR_EN) : TurnOff(CMD_SET_3V3_PWR_EN, delayedCloseTime);
#else
enabled ? TurnOn(CMD_SET_3V3_PWR_EN) : TurnOff(CMD_SET_3V3_PWR_EN, delayedCloseTime);
#endif
return getInt(CMD_SET_OTG_STATE) != 0;
}
static void setCam3V3Enable(bool enabled)
{
#ifdef USING_PTZ
enabled ? TurnOn(CMD_SET_3V3_PWR_EN) : TurnOff(CMD_SET_3V3_PWR_EN);
#else
enabled ? TurnOn(CMD_SET_3V3_PWR_EN) : TurnOff(CMD_SET_3V3_PWR_EN);
#endif
}
static void setBeeOn(bool z)
{
#ifndef USING_N938
#ifndef USING_PTZ
z ? TurnOn(CMD_SET_PWM_BEE_STATE) : TurnOff(CMD_SET_PWM_BEE_STATE);
#endif
#endif
}
static void setJidianqiState(bool z) {
#ifndef USING_N938
#ifndef USING_PTZ
z ? TurnOn(CMD_SET_ALM_MODE) : TurnOff(CMD_SET_ALM_MODE);
#endif
#endif
}
static void setSpiPower(bool on) {
on ? TurnOn(CMD_SET_SPI_POWER) : TurnOff(CMD_SET_SPI_POWER);
if (on)
{
std::this_thread::sleep_for(std::chrono::milliseconds(40));
}
}
static void setRS485Enable(bool z, uint32_t delayedCloseTime)
{
#ifndef USING_N938
#ifdef USING_PTZ
z ? TurnOn(CMD_SET_485_ENABLE) : TurnOff(CMD_SET_485_ENABLE, delayedCloseTime);
#else
z ? TurnOn(CMD_SET_485_EN_STATE) : TurnOff(CMD_SET_485_EN_STATE, delayedCloseTime);
#endif
#endif
}
static void set12VEnable(bool z, uint32_t delayedCloseTime)
{
#ifndef USING_N938
z ? TurnOn(CMD_SET_12V_EN_STATE) : TurnOff(CMD_SET_12V_EN_STATE, delayedCloseTime);
#endif
}
static void setRS485Enable(bool z)
{
#ifndef USING_N938
#ifdef USING_PTZ
z ? TurnOn(CMD_SET_485_ENABLE) : TurnOff(CMD_SET_485_ENABLE);
#ifdef ENABLE_3V3_ALWAYS
setInt(CMD_SET_CAM_3V3_EN_STATE, 1);
#else
z ? TurnOn(CMD_SET_485_EN_STATE) : TurnOff(CMD_SET_485_EN_STATE);
#endif
#endif
}
static void set12VEnable(bool z)
{
#ifndef USING_N938
z ? TurnOn(CMD_SET_12V_EN_STATE) : TurnOff(CMD_SET_12V_EN_STATE);
setInt(CMD_SET_CAM_3V3_EN_STATE, enabled ? 1 : 0);
#endif
}
@ -314,378 +155,110 @@ public:
setInt(CMD_SET_SYSTEM_RESET, 1);
}
static void reboot2()
{
setInt(CMD_SET_SYSTEM_RESET2, 1);
}
static void setLightAdc(int i)
{
#ifndef USING_N938
#ifdef USING_PTZ
setInt(CMD_SET_LIGHT1_RESISTOR_ENABLE, i);
#else
setInt(CMD_SET_LIGHT_ADC, i);
#endif
#endif
}
static int getLightAdc()
{
#ifndef USING_N938
#ifdef USING_PTZ
return getInt(CMD_SET_LIGHT1_RESISTOR_ENABLE);
#else
return getInt(CMD_GET_LIGHT_ADC);
#endif
#else
return -1;
#endif
}
static int getChargingVoltage()
{
#ifndef USING_N938
return getInt(CMD_GET_CHARGING_BUS_VOLTAGE_STATE);
#else
return -1;
#endif
return getInt(CMD_GET_CHARGING_VOL_STATE);
}
#if 0
static int getChargingShuntVoltage()
{
#ifndef USING_N938
return getInt(CMD_GET_CHARGING_SHUNT_VOLTAGE_STATE);
#else
return -1;
#endif
}
#endif
static int getChargingBusVoltage() {
return getInt(CMD_GET_CHARGING_BUS_VOLTAGE_STATE);
}
#if 0
static int getChargingPower() {
#ifndef USING_N938
return getInt(CMD_GET_CHARGING_POWER_STATE);
#else
return -1;
#endif
}
static int getChargingCurrent() {
#ifndef USING_N938
return getInt(CMD_GET_CHARGING_CURRENT_STATE);
#else
return -1;
#endif
}
#endif
static int getBatteryVoltage() {
return getInt(CMD_GET_BAT_BUS_VOLTAGE_STATE);
return getInt(CMD_GET_BAT_VOL_STATE);
}
#if 0
static int getBatteryShuntVoltage() {
#ifndef USING_N938
return getInt(CMD_GET_BAT_SHUNT_VOLTAGE_STATE);
#else
return -1;
#endif
}
#endif
static int getBatteryBusVoltage() {
return getInt(CMD_GET_BAT_BUS_VOLTAGE_STATE);
}
#if 0
static int getBatteryPower() {
#ifndef USING_N938
return getInt(CMD_GET_BAT_POWER_STATE);
#else
return -1;
#endif
}
static int getBatteryCurrent() {
#ifndef USING_N938
return getInt(CMD_GET_BAT_CURRENT_STATE);
#else
return -1;
#endif
}
#endif
static void set485WriteMode() {
#if 0
setInt(CMD_SET_485_STATE, 1);
#endif
}
static void set485ReadMode() {
#if 0
setInt(CMD_SET_485_STATE, 0);
#endif
}
static void setSpiMode(int i) {
#ifndef USING_N938
setInt(CMD_SET_SPI_MODE, i);
#endif
}
static void setSpiBitsPerWord(int i) {
#ifndef USING_N938
setInt(CMD_SET_SPI_BITS_PER_WORD, i);
#endif
}
static void setSpiMaxSpeedHz(long j) {
#ifndef USING_N938
setLong(CMD_SET_SPI_MAXSPEEDHZ, j);
#endif
}
};
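The setters above are thin wrappers mapping a boolean or numeric state onto board-specific CMD_* codes, with setSpiPower additionally sleeping ~40 ms so the rail settles before the first transfer. A usage sketch (values illustrative; only the codes compiled in for the current board variant are valid):
// Configure the SPI link, then power it; the 40 ms settle delay is built in.
GpioControl::setSpiMode(0);              // SPI mode 0 (CPOL=0, CPHA=0)
GpioControl::setSpiBitsPerWord(8);
GpioControl::setSpiMaxSpeedHz(1000000L); // 1 MHz
GpioControl::setSpiPower(true);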
class PowerControl
{
public:
PowerControl(int cmd1) : m_delayCloseTime(0)
{
m_cmds.resize(1, cmd1);
TurnOn();
}
PowerControl(const std::vector<int>& cmds) : m_delayCloseTime(0)
{
m_cmds = cmds;
TurnOn();
}
PowerControl(int cmd1, uint32_t closeDelayTime) : m_delayCloseTime(closeDelayTime)
{
m_cmds.resize(1, cmd1);
TurnOn();
}
PowerControl(int cmd1, int cmd2, uint32_t closeDelayTime) : m_delayCloseTime(closeDelayTime)
{
m_cmds.resize(2, cmd1);
m_cmds[1] = cmd2;
TurnOn();
}
PowerControl(int cmd1, int cmd2, int cmd3, uint32_t closeDelayTime) : m_delayCloseTime(closeDelayTime)
{
m_cmds.resize(3, cmd1);
m_cmds[1] = cmd2;
m_cmds[2] = cmd3;
TurnOn();
}
PowerControl(int cmd1, int cmd2, int cmd3, int cmd4, uint32_t closeDelayTime) : m_delayCloseTime(closeDelayTime)
{
m_cmds.resize(4, cmd1);
m_cmds[1] = cmd2;
m_cmds[2] = cmd3;
m_cmds[3] = cmd4;
TurnOn();
}
PowerControl(int cmd1, int cmd2, int cmd3, int cmd4, int cmd5, uint32_t closeDelayTime) : m_delayCloseTime(closeDelayTime)
{
m_cmds.resize(5, cmd1);
m_cmds[1] = cmd2;
m_cmds[2] = cmd3;
m_cmds[3] = cmd4;
m_cmds[4] = cmd5;
TurnOn();
}
PowerControl(int cmd1, int cmd2, int cmd3, int cmd4, int cmd5, int cmd6, uint32_t closeDelayTime) : m_delayCloseTime(closeDelayTime)
{
m_cmds.resize(6, cmd1);
m_cmds[1] = cmd2;
m_cmds[2] = cmd3;
m_cmds[3] = cmd4;
m_cmds[4] = cmd5;
m_cmds[5] = cmd6;
TurnOn();
}
PowerControl(int cmd1, int cmd2, int cmd3, int cmd4, int cmd5, int cmd6, int cmd7, uint32_t closeDelayTime) : m_delayCloseTime(closeDelayTime)
{
m_cmds.resize(7, cmd1);
m_cmds[1] = cmd2;
m_cmds[2] = cmd3;
m_cmds[3] = cmd4;
m_cmds[4] = cmd5;
m_cmds[5] = cmd6;
m_cmds[6] = cmd7;
TurnOn();
static void setBeeOn(bool z) {
setInt(CMD_SET_PWM_BEE_STATE, z ? 1 : 0);
}
PowerControl(int cmd1, int cmd2, int cmd3, int cmd4, int cmd5, int cmd6, int cmd7, int cmd8, uint32_t closeDelayTime) : m_delayCloseTime(closeDelayTime)
{
m_cmds.resize(8, cmd1);
m_cmds[1] = cmd2;
m_cmds[2] = cmd3;
m_cmds[3] = cmd4;
m_cmds[4] = cmd5;
m_cmds[5] = cmd6;
m_cmds[6] = cmd7;
m_cmds[7] = cmd8;
TurnOn();
}
virtual ~PowerControl()
{
GpioControl::TurnOff(m_cmds, m_delayCloseTime);
#if !defined(NDEBUG) && defined(OUTPUT_DBG_INFO)
std::string status = GetStatus();
XYLOG(XYLOG_SEVERITY_INFO, "PWR After TurnOff %s, DelayCloseTime=%u", status.c_str(), m_delayCloseTime);
#endif
static void setJidianqiState(bool z) {
setInt(CMD_SET_ALM_MODE, z ? 1 : 0);
}
#if !defined(NDEBUG) && defined(OUTPUT_DBG_INFO)
std::string GetStatus()
{
std::string result;
for (auto it = m_cmds.cbegin(); it != m_cmds.cend(); ++it)
static void setSpiPower(bool on) {
setInt(CMD_SET_SPI_POWER, on ? 1 : 0);
if (on)
{
if (*it == 0)
{
continue;
}
result += std::to_string(*it) + "=" + std::to_string(GpioControl::getInt(*it)) + " ";
std::this_thread::sleep_for(std::chrono::milliseconds(40));
}
return result;
}
#endif // #if !defined(NDEBUG) && defined(OUTPUT_DBG_INFO)
protected:
void TurnOn()
{
#if !defined(NDEBUG) && defined(OUTPUT_DBG_INFO)
// std::string status = GetStatus();
// XYLOG(XYLOG_SEVERITY_INFO, "PWR Before TurnOn %s", status.c_str());
#endif
GpioControl::TurnOn(m_cmds);
static void setRS485Enable(bool z) {
setInt(CMD_SET_485_EN_STATE, z ? 1 : 0);
}
protected:
std::vector<int> m_cmds;
uint32_t m_delayCloseTime;
};
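PowerControl is an RAII guard: each constructor collects one or more CMD_* codes and turns them on immediately, and the destructor hands the whole list to GpioControl::TurnOff() together with m_delayCloseTime; the ITEM bookkeeping (cmd/references/openTime) suggests rails are reference-counted across holders. A minimal usage sketch (codes and delay illustrative):
{
    // Power 12V and the 485 transceiver for the lifetime of this scope;
    // both are scheduled off ~5 s after the guard is destroyed.
    PowerControl pwr(CMD_SET_12V_EN_STATE, CMD_SET_485_EN_STATE, 5000);
    // ... talk to the attached peripheral here ...
}   // ~PowerControl() -> GpioControl::TurnOff(m_cmds, 5000)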
class CameraPowerCtrl : public PowerControl
{
public:
CameraPowerCtrl(uint32_t closeDelayTime) :
#ifdef USING_N938
PowerControl(0, closeDelayTime)
#else // USING_N938
#ifdef USING_PTZ
PowerControl(CMD_SET_3V3_PWR_EN, closeDelayTime)
#else // USING_PTZ
PowerControl(CMD_SET_3V3_PWR_EN, closeDelayTime)
#endif // USING_PTZ
#endif // USING_N938
{
}
};
class NetCameraPowerCtrl : public PowerControl
{
public:
NetCameraPowerCtrl(uint32_t closeDelayTime) :
#ifdef USING_N938
PowerControl(CMD_SET_PIC1_POWER, CMD_SET_485_EN_STATE, closeDelayTime)
#else // USING_N938
#ifdef USING_PTZ
PowerControl(CMD_SET_12V_EN_STATE, closeDelayTime)
#else // USING_PTZ
// MicroPhoto
PowerControl(CMD_SET_12V_EN_STATE, CMD_SET_485_EN_STATE, closeDelayTime)
#endif // USING_PTZ
#endif // USING_N938
{
static void set12VEnable(bool z) {
setInt(CMD_SET_12V_EN_STATE, z ? 1 : 0);
}
};
class PlzCameraPowerCtrl : public PowerControl
{
public:
PlzCameraPowerCtrl(uint32_t closeDelayTime) :
#ifdef USING_N938
PowerControl(CMD_SET_PIC1_POWER, CMD_SET_485_EN_STATE, closeDelayTime)
#else // USING_N938
#ifdef USING_PTZ
PowerControl(CMD_SET_3V3_PWR_EN, CMD_SET_OTG_STATE, CMD_SET_485_ENABLE, CMD_SET_PTZ_PWR_ENABLE, CMD_SET_12V_EN_STATE, CMD_SET_100M_SWITCH_PWR_ENABLE, closeDelayTime)
#else // USING_PTZ
PowerControl(CMD_SET_OTG_STATE, CMD_SET_12V_EN_STATE, closeDelayTime)
#endif // USING_PTZ
#endif // USING_N938
{
}
};
class EthernetPowerCtrl : public PowerControl
{
public:
EthernetPowerCtrl(uint32_t closeDelayTime) :
#ifdef USING_N938
PowerControl(CMD_SET_OTG_STATE, CMD_SET_NETWORK_POWER_EN, closeDelayTime)
#else // USING_N938
#ifdef USING_PTZ
// PowerControl(CMD_SET_3V3_PWR_EN, CMD_SET_OTG_STATE, CMD_SET_5V_PWR_ENABLE, CMD_SET_100M_ENABLE, CMD_SET_100M_SWITCH_PWR_ENABLE, closeDelayTime)
PowerControl(CMD_SET_3V3_PWR_EN, CMD_SET_OTG_STATE, CMD_SET_100M_ENABLE, closeDelayTime)
#else // USING_PTZ
// Micro Photo
PowerControl(CMD_SET_OTG_STATE, CMD_SET_485_EN_STATE/* Only for wp6*/, closeDelayTime)
#endif // USING_PTZ
#endif // USING_N938
{
}
};
class UsbCameraPowerCtrl : public PowerControl
{
public:
UsbCameraPowerCtrl(uint32_t closeDelayTime) :
#ifdef USING_N938
PowerControl(CMD_SET_OTG_STATE, CMD_SET_NETWORK_POWER_EN, CMD_SET_PIC1_POWER, CMD_SET_485_EN_STATE, closeDelayTime)
#else // USING_N938
#ifdef USING_PTZ
PowerControl(CMD_SET_PTZ_PWR_ENABLE, CMD_SET_100M_ENABLE, CMD_SET_100M_SWITCH_PWR_ENABLE, CMD_SET_12V_EN_STATE, closeDelayTime)
#else // USING_PTZ
PowerControl(CMD_SET_OTG_STATE, CMD_SET_12V_EN_STATE, closeDelayTime)
#endif // USING_PTZ
#endif // USING_N938
{
}
};
static bool SetN938Cmd(int cmd, int val);
static bool OpenSensors();
static bool CloseSensors();
#endif
class SerialCameraPowerCtrl : public PowerControl
{
public:
SerialCameraPowerCtrl(uint32_t closeDelayTime) :
#ifdef USING_N938
PowerControl(CMD_SET_SPI_POWER, CMD_SPI2SERIAL_POWER_EN, CMD_RS485_3V3_EN, CMD_SET_PIC1_POWER, CMD_SET_485_EN4, closeDelayTime)
#else // USING_N938
#ifdef USING_PTZ
PowerControl(CMD_SET_12V_EN_STATE, CMD_SET_485_ENABLE, CMD_SET_3V3_PWR_EN, CMD_SET_SPI_POWER, CMD_SET_PTZ_PWR_ENABLE, closeDelayTime)
#else // USING_PTZ
PowerControl(CMD_SET_12V_EN_STATE, CMD_SET_3V3_PWR_EN, CMD_SET_SPI_POWER, CMD_SET_485_EN_STATE, closeDelayTime)
#endif // USING_PTZ
#endif // USING_N938
{
}
};
#endif //MICROPHOTO_GPIOCONTROL_H

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -13,7 +13,6 @@
#include <atomic>
#include <filesystem>
#include <thread>
#include <memory>
#include <camera/NdkCameraManager.h>
#include <camera/NdkCameraError.h>
@ -28,11 +27,6 @@
#include <opencv2/opencv.hpp>
#include <android/bitmap.h>
#include <android/multinetwork.h>
#include "SensorsProtocol.h"
#include "PtzController.h"
#define LOGE(...) ((void)__android_log_print(ANDROID_LOG_ERROR, "error", __VA_ARGS__))
#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, "debug", __VA_ARGS__))
@ -155,42 +149,18 @@ void MatToBitmap(JNIEnv *env, cv::Mat& mat, jobject& bitmap) {
}
#endif
class PowerControl;
class VendorCtrl;
class Streaming;
struct STREAMING_CONTEXT
{
std::shared_ptr<Streaming> stream;
std::shared_ptr<PowerControl> powerCtrl;
std::shared_ptr<PowerControl> ethernetPowerCtrl;
};
class CPhoneDevice : public IDevice
{
public:
friend PtzController;
struct NETWORK
{
std::string iface;
std::string ip;
std::string netmask;
std::string gateway;
};
class CPhoneCamera : public NdkCamera
{
public:
CPhoneCamera(CPhoneDevice* dev, int32_t width, int32_t height, const NdkCamera::CAMERA_PARAMS& params);
virtual ~CPhoneCamera();
virtual bool on_image(cv::Mat rgb);
virtual bool on_image(cv::Mat& rgb);
virtual void on_error(const std::string& msg);
virtual void onDisconnected(ACameraDevice* device);
virtual bool onBurstCapture(std::shared_ptr<ACameraMetadata> characteristics, std::vector<std::shared_ptr<ACameraMetadata> >& results, uint32_t ldr, uint32_t duration, std::vector<std::vector<uint8_t> >& frames);
virtual bool onOneCapture(std::shared_ptr<ACameraMetadata> characteristics, std::shared_ptr<ACameraMetadata> results, uint32_t ldr, uint32_t duration, cv::Mat rgb);
virtual bool onBurstCapture(std::shared_ptr<ACameraMetadata> characteristics, std::vector<std::shared_ptr<ACameraMetadata> >& results, uint32_t ldr, uint32_t duration, std::vector<std::shared_ptr<AImage> >& frames);
protected:
CPhoneDevice* m_dev;
@ -203,9 +173,6 @@ public:
virtual void onImageAvailable(AImageReader* reader);
virtual int32_t getOutputFormat() const;
virtual bool onOneCapture(std::shared_ptr<ACameraMetadata> characteristics, std::shared_ptr<ACameraMetadata> results, uint32_t ldr, uint32_t duration, cv::Mat rgb);
virtual bool onBurstCapture(std::shared_ptr<ACameraMetadata> characteristics, std::vector<std::shared_ptr<ACameraMetadata> >& results, uint32_t ldr, uint32_t duration, std::vector<std::vector<uint8_t> >& frames);
virtual bool onBurstCapture(std::shared_ptr<ACameraMetadata> characteristics, std::vector<std::shared_ptr<ACameraMetadata> >& results, uint32_t ldr, uint32_t duration, std::vector<std::shared_ptr<AImage> >& frames);
protected:
std::string m_path;
@ -215,13 +182,13 @@ public:
{
CPhoneDevice* device;
unsigned int timerType;
uint64_t times;
unsigned long times;
void* data;
uint64_t expectedTimes;
uint64_t uid;
unsigned long expectedTimes;
unsigned long uid;
};
CPhoneDevice(JavaVM* vm, jobject service, const std::string& appPath, uint64_t activeNetHandle, unsigned int versionCode, const std::string& nativeLibDir);
CPhoneDevice(JavaVM* vm, jobject service, const std::string& appPath, unsigned int netId, unsigned int versionCode);
virtual ~CPhoneDevice();
virtual void SetListener(IListener* listener);
@ -232,40 +199,27 @@ public:
virtual bool UpdateSchedules();
virtual bool QuerySystemProperties(map<string, string>& properties);
virtual bool InstallAPP(const std::string& path, unsigned int delayedTime);
virtual bool Reboot(int resetType, bool manually, const std::string& reason, uint32_t timeout = 1000);
virtual bool Reboot(int resetType);
virtual bool EnableGPS(bool enabled);
virtual int QueryBattaryVoltage(int timesForAvg, int* isCharging);
virtual uint32_t QueryLdr();
virtual float QueryBattaryVoltage(int timesForAvg, bool* isCharging);
virtual bool RequestPosition();
virtual timer_uid_t RegisterHeartbeat(unsigned int timerType, unsigned int timeout, time_t tsForNextPhoto);
virtual bool TakePhoto(const IDevice::PHOTO_INFO& photoInfo, const vector<OSD_INFO>& osds, const std::string& path);
virtual bool CloseCamera();
virtual timer_uid_t RegisterTimer(unsigned int timerType, unsigned int timeout, void* data, uint64_t times = 1);
virtual timer_uid_t RegisterTimer(unsigned int timerType, unsigned int timeout, void* data, unsigned long times = 0);
virtual bool UnregisterTimer(timer_uid_t uid);
virtual uint64_t RequestWakelock(uint64_t timeout);
virtual bool ReleaseWakelock(uint64_t wakelock);
virtual std::string GetVersion() const;
virtual int GetWData(WEATHER_INFO *weatherInfo, D_SENSOR_PARAM *sensorParam);
virtual int GetIceData(ICE_INFO *iceInfo, ICE_TAIL *icetail, D_SENSOR_PARAM *sensorParam);
virtual bool OpenSensors(int sensortype);
virtual bool CloseSensors(int sensortype, uint32_t delayedCloseTime);
virtual bool OpenPTZSensors(uint32_t sec);
virtual bool ClosePTZSensors(uint32_t delayedCloseTime);
virtual bool GetPTZSensorsStatus(time_t waittime);
virtual bool GetCameraStatus();
virtual void CameraCtrl(unsigned short waitTime, unsigned short delayTime, unsigned char channel, int cmdidx, unsigned char presetno, const char *serfile, unsigned int baud, int addr);
virtual int GetSerialPhoto(int devno, D_IMAGE_DEF *photo);
virtual void InitSerialComm(D_SENSOR_PARAM *sensorParam, char *filedir,const char *logpath);
bool LoadNetworkInfo();
virtual unsigned long RequestWakelock(unsigned long timeout);
virtual bool ReleaseWakelock(unsigned long wakelock);
virtual int GetWData(WEATHER_INFO *weatherInfo);
virtual int GetIceData(ICE_INFO *iceInfo, ICE_TAIL *icetail, SENSOR_PARAM *sensorParam);
virtual bool OpenSensors();
virtual bool CloseSensors();
bool GetNextScheduleItem(uint32_t tsBasedZero, uint32_t scheduleTime, vector<uint32_t>& items);
void UpdatePosition(double lon, double lat, double radius, time_t ts);
bool OnVideoReady(bool photoOrVideo, bool result, const char* path, unsigned int photoId);
bool OnCaptureReady(bool photoOrVideo, bool result, cv::Mat mat, unsigned int photoId);
bool OnVideoReady(bool result, const char* path, unsigned int photoId);
void UpdateSignalLevel(int signalLevel);
void UpdateTfCardPath(const std::string& tfCardPath)
{
@ -276,24 +230,22 @@ public:
mBuildTime = buildTime;
}
void UpdateSimcard(const std::string& simcard);
void UpdateNetwork(net_handle_t nethandle, bool available, bool defaultOrEthernet, bool& changed);
net_handle_t GetEthnetHandle() const;
static void TurnOnCameraPower(JNIEnv* env);
static void TurnOffCameraPower(JNIEnv* env);
VendorCtrl* MakeVendorCtrl(int vendor, uint8_t channel, const std::string& ip, const std::string& userName, const std::string& password, net_handle_t netHandle, bool syncTime);
static void TurnOnOtg(JNIEnv* env);
static void TurnOffOtg(JNIEnv* env);
protected:
std::string GetFileName() const;
std::string GetVersion() const;
bool SendBroadcastMessage(std::string action, int value);
// bool MatchCaptureSizeRequest(ACameraManager *cameraManager, const char *selectedCameraId, unsigned int width, unsigned int height, uint32_t cameraOrientation_,
bool TakePhotoWithNetCamera(const IDevice::PHOTO_INFO& localPhotoInfo, const std::string& path, const std::vector<IDevice::OSD_INFO>& osds, std::shared_ptr<PowerControl> powerCtrlPtr);
bool TakeVideoWithNetCamera(const IDevice::PHOTO_INFO& localPhotoInfo, const std::string& path, const std::vector<IDevice::OSD_INFO>& osds, std::shared_ptr<PowerControl> powerCtrlPtr);
bool StartPushStreaming(const IDevice::PHOTO_INFO& localPhotoInfo, const std::string& url, const std::vector<IDevice::OSD_INFO>& osds, std::shared_ptr<PowerControl> powerCtrlPtr);
bool PostProcessPhoto(const PHOTO_INFO& photoInfo, const vector<IDevice::OSD_INFO>& osds, const std::string& path, const std::string& cameraInfo, cv::Mat mat, time_t takingTime);
inline bool TakePhotoCb(int res, const IDevice::PHOTO_INFO& photoInfo, const string& path, time_t photoTime, const std::vector<IDevice::RECOG_OBJECT>& objects) const
inline bool TakePhotoCb(bool res, const IDevice::PHOTO_INFO& photoInfo, const string& path, time_t photoTime, const std::vector<IDevice::RECOG_OBJECT>& objects) const
{
if (m_listener != NULL)
{
@ -302,49 +254,26 @@ protected:
return false;
}
inline bool TakePhotoCb(int result, const IDevice::PHOTO_INFO& photoInfo, const string& path, time_t photoTime) const
{
if (m_listener != NULL)
{
std::vector<IDevice::RECOG_OBJECT> objects;
return m_listener->OnPhotoTaken(result, photoInfo, path, photoTime, objects);
}
return false;
}
inline bool TakePTZPhotoCb(int result, const IDevice::PHOTO_INFO& photoInfo) const
inline bool TakePhotoCb(bool res, const IDevice::PHOTO_INFO& photoInfo, const string& path, time_t photoTime) const
{
if (m_listener != NULL)
{
std::vector<IDevice::RECOG_OBJECT> objects;
return m_listener->OnPTZPhotoTaken(result, photoInfo);
}
return false;
}
inline bool GetPhotoSerialsParamCb(SerialsPhotoParam &param) const
{
if (m_listener != NULL)
{
return m_listener->OnPhotoSerialsParamGet(param);
return m_listener->OnPhotoTaken(res, photoInfo, path, photoTime, objects);
}
return false;
}
void QueryFlowInfo(std::map<std::string, std::string>& powerInfo);
void QueryPowerInfo(std::map<std::string, std::string>& powerInfo);
std::string QueryCpuTemperature();
bool OnImageReady(cv::Mat mat);
bool onOneCapture(std::shared_ptr<ACameraMetadata> characteristics, std::shared_ptr<ACameraMetadata> results, uint32_t ldr, uint32_t duration, cv::Mat rgb);
bool onBurstCapture(std::shared_ptr<ACameraMetadata> characteristics, std::vector<std::shared_ptr<ACameraMetadata> >& results, uint32_t ldr, uint32_t duration, std::vector<std::vector<uint8_t> >& frames);
bool onBurstCapture(std::shared_ptr<ACameraMetadata> characteristics, std::vector<std::shared_ptr<ACameraMetadata> >& results, uint32_t ldr, uint32_t duration, std::vector<std::shared_ptr<AImage> >& frames);
bool OnImageReady(cv::Mat& mat);
void onError(const std::string& msg);
void onDisconnected(ACameraDevice* device);
void CloseCamera2(CPhoneCamera* camera, unsigned int photoId, unsigned char cameraType);
void CloseCamera2(CPhoneCamera* camera, unsigned int photoId, bool turnOffOtg);
static void handleSignal(int sig, siginfo_t *si, void *uc);
bool RegisterHandlerForSignal(int sig);
@ -352,33 +281,18 @@ protected:
void handleTimerImpl(TIMER_CONTEXT* context);
void static handleRebootTimer(union sigval v);
// void handleRebootTimerImpl();
void RestartApp(int rebootType, long timeout, const std::string& reason);
void RestartApp(int rebootType, long timeout);
int QueryBatteryVoltage(int retries);
int CallExecv(int rotation, int frontCamera, const std::string& outputPath, const std::vector<std::string>& images);
void SetStaticIp(const std::string& iface, const std::string& ip, const std::string& netmask, const std::string& gateway);
void ConvertDngToPng(const std::string& dngPath, const std::string& pngPath);
void SetStaticIp();
void ShutdownEthernet();
int ExecuteCommand(const std::string& cmd);
static std::string BuildCaptureResultInfo(ACameraMetadata* result, uint32_t ldr, uint32_t duration, bool burst);
protected:
mutable std::mutex m_devLocker;
std::mutex m_devLocker;
JavaVM* m_vm;
jobject m_javaService;
std::string m_appPath;
std::string m_tfCardPath;
std::string m_nativeLibraryDir;
NETWORK* m_network;
net_handle_t m_defNetHandle;
net_handle_t m_ethnetHandle;
jmethodID mRegisterHeartbeatMid;
jmethodID mUpdateCaptureScheduleMid;
@ -388,19 +302,12 @@ protected:
jmethodID mRequestWakelockMid;
jmethodID mReleaseWakelockMid;
jmethodID mGetFlowInfoMid;
jmethodID mGetSystemInfoMid;
jmethodID mRebootMid;
jmethodID mInstallAppMid;
jmethodID mEnableGpsMid;
jmethodID mRequestPositionMid;
jmethodID mExecHdrplusMid;
jmethodID mSetStaticIpMid;
jmethodID mExecuteCmdMid;
jmethodID mConvertDngToPngMid;
jmethodID mCallSysCameraMid;
std::string mPath;
IDevice::PHOTO_INFO mPhotoInfo;
@ -408,12 +315,12 @@ protected:
IListener* m_listener;
const CFG_RECOGNIZATION* m_pRecognizationCfg;
bool mAIInitialized;
unsigned int mNetId;
unsigned int mVersionCode;
time_t mBuildTime;
atomic_ullong m_timerUidFeed;
atomic_ullong m_wakelockIdFeed;
atomic_ullong m_uniqueIdFeed;
atomic_ulong m_timerUidFeed;
atomic_ulong m_wakelockIdFeed;
std::map<IDevice::timer_uid_t, TIMER_CONTEXT*> mTimers;
mutable CPhoneCamera* mCamera;
@ -421,33 +328,16 @@ protected:
time_t mHeartbeatStartTime;
unsigned int mHeartbeatDuration;
static std::mutex m_powerLocker;
static long mCameraPowerCount;
static long mOtgCount;
std::thread m_threadClose;
std::shared_ptr<PowerControl> m_powerCtrlPtr;
uint32_t m_ethernetFailures;
int m_signalLevel;
time_t m_signalLevelUpdateTime;
std::string m_simcard;
mutable std::mutex m_cameraLocker;
time_t m_lastTime;
std::atomic<bool> m_shouldStopWaiting;
std::atomic<bool> m_isSelfTesting{false};
IDevice::ICE_TAIL m_tempData;
mutable std::mutex m_dataLocker;
mutable std::mutex m_collectDataLocker;
std::condition_variable m_CollectDatacv;
std::atomic<bool> m_collecting;
unsigned long long localDelayTime;
std::map<uint8_t, STREAMING_CONTEXT > m_streamings;
PtzController* m_ptzController;
};

@ -0,0 +1,912 @@
#include "TerminalDevice.h"
/*
* Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#define LOG_TAG "CameraTestHelpers"
#include "PhoneDevice2.h"
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
// #include <opencv2/objdetect.hpp>
// #include <opencv2/features2d.hpp>
// #include <opencv2/core/types.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <android/log.h>
#include <AndroidHelper.h>
extern bool GetJniEnv(JavaVM *vm, JNIEnv **env, bool& didAttachThread);
// This value is 2 ^ 18 - 1, and is used to clamp the RGB values before their
// ranges
// are normalized to eight bits.
static const int kMaxChannelValue = 262143;
static inline uint32_t YUV2RGB(int nY, int nU, int nV) {
nY -= 16;
nU -= 128;
nV -= 128;
if (nY < 0) nY = 0;
// This is the floating point equivalent. We do the conversion in integer
// because some Android devices do not have floating point in hardware.
// nR = (int)(1.164 * nY + 1.596 * nV);
// nG = (int)(1.164 * nY - 0.813 * nV - 0.391 * nU);
// nB = (int)(1.164 * nY + 2.018 * nU);
int nR = (int)(1192 * nY + 1634 * nV);
int nG = (int)(1192 * nY - 833 * nV - 400 * nU);
int nB = (int)(1192 * nY + 2066 * nU);
nR = std::min(kMaxChannelValue, std::max(0, nR));
nG = std::min(kMaxChannelValue, std::max(0, nG));
nB = std::min(kMaxChannelValue, std::max(0, nB));
nR = (nR >> 10) & 0xff;
nG = (nG >> 10) & 0xff;
nB = (nB >> 10) & 0xff;
return 0xff000000 | (nR << 16) | (nG << 8) | nB;
}
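The integer coefficients are the BT.601 float coefficients scaled by 1024, which is why the results are shifted right by 10 and clamped against kMaxChannelValue = 2^18 - 1 beforehand. A quick self-contained check of that scaling (plain arithmetic, no external assumptions):
#include <cassert>
#include <cmath>
static void CheckYuv2RgbCoeffs()
{
    assert(std::lround(1.164 * 1024) == 1192);
    assert(std::lround(1.596 * 1024) == 1634);
    assert(std::lround(0.813 * 1024) == 833);
    assert(std::lround(0.391 * 1024) == 400);
    assert(std::lround(2.018 * 1024) == 2066);
}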
CPhoneDevice2::CPhoneDevice2(JavaVM* vm, jobject service)
{
m_vm = vm;
JNIEnv* env = NULL;
bool attached = false;
bool res = GetJniEnv(m_vm, &env, attached);
if (!res)
{
ALOGE("Failed to get JNI Env");
}
m_javaService = env->NewGlobalRef(service);
jclass classService = env->GetObjectClass(m_javaService);
mRegisterTimerMid = env->GetMethodID(classService, "registerTimer", "(JI)Z");
mRegisterHeartbeatMid = env->GetMethodID(classService, "registerHeartbeatTimer", "(I)V");
mUnregisterTimerMid = env->GetMethodID(classService, "unregisterTimer", "(J)Z");
mUpdateTimeMid = env->GetMethodID(classService, "updateTime", "(J)Z");
env->DeleteLocalRef(classService);
if (attached)
{
vm->DetachCurrentThread();
}
m_timerUidFeed = time(NULL);
presentRotation_ = 0;
}
CPhoneDevice2::~CPhoneDevice2()
{
JNIEnv* env = NULL;
bool attached = false;
bool res = GetJniEnv(m_vm, &env, attached);
if (!res)
{
ALOGE("Failed to get JNI Env");
}
env->DeleteGlobalRef(m_javaService);
if (attached)
{
m_vm->DetachCurrentThread();
}
m_javaService = NULL;
}
void CPhoneDevice2::SetListener(IListener* listener)
{
m_listener = listener;
}
bool CPhoneDevice2::UpdateTime(time_t ts)
{
JNIEnv* env = NULL;
jboolean ret = JNI_FALSE;
bool attached = false;
bool res = GetJniEnv(m_vm, &env, attached);
if (!res)
{
ALOGE("Failed to get JNI Env");
return false;
}
jlong timeInMillis = ((jlong)ts) * 1000;
ret = env->CallBooleanMethod(m_javaService, mUpdateTimeMid, timeInMillis);
if (attached)
{
m_vm->DetachCurrentThread();
}
return (ret == JNI_TRUE);
}
bool CPhoneDevice2::Reboot()
{
return false;
}
IDevice::timer_uid_t CPhoneDevice2::RegisterTimer(unsigned int timerType, unsigned int timeout)
{
IDevice::timer_uid_t uid = m_timerUidFeed.fetch_add(1);
ALOGI("NDK RegTimer: uid=%lld Type=%u timeout=%u", uid, timerType, timeout);
JNIEnv* env = NULL;
jboolean ret = JNI_FALSE;
bool attached = false;
bool res = GetJniEnv(m_vm, &env, attached);
if (!res)
{
ALOGE("Failed to get JNI Env");
return 0;
}
ret = env->CallBooleanMethod(m_javaService, mRegisterTimerMid, (jlong)uid, (jint)timeout);
if (attached)
{
m_vm->DetachCurrentThread();
}
if (ret == JNI_TRUE)
{
unsigned long val = timerType;
mTimers.insert(mTimers.end(), std::pair<IDevice::timer_uid_t, unsigned long>(uid, val));
return uid;
}
return 0;
}
bool CPhoneDevice2::UnregisterTimer(IDevice::timer_uid_t uid)
{
JNIEnv* env = NULL;
jboolean ret = JNI_FALSE;
bool attached = false;
bool res = GetJniEnv(m_vm, &env, attached);
if (!res)
{
ALOGE("Failed to get JNI Env");
return false;
}
ret = env->CallBooleanMethod(m_javaService, mUnregisterTimerMid, (jlong)uid);
if (attached)
{
m_vm->DetachCurrentThread();
}
if (ret == JNI_TRUE)
{
mTimers.erase(uid);
return true;
}
return false;
}
bool CPhoneDevice2::FireTimer(timer_uid_t uid)
{
std::map<IDevice::timer_uid_t, unsigned long>::iterator it = mTimers.find(uid);
if (it == mTimers.end())
{
return false;
}
unsigned long timerType = it->second & 0xFFFFFFFF;
unsigned long times = (it->second & 0xFFFFFFFF00000000) >> 32;
times++;
if (timerType != 100)
{
int aa = 0;
}
it->second = timerType | (times << 32);
if (m_listener == NULL)
{
return false;
}
m_listener->OnTimeout(uid, timerType, NULL, times);
return true;
}
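FireTimer() packs two 32-bit fields into the map's unsigned long value: the timer type in the low half and the fire count in the high half. Note this only round-trips where unsigned long is 64 bits (LP64, i.e. arm64); on 32-bit ABIs the shift by 32 truncates. A width-safe sketch of the same packing:
#include <cstdint>
static inline uint64_t PackTimer(uint32_t type, uint32_t times)
{
    return (uint64_t)type | ((uint64_t)times << 32);
}
static inline void UnpackTimer(uint64_t v, uint32_t& type, uint32_t& times)
{
    type  = (uint32_t)(v & 0xFFFFFFFFu);
    times = (uint32_t)(v >> 32);
}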
IDevice::timer_uid_t CPhoneDevice2::RegisterHeartbeat(unsigned int timerType, unsigned int timeout)
{
IDevice::timer_uid_t uid = m_timerUidFeed.fetch_add(1);
JNIEnv* env = NULL;
jboolean ret = JNI_FALSE;
bool attached = false;
bool res = GetJniEnv(m_vm, &env, attached);
if (!res)
{
ALOGE("Failed to get JNI Env");
return 0;
}
env->CallVoidMethod(m_javaService, mRegisterHeartbeatMid, (jint)timeout);
if (attached)
{
m_vm->DetachCurrentThread();
}
return uid;
}
bool CPhoneDevice2::TakePhoto(const IDevice::PHOTO_INFO& photoInfo, const vector<OSD_INFO>& osds, const string& path)
{
ALOGI("TAKE_PHOTO: CH=%u PR=%u\n", (unsigned int)photoInfo.channel, (unsigned int)photoInfo.preset);
mPhotoInfo = photoInfo;
mPath = path;
mDisplayDimension = DisplayDimension(photoInfo.width, photoInfo.height);
ALOGE("Image Buffer Size: %d", photoInfo.width * photoInfo.height * 4);
imageBuffer_ = (uint8_t*)malloc(photoInfo.width * photoInfo.height * 4);
AASSERT(imageBuffer_ != nullptr, "Failed to allocate imageBuffer_");
int cameraId = (int)photoInfo.channel - 1;
ACameraIdList *cameraIdList = NULL;
ACameraMetadata *cameraMetadata = NULL;
const char *selectedCameraId = NULL;
camera_status_t camera_status = ACAMERA_OK;
ACameraManager *cameraManager = ACameraManager_create();
camera_status = ACameraManager_getCameraIdList(cameraManager, &cameraIdList);
if (camera_status != ACAMERA_OK) {
ALOGI("Failed to get camera id list (reason: %d)\n", camera_status);
TakePhotoCb(false, photoInfo, path, 0);
return false;
}
if (cameraIdList->numCameras < 1 ) {
ALOGI("No camera device detected.\n");
TakePhotoCb(false, photoInfo, path, 0);
return false;
}
if (cameraIdList->numCameras <= cameraId ) {
ALOGI("No required camera device %d detected.\n", cameraId);
TakePhotoCb(false, photoInfo, path, 0);
return false;
}
selectedCameraId = cameraIdList->cameraIds[cameraId];
ALOGI("Trying to open Camera2 (id: %s, num of camera : %d)\n", selectedCameraId,
cameraIdList->numCameras);
camera_status = ACameraManager_getCameraCharacteristics(cameraManager, selectedCameraId,
&cameraMetadata);
if (camera_status != ACAMERA_OK) {
ALOGI("Failed to get camera meta data of ID:%s\n", selectedCameraId);
}
ACameraMetadata_const_entry face, orientation;
camera_status = ACameraMetadata_getConstEntry(cameraMetadata, ACAMERA_LENS_FACING, &face);
uint32_t cameraFacing_ = static_cast<int32_t>(face.data.u8[0]);
if (cameraFacing_ == ACAMERA_LENS_FACING_FRONT)
{
int aa = 0;
}
camera_status = ACameraMetadata_getConstEntry(cameraMetadata, ACAMERA_SENSOR_ORIENTATION, &orientation);
ALOGI("====Current SENSOR_ORIENTATION: %8d", orientation.data.i32[0]);
uint32_t cameraOrientation_ = orientation.data.i32[0];
if (cameraOrientation_ == 90 || cameraOrientation_ == 270)
{
mDisplayDimension.Flip();
}
ImageFormat resCap = {(int32_t)photoInfo.width, (int32_t)photoInfo.height, AIMAGE_FORMAT_YUV_420_888};
MatchCaptureSizeRequest(cameraManager, selectedCameraId, photoInfo.width, photoInfo.height, cameraOrientation_, &resCap);
deviceStateCallbacks.onDisconnected = camera_device_on_disconnected;
deviceStateCallbacks.onError = camera_device_on_error;
camera_status = ACameraManager_openCamera(cameraManager, selectedCameraId,
&deviceStateCallbacks, &cameraDevice);
if (camera_status != ACAMERA_OK) {
ALOGI("Failed to open camera device (id: %s)\n", selectedCameraId);
}
camera_status = ACameraDevice_createCaptureRequest(cameraDevice, TEMPLATE_STILL_CAPTURE/*TEMPLATE_PREVIEW*/,
&captureRequest);
if (camera_status != ACAMERA_OK) {
ALOGI("Failed to create preview capture request (id: %s)\n", selectedCameraId);
}
ACaptureSessionOutputContainer_create(&captureSessionOutputContainer);
captureSessionStateCallbacks.onReady = capture_session_on_ready;
captureSessionStateCallbacks.onActive = capture_session_on_active;
captureSessionStateCallbacks.onClosed = capture_session_on_closed;
ACameraMetadata_free(cameraMetadata);
ACameraManager_deleteCameraIdList(cameraIdList);
ACameraManager_delete(cameraManager);
media_status_t status;
// status = AImageReader_new(1920, 1080, AIMAGE_FORMAT_YUV_420_888, 5, &mAImageReader);
status = AImageReader_new(resCap.width, resCap.height, resCap.format, 5, &mAImageReader);
if (status != AMEDIA_OK)
{
ALOGI("AImageReader_new error\n");
TakePhotoCb(false, photoInfo, path, 0);
return false;
}
AImageReader_ImageListener listener{
.context = this,
.onImageAvailable = OnImageCallback,
};
AImageReader_setImageListener(mAImageReader, &listener);
//ANativeWindow *mNativeWindow;
status = AImageReader_getWindow(mAImageReader, &theNativeWindow);
if (status != AMEDIA_OK)
{
ALOGI("AImageReader_getWindow error\n");
TakePhotoCb(false, photoInfo, path, 0);
return false;
}
ALOGI("Surface is prepared in %p.\n", theNativeWindow);
// theNativeWindow
ACameraOutputTarget_create(theNativeWindow, &cameraOutputTarget);
ACaptureRequest_addTarget(captureRequest, cameraOutputTarget);
ACaptureSessionOutput_create(theNativeWindow, &sessionOutput);
ACaptureSessionOutputContainer_add(captureSessionOutputContainer, sessionOutput);
ACameraDevice_createCaptureSession(cameraDevice, captureSessionOutputContainer,
&captureSessionStateCallbacks, &captureSession);
// ACameraCaptureSession_setRepeatingRequest(captureSession, NULL, 1, &captureRequest, NULL);
ACameraCaptureSession_capture(captureSession, NULL, 1, &captureRequest, NULL);
ALOGI("Surface is prepared in here.\n");
return true;
}
ACameraCaptureSession_stateCallbacks* CPhoneDevice2::GetSessionListener()
{
static ACameraCaptureSession_stateCallbacks sessionListener = {
.context = this,
.onClosed = CPhoneDevice2::capture_session_on_closed,
.onReady = CPhoneDevice2::capture_session_on_ready,
.onActive = CPhoneDevice2::capture_session_on_active,
};
return &sessionListener;
}
void CPhoneDevice2::ImageCallback(AImageReader *reader)
{
bool res = false;
AImage *image = nullptr;
media_status_t status = AImageReader_acquireNextImage(reader, &image);
if (status == AMEDIA_OK && image)
{
int32_t srcFormat = -1;
AImage_getFormat(image, &srcFormat);
AASSERT(AIMAGE_FORMAT_YUV_420_888 == srcFormat, "Failed to get format");
int32_t srcPlanes = 0;
AImage_getNumberOfPlanes(image, &srcPlanes);
AASSERT(srcPlanes == 3, "Is not 3 planes");
AImageCropRect srcRect;
AImage_getCropRect(image, &srcRect);
int32_t width = srcRect.right - srcRect.left;
int32_t height = srcRect.bottom - srcRect.top;
// int32_t height = srcRect.right - srcRect.left;
// int32_t width = srcRect.bottom - srcRect.top;
uint8_t *yPixel = nullptr;
uint8_t *uPixel = nullptr;
uint8_t *vPixel = nullptr;
int32_t yLen = 0;
int32_t uLen = 0;
int32_t vLen = 0;
AImage_getPlaneData(image, 0, &yPixel, &yLen);
AImage_getPlaneData(image, 1, &uPixel, &uLen);
AImage_getPlaneData(image, 2, &vPixel, &vLen);
uint8_t * data = new uint8_t[yLen + vLen + uLen];
memcpy(data, yPixel, yLen);
memcpy(data+yLen, vPixel, vLen);
memcpy(data+yLen+vLen, uPixel, uLen);
cv::Mat mYUV = cv::Mat(((height * 3) >> 1), width, CV_8UC1, data);
// cv::cvtColor(mYUV, _yuv_rgb_img, cv::COLOR_YUV2RGB_NV21, 3);
// cv::Mat mYUV = cv::Mat(height, yStride, CV_8UC4, data);
cv::Mat _yuv_rgb_img(height, width, CV_8UC4), _yuv_gray_img;
cv::cvtColor(mYUV, _yuv_rgb_img, cv::COLOR_YUV2RGB_NV21, 3);
cv::rotate(_yuv_rgb_img, _yuv_rgb_img, cv::ROTATE_180);
// cv::Mat rgbMat(height, width, CV_8UC3);
// Convert YUV420 to RGB via cv::cvtColor
// cvtColor(_yuv_rgb_img, rgbMat, cv::COLOR_YUV2RGB_I420);
// cv::Mat mat = cv::Mat(buffer.height, buffer.stride, CV_8UC4, buffer.bits);
const char *str = "OSD";
putText(_yuv_rgb_img, str, cv::Point(50, 50), cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(0, 0, 0), 4,cv::LINE_AA);
putText(_yuv_rgb_img, str, cv::Point(50, 50), cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(255, 255, 255), 2,cv::LINE_AA);
vector <int> compression_params;
compression_params.push_back(cv::IMWRITE_JPEG_QUALITY);
compression_params.push_back(80);
res = cv::imwrite(mPath.c_str(), _yuv_rgb_img, compression_params);
// ANativeWindow_unlockAndPost(theNativeWindow);
if (res)
{
int aa = 0;
}
// res = WriteFile(image, GetFileName() + ".org.jpg");
delete[] data; // release the temporary packed NV21 buffer
AImage_delete(image);
// delete pThis;
TakePhotoCb(res, mPhotoInfo, mPath, time(NULL));
}
}
void CPhoneDevice2::OnImageCallback(void *ctx, AImageReader *reader)
{
CPhoneDevice2* pThis = reinterpret_cast<CPhoneDevice2*>(ctx);
if (pThis != NULL)
{
pThis->ImageCallback(reader);
}
}
bool CPhoneDevice2::WriteFile(AImage *image, const string& path)
{
int planeCount = 0;
media_status_t status = AImage_getNumberOfPlanes(image, &planeCount);
ALOGI("Info: getNumberOfPlanes() planeCount = %d", planeCount);
if (!(status == AMEDIA_OK && planeCount == 1))
{
ALOGE("Error: getNumberOfPlanes() planeCount = %d", planeCount);
return false;
}
uint8_t *data = nullptr;
int len = 0;
AImage_getPlaneData(image, 0, &data, &len);
bool res = false;
FILE *file = fopen(path.c_str(), "wb");
if (file && data && len)
{
fwrite(data, 1, len, file);
fclose(file);
ALOGI("Capture: %s", path.c_str());
res = true;
}
else
{
if (file)
fclose(file);
}
return res;
}
bool CPhoneDevice2::WriteFile(CPhoneDevice2* pThis, AImage *image)
{
return pThis->WriteFile(image, pThis->GetFileName());
}
std::string CPhoneDevice2::GetFileName() const
{
return mPath;
}
/*
const char *selectedCameraId = NULL;
ACameraManager *cameraManager = ACameraManager_create();
*/
bool CPhoneDevice2::MatchCaptureSizeRequest(ACameraManager *cameraManager, const char *selectedCameraId, unsigned int width, unsigned int height, uint32_t cameraOrientation_,
ImageFormat* resCap) {
DisplayDimension disp(resCap->width,resCap->height);
if (cameraOrientation_ == 90 || cameraOrientation_ == 270) {
disp.Flip();
}
ACameraMetadata* metadata;
camera_status_t camera_status = ACAMERA_OK;
camera_status = ACameraManager_getCameraCharacteristics(cameraManager, selectedCameraId, &metadata);
ACameraMetadata_const_entry entry;
camera_status = ACameraMetadata_getConstEntry(metadata, ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, &entry);
// format of the data: format, width, height, input?, type int32
bool foundIt = false;
DisplayDimension foundRes(16384, 16384);
DisplayDimension maxJPG(0, 0);
for (int i = 0; i < entry.count; i += 4) {
int32_t input = entry.data.i32[i + 3];
int32_t format = entry.data.i32[i + 0];
if (input) continue;
if (format == AIMAGE_FORMAT_YUV_420_888 || format == AIMAGE_FORMAT_JPEG) {
DisplayDimension res(entry.data.i32[i + 1], entry.data.i32[i + 2]);
ALOGI("Camera Resolution: %d x %d fmt=%d", res.width(), res.height(), format);
if (!disp.IsSameRatio(res)) continue;
if (format == AIMAGE_FORMAT_YUV_420_888 && res > disp) {
foundIt = true;
foundRes = res;
} else if (format == AIMAGE_FORMAT_JPEG && res > maxJPG) {
maxJPG = res;
}
}
}
if (foundIt) {
// resView->width = foundRes.org_width();
// resView->height = foundRes.org_height();
resCap->width = foundRes.org_width();
resCap->height = foundRes.org_height();
} else {
ALOGI("Did not find any compatible camera resolution, taking 640x480");
resCap->width = disp.org_width();
resCap->height = disp.org_height();
// *resCap = *resView;
}
// resView->format = AIMAGE_FORMAT_YUV_420_888;
// resCap->format = AIMAGE_FORMAT_JPEG;
return foundIt;
}
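The function walks ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, whose entries are int32 quadruples (format, width, height, input flag), and remembers a YUV_420_888 output that matches the requested aspect ratio and exceeds the requested size (the last such entry enumerated wins); otherwise it falls back to the requested dimensions. A usage sketch (camera id and orientation illustrative):
// Ask for the closest supported 1920x1080 YUV capture size.
ImageFormat resCap = { 1920, 1080, AIMAGE_FORMAT_YUV_420_888 };
bool exact = MatchCaptureSizeRequest(cameraManager, selectedCameraId,
                                     1920, 1080, /*orientation*/ 90, &resCap);
// resCap.width / resCap.height now hold the chosen dimensions.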
/**
* Convert yuv image inside AImage into ANativeWindow_Buffer
* ANativeWindow_Buffer format is guaranteed to be
* WINDOW_FORMAT_RGBX_8888
* WINDOW_FORMAT_RGBA_8888
* @param buf a {@link ANativeWindow_Buffer } instance, destination of
* image conversion
* @param image a {@link AImage} instance, source of image conversion.
* it will be deleted via {@link AImage_delete}
*/
bool CPhoneDevice2::DisplayImage(ANativeWindow_Buffer *buf, AImage *image) {
AASSERT(buf->format == WINDOW_FORMAT_RGBX_8888 ||
buf->format == WINDOW_FORMAT_RGBA_8888,
"Not supported buffer format");
int32_t srcFormat = -1;
AImage_getFormat(image, &srcFormat);
AASSERT(AIMAGE_FORMAT_YUV_420_888 == srcFormat, "Failed to get format");
int32_t srcPlanes = 0;
AImage_getNumberOfPlanes(image, &srcPlanes);
AASSERT(srcPlanes == 3, "Is not 3 planes");
switch (presentRotation_) {
case 0:
PresentImage(buf, image);
break;
case 90:
PresentImage90(buf, image);
break;
case 180:
PresentImage180(buf, image);
break;
case 270:
PresentImage270(buf, image);
break;
default:
AASSERT(0, "NOT recognized display rotation: %d", presentRotation_);
}
AImage_delete(image);
image = nullptr;
return true;
}
/*
* PresentImage()
* Converting yuv to RGB
* No rotation: (x,y) --> (x, y)
* Refer to:
* https://mathbits.com/MathBits/TISection/Geometry/Transformations2.htm
*/
void CPhoneDevice2::PresentImage(ANativeWindow_Buffer *buf, AImage *image) {
AImageCropRect srcRect;
AImage_getCropRect(image, &srcRect);
AImage_getPlaneRowStride(image, 0, &yStride);
AImage_getPlaneRowStride(image, 1, &uvStride);
yPixel = imageBuffer_;
AImage_getPlaneData(image, 0, &yPixel, &yLen);
vPixel = imageBuffer_ + yLen;
AImage_getPlaneData(image, 1, &vPixel, &vLen);
uPixel = imageBuffer_ + yLen + vLen;
AImage_getPlaneData(image, 2, &uPixel, &uLen);
AImage_getPlanePixelStride(image, 1, &uvPixelStride);
int32_t rowStride;
AImage_getPlaneRowStride(image, 0, &rowStride);
int32_t height = std::min(buf->height, (srcRect.bottom - srcRect.top));
int32_t width = std::min(buf->width, (srcRect.right - srcRect.left));
uint32_t *out = static_cast<uint32_t *>(buf->bits);
for (int32_t y = 0; y < height; y++) {
const uint8_t *pY = yPixel + yStride * (y + srcRect.top) + srcRect.left;
int32_t uv_row_start = uvStride * ((y + srcRect.top) >> 1);
const uint8_t *pU = uPixel + uv_row_start + (srcRect.left >> 1);
const uint8_t *pV = vPixel + uv_row_start + (srcRect.left >> 1);
for (int32_t x = 0; x < width; x++) {
const int32_t uv_offset = (x >> 1) * uvPixelStride;
out[x] = YUV2RGB(pY[x], pU[uv_offset], pV[uv_offset]);
}
out += buf->stride;
}
}
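In the inner loop, the chroma indexing exploits 4:2:0 subsampling: one U/V sample covers a 2x2 block of luma pixels, so both coordinates are halved before the plane strides are applied. A worked example (numbers illustrative, srcRect.left/top taken as 0):
// pixel (x=5, y=7), uvStride=640, uvPixelStride=2:
//   uv_row_start = 640 * (7 >> 1) = 1920
//   uv_offset    = (5 >> 1) * 2   = 4
// -> the U sample read is uPixel[1920 + 4]; the same sample also serves
//    pixels (4,6), (5,6) and (4,7).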
/*
* PresentImage90()
* Converting YUV to RGB
* Rotation image anti-clockwise 90 degree -- (x, y) --> (-y, x)
*/
void CPhoneDevice2::PresentImage90(ANativeWindow_Buffer *buf, AImage *image) {
AImageCropRect srcRect;
AImage_getCropRect(image, &srcRect);
AImage_getPlaneRowStride(image, 0, &yStride);
AImage_getPlaneRowStride(image, 1, &uvStride);
yPixel = imageBuffer_;
AImage_getPlaneData(image, 0, &yPixel, &yLen);
vPixel = imageBuffer_ + yLen;
AImage_getPlaneData(image, 1, &vPixel, &vLen);
uPixel = imageBuffer_ + yLen + vLen;
AImage_getPlaneData(image, 2, &uPixel, &uLen);
AImage_getPlanePixelStride(image, 1, &uvPixelStride);
int32_t height = std::min(buf->width, (srcRect.bottom - srcRect.top));
int32_t width = std::min(buf->height, (srcRect.right - srcRect.left));
uint32_t *out = static_cast<uint32_t *>(buf->bits);
out += height - 1;
for (int32_t y = 0; y < height; y++) {
const uint8_t *pY = yPixel + yStride * (y + srcRect.top) + srcRect.left;
int32_t uv_row_start = uvStride * ((y + srcRect.top) >> 1);
const uint8_t *pU = uPixel + uv_row_start + (srcRect.left >> 1);
const uint8_t *pV = vPixel + uv_row_start + (srcRect.left >> 1);
for (int32_t x = 0; x < width; x++) {
const int32_t uv_offset = (x >> 1) * uvPixelStride;
// [x, y]--> [-y, x]
int testb = pU[uv_offset];
int testc = pV[uv_offset];
int testA = pY[x];
out[x * buf->stride] = YUV2RGB(testA, testb, testc);
}
out -= 1; // step one output column to the left
}
}
/*
* PresentImage180()
* Converting yuv to RGB
* Rotate image 180 degree: (x, y) --> (-x, -y)
*/
void CPhoneDevice2::PresentImage180(ANativeWindow_Buffer *buf, AImage *image) {
AImageCropRect srcRect;
AImage_getCropRect(image, &srcRect);
AImage_getPlaneRowStride(image, 0, &yStride);
AImage_getPlaneRowStride(image, 1, &uvStride);
yPixel = imageBuffer_;
AImage_getPlaneData(image, 0, &yPixel, &yLen);
vPixel = imageBuffer_ + yLen;
AImage_getPlaneData(image, 1, &vPixel, &vLen);
uPixel = imageBuffer_ + yLen + vLen;
AImage_getPlaneData(image, 2, &uPixel, &uLen);
AImage_getPlanePixelStride(image, 1, &uvPixelStride);
int32_t height = std::min(buf->height, (srcRect.bottom - srcRect.top));
int32_t width = std::min(buf->width, (srcRect.right - srcRect.left));
uint32_t *out = static_cast<uint32_t *>(buf->bits);
out += (height - 1) * buf->stride;
for (int32_t y = 0; y < height; y++) {
const uint8_t *pY = yPixel + yStride * (y + srcRect.top) + srcRect.left;
int32_t uv_row_start = uvStride * ((y + srcRect.top) >> 1);
const uint8_t *pU = uPixel + uv_row_start + (srcRect.left >> 1);
const uint8_t *pV = vPixel + uv_row_start + (srcRect.left >> 1);
for (int32_t x = 0; x < width; x++) {
const int32_t uv_offset = (x >> 1) * uvPixelStride;
// mirror image since we are using front camera
out[width - 1 - x] = YUV2RGB(pY[x], pU[uv_offset], pV[uv_offset]);
// out[x] = YUV2RGB(pY[x], pU[uv_offset], pV[uv_offset]);
}
out -= buf->stride;
}
}
/*
* PresentImage270()
* Converting image from YUV to RGB
* Rotate Image counter-clockwise 270 degree: (x, y) --> (y, x)
*/
void CPhoneDevice2::PresentImage270(ANativeWindow_Buffer *buf, AImage *image) {
AImageCropRect srcRect;
AImage_getCropRect(image, &srcRect);
AImage_getPlaneRowStride(image, 0, &yStride);
AImage_getPlaneRowStride(image, 1, &uvStride);
yPixel = imageBuffer_;
AImage_getPlaneData(image, 0, &yPixel, &yLen);
vPixel = imageBuffer_ + yLen;
AImage_getPlaneData(image, 1, &vPixel, &vLen);
uPixel = imageBuffer_ + yLen + vLen;
AImage_getPlaneData(image, 2, &uPixel, &uLen);
AImage_getPlanePixelStride(image, 1, &uvPixelStride);
int32_t height = std::min(buf->width, (srcRect.bottom - srcRect.top));
int32_t width = std::min(buf->height, (srcRect.right - srcRect.left));
uint32_t *out = static_cast<uint32_t *>(buf->bits);
for (int32_t y = 0; y < height; y++) {
const uint8_t *pY = yPixel + yStride * (y + srcRect.top) + srcRect.left;
int32_t uv_row_start = uvStride * ((y + srcRect.top) >> 1);
const uint8_t *pU = uPixel + uv_row_start + (srcRect.left >> 1);
const uint8_t *pV = vPixel + uv_row_start + (srcRect.left >> 1);
for (int32_t x = 0; x < width; x++) {
const int32_t uv_offset = (x >> 1) * uvPixelStride;
int testb = pU[uv_offset];
int testc = pV[uv_offset];
int testA = pY[x];
out[(width - 1 - x) * buf->stride] =
YUV2RGB(testA, testb, testc);
}
out += 1; // move to the next column
}
}
/*
bool CPhoneDevice2::SendBroadcastMessage(String16 action, int value)
{
TM_INFO_LOG("sendBroadcastMessage(): Action: %s, Value: %d ", action.string(), value);
sp <IServiceManager> sm = defaultServiceManager();
sp <IBinder> am = sm->getService(String16("activity"));
if (am != NULL) {
Parcel data, reply;
data.writeInterfaceToken(String16("android.app.IActivityManager"));
data.writeStrongBinder(NULL);
// intent begin
data.writeString16(action); // action
data.writeInt32(0); // URI data type
data.writeString16(NULL, 0); // type
data.writeInt32(0); // flags
data.writeString16(NULL, 0); // package name
data.writeString16(NULL, 0); // component name
data.writeInt32(0); // source bound - size
data.writeInt32(0); // categories - size
data.writeInt32(0); // selector - size
data.writeInt32(0); // clipData - size
data.writeInt32(-2); // contentUserHint: -2 -> UserHandle.USER_CURRENT
data.writeInt32(-1); // bundle extras length
data.writeInt32(0x4C444E42); // 'B' 'N' 'D' 'L'
int oldPos = data.dataPosition();
data.writeInt32(1); // size
// data.writeInt32(0); // VAL_STRING, need to remove because of analyze common intent
data.writeString16(String16("type"));
data.writeInt32(1); // VAL_INTEGER
data.writeInt32(value);
int newPos = data.dataPosition();
data.setDataPosition(oldPos - 8);
data.writeInt32(newPos - oldPos); // refill bundle extras length
data.setDataPosition(newPos);
// intent end
data.writeString16(NULL, 0); // resolvedType
data.writeStrongBinder(NULL); // resultTo
data.writeInt32(0); // resultCode
data.writeString16(NULL, 0); // resultData
data.writeInt32(-1); // resultExtras
data.writeString16(NULL, 0); // permission
data.writeInt32(0); // appOp
data.writeInt32(-1); // option
data.writeInt32(1); // serialized: != 0 -> ordered
data.writeInt32(0); // sticky
data.writeInt32(-2); // userId: -2 -> UserHandle.USER_CURRENT
status_t ret = am->transact(IBinder::FIRST_CALL_TRANSACTION + 13, data,
&reply); // BROADCAST_INTENT_TRANSACTION
if (ret == NO_ERROR) {
int exceptionCode = reply.readExceptionCode();
if (exceptionCode) {
TM_INFO_LOG("sendBroadcastMessage(%s) caught exception %d\n",
action.string(), exceptionCode);
return false;
}
} else {
return false;
}
} else {
TM_INFO_LOG("getService() couldn't find activity service!\n");
return false;
}
return true;
}
*/
void CPhoneDevice2::camera_device_on_disconnected(void *context, ACameraDevice *device)
{
ALOGI("Camera(id: %s) is diconnected.\n", ACameraDevice_getId(device));
CPhoneDevice2* pThis = (CPhoneDevice2*)context;
// delete pThis;
}
void CPhoneDevice2::camera_device_on_error(void *context, ACameraDevice *device, int error)
{
ALOGI("Error(code: %d) on Camera(id: %s).\n", error, ACameraDevice_getId(device));
}
void CPhoneDevice2::capture_session_on_ready(void *context, ACameraCaptureSession *session)
{
ALOGI("Session is ready. %p\n", session);
}
void CPhoneDevice2::capture_session_on_active(void *context, ACameraCaptureSession *session)
{
ALOGI("Session is activated. %p\n", session);
}
void CPhoneDevice2::capture_session_on_closed(void *context, ACameraCaptureSession *session)
{
ALOGI("Session is closed. %p\n", session);
}

@ -0,0 +1,124 @@
#ifndef __PHONE_DEVICE2_H__
#define __PHONE_DEVICE2_H__
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <map>
#include <atomic>
#include <camera/NdkCameraManager.h>
#include <camera/NdkCameraError.h>
#include <camera/NdkCameraDevice.h>
#include <camera/NdkCameraMetadataTags.h>
#include <media/NdkImageReader.h>
#include <Client/Device.h>
#include <string>
#include "camera2/Camera2Helper.h"
class CPhoneDevice2 : public IDevice
{
public:
CPhoneDevice2(JavaVM* vm, jobject service);
virtual ~CPhoneDevice2();
virtual void SetListener(IListener* listener);
virtual bool UpdateTime(time_t ts);
virtual bool Reboot();
virtual timer_uid_t RegisterHeartbeat(unsigned int timerType, unsigned int timeout);
virtual bool TakePhoto(const IDevice::PHOTO_INFO& photoInfo, const vector<OSD_INFO>& osds, const string& path);
virtual timer_uid_t RegisterTimer(unsigned int timerType, unsigned int timeout);
virtual bool UnregisterTimer(timer_uid_t uid);
virtual bool FireTimer(timer_uid_t uid);
protected:
ACameraCaptureSession_stateCallbacks *GetSessionListener();
std::string GetFileName() const;
bool SendBroadcastMessage(std::string action, int value);
bool MatchCaptureSizeRequest(ACameraManager *cameraManager, const char *selectedCameraId, unsigned int width, unsigned int height, uint32_t cameraOrientation_,
ImageFormat* resCap);
bool DisplayImage(ANativeWindow_Buffer* buf, AImage* image);
void PresentImage(ANativeWindow_Buffer* buf, AImage* image);
void PresentImage90(ANativeWindow_Buffer* buf, AImage* image);
void PresentImage180(ANativeWindow_Buffer* buf, AImage* image);
void PresentImage270(ANativeWindow_Buffer* buf, AImage* image);
static void camera_device_on_disconnected(void *context, ACameraDevice *device);
static void camera_device_on_error(void *context, ACameraDevice *device, int error);
static void capture_session_on_ready(void *context, ACameraCaptureSession *session);
static void capture_session_on_active(void *context, ACameraCaptureSession *session);
static void capture_session_on_closed(void *context, ACameraCaptureSession *session);
void ImageCallback(AImageReader *reader);
static void OnImageCallback(void *ctx, AImageReader *reader);
bool WriteFile(AImage *image, const string& path);
static bool WriteFile(CPhoneDevice2* pThis, AImage *image);
inline bool TakePhotoCb(bool res, const IDevice::PHOTO_INFO& photoInfo, const string& path, time_t photoTime)
{
if (m_listener != NULL)
{
std::vector<IDevice::RECOG_OBJECT> objects;
return m_listener->OnPhotoTaken(res, photoInfo, path, photoTime, objects);
}
return false;
}
protected:
JavaVM* m_vm;
jobject m_javaService;
jmethodID mRegisterTimerMid;
jmethodID mRegisterHeartbeatMid;
jmethodID mUnregisterTimerMid;
jmethodID mUpdateTimeMid;
std::string mPath;
IDevice::PHOTO_INFO mPhotoInfo;
IListener* m_listener;
atomic_ulong m_timerUidFeed;
std::map<IDevice::timer_uid_t, unsigned long> mTimers;
AImageReader *mAImageReader;
ANativeWindow *theNativeWindow;
ACameraDevice *cameraDevice;
ACaptureRequest *captureRequest;
ACameraOutputTarget *cameraOutputTarget;
ACaptureSessionOutput *sessionOutput;
ACaptureSessionOutputContainer *captureSessionOutputContainer;
ACameraCaptureSession *captureSession;
ACameraDevice_StateCallbacks deviceStateCallbacks;
ACameraCaptureSession_stateCallbacks captureSessionStateCallbacks;
DisplayDimension mDisplayDimension;
int32_t presentRotation_;
int32_t imageHeight_;
int32_t imageWidth_;
uint8_t* imageBuffer_;
int32_t yStride, uvStride;
uint8_t *yPixel, *uPixel, *vPixel;
int32_t yLen, uLen, vLen;
int32_t uvPixelStride;
};
#endif // __PHONE_DEVICE2_H__

@ -1,57 +0,0 @@
#ifndef __POSITION_HELPER_H__
#define __POSITION_HELPER_H__
#include <cmath>
#define _USE_MATH_DEFINES
inline double transformLat(double x, double y)
{
double ret = -100.0 + 2.0 * x + 3.0 * y + 0.2 * y * y + 0.1 * x * y + 0.2 * std::sqrt(std::abs(x));
ret += (20.0 * std::sin(6.0 * x * M_PI) + 20.0 * std::sin(2.0 * x * M_PI)) * 2.0 / 3.0;
ret += (20.0 * std::sin(y * M_PI) + 40.0 * std::sin(y / 3.0 * M_PI)) * 2.0 / 3.0;
ret += (160.0 * std::sin(y / 12.0 * M_PI) + 320 * std::sin(y * M_PI / 30.0)) * 2.0 / 3.0;
return ret;
}
inline double transformLng(double x, double y)
{
double ret = 300.0 + x + 2.0 * y + 0.1 * x * x + 0.1 * x * y + 0.1 * std::sqrt(std::abs(x));
ret += (20.0 * std::sin(6.0 * x * M_PI) + 20.0 * std::sin(2.0 * x * M_PI)) * 2.0 / 3.0;
ret += (20.0 * std::sin(x * M_PI) + 40.0 * std::sin(x / 3.0 * M_PI)) * 2.0 / 3.0;
ret += (150.0 * std::sin(x / 12.0 * M_PI) + 300.0 * std::sin(x / 30.0 * M_PI)) * 2.0 / 3.0;
return ret;
}
inline void transformPosition(double& lat, double& lng)
{
// Semi-major axis used when projecting satellite ellipsoid coordinates onto the planar map coordinate system
#define AXIS 6378245.0
// Squared eccentricity of the ellipsoid: (a^2 - b^2) / a^2
#define OFFSET 0.00669342162296594323
double dLat = transformLat(lng - 105.0, lat - 35.0);
double dLon = transformLng(lng - 105.0, lat - 35.0);
double radLat = lat / 180.0 * M_PI;
double magic = std::sin(radLat);
magic = 1 - OFFSET * magic * magic;
double sqrtMagic = std::sqrt(magic);
dLat = (dLat * 180.0) / ((AXIS * (1 - OFFSET)) / (magic * sqrtMagic) * M_PI);
dLon = (dLon * 180.0) / (AXIS / sqrtMagic * std::cos(radLat) * M_PI);
lat += dLat;
lng += dLon;
}
inline bool shouldConvertPosition(double lat, double lon)
{
if (lon < 72.004 || lon > 137.8347)
{
return false;
}
if (lat < 0.8293 || lat > 55.8271)
{
return false;
}
return true;
}
#endif // __POSITION_HELPER_H__
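transformPosition applies the standard WGS-84 to GCJ-02 ("Mars coordinates") offset, and shouldConvertPosition gates it to the mainland-China bounding box. A usage sketch (coordinates are illustrative):
double lat = 39.9042, lng = 116.4074;   // a WGS-84 fix near Beijing
if (shouldConvertPosition(lat, lng))
    transformPosition(lat, lng);        // lat/lng now hold GCJ-02 values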

@ -1,462 +0,0 @@
//
// Created by Matthew on 2025/3/5.
//
#include "PtzController.h"
#include "SensorsProtocol.h"
#include "GPIOControl.h"
#include "PhoneDevice.h"
#include "time.h"
#include <memory>
PtzController::PtzController(CPhoneDevice* pPhoneDevice) : m_pPhoneDevice(pPhoneDevice)
{
m_exit = false;
}
void PtzController::Startup()
{
m_thread = std::thread(PtzThreadProc, this);
}
void PtzController::PtzThreadProc(PtzController* pThis)
{
pThis->PtzProc();
}
void PtzController::AddCommand(uint8_t channel, int cmdidx, uint8_t bImageSize, uint8_t preset, const char *serfile, uint32_t baud, int addr)
{
SERIAL_CMD cmd = { 0 };
cmd.channel = channel;
cmd.preset = preset;
cmd.cmdidx = cmdidx;
cmd.bImageSize = bImageSize;
strcpy(cmd.serfile, serfile);
cmd.baud = baud;
cmd.addr = addr;
cmd.ts = time(NULL);
m_locker.lock();
m_cmds.push_back(cmd);
m_locker.unlock();
m_sem.release();
}
void PtzController::AddPhotoCommand(IDevice::PHOTO_INFO& photoInfo, const std::string& path, const std::vector<IDevice::OSD_INFO>& osds)
{
IDevice::SerialsPhotoParam param = { "", 0, 0 };
m_pPhoneDevice->GetPhotoSerialsParamCb(param);
SERIAL_CMD cmdPreset = { 0 };
time_t ts = time(NULL);
#if 1
// if (photoInfo.preset != 0 && photoInfo.preset != 0xFF)
{
cmdPreset.ts = photoInfo.selfTestingTime;
cmdPreset.delayTime = photoInfo.closeDelayTime;
cmdPreset.channel = photoInfo.channel;
cmdPreset.preset = photoInfo.preset;
cmdPreset.cmdidx = PHOTO_OPEN_POWER;
strcpy(cmdPreset.serfile, param.serfile);
cmdPreset.baud = param.baud;
cmdPreset.addr = param.addr;
}
#endif
SERIAL_CMD cmd = { 0 };
cmd.ts = ts;
cmd.delayTime = photoInfo.closeDelayTime;
cmd.channel = photoInfo.channel;
cmd.preset = photoInfo.preset;
cmd.cmdidx = Take_Photo;
cmd.bImageSize = photoInfo.resolution;
strcpy(cmd.serfile, param.serfile);
cmd.baud = param.baud;
cmd.addr = param.addr;
PtzPhotoParams* ppp = new PtzPhotoParams(photoInfo, path, osds);
cmd.photoParams.reset(ppp);
// cmd.delayTime;
// uint8_t bImageSize;
// char serfile[128];
// uint32_t baud;
// int addr;
m_locker.lock();
#if 1
if (cmdPreset.cmdidx != 0)
{
m_cmds.push_back(cmdPreset);
}
#endif
m_cmds.push_back(cmd);
m_locker.unlock();
m_sem.release();
m_sem.release();
}
void PtzController::ExitAndWait()
{
m_exit = true;
m_sem.release();
if (m_thread.joinable())
{
m_thread.join();
}
}
void PtzController::PtzProc()
{
PROC_PTZ_STATE state = PTZS_POWER_OFF;
SERIAL_CMD cmd;
PTZ_STATE ptz_state;
bool hasCmd = false;
int i=0;
int closecmd=0;
std::shared_ptr<PowerControl> powerCtrl;
time_t selfTestingStartTime = 0;
time_t selfTestingWaitTime = 0;
time_t PTZ_preset_start_time = 0;
time_t PTZ_preset_wait_time = 0;
time_t close_delay_time = CAMERA_CLOSE_DELAYTIME;
time_t start_delay_time = 0;
time_t auto_delay_time = 0;
time_t auto_wait_time = WAIT_TIME_AUTO_CLOSE;
time_t photo_move_preset_time = 0;
int iwaitime = 0;
while(true)
{
m_sem.acquire();
if (m_exit)
{
break;
}
hasCmd = false;
m_locker.lock();
for (auto it = m_cmds.begin(); it != m_cmds.end(); ++it)
{
if ((state == PTZS_SELF_TESTING) || (PTZS_PHOTO_SELF_TESTING == state))
{
// find first non-taking-photo cmd
if (it->cmdidx != Take_Photo)
{
cmd = *it;
m_cmds.erase(it);
hasCmd = true;
break;
}
}
else
{
cmd = *it;
m_cmds.erase(it);
hasCmd = true;
break;
}
}
m_locker.unlock();
if (!hasCmd)
{
if ((state == PTZS_SELF_TESTING) || (PTZS_PHOTO_SELF_TESTING == state))
{
time_t timeout = time(NULL) - selfTestingStartTime;
if(timeout < 0)
selfTestingStartTime = time(NULL);
if (timeout >= selfTestingWaitTime)
{
XYLOG(XYLOG_SEVERITY_INFO, "超时(%u秒)未收到云台自检结束应答,状态改为空闲!", (uint32_t)timeout);
state = PTZS_IDLE;
m_sem.release();
continue;
}
else
{
//if(timeout >= CAMERA_SELF_TEST_TIME)
{
#ifndef NDEBUG
if (timeout == 1 || ((timeout % 10) == 0))
#endif
{
XYLOG(XYLOG_SEVERITY_INFO, "开始查询云台自检状态timeout=%u秒", (uint32_t)timeout);
}
if(0 == QueryPtzState(&ptz_state, QUERY_PTZ_STATE, cmd.serfile, cmd.baud, cmd.addr))
{
if(0 == ptz_state.ptz_status)
{
XYLOG(XYLOG_SEVERITY_INFO, "收到云台自检结束应答状态改为空闲timeout=%u秒", (uint32_t)timeout);
state = PTZS_IDLE;
m_sem.release();
continue;
}
}
}
}
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
m_sem.release();
continue;
}
if(0 == start_delay_time)
{
if(0 == iwaitime)
{
auto_delay_time = time(NULL);
iwaitime += 1;
m_sem.release();
continue;
}
else
{
if(time(NULL) - auto_delay_time < 0)
{
auto_delay_time = time(NULL);
}
if(time(NULL) - auto_delay_time >= auto_wait_time)
{
iwaitime = 0;
XYLOG(XYLOG_SEVERITY_INFO, "摄像机自动上电延时时间超过%u秒准备关闭摄像机", (uint32_t)auto_wait_time);
}
else
{
m_sem.release();
continue;
}
}
}
else
{
if(time(NULL) - start_delay_time < 0)
{/* Guard against another thread adjusting the system clock while we wait, which would keep the camera powered far too long */
start_delay_time = time(NULL);
}
if(time(NULL) - start_delay_time >= close_delay_time)
{
XYLOG(XYLOG_SEVERITY_INFO, "摄像机空闲时间超过%u秒准备关闭摄像机", (uint32_t)close_delay_time);
}
else
{
m_sem.release();
continue;
}
}
if (state == PTZS_POWER_OFF)
{
closecmd = 0;
XYLOG(XYLOG_SEVERITY_INFO, "自动关机触发,摄像机本来就处于关机状态!");
// Do Nothing
}
else
{
XYLOG(XYLOG_SEVERITY_INFO, "自动关机触发通知云台准备关机state=%d", state);
for(i=0; i<3; i++)
{
if(0 == QueryPtzState(&ptz_state, NOTIFY_PTZ_CLOSE, cmd.serfile, cmd.baud, cmd.addr))
break;
}
powerCtrl.reset();
closecmd = 0;
state = PTZS_POWER_OFF;
XYLOG(XYLOG_SEVERITY_INFO, "自动触发关闭云台电源state=%d", state);
}
start_delay_time = 0;
continue;
}
switch (cmd.cmdidx)
{
case Take_Photo:
{
if (state == PTZS_POWER_OFF)
{
if (!powerCtrl)
{
//powerCtrl = std::make_shared<PlzCameraPowerCtrl>(cmd.photoParams->mPhotoInfo.closeDelayTime);
powerCtrl = std::make_shared<PlzCameraPowerCtrl>(0);
selfTestingStartTime = time(NULL);
selfTestingWaitTime = cmd.photoParams->mPhotoInfo.selfTestingTime;
state = PTZS_PHOTO_SELF_TESTING;
XYLOG(XYLOG_SEVERITY_INFO, "1、收到拍照指令摄像机从关机状态改为自检状态");
m_locker.lock();
m_cmds.insert(m_cmds.begin(), cmd);
m_locker.unlock();
m_sem.release();
continue;
}
}
if(cmd.photoParams->mPhotoInfo.scheduleTime == 0)
{
if(1 == closecmd)
{
XYLOG(XYLOG_SEVERITY_INFO, "3、收到手动拍照指令但同时后续收到关机指令等待拍完照片再关机。state=%d", state);
}
else
{
start_delay_time = time(NULL);
XYLOG(XYLOG_SEVERITY_INFO, "3、收到手动拍照指令state=%d", state);
}
}
else
XYLOG(XYLOG_SEVERITY_INFO, "2、收到自动拍照指令state=%d", state);
state = PTZS_TAKING_PHOTO;
if (cmd.preset != 0 && cmd.preset != 0xFF)
{
CameraPhotoCmd(0, cmd.channel, MOVE_PRESETNO, 0, cmd.preset, cmd.serfile, cmd.baud, cmd.addr);
#if 0
if(START_ONCE_SELF == cmd.preset)
{
selfTestingStartTime = time(NULL);
selfTestingWaitTime = CAMERA_SELF_TEST_TIME;
state = PTZS_SELF_TESTING;
m_sem.release();
XYLOG(XYLOG_SEVERITY_INFO, "拍照调用200号预置点指令摄像机启动一次性自检从拍照状态改为自检状态取消拍照动作设置的自检等待时间%u秒", (uint32_t)selfTestingWaitTime);
break;
}
#endif
PTZ_preset_start_time = time(NULL);
if(START_ONCE_SELF == cmd.preset)
PTZ_preset_wait_time = CAMERA_SELF_TEST_TIME;
else
PTZ_preset_wait_time = MOVE_PRESET_WAIT_TIME;
XYLOG(XYLOG_SEVERITY_INFO, "摄像机拍照前开始调用预置点%ustate=%d", (uint32_t)cmd.preset, state);
for(;;)
{
if(0 == QueryPtzState(&ptz_state, QUERY_PTZ_STATE, cmd.serfile, cmd.baud, cmd.addr))
{
if(0 == ptz_state.ptz_status)
{
XYLOG(XYLOG_SEVERITY_INFO, "摄像机拍照前调用预置点%u收到移动结束应答移动时长=%d秒 state=%d", (uint32_t)cmd.preset, (uint32_t)(time(NULL)-PTZ_preset_start_time), state);
break;
}
}
if(time(NULL) - PTZ_preset_start_time < 0)
{/* Guard against another thread adjusting the system clock while we wait, which would make us wait on the preset move far too long */
PTZ_preset_start_time = time(NULL);
}
if(time(NULL) - PTZ_preset_start_time >= PTZ_preset_wait_time)
{
XYLOG(XYLOG_SEVERITY_INFO, "摄像机拍照前调用预置点%u摄像机在%u秒内未收到调用预置点结束应答state=%d", (uint32_t)cmd.preset, (uint32_t)PTZ_preset_wait_time, state);
break;
}
std::this_thread::sleep_for(std::chrono::milliseconds(10));
photo_move_preset_time = time(NULL);
}
}
if(cmd.photoParams->mPhotoInfo.mediaType == 1)
m_pPhoneDevice->TakeVideoWithNetCamera(cmd.photoParams->mPhotoInfo, cmd.photoParams->mPath, cmd.photoParams->mOsds, powerCtrl);
else if ((cmd.photoParams->mPhotoInfo.mediaType == XY_MEDIA_TYPE_STREAM || cmd.photoParams->mPhotoInfo.mediaType == XY_MEDIA_TYPE_STREAM_OFF))
{
m_pPhoneDevice->StartPushStreaming(cmd.photoParams->mPhotoInfo, cmd.photoParams->mPath, cmd.photoParams->mOsds, powerCtrl);
}
else
m_pPhoneDevice->TakePhotoWithNetCamera(cmd.photoParams->mPhotoInfo, cmd.photoParams->mPath, cmd.photoParams->mOsds, powerCtrl);
state = PTZS_IDLE;
}
break;
case PHOTO_OPEN_POWER:
if (state == PTZS_POWER_OFF)
{
if (!powerCtrl)
{
powerCtrl = std::make_shared<PlzCameraPowerCtrl>(0);
selfTestingStartTime = time(NULL);
selfTestingWaitTime = CAMERA_SELF_TEST_TIME;
state = PTZS_PHOTO_SELF_TESTING;
m_sem.release();
XYLOG(XYLOG_SEVERITY_INFO, "收到拍照指令开机,摄像机从关机状态改为自检状态!设置的自检等待时间%u秒", (uint32_t)selfTestingWaitTime);
}
}
else
{
XYLOG(XYLOG_SEVERITY_INFO, "收到拍照指令开机摄像机处于state=%d", state);
}
break;
case OPEN_TOTAL:
if (state == PTZS_POWER_OFF)
{
if (!powerCtrl)
{
powerCtrl = std::make_shared<PlzCameraPowerCtrl>(0);
selfTestingStartTime = time(NULL);
selfTestingWaitTime = CAMERA_SELF_TEST_TIME;
state = PTZS_SELF_TESTING;
m_sem.release();
XYLOG(XYLOG_SEVERITY_INFO, "收到手动开机指令,摄像机从关机状态改为自检状态!设置的自检等待时间%u秒", (uint32_t)selfTestingWaitTime);
}
}
else
{
XYLOG(XYLOG_SEVERITY_INFO, "收到手动开机指令摄像机处于state=%d", state);
}
closecmd = 0;
start_delay_time = time(NULL);
XYLOG(XYLOG_SEVERITY_INFO, "收到手动打开摄像机指令刷新关机计时初始值state=%d", state);
break;
case CLOSE_TOTAL:
if (state == PTZS_POWER_OFF)
{
closecmd = 0;
XYLOG(XYLOG_SEVERITY_INFO, "收到关机指令,摄像机本来就处于关机状态!");
// Do Nothing
}
else if(PTZS_PHOTO_SELF_TESTING == state)
{
closecmd = 1;
XYLOG(XYLOG_SEVERITY_INFO, "在拍照自检过程中收到关机指令取消延时关机转到自动关机处理state=%d", state);
}
else
{
XYLOG(XYLOG_SEVERITY_INFO, "收到关机指令通知云台准备关机state=%d", state);
for(i=0; i<3; i++)
{
if(0 == QueryPtzState(&ptz_state, NOTIFY_PTZ_CLOSE, cmd.serfile, cmd.baud, cmd.addr))
break;
}
closecmd = 0;
powerCtrl.reset();
state = PTZS_POWER_OFF;
XYLOG(XYLOG_SEVERITY_INFO, "关闭云台电源state=%d", state);
}
start_delay_time = 0;
break;
default:
{
if (state == PTZS_POWER_OFF)
{
XYLOG(XYLOG_SEVERITY_INFO, "收到手动控制摄像机指令,摄像机处于关机状态,无法执行!");
CameraPhotoCmd(cmd.ts, cmd.channel, cmd.cmdidx, 0, cmd.preset, cmd.serfile, cmd.baud, cmd.addr);
break;
}
start_delay_time = time(NULL);
XYLOG(XYLOG_SEVERITY_INFO, "收到手动控制摄像机指令刷新关机计时初始值state=%d", state);
if(cmd.ts <= photo_move_preset_time)
{
XYLOG(XYLOG_SEVERITY_INFO, "丢弃拍照调预置点期间收到的控制云台指令,指令时间" FMT_TIME_T ",拍照时间" FMT_TIME_T "state=%d", cmd.ts, photo_move_preset_time, state);
}
else
{
if((MOVE_PRESETNO == cmd.cmdidx) && (START_ONCE_SELF == cmd.preset))
{
selfTestingStartTime = time(NULL);
selfTestingWaitTime = CAMERA_SELF_TEST_TIME;
state = PTZS_SELF_TESTING;
m_sem.release();
XYLOG(XYLOG_SEVERITY_INFO, "收到调用200号预置点指令摄像机启动一次性自检从当前状态改为自检状态设置的自检等待时间%u秒", (uint32_t)selfTestingWaitTime);
}
CameraPhotoCmd(cmd.ts, cmd.channel, cmd.cmdidx, 0, cmd.preset, cmd.serfile, cmd.baud, cmd.addr);
}
}
break;
}
}
}

@ -1,100 +0,0 @@
//
// Created by Matthew on 2025/3/5.
//
#ifndef MICROPHOTO_PTZCONTROLLER_H
#define MICROPHOTO_PTZCONTROLLER_H
#include <Buffer.h>
#include <thread>
#include <vector>
#include <memory>
#include <string>
#include <mutex>
#include <SemaphoreEx.h>
#include <Client/Device.h>
enum PROC_PTZ_STATE
{
PTZS_POWER_OFF = 0,
PTZS_IDLE = 1,
PTZS_SELF_TESTING = 2,
PTZS_MOVING = 3,
PTZS_TAKING_PHOTO = 4,
PTZS_PHOTO_SELF_TESTING = 5,
};
#define CAMERA_SELF_TEST_TIME 150 /* Camera self-test time (excluding PTZ self-test)*/
#define MOVE_PRESET_WAIT_TIME 20 /* Waiting for the maximum time for the PTZ to move to the preset position*/
#define CAMERA_CLOSE_DELAYTIME 360 /* Auto Power-Off Timer Setting After Manual Power-On (for Camera)*/
#define PHOTO_OPEN_POWER 16000
#define WAIT_TIME_AUTO_CLOSE 2 /* Lets several preset images be captured back-to-back without the camera re-running its self-test for every shot */
class PtzPhotoParams
{
public:
PtzPhotoParams(const IDevice::PHOTO_INFO& photoInfo, const std::string& path, const std::vector<IDevice::OSD_INFO>& osds) :
mPhotoInfo(photoInfo), mPath(path), mOsds(osds)
{
}
~PtzPhotoParams()
{
}
IDevice::PHOTO_INFO mPhotoInfo;
std::string mPath;
std::vector<IDevice::OSD_INFO> mOsds;
};
struct SERIAL_CMD
{
uint8_t channel;
uint8_t preset;
time_t ts;
int cmdidx;
uint32_t delayTime;
uint8_t bImageSize;
char serfile[128];
uint32_t baud;
int addr;
std::shared_ptr<PtzPhotoParams> photoParams;
};
class CPhoneDevice;
class PtzController
{
public:
PtzController(CPhoneDevice* pPhoneDevice);
void Startup();
void AddCommand(uint8_t channel, int cmdidx, uint8_t bImageSize, uint8_t preset, const char *serfile, uint32_t baud, int addr);
void AddPhotoCommand(IDevice::PHOTO_INFO& photoInfo, const std::string& path, const std::vector<IDevice::OSD_INFO>& osds);
void ExitAndWait();
protected:
static void PtzThreadProc(PtzController* pThis);
void PtzProc();
protected:
std::mutex m_locker;
std::vector<SERIAL_CMD> m_cmds;
CSemaphore m_sem;
bool m_exit;
std::thread m_thread;
CPhoneDevice* m_pPhoneDevice;
};
#endif //MICROPHOTO_PTZCONTROLLER_H
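Commands may be queued from any thread; the worker started by Startup() drains them serially. A lifecycle sketch (the serial-port path, baud rate, and address are hypothetical placeholders):
PtzController ctrl(pPhoneDevice);
ctrl.Startup();
ctrl.AddCommand(/*channel*/1, MOVE_PRESETNO, /*bImageSize*/0, /*preset*/5,
                "/dev/ttyS1", 9600, /*addr*/1);
// ... queue more commands, or AddPhotoCommand(...) for captures ...
ctrl.ExitAndWait();   // sets m_exit, releases the semaphore, joins the thread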

File diff suppressed because it is too large

@ -1,557 +0,0 @@
//
// Created by hyz on 2024/6/5.
//
#ifndef __SENSOR_PROTOCOL_H__
#define __SENSOR_PROTOCOL_H__
#include <string>
#ifndef LOBYTE
#define LOBYTE(w) ((unsigned char)(w))
#endif
#ifndef HIBYTE
#define HIBYTE(w) ((unsigned char)(((unsigned short)(w) >> 8) & 0xFF))
#endif
#ifndef LOWORD
#define LOWORD(l) ((uint16_t)(l))
#endif
#ifndef HIWORD
#define HIWORD(l) ((uint16_t)((uint32_t)(l) >> 16))
#endif
#define MAX_STRING_LEN 32
#define IOT_PARAM_WRITE 0xAE
#define IOT_PARAM_READ 0xAF
#define MAX_FIELDS_NUM 20 /* Max number of data fields in one BD_NMEA0183 sentence */
#define MAX_SERIAL_DEV_NUM 25 /* Max number of serial-attached sensors */
#define MAX_SERIAL_PORT_NUM 5
#define MAX_DEV_VALUE_NUM 12 /* Max number of sampled values per device */
#define WEATHER_PROTOCOL 1 /* Temperature/humidity protocol index */
#define WIND_PROTOCOL 2 /* Wind speed/direction protocol index */
#define SLANT_PROTOCOL 3 /* Tilt angle protocol index */
#define RALLY_PROTOCOL 4 /* Tension (pull force) protocol index */
#define PELCO_P_PROTOCOL 5 /* Camera Pelco-P protocol index */
#define PELCO_D_PROTOCOL 6 /* Camera Pelco-D protocol index */
#define SERIALCAMERA_PROTOCOL 8 /* Serial camera protocol index */
#define MUTIWEATHER_PROTOCOL 9 /* All-in-one weather station protocol index */
#define NMEA0183_PROTOCOL 10 /* Single BeiDou NMEA0183 standard protocol */
#define RESERVE2_PROTOCOL 17 /* Reserved protocol index 2 */
#define RESERVE4_PROTOCOL 19 /* Reserved protocol index 4 */
#define RESERVE5_PROTOCOL 20 /* Reserved protocol index 5 */
#define INVALID_PROTOCOL 21 /* Invalid protocol index */
#define AirTempNo 0 /* Air temperature storage index */
#define HumidityNo 1 /* Relative humidity storage index */
#define WindSpeedNo 2 /* Wind speed storage index */
#define WindDirectionNo 3 /* Wind direction storage index */
#define RainfallNo 4 /* Rainfall storage index */
#define AtmosNo 5 /* Atmospheric pressure storage index */
#define OpticalRadiationNo 6 /* Sunshine (solar radiation) storage index */
#define SER_IDLE 0 /* Sensor idle, sampling not started */
#define SER_SAMPLE 1 /* Sampling in progress */
#define SAMPLINGSUCCESS 2 /* Sampling finished, data read successfully */
#define SER_STARTSAMPLE 3 /* Start sampling */
#define SER_SAMPLEFAIL -1 /* Sampling failed: no data, sensor faulty or not connected */
#define PHOTO_SAVE_SUCC 5 /* Photo saved successfully */
#define WEATHER_DATA_NUM 8 /* Max number of weather values (typically up to 6 elements) */
#define RALLY_DATA_NUM 2 /* Max number of tension values (usually 1) */
#define SLANTANGLE_DATA_NUM 3 /* Max number of tilt values (usually only X and Y axes) */
#define PTZ_MOVETIME 1 // PTZ move wait time: 1 second
#define MAX_CHANNEL_NUM 2 /* Max number of video channels */
#define MAX_PHOTO_FRAME_LEN 1024 /* Max length of one photo data packet */
#define MAX_PHOTO_PACKET_NUM 1024 /* Max number of photo packets (photo capped at 1 MB) */
#define RECVDATA_MAXLENTH 2048 /* Max receive buffer size */
#define TIMER_CNT 50 // Poll command timer period, 5 ms
#define SENDDATA_MAXLENTH RECVDATA_MAXLENTH /* Max normal send buffer size */
// Camera control command macros
#define Cmd_Cancel 0x00000000 // Disable function
#define SET_PRESETNO 0x00030000 // Set preset
#define MOVE_TO_PRESETNO 0x00070000 // Go to preset
/* Camera PELCO-P control command macros */
#define P_Auto_Scan 0x20000000 /* Auto-scan control (1/0 = enable/disable) */
#define P_IRIS_CLOSE 0x08000000 /* Iris close (active when 1) */
#define P_IRIS_OPEN 0x04000000 /* Iris open (active when 1) */
#define P_FOCUS_NEAR 0x02000000 /* Focus near (active when 1) */
#define P_FOCUS_FAR 0x01000000 /* Focus far (active when 1) */
#define P_ZOOM_WIDE 0x00400000 /* Zoom out, away from the subject (active when 1) */
#define P_ZOOM_TELE 0x00200000 /* Zoom in, toward the subject (active when 1) */
#define P_MOVE_DOWN 0x0010001f /* Tilt lens down (active when 1) */
#define P_MOVE_UP 0x0008001f /* Tilt lens up (active when 1) */
#define P_MOVE_LEFT 0x00041f00 /* Pan lens left (active when 1) */
#define P_MOVE_RIGHT 0x00021f00 /* Pan lens right (active when 1) */
// Camera PELCO-D control command macros
#define D_Auto_Scan 0x10000000 /* Auto-scan control (1/0 = enable/disable) */
#define D_IRIS_CLOSE 0x04000000 /* Iris close (active when 1) */
#define D_IRIS_OPEN 0x02000000 /* Iris open (active when 1) */
#define D_FOCUS_NEAR 0x01000000 /* Focus near (active when 1) */
#define D_FOCUS_FAR 0x00800000 /* Focus far (active when 1) */
#define D_ZOOM_WIDE 0x00400000 /* Zoom out, away from the subject (active when 1) */
#define D_ZOOM_TELE 0x00200000 /* Zoom in, toward the subject (active when 1) */
#define D_MOVE_DOWN 0x0010002d /* Tilt lens down (active when 1) */
#define D_MOVE_UP 0x0008002d /* Tilt lens up (active when 1) */
#define D_MOVE_LEFT 0x00042d00 /* Pan lens left (active when 1) */
#define D_MOVE_RIGHT 0x00022d00 /* Pan lens right (active when 1) */
#define D_OPEN_TOTAL 0x0009000B /* Main power on (active when 1) */
#define D_OPEN_MODULE_POWER 0x0009000C /* Camera module power on (active when 1) */
/* Camera downlink command macros */
#define TAKE_PHOTO 20000 /* Take photo */
#define SET_BAUD 10000 /* Set dome camera baud rate */
#define STOP_CMD 10005 /* Cancel or stop command */
#define AUTO_SCAN 10006 /* Auto-scan control (1/0 = enable/disable) */
#define IRIS_CLOSE 10007 /* Iris close (active when 1) */
#define IRIS_OPEN 10008 /* Iris open (active when 1) */
#define FOCUS_NEAR 10009 /* Focus near (active when 1) */
#define FOCUS_FAR 10010 /* Focus far (active when 1) */
#define ZOOM_WIDE 10011 /* Zoom out, away from the subject (active when 1) */
#define ZOOM_TELE 10012 /* Zoom in, toward the subject (active when 1) */
#define MOVE_DOWN 10013 /* Tilt lens down (active when 1) */
#define MOVE_UP 10014 /* Tilt lens up (active when 1) */
#define MOVE_LEFT 10015 /* Pan lens left (active when 1) */
#define MOVE_RIGHT 10016 /* Pan lens right (active when 1) */
#define MOVE_PRESETNO 10017 // Go to preset
#define SAVE_PRESETNO 10018 // Set preset
#define OPEN_TOTAL 10019 /* Main power on (active when 1) */
#define OPEN_MODULE_POWER 10020 /* Camera module power on (active when 1) */
#define NOTIFY_PTZ_CLOSE 10021 // Notify the PTZ of impending shutdown
#define QUERY_PTZ_STATE 10022 // Query PTZ state
#define CLOSE_TOTAL 10040 /* Main power off */
#define SPEED_DOME_CAMERA 0 /* Speed dome camera */
#define SERIAL_CAMERA 2 /* Serial camera */
#define START_ONCE_SELF 200 /* Preset 200, used to trigger a one-shot self-test */
#define COLLECT_DATA 0 /* For debugging */
#define HexCharToInt( c ) (((c) >= '0') && ((c) <= '9') ? (c) - '0' : ((c) >= 'a') && ((c) <= 'f') ? (c) - 'a' + 10 :((c) >= 'A') && ((c) <= 'F') ? (c) - 'A' + 10 : 0 )
// SDS packet structure
typedef struct
{
uint8_t PortIdx; // Port index
uint16_t MsgType; // Message type
int MsgLen; // Message length
uint8_t MsgData[RECVDATA_MAXLENTH];
} RTUMSG;
typedef struct
{
float fFactor; // Data scale factor
float EuValueDelta; // Engineering-value offset
} AI_PARAM;
typedef struct
{
AI_PARAM AiParam; // Data point configuration
int AiState; // Data state (-1: sampling failed; 0: not sampled; 1: sampling; 2: done; 3: start sampling)
float EuValue; // Engineering value
} AI_DEF;
typedef struct
{
uint8_t AiState; // Data state (-1: sampling failed; 0: not sampled; 1: sampling; 2: done; 3: start sampling)
float EuValue; // Engineering value
} Data_DEF;
typedef struct
{
int imagelen; // Total image size
int phototime; // Photo timestamp
uint8_t presetno; // Preset used for the photo
char photoname[512]; // Image file name and path
int state;// State (-1: photo failed; 0: no photo; 1: fetching image; 2: photo OK; 3: start photo)
} IMAGE_DEF;
typedef struct
{
int imagelen; // Total image size
int imagenum; // Total number of image packets
int phototime; // Photo timestamp
uint8_t presetno; // Preset used for the photo
char photoname[512]; // Image file name and path
uint8_t buf[MAX_PHOTO_PACKET_NUM][MAX_PHOTO_FRAME_LEN]; // Image data buffer
int ilen[MAX_PHOTO_PACKET_NUM]; // Length of each image data packet
int state;// State (-1: photo failed; 0: no photo; 1: fetching image; 2: photo OK; 3: start photo)
} PHOTO_DEF;
// Sensor acquisition parameters passed from the upper layer
typedef struct SENSOR_PARAM
{
unsigned int baudrate; /* Baud rate */
int databit; /* Data bits */
float stopbit; /* Stop bits */
char parity; /* Parity */
char pathname[64]; /* Serial device file name and path */
//int commNo; /* Agreed serial port number, e.g. COM1... as shown on a PC */
uint8_t SensorsType; /* Sensor type index, greater than 0 */
int devaddr; /* Device (sensor) address */
uint8_t IsNoInsta; /* Device not installed or broken (1: OK, 0: invalid, broken or not installed) */
uint8_t CameraChannel; /* Camera channel number */
uint8_t Phase; /* Phase the sensor is mounted on (tension/tilt); 11 means A1... */
float multiple; /* Scale factor */
float offset; /* Offset */
} SENSOR_PARAM;
// Serial device parameters that must be configured
typedef struct
{
unsigned int baudrate; /* Baud rate */
int databit; /* Data bits */
int stopbit; /* Stop bits */
char parity; /* Parity */
char pathname[64]; /* Serial device file name and path */
int commid; /* Serial port index, starting from 0 */
uint8_t ProtocolIdx; /* Protocol index, greater than 0 */
int devaddr; /* Device address */
uint8_t IsNoInsta; /* Device not installed or broken (1: OK, 0: invalid, broken or not installed) */
uint8_t CameraChannel; /* Camera channel number */
uint8_t Phase; /* Phase the sensor is mounted on (tension/tilt); 11 means A1... */
} SERIAL_PARAM;
// PTZ state data
typedef struct
{
uint8_t ptz_process; /* PTZ phase (1: self-testing; 2: moving to preset; 3: normal) */
uint8_t ptz_status; /* PTZ status (0: stopped; 1: moving; 2: module not powered; other: error) */
int presetno; /* Current preset number */
float x_coordinate; /* Horizontal (pan) coordinate */
float y_coordinate; /* Vertical (tilt) coordinate */
} PTZ_STATE;
/*
$--RMC (Recommended Minimum Navigation Information) sentence fields:
 2  UTCtime   hhmmss.ss, UTC time
 3  status    A = data valid, V = data invalid
 4  lat       ddmm.mmmmm, latitude (first 2 chars: degrees, rest: minutes)
 5  uLat      N = north, S = south
 6  lon       dddmm.mmmm, longitude (first 3 chars: degrees, rest: minutes)
 7  uLon      E = east, W = west
 8  spd       speed over ground
 9  cog       course over ground
 10 date      ddmmyy: day, month, year
 11 mv        magnetic variation
 12 mvE       E = east, W = west
 13 mode      positioning mode indicator [1]
 14 navStatus navigation status, V (NMEA 4.1 and later)
 15 CS        checksum, between '*' and the end of the sentence
*/
// BeiDou satellite data
typedef struct
{
struct tm UTC_time; /* UTC time */
int ms_time; /* Milliseconds */
double lat; /* Latitude: raw value has degrees in the first 2 chars and minutes in the rest; converted to degrees */
char uLat; /* Latitude hemisphere: 'N' north, 'S' south */
double lon; /* Longitude: raw value has degrees in the first 3 chars and minutes in the rest; converted to degrees */
char uLon; /* Longitude hemisphere: 'E' east, 'W' west */
char status; /* 'A' = data valid; any other character = invalid */
} BD_GNSS_DATA;
typedef struct
{
int m_iRevStatus; /* */
int m_iRecvLen; /* */
int m_iNeedRevLength; /* */
int iRecvTime; /* */
uint8_t m_au8RecvBuf[RECVDATA_MAXLENTH];/* */
int fd; /* File descriptor of the opened serial port */
uint8_t PollCmd[SENDDATA_MAXLENTH];
int cmdlen; // Command length in the send buffer
//******************** Poll Cmd ****************************
uint8_t Retry; /* Command retry limit */
uint8_t RetryCnt; /* Command retry counter */
int64_t RetryTime; /* Command retry interval */
int64_t RetryTimeCnt; /* Command retry interval counter */
int64_t WaitTime; /* Inter-command interval */
int64_t WaitTimeCnt; /* Inter-command interval counter */
uint8_t ForceWaitFlag; /* Forced-wait flag */
uint16_t ForceWaitCnt; /* Forced-wait counter */
uint8_t ReSendCmdFlag; /* Command resend flag */
uint8_t SendCmdFlag; /* Command sent flag */
uint8_t RevCmdFlag; /* Command received-OK flag */
//**********************************************************
int64_t lsendtime; /* Absolute send time of the command (ms) */
int cameraaddr; /* Camera address */
int SerialCmdidx; /* Index of the command currently being sent on this port (-1: no command pending) */
PHOTO_DEF image; /* Temporary image data storage */
int64_t FirstCmdTimeCnt; /* Start time of serial data reading */
PTZ_STATE ptz_state;
int sendptzstatecmd; // Limits the number of PTZ state query commands
BD_GNSS_DATA bd_data;
} SIO_PARAM_SERIAL_DEF;
typedef const struct
{
//char *account; // Command description
char *cmd_name; // Command name
int (*recv_process)(SIO_PARAM_SERIAL_DEF *); /* URC data handler */
}BD_NMEA0183_PROC_FUNC;
// All parameters for one serial-attached device
typedef struct
{
//******************** Basic port information ************************
uint8_t IsNeedSerial; /* Whether serial communication is required */
int CmdWaitTime; /* Unused */
uint8_t UseSerialidx; /* Index of the serial port in use */
int SerialCmdidx; /* Index of the command currently being sent (-1: no command pending) */
int enrecvtime; /* Time since the reply after sending the encrypted command */
int64_t FirstCmdTimeCnt; /* Start time of serial data reading */
uint8_t nextcmd; /* Second send of the weather/rainfall read command */
uint8_t SameTypeDevIdx; /* Ordinal among devices of the same type (from 0) */
uint8_t uOpenPowerFlag; /* Sensor power-on flag (0: no need to power on; 1: power on) */
int recvdatacnt; /* Count of valid data received */
PHOTO_DEF image; /* Temporary image data storage */
AI_DEF aiValue[MAX_DEV_VALUE_NUM]; /* Sensor sampled values */
} SERIAL_DEV_DEF;
// Global state for all serial-attached devices
typedef struct
{
uint8_t clcyesampling; /* Sampling in progress (0: no; 1: yes) */
uint8_t camerauseserial; /* Which serial port the camera uses */
uint32_t PtzCmdType; /* PTZ command type */
int usecameradevidx; /* Camera command pending */
/* Index of the device whose command is to be executed (-1: no pending command) */
int SendStopPtzCmdTimeCnt; /* Timer for sending the PTZ stop command */
uint8_t serialstatus[MAX_SERIAL_PORT_NUM]; /* Availability of each serial port (ports 1, 2, 3) */
SERIAL_DEV_DEF ms_dev[MAX_SERIAL_DEV_NUM]; /* Sensors attached to the device */
int UseingSerialdev[MAX_SERIAL_PORT_NUM]; /* Device index currently using each serial port (-1: port idle) */
int curdevidx[MAX_SERIAL_PORT_NUM]; /* Device index currently communicating (-1: none) */
uint8_t IsReadWireTem; /* Whether conductor-temperature reading has started (0: no; 1: yes) */
//int proruntime; /* Program run time */
int IsSleep; /* Whether the program should sleep (1: stay awake; 2: sleep) */
int tempsamplingstartime; /* Interval from temperature-measurement start to sampling start */
int tempsamplingsucctime; /* Interval from temperature-measurement start to sampling success */
int samplingtimeSec; /* Second-level timing control for high-speed sampling */
int SectimesamplingCnt[3]; /* Per-second sample counts for high-speed sampling */
int SunshineSensorsFault; /* Controls reporting of sunshine-sensor faults */
int TempSensorsFault; /* Controls reporting of temperature-sensor faults */
int FirstSensorsFault; /* First-time sensor-fault report */
int SensorsIsUse; /* Whether the sensor is enabled; must match the self-test position */
int sequsampling; /* Sequential-sampling control index (-1: no sampling; otherwise the device index) */
int imagepacketnum; /* Total packet count of the serial camera photo */
int historyimagenum[MAX_CHANNEL_NUM]; /* Number of history images kept on the dome camera */
#if 1
//int sendflag; /* Flag for temporary upload of the leakage-current value */
int sendphototime; /* Stats for temporary photo-data upload */
int sendphotocmdcnt; /* Count of photo commands sent during one photo session */
int photographtime; /* Time the photo was taken */
int iLastGetPhotoNo; /* Saved photo-command index while configuring the serial camera */
uint8_t bImageSize; /* Temporarily stores the image size from the upper-layer command */
uint8_t presetno; /* Temporarily stores the preset from the upper-layer command */
char filedir[512]; /* Temporary path for photos after capture */
#endif
uint8_t errorPhotoNoCnt; /* Count of wrong packet numbers in photo replies (e.g. asked for packet 6, got packet 3) */
uint8_t RephotographCnt; /* Serial-camera retake count (retake only when reading photo data fails) */
} SRDT_DEF;
static void PortDataProcess( void );
static int64_t get_msec();
int serial_port_comm();
static int weather_comm(SERIAL_PARAM weatherport);
static void setRS485Enable(bool z);
static void set485WriteMode();
static void set485ReadMode();
static void set12VEnable(bool z);
static void setCam3V3Enable(bool enabled);
// All serial-related function declarations
/* Turn on serial port power */
void Gm_OpenSerialPower();
uint8_t getdevtype(int devno);
// Turn on sensor power
void Gm_OpenSensorsPower();
// Turn off sensor power
void Gm_CloseSensorsPower(int port);
// Open serial communication
void Gm_OpenSerialPort(int devidx);
// Close serial communication
void Gm_CloseSerialPort();
void DBG_LOG(int commid, char flag, const char* format, ...);
int SaveLogTofile(int commid, const char *szbuf);
// Send data over the serial port; returns the number of bytes actually sent
int GM_SerialComSend(const unsigned char * cSendBuf, size_t nSendLen, int commid);
void Gm_InitSerialComm(SENSOR_PARAM *sensorParam, const char *filedir,const char *log);
// Start serial communication
void GM_StartSerialComm();
// Start taking a photo over the serial port
int GM_StartSerialCameraPhoto(int phototime, unsigned char channel, int cmdidx, unsigned char bImageSize, unsigned char presetno, const char *serfile, unsigned int baud, int addr);
void delete_old_files(const char *path, int days);
// Serial polling timer
int GM_SerialTimer();
// Poll all serial ports and sensors to see whether downlink commands need to be generated
void Gm_FindAllSensorsCommand();
// Check whether all sensors have finished sampling; power off those that have
void GM_IsCloseSensors();
// Check all serial ports for incoming data and start receiving if there is any
void GM_AllSerialComRecv();
// Decide whether the timer should be stopped
int GM_CloseTimer();
void testComm();
void Gm_InitSerialComm_Test();
// Process received serial data
void SerialDataProcess(int devidx, uint8_t *buf, int len);
void CameraRecvData(SIO_PARAM_SERIAL_DEF *pPortParam, uint8_t *buf, int len);
// Process serial camera data
void CameraPhotoPortDataProcess(SIO_PARAM_SERIAL_DEF *curserial);
// Send a command
void SendCmdFormPollCmdBuf( int port );
// Clear all command-sending flags
void ClearCmdAllFlag(int commid);
// Control issuing of serial photo commands
int FindNextCameraPhotoCommand(SIO_PARAM_SERIAL_DEF *pPortParam);
// Build a CameraPhoto command
void MakeCameraPhotoCommand(SIO_PARAM_SERIAL_DEF *pPortParam, uint8_t cmdidx, int OneParam, uint16_t TwoParam, uint8_t Threep, int phototime);
// Clear the command buffer
void ClearCmdFormPollCmdBuf(int port);
// Prepare to send a PTZ command
int Gm_CtrlPtzCmd(SIO_PARAM_SERIAL_DEF *pPortParam, uint32_t ptzcmd);
// Timer for sending PTZ rotation commands
int Gm_Camera_Timer();
// Build a PELCO-P command
void Gm_SendPelco_pCommand( uint32_t cmdtype);
// Compute the Pelco-P checksum
uint8_t Gm_Pelco_pXORCheck( uint8_t *msg, int len );
// Build a PELCO-D command
void Gm_SendPelco_DCommand(SIO_PARAM_SERIAL_DEF *pPortParam, uint32_t cmdtype);
// Compute the Pelco-D checksum
uint8_t Gm_Pelco_DCheck( uint8_t *msg, int len );
// Query sensor power state
char Gm_GetSensorsPowerState(int port);
// Find the serial port index from the aviation connector the sensor uses
void FindDevUseSerialCommNo();
// Find and build the next tilt-sensor command
int FindNextShxyProtocolCommand( int devidx );
// Compute the tilt command checksum
unsigned char CalLpc(unsigned char *msg, int len);
// Read Shanghai Xinying sensor protocol data
void ShxyProtocolRecvData(int commid, uint8_t *buf, int len);
// Verify the checksum
int CheckShxyProtocolLpcError( uint8_t* msg, int len );
// Convert hex or decimal ASCII strings to an int
int ATOI(char *buf);
// Build a tilt command
void MakeShxyProtocolPollCommand(int portno, uint8_t cmdidx);
// Process Shanghai Xinying sensor protocol data
void ShxyProtocolDataProcess( int commid);
// Control sensor power-off
//void Gm_CtrlCloseSensorsPower(int devidx);
// Check whether sensor power should be switched off or on
//void Gm_CheckSensorsPower(void);
int SaveImageDataTofile(int devno);
void Collect_sensor_data();
int CameraPhotoCmd(int phototime, unsigned char channel, int cmdidx, unsigned char bImageSize, unsigned char presetno, const char *serfile, unsigned int baud, int addr);
/* Data and image retrieval functions: begin */
int GetWeatherData(Data_DEF *data, int datano);
int GetAirTempData(Data_DEF *airt);
int GetHumidityData(Data_DEF *airt);
int GetWindSpeedData(Data_DEF *airt);
int GetWindDirectionData(Data_DEF *airt);
int GetRainfallData(Data_DEF *airt);
int GetAtmosData(Data_DEF *airt);
int GetOpticalRadiationData(Data_DEF *airt);
int GetPullValue(int devno, Data_DEF *data);
int GetAngleValue(int devno, Data_DEF *data, int Xy);
int GetImage(int devno, IMAGE_DEF *photo);
/* Data and image retrieval functions: end */
// Generate a random integer
int GeneratingRandomNumber();
int Gm_SetSerialPortParam(int commid);
void ClearCameraCmdAllFlag(SIO_PARAM_SERIAL_DEF *pPortParam);
void ClearCameraCmdFormPollCmdBuf(SIO_PARAM_SERIAL_DEF *pPortParam);
int Gm_OpenCameraSerial(SIO_PARAM_SERIAL_DEF *pPortParam, const char *serfile, unsigned int baud);
int Gm_SetCameraSerialPortParam(int fd, unsigned int baud);
int GM_CameraComSend(unsigned char * cSendBuf, size_t nSendLen, int fd);
void SendCameraCmdFormPollCmdBuf(SIO_PARAM_SERIAL_DEF *pPortParam);
void Gm_FindCameraCommand(SIO_PARAM_SERIAL_DEF *pPortParam);
void GM_CameraSerialComRecv(SIO_PARAM_SERIAL_DEF *pPortParam);
int GM_IsCloseCamera(SIO_PARAM_SERIAL_DEF *pPortParam);
int GM_CameraSerialTimer(SIO_PARAM_SERIAL_DEF *pPortParam);
int QueryPtzState(PTZ_STATE *ptz_state, int cmdidx, const char *serfile, unsigned int baud, int addr);
void MakePtzStateQueryCommand(SIO_PARAM_SERIAL_DEF *pPortParam, uint8_t cmdidx);
int Query_BDGNSS_Data(BD_GNSS_DATA *BD_data, int samptime, const char *serfile, unsigned int baud);
int GM_BdSerialTimer(SIO_PARAM_SERIAL_DEF *pPortParam);
void GM_BdSerialComRecv(SIO_PARAM_SERIAL_DEF *pPortParam);
void BdRecvData(SIO_PARAM_SERIAL_DEF *pPortParam, u_char *buf, int len);
unsigned char BDXorCheck(unsigned char *msg, int len);
void BD_NMEA0183_PortDataProcess(SIO_PARAM_SERIAL_DEF *curserial);
char** BD_NMEA0183_SplitString(char *str, int *total_fields);
int BD_get_BDRMC_data(SIO_PARAM_SERIAL_DEF *curserial);
#endif // __SENSOR_PROTOCOL_H__
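A retrieval sketch for the accessors above: each Get*Data call fills a Data_DEF whose AiState carries the SER_* / SAMPLINGSUCCESS codes (the >= 0 success convention for the return value is an assumption):
Data_DEF airTemp;
if (GetAirTempData(&airTemp) >= 0 && airTemp.AiState == SAMPLINGSUCCESS)
    printf("air temperature: %.1f\n", airTemp.EuValue);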

@ -51,16 +51,16 @@ static void set_parity (struct termios *opt, char parity)
{
switch (parity)
{
case 'N':/* no parity */
case'N':/* no parity */
case 'n':
opt->c_cflag &= ~PARENB;
break;
case 'E':/* even parity */
case'E':/* even parity */
case 'e':
opt->c_cflag |= PARENB;
opt->c_cflag &= ~PARODD;
break;
case 'O':/* odd parity */
case'O':/* odd parity */
case 'o':
opt->c_cflag |= PARENB;
opt->c_cflag |= PARODD;

@ -0,0 +1,79 @@
#include "TerminalDevice.h"
#include <dlfcn.h>
#include "Camera.h"
#include <AndroidHelper.h>
typedef jbyteArray (*TakePhotoFunc)(int, int, int, int);
extern bool GetJniEnv(JavaVM *vm, JNIEnv **env, bool& didAttachThread);
CTerminalDevice::CTerminalDevice(JavaVM* vm, jobject service)
{
m_vm = vm;
JNIEnv* env = NULL;
bool attached = false;
bool res = GetJniEnv(m_vm, &env, attached);
if (!res)
{
ALOGE("Failed to get JNI Env");
}
m_javaService = env->NewGlobalRef(service);
if (attached)
{
vm->DetachCurrentThread();
}
}
CTerminalDevice::~CTerminalDevice()
{
JNIEnv* env = NULL;
bool attached = false;
bool res = GetJniEnv(m_vm, &env, attached);
if (!res)
{
ALOGE("Failed to get JNI Env");
}
env->DeleteGlobalRef(m_javaService);
if (attached)
{
m_vm->DetachCurrentThread();
}
m_javaService = NULL;
}
bool CTerminalDevice::TakePhoto(unsigned char channel, unsigned char preset, const string& path, bool photo)
{
jboolean res = JNI_FALSE;
CCamera camera;
camera.initCamera(NULL);
if (camera.isCameraReady())
{
camera.takePicture();
}
camera.closeCamera();
#if 0
JNIEnv* env = NULL;
bool attached = false;
GetJniEnv(m_vm, &env, attached); // match the three-argument GetJniEnv declared above
jclass serviceClass = env->GetObjectClass(m_javaService);
jmethodID mid = env->GetMethodID(serviceClass, "takePhoto", "(SSLjava/lang/String;)Z");
jstring str = env->NewStringUTF(path.c_str());
res = env->CallBooleanMethod (m_javaService, mid, (jint)channel, (jint)preset, str);
env->DeleteLocalRef(str); // str came from NewStringUTF; ReleaseStringUTFChars is only for GetStringUTFChars
env->DeleteLocalRef(serviceClass);
if (!res)
{
int aa = 1;
}
if (attached)
{
m_vm->DetachCurrentThread();
}
#endif
return res == JNI_TRUE;
}

@ -0,0 +1,21 @@
#ifndef __TERMINAL_DEVICE_H__
#define __TERMINAL_DEVICE_H__
#include <Client/Device.h>
#include <jni.h>
class CTerminalDevice : public IDevice
{
public:
CTerminalDevice(JavaVM* vm, jobject service);
~CTerminalDevice();
virtual bool TakePhoto(unsigned char channel, unsigned char preset, const string& path, bool photo);
private:
JavaVM* m_vm;
jobject m_javaService;
};
#endif // __TERMINAL_DEVICE_H__
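A usage sketch (vm and service are assumed to come from JNI_OnLoad and the bound Java service; the path is a placeholder):
CTerminalDevice device(vm, service);
bool ok = device.TakePhoto(/*channel*/1, /*preset*/0, "/sdcard/photo.jpg", true);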

@ -17,11 +17,6 @@
#ifndef __CAMERA2_HELPER_H__
#define __CAMERA2_HELPER_H__
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui.hpp>
#include "mat.h"
template <typename T>
class RangeValue {
@ -108,107 +103,4 @@ private:
};
inline void ConvertYUV21ToMat(const uint8_t* nv21, int nv21_width, int nv21_height, int orgWidth, int orgHeight,
int sensorOrientation, bool front, int rotation, cv::Mat& rgb)
{
int w = 0;
int h = 0;
int rotate_type = 0;
cv::Mat nv21_rotated;
const unsigned char* yuv420data = nv21;
if (rotation != 0)
{
int co = 0;
if (front)
{
co = (sensorOrientation + (rotation - 1) * 90) % 360;
co = (360 - co) % 360;
}
else
{
co = (sensorOrientation - (rotation - 1) * 90 + 360) % 360;
}
// XYLOG(XYLOG_SEVERITY_DEBUG, "Orientation=%d Facing=%d", co, camera_facing);
// int co = 0;
if (co == 0)
{
w = nv21_width;
h = nv21_height;
rotate_type = front ? 2 : 1;
}
else if (co == 90)
{
w = nv21_height;
h = nv21_width;
int tmp = orgWidth;
orgWidth = orgHeight;
orgHeight = tmp;
rotate_type = front ? 5 : 6;
}
else if (co == 180)
{
w = nv21_width;
h = nv21_height;
rotate_type = front ? 4 : 3;
}
else if (co == 270)
{
w = nv21_height;
h = nv21_width;
int tmp = orgWidth;
orgWidth = orgHeight;
orgHeight = tmp;
rotate_type = front ? 7 : 8;
}
nv21_rotated.create(h + h / 2, w, CV_8UC1);
ncnn::kanna_rotate_yuv420sp(nv21, nv21_width, nv21_height, nv21_rotated.data, w, h, rotate_type);
yuv420data = nv21_rotated.data;
}
else
{
w = nv21_width;
h = nv21_height;
}
// nv21_rotated to rgb
if (w == orgWidth && h == orgHeight)
{
rgb.create(h, w, CV_8UC3);
// ncnn::yuv420sp2rgb(nv21_rotated.data, w, h, rgb.data);
ncnn::yuv420sp2rgb_nv12(yuv420data, w, h, rgb.data);
}
else
{
cv::Mat org(h, w, CV_8UC3);
ncnn::yuv420sp2rgb_nv12(yuv420data, w, h, org.data);
if (w * orgHeight == h * orgWidth) // Same Ratio
{
cv::resize(org, rgb, cv::Size(orgWidth, orgHeight));
}
else
{
// Crop image
if (w > orgWidth && h >= orgHeight)
{
int left = (w - orgWidth) / 2;
int top = (h - orgHeight) / 2;
rgb = org(cv::Range(top, top + orgHeight), cv::Range(left, left + orgWidth));
}
else
{
rgb = org;
}
}
}
}
#endif /* __CAMERA2_HELPER_H__ */
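A usage sketch for ConvertYUV21ToMat (buffer and sizes are illustrative; nv21 is assumed to hold a contiguous NV21/NV12-layout frame copied out of an AImage):
cv::Mat rgb;
ConvertYUV21ToMat(nv21, /*nv21_width*/1920, /*nv21_height*/1080,
                  /*orgWidth*/1920, /*orgHeight*/1080,
                  /*sensorOrientation*/90, /*front*/false, /*rotation*/1, rgb);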

@ -9,7 +9,6 @@
using namespace std;
using namespace cv;
// https://zhuanlan.zhihu.com/p/38176640
void Debevec(vector<Mat>exposureImages, vector<float>exposureTimes, Mat& output);
void Robertson(vector<Mat>exposureImages, vector<float>exposureTimes, Mat& output);
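These mirror the classic Debevec and Robertson HDR merge algorithms. For reference, the same merge can be expressed with OpenCV's photo module (a sketch; exposure times are illustrative):
#include <opencv2/photo.hpp>
std::vector<cv::Mat> images;                          // same scene at different exposures
std::vector<float> times = {1/30.0f, 1/8.0f, 1/2.0f}; // per-image exposure times (s)
cv::Mat response, hdr;
cv::Ptr<cv::CalibrateDebevec> calibrate = cv::createCalibrateDebevec();
calibrate->process(images, response, times);          // recover the camera response curve
cv::Ptr<cv::MergeDebevec> merge = cv::createMergeDebevec();
merge->process(images, hdr, times, response);         // radiance map in hdr (CV_32FC3)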

@ -1,4 +1,3 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*

File diff suppressed because it is too large

@ -1,724 +0,0 @@
/* Copyright Statement:
*
* This software/firmware and related documentation ("MediaTek Software") are
* protected under relevant copyright laws. The information contained herein is
* confidential and proprietary to MediaTek Inc. and/or its licensors. Without
* the prior written permission of MediaTek inc. and/or its licensors, any
* reproduction, modification, use or disclosure of MediaTek Software, and
* information contained herein, in whole or in part, shall be strictly
* prohibited.
*
* MediaTek Inc. (C) 2010. All rights reserved.
*
* BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
* THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
* RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER
* ON AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL
* WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
* WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
* NONINFRINGEMENT. NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH
* RESPECT TO THE SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY,
* INCORPORATED IN, OR SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES
* TO LOOK ONLY TO SUCH THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO.
* RECEIVER EXPRESSLY ACKNOWLEDGES THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO
* OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES CONTAINED IN MEDIATEK
* SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE
* RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
* STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S
* ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE
* RELEASED HEREUNDER WILL BE, AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE
* MEDIATEK SOFTWARE AT ISSUE, OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE
* CHARGE PAID BY RECEIVER TO MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
*
* The following software/firmware and/or related documentation ("MediaTek
* Software") have been modified by MediaTek Inc. All revisions are subject to
* any receiver's applicable license agreements with MediaTek Inc.
*/
#ifndef _MTK_HARDWARE_MTKCAM_INCLUDE_MTKCAM_UTILS_METADATA_HAL_MTKPLATFORMMETADATATAG_H_
#define _MTK_HARDWARE_MTKCAM_INCLUDE_MTKCAM_UTILS_METADATA_HAL_MTKPLATFORMMETADATATAG_H_
/******************************************************************************
*
******************************************************************************/
typedef enum mtk_platform_metadata_section {
MTK_HAL_REQUEST = 0xC000, // MTK HAL internal metadata tags start from 0xC000 0000
MTK_P1NODE,
MTK_P2NODE,
MTK_3A_TUNINING,
MTK_3A_EXIF,
MTK_MF_EXIF,
MTK_EIS,
MTK_STEREO,
MTK_FRAMESYNC,
MTK_VHDR,
MTK_PIPELINE,
MTK_NR,
MTK_PLUGIN,
MTK_DUALZOOM,
MTK_FEATUREPIPE,
MTK_POSTPROC,
MTK_FEATURE,
MTK_FSC,
} mtk_platform_metadata_section_t;
/******************************************************************************
*
******************************************************************************/
typedef enum mtk_platform_metadata_section_start {
MTK_HAL_REQUEST_START = MTK_HAL_REQUEST << 16,
MTK_P1NODE_START = MTK_P1NODE << 16,
MTK_P2NODE_START = MTK_P2NODE << 16,
MTK_3A_TUNINING_START = MTK_3A_TUNINING << 16,
MTK_3A_EXIF_START = MTK_3A_EXIF << 16,
MTK_EIS_START = MTK_EIS << 16,
MTK_STEREO_START = MTK_STEREO << 16,
MTK_FRAMESYNC_START = MTK_FRAMESYNC << 16,
MTK_VHDR_START = MTK_VHDR << 16,
MTK_PIPELINE_START = MTK_PIPELINE << 16,
MTK_NR_START = MTK_NR << 16,
MTK_PLUGIN_START = MTK_PLUGIN << 16,
MTK_DUALZOOM_START = MTK_DUALZOOM << 16,
MTK_FEATUREPIPE_START = MTK_FEATUREPIPE << 16,
MTK_POSTPROC_START = MTK_POSTPROC << 16,
MTK_FEATURE_START = MTK_FEATURE << 16,
MTK_FSC_START = MTK_FSC << 16,
} mtk_platform_metadata_section_start_t;
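/* Illustration: a tag's upper 16 bits identify its section, so the first tag of a
 * section equals section << 16. For MTK_HAL_REQUEST (0xC000) that is 0xC0000000,
 * matching the comment on the section enum above. */
// uint32_t first_tag = (uint32_t)MTK_HAL_REQUEST << 16;  // == MTK_HAL_REQUEST_START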
/******************************************************************************
*
******************************************************************************/
typedef enum mtk_platform_metadata_tag {
MTK_HAL_REQUEST_REQUIRE_EXIF = MTK_HAL_REQUEST_START, //MUINT8
MTK_HAL_REQUEST_DUMP_EXIF, //MUINT8
MTK_HAL_REQUEST_REPEAT, //MUINT8
MTK_HAL_REQUEST_DUMMY, //MUINT8
MTK_HAL_REQUEST_SENSOR_SIZE, //MSize
MTK_HAL_REQUEST_SENSOR_ID, //MINT32
MTK_HAL_REQUEST_DEVICE_ID, //MINT32
MTK_HAL_REQUEST_HIGH_QUALITY_CAP, //MUINT8
MTK_HAL_REQUEST_ISO_SPEED, //MINT32
MTK_HAL_REQUEST_BRIGHTNESS_MODE, //MINT32
MTK_HAL_REQUEST_CONTRAST_MODE, //MINT32
MTK_HAL_REQUEST_HUE_MODE, //MINT32
MTK_HAL_REQUEST_SATURATION_MODE, //MINT32
MTK_HAL_REQUEST_EDGE_MODE, //MINT32
MTK_HAL_REQUEST_PASS1_DISABLE, //MINT32
MTK_HAL_REQUEST_ERROR_FRAME, // used for error handling //MUINT8
MTK_HAL_REQUEST_PRECAPTURE_START, // 4cell //MUINT8
MTK_HAL_REQUEST_AF_TRIGGER_START, // 4cell //MUINT8
MTK_HAL_REQUEST_IMG_IMGO_FORMAT, //MINT32
MTK_HAL_REQUEST_IMG_RRZO_FORMAT, //MINT32
MTK_HAL_REQUEST_INDEX, //MINT32
MTK_HAL_REQUEST_COUNT, //MINT32
MTK_HAL_REQUEST_SMVR_FPS, //MUINT8 // 0: NOT batch request
MTK_HAL_REQUEST_REMOSAIC_ENABLE, //MUINT8 // 0: preview mode 1: capture mode
MTK_HAL_REQUEST_INDEX_BSS, //MINT32
MTK_HAL_REQUEST_ZSD_CAPTURE_INTENT, //MUINT8
MTK_HAL_REQUEST_REAL_CAPTURE_SIZE, //MSize
MTK_HAL_REQUEST_VIDEO_SIZE, //MSize
MTK_HAL_REQUEST_RAW_IMAGE_INFO, //MINT32 // index[0]: raw fmt, index[1]: raw stride, index[2]: raw size(width), index[3]: raw size(height)
MTK_HAL_REQUEST_ISP_PIPELINE_MODE, //MINT32
MTK_P1NODE_SCALAR_CROP_REGION = MTK_P1NODE_START, //MRect
MTK_P1NODE_BIN_CROP_REGION, //MRect
MTK_P1NODE_DMA_CROP_REGION, //MRect
MTK_P1NODE_BIN_SIZE, //MSize
MTK_P1NODE_RESIZER_SIZE, //MSize
MTK_P1NODE_RESIZER_SET_SIZE, //MSize
MTK_P1NODE_CTRL_RESIZE_FLUSH, //MBOOL
MTK_P1NODE_CTRL_READOUT_FLUSH, //MBOOL
MTK_P1NODE_CTRL_RECONFIG_SENSOR_SETTING, //MBOOL
MTK_P1NODE_PROCESSOR_MAGICNUM, //MINT32
MTK_P1NODE_MIN_FRM_DURATION, //MINT64
MTK_P1NODE_RAW_TYPE, //MINT32
MTK_P1NODE_SENSOR_CROP_REGION, //MRect
MTK_P1NODE_YUV_RESIZER1_CROP_REGION, //MRect
MTK_P1NODE_YUV_RESIZER2_CROP_REGION, //MRect
MTK_P1NODE_YUV_RESIZER1_SIZE, //MSize
MTK_P1NODE_SENSOR_MODE, //MINT32
MTK_P1NODE_SENSOR_VHDR_MODE, //MINT32
MTK_P1NODE_METADATA_TAG_INDEX, //MINT32
MTK_P1NODE_RSS_SIZE, //MSize
MTK_P1NODE_SENSOR_STATUS, //MINT32
MTK_P1NODE_SENSOR_RAW_ORDER, //MINT32
MTK_P1NODE_TWIN_SWITCH, //MINT32
MTK_P1NODE_TWIN_STATUS, //MINT32
MTK_P1NODE_RESIZE_QUALITY_SWITCH, //MINT32
MTK_P1NODE_RESIZE_QUALITY_STATUS, //MINT32
MTK_P1NODE_RESIZE_QUALITY_LEVEL, //MINT32
MTK_P1NODE_RESIZE_QUALITY_SWITCHING, //MBOOL
MTK_P1NODE_RESUME_SHUTTER_TIME_US, //MINT32
MTK_P1NODE_FRAME_START_TIMESTAMP, //MINT64
MTK_P1NODE_FRAME_START_TIMESTAMP_BOOT, //MINT64
MTK_P1NODE_REQUEST_PROCESSED_WITHOUT_WB, //MBOOL
MTK_P1NODE_ISNEED_GMV, //MBOOL
MTK_P2NODE_HIGH_SPEED_VDO_FPS = MTK_P2NODE_START, //MINT32
MTK_P2NODE_HIGH_SPEED_VDO_SIZE, //MSize
MTK_P2NODE_CTRL_CALTM_ENABLE, //MBOOL
MTK_P2NODE_FD_CROP_REGION, //MRect
MTK_P2NODE_CROP_REGION, //MRect // for removing black edge
MTK_P2NODE_DSDN_ENABLE, //MBOOL // for DSDN on/off controled by Policy
MTK_P2NODE_SENSOR_CROP_REGION, //MRect
MTK_3A_AE_HIGH_ISO_BINNING, //MBOOL // for 3HDR high iso binning mode
MTK_SENSOR_SCALER_CROP_REGION, //MRect
MTK_PROCESSOR_CAMINFO = MTK_3A_TUNINING_START, //IMemory
MTK_ISP_ATMS_MAPPING_INFO, //IMemory
MTK_3A_ISP_PROFILE, //MUINT8
MTK_3A_ISP_P1_PROFILE, //MUINT8
MTK_CAMINFO_LCSOUT_INFO, //IMemory
MTK_3A_ISP_BYPASS_LCE, //MBOOL
MTK_3A_ISP_DISABLE_NR, //MBOOL
MTK_3A_ISP_NR3D_SW_PARAMS, //MINT32[14] //GMVX, GMVY, confX, confY, MAX_GMV, frameReset, GMV_Status,ISO_cutoff
MTK_3A_ISP_NR3D_HW_PARAMS, //IMemory
MTK_3A_ISP_LCE_GAIN, //MINT32, bits[0:15]: LCE gain, bits[16:31]: LCE gain confidence ratio (0-100)
MTK_3A_ISP_FUS_NUM, //MINT32
MTK_3A_AE_CAP_PARAM, //IMemory
MTK_3A_AE_CAP_SINGLE_FRAME_HDR, //MUINT8
MTK_3A_AE_BV_TRIGGER, //MBOOL
MTK_3A_AF_LENS_POSITION, //MINT32
MTK_3A_FLICKER_RESULT, //MINT32
MTK_3A_DUMMY_BEFORE_REQUEST_FRAME, //MBOOL // Dummy frame before capture; capture intent only, not used for preview
MTK_3A_DUMMY_AFTER_REQUEST_FRAME, //MBOOL // Dummy frame after capture; capture intent only, not used for preview
MTK_3A_MANUAL_AWB_COLORTEMPERATURE_MAX, //MINT32
MTK_3A_MANUAL_AWB_COLORTEMPERATURE_MIN, //MINT32
MTK_3A_MANUAL_AWB_COLORTEMPERATURE, //MINT32
MTK_3A_HDR_MODE, //MUINT8
MTK_3A_AE_HDR_MIXED_ISO, //MUINT32
MTK_3A_AE_ZSL_STABLE, //MINT32 ( MBOOL )
MTK_3A_PGN_ENABLE, //MUINT8
MTK_3A_SKIP_HIGH_QUALITY_CAPTURE, //MUINT8
MTK_3A_AI_SHUTTER, //MBOOL
MTK_3A_FEATURE_AE_EXPOSURE_LEVEL, //MINT32
MTK_3A_FEATURE_AE_TARGET_MODE, //MINT32
MTK_3A_OPEN_ID, //MINT32
MTK_LSC_TBL_DATA, //IMemory
MTK_LSC_TSF_DATA, //IMemory
MTK_LSC_TSF_DUMP_NO, //IMemory
MTK_ISP_P2_ORIGINAL_SIZE, //MSize
MTK_ISP_P2_CROP_REGION, //MRect
MTK_ISP_P2_RESIZER_SIZE, //MSize
MTK_ISP_P2_IN_IMG_FMT, //MINT32, 0 or not exist: RAW->YUV, 1: YUV->YUV
MTK_ISP_P2_TUNING_UPDATE_MODE, //MUINT8, [0 or not exist]: as default; [1]: keep existed parameters but some parts will be updated; [2]: keep all existed parameters (force mode) [3] LPCNR Pass1 [4] LPCNR Pass2
MTK_ISP_P2_IN_IMG_RES_REVISED, //MINT32, describes the revised P2 input image resolution. bit[0:15] width in pixels, bit[16:31] height in pixels. May not exist.
MTK_ISP_APP_TARGET_SIZE, //MINT32, describes the APP target resolution. bit[0:15] width in pixels, bit[16:31] height in pixels. May not exist.
MTK_MSF_SCALE_INDEX, //MINT32, which scale stage index, would only exist with scaling flow
MTK_MSF_FRAME_NUM, //MINT32, After BSS which frame number is this stage using
MTK_TOTAL_MULTI_FRAME_NUM, //MINT32, the MSYUV function uses this input to know the frame number
MTK_TOTAL_MULTI_FRAME_NUM_CAPTURED, //MINT32, MSF function used
MTK_SW_DSDN_VERSION, //MINT32, distinguish different dsdn version
MTK_ISP_COLOR_SPACE, //MINT32
MTK_ISP_DRC_CURVE, //IMemory
MTK_ISP_DRC_CURVE_SIZE, //MINT32
MTK_ISP_FEO_DATA, //IMemory
MTK_ISP_FEO_ENABLE, //MINT32
MTK_ISP_FEO_INFO, //IMemory
MTK_ISP_HLR_RATIO, //MINT32, which is a HDR ratio applied in HLR
MTK_ISP_STAGE, //MINT32
MTK_FOCUS_AREA_POSITION, //MINT32
MTK_FOCUS_AREA_SIZE, //MSize
MTK_FOCUS_AREA_RESULT, //MUINT8
MTK_FOCUS_PAUSE, //MUINT8
MTK_FOCUS_MZ_ON, //MUINT8
MTK_3A_AF_FOCUS_VALUE, //MINT64
MTK_3A_PRV_CROP_REGION, //MRect
MTK_3A_ISP_MDP_TARGET_SIZE, //MSize
MTK_3A_REPEAT_RESULT, //MUINT8
MTK_3A_SKIP_PRECAPTURE, //MBOOL //if CUST_ENABLE_FLASH_DURING_TOUCH is true, MW can skip precapture
MTK_3A_SKIP_BAD_FRAME, //MBOOL
MTK_3A_FLARE_IN_MANUAL_CTRL_ENABLE, //MBOOL
MTK_3A_DYNAMIC_SUBSAMPLE_COUNT, //MINT32 30fps = 1, 60fps = 2, ... , 120fps = 4
MTK_3A_AE_LV_VALUE, //MINT32
MTK_APP_CONTROL, //MINT32
MTK_3A_CUST_PARAMS, //IMemory
MTK_3A_SETTING_CUST_PARAMS, //IMemory
MTK_3A_PERFRAME_INFO, //IMemory
MTK_SENSOR_MODE_INFO_ACTIVE_ARRAY_CROP_REGION, //MRect
MTK_3A_AE_BV, //MINT32
MTK_3A_AE_CWV, //MINT32
MTK_ISP_P2_PROCESSED_RAW, //MINT32
MTK_3A_EXIF_METADATA = MTK_3A_EXIF_START, //IMetadata
MTK_EIS_REGION = MTK_EIS_START, //MINT32
MTK_EIS_INFO, //MINT64
MTK_EIS_VIDEO_SIZE, //MRect
MTK_EIS_NEED_OVERRIDE_TIMESTAMP, //MBOOL
MTK_EIS_LMV_DATA, //IMemory
MTK_STEREO_JPS_MAIN1_CROP = MTK_STEREO_START, //MRect
MTK_STEREO_JPS_MAIN2_CROP, //MRect
MTK_STEREO_SYNC2A_MODE, //MINT32
MTK_STEREO_SYNCAF_MODE, //MINT32
MTK_STEREO_HW_FRM_SYNC_MODE, //MINT32
MTK_STEREO_NOTIFY, //MINT32
MTK_STEREO_SYNC2A_MASTER_SLAVE, //MINT32[2]
MTK_STEREO_SYNC2A_STATUS, //IMemory
MTK_JPG_ENCODE_TYPE, //MINT8
MTK_CONVERGENCE_DEPTH_OFFSET, //MFLOAT
MTK_N3D_WARPING_MATRIX_SIZE, //MUINT32
MTK_P1NODE_MAIN2_HAL_META, //IMetadata
MTK_P2NODE_BOKEH_ISP_PROFILE, //MUINT8
MTK_STEREO_FEATURE_DENOISE_MODE, //MINT32
MTK_STEREO_FEATURE_SENSOR_PROFILE, //MINT32
MTK_P1NODE_MAIN2_APP_META, //IMetadata
MTK_STEREO_FEATURE_OPEN_ID, //MINT32
MTK_STEREO_FRAME_PER_CAPTURE, //MINT32
MTK_STEREO_ENABLE_MFB, //MINT32
MTK_STEREO_BSS_RESULT, //MINT32
MTK_STEREO_FEATURE_FOV_CROP_REGION, //MINT32[6] // p.x, p.y, p.w, p.h, srcW, srcH
MTK_STEREO_DCMF_FEATURE_MODE, //MINT32 // mtk_platform_metadata_enum_dcmf_feature_mode
MTK_STEREO_HDR_EV, //MINT32
MTK_STEREO_DELAY_FRAME_COUNT, //MINT32
MTK_STEREO_DCMF_DEPTHMAP_SIZE, //MSize
MTK_STEREO_WITH_CAMSV, //MBOOL
MTK_FRAMESYNC_ID = MTK_FRAMESYNC_START, //MINT32
MTK_FRAMESYNC_TOLERANCE, //MINT64
MTK_FRAMESYNC_FAILHANDLE, //MINT32
MTK_FRAMESYNC_RESULT, //MINT64
MTK_FRAMESYNC_TYPE, //MINT32
MTK_FRAMESYNC_MODE, //MUINT8
MTK_VHDR_LCEI_DATA = MTK_VHDR_START, //Memory
MTK_VHDR_IMGO_3A_ISP_PROFILE, //MUINT8
MTK_HDR_FEATURE_HDR_HAL_MODE,
MTK_3A_FEATURE_AE_VALID_EXPOSURE_NUM,
MTK_VHDR_MULTIFRAME_TIMESTAMP, //MINT64
MTK_VHDR_MULTIFRAME_EXPOSURE_TIME, //MINT64
MTK_PIPELINE_UNIQUE_KEY = MTK_PIPELINE_START, //MINT32
MTK_PIPELINE_FRAME_NUMBER, //MINT32
MTK_PIPELINE_REQUEST_NUMBER, //MINT32
MTK_PIPELINE_EV_VALUE, //MINT32
MTK_PIPELINE_DUMP_UNIQUE_KEY, //MINT32
MTK_PIPELINE_DUMP_FRAME_NUMBER, //MINT32
MTK_PIPELINE_DUMP_REQUEST_NUMBER, //MINT32
MTK_PIPELINE_VIDEO_RECORD, //MINT32
MTK_NR_MODE = MTK_NR_START, //MINT32
MTK_NR_MNR_THRESHOLD_ISO, //MINT32
MTK_NR_SWNR_THRESHOLD_ISO, //MINT32
MTK_REAL_LV, //MINT32
MTK_ANALOG_GAIN, //MUINT32
MTK_AWB_RGAIN, //MINT32
MTK_AWB_GGAIN, //MINT32
MTK_AWB_BGAIN, //MINT32
MTK_PLUGIN_MODE = MTK_PLUGIN_START, //MINT64
MTK_PLUGIN_COMBINATION_KEY, //MINT64
MTK_PLUGIN_P2_COMBINATION, //MINT64
MTK_PLUGIN_PROCESSED_FRAME_COUNT, //MINT32
MTK_PLUGIN_CUSTOM_HINT, //MINT32
MTK_PLUGIN_DETACT_JOB_SYNC_TOKEN, //MINT64, may not exist.
MTK_PLUGIN_UNIQUEKEY,
MTK_DUALZOOM_DROP_REQ = MTK_DUALZOOM_START, //MINT32
MTK_DUALZOOM_FORCE_ENABLE_P2, //MINT32
MTK_DUALZOOM_DO_FRAME_SYNC, //MINT32
MTK_DUALZOOM_ZOOM_FACTOR, //MINT32
MTK_DUALZOOM_DO_FOV, //MINT32
MTK_DUALZOOM_FOV_RECT_INFO, //MINT32
MTK_DUALZOOM_FOV_CALB_INFO, //MINT32
MTK_DUALZOOM_FOV_MARGIN_PIXEL, //MSize
MTK_DUALCAM_AF_STATE, //MUINT8
MTK_DUALCAM_LENS_STATE, //MUINT8
MTK_DUALCAM_TIMESTAMP, //MINT64
MTK_DUALZOOM_3DNR_MODE, //MINT32
MTK_DUALZOOM_ZOOMRATIO, //MINT32
MTK_DUALZOOM_CENTER_SHIFT, //MINT32
MTK_DUALZOOM_FOV_RATIO, //MFLOAT
MTK_DUALZOOM_REAL_MASTER, //MINT32
MTK_DUALZOOM_FD_TARGET_MASTER, //MINT32
MTK_DUALZOOM_FD_REAL_MASTER, //MINT32 // maybe not set
MTK_LMV_SEND_SWITCH_OUT, //MINT32
MTK_LMV_SWITCH_OUT_RESULT, //MINT32
MTK_LMV_VALIDITY, //MINT32
MTK_VSDOF_P1_MAIN1_ISO, //MINT32
MTK_DUALZOOM_IS_STANDBY, //MBOOL
MTK_DUALZOOM_CAP_CROP, //MRect
MTK_DUALZOOM_MASTER_UPDATE_MODE, //MBOOL
MTK_DUALZOOM_STREAMING_NR, //MINT32
MTK_FEATUREPIPE_APP_MODE = MTK_FEATUREPIPE_START, //MINT32
MTK_POSTPROC_TYPE = MTK_POSTPROC_START, //MINT32
MTK_FEATURE_STREAMING = MTK_FEATURE_START, //MINT64
MTK_FEATURE_CAPTURE, //MINT64
MTK_FEATURE_CAPTURE_PHYSICAL, //MINT64
MTK_FEATURE_FREE_MEMORY_MBYTE, //MINT32
MTK_FEATURE_MFNR_NVRAM_QUERY_INDEX, //MINT32
MTK_FEATURE_MFNR_NVRAM_DECISION_ISO, //MINT32
MTK_FEATURE_MFNR_TUNING_INDEX_HINT, //MINT64
MTK_FEATURE_MFNR_FINAL_EXP, //MINT32
MTK_FEATURE_MFNR_OPEN_ID, //MINT32
MTK_FEATURE_AINR_MDLA_MODE, //MINT32
MTK_ISP_AINR_MDLA_MODE, //MINT32
MTK_ISP_LTM_BIT_MODE, //MINT32
MTK_FEATURE_BSS_SELECTED_FRAME_COUNT, //MINT32
MTK_FEATURE_BSS_FORCE_DROP_NUM, //MINT32
MTK_FEATURE_BSS_FIXED_LSC_TBL_DATA, //MUINT8
MTK_FEATURE_BSS_PROCESS, //MINT32
MTK_FEATURE_BSS_ISGOLDEN, //MBOOL
MTK_FEATURE_BSS_REORDER, //MBOOL
MTK_FEATURE_BSS_MANUAL_ORDER, //MUINT8
MTK_FEATURE_BSS_RRZO_DATA, //MUINT8
MTK_FEATURE_BSS_DOWNSAMPLE, //MBOOL
MTK_FEATURE_PACK_RRZO, //MUINT8
MTK_FEATURE_FACE_RECTANGLES, //MRect array
MTK_FEATURE_FACE_POSE_ORIENTATIONS, //MINT32[n*3] array, each entry includes: xAxis, yAxis, zAxis
MTK_FEATURE_CAP_YUV_PROCESSING, //MUINT8
MTK_FEATURE_CAP_PIPE_DCE_CONTROL, //MUINT8
MTK_FEATURE_MULTIFRAMENODE_BYPASSED, //MUINT8
MTK_FEATURE_FACE_APPLIED_GAMMA, //MINT32
MTK_FEATURE_CAP_PQ_USERID, //MINT64
MTK_FEATURE_FLIP_IN_P2A, //MINT32
MTK_FSC_CROP_DATA = MTK_FSC_START, //IMemory
MTK_FSC_WARP_DATA, //IMemory
MTK_STAGGER_ME_META, //IMetadata
MTK_STAGGER_SE_META, //IMetadata
MTK_STAGGER_BLOB_IMGO_ORDER //MUINT8
} mtk_platform_metadata_tag_t;
/******************************************************************************
*
******************************************************************************/
typedef enum mtk_platform_3a_exif_metadata_tag {
MTK_3A_EXIF_FNUMBER, //MINT32
MTK_3A_EXIF_FOCAL_LENGTH, //MINT32
MTK_3A_EXIF_FOCAL_LENGTH_35MM, //MINT32
MTK_3A_EXIF_SCENE_MODE, //MINT32
MTK_3A_EXIF_AWB_MODE, //MINT32
MTK_3A_EXIF_LIGHT_SOURCE, //MINT32
MTK_3A_EXIF_EXP_PROGRAM, //MINT32
MTK_3A_EXIF_SCENE_CAP_TYPE, //MINT32
MTK_3A_EXIF_FLASH_LIGHT_TIME_US, //MINT32
MTK_3A_EXIF_AE_METER_MODE, //MINT32
MTK_3A_EXIF_AE_EXP_BIAS, //MINT32
MTK_3A_EXIF_CAP_EXPOSURE_TIME, //MINT32
MTK_3A_EXIF_AE_ISO_SPEED, //MINT32
MTK_3A_EXIF_REAL_ISO_VALUE, //MINT32
MTK_3A_EXIF_AE_BRIGHTNESS_VALUE, //MINT32
MTK_3A_EXIF_FLASH_FIRING_STATUS, //MINT32
MTK_3A_EXIF_FLASH_RETURN_DETECTION, //MINT32
MTK_3A_EXIF_FLASH_MODE, //MINT32
MTK_3A_EXIF_FLASH_FUNCTION, //MINT32
MTK_3A_EXIF_FLASH_REDEYE, //MINT32
MTK_3A_EXIF_DEBUGINFO_BEGIN, // debug info begin
// key: MINT32
MTK_3A_EXIF_DBGINFO_AAA_KEY = MTK_3A_EXIF_DEBUGINFO_BEGIN, //MINT32
MTK_3A_EXIF_DBGINFO_AAA_DATA,
MTK_3A_EXIF_DBGINFO_SDINFO_KEY,
MTK_3A_EXIF_DBGINFO_SDINFO_DATA,
MTK_3A_EXIF_DBGINFO_ISP_KEY,
MTK_3A_EXIF_DBGINFO_ISP_DATA,
//
MTK_CMN_EXIF_DBGINFO_KEY,
MTK_CMN_EXIF_DBGINFO_DATA,
//
MTK_MF_EXIF_DBGINFO_MF_KEY,
MTK_MF_EXIF_DBGINFO_MF_DATA,
//
MTK_N3D_EXIF_DBGINFO_KEY,
MTK_N3D_EXIF_DBGINFO_DATA,
//
MTK_POSTNR_EXIF_DBGINFO_NR_KEY,
MTK_POSTNR_EXIF_DBGINFO_NR_DATA,
//
MTK_RESVB_EXIF_DBGINFO_KEY,
MTK_RESVB_EXIF_DBGINFO_DATA,
//
MTK_RESVC_EXIF_DBGINFO_KEY,
MTK_RESVC_EXIF_DBGINFO_DATA,
// data: Memory
MTK_3A_EXIF_DEBUGINFO_END, // debug info end
} mtk_platform_3a_exif_metadata_tag_t;
// MTK_3A_FEATURE_AE_EXPOSURE_LEVEL
typedef enum mtk_camera_metadata_enum_ae_exposure_level {
MTK_3A_FEATURE_AE_EXPOSURE_LEVEL_NONE = 0,
MTK_3A_FEATURE_AE_EXPOSURE_LEVEL_SHORT,
MTK_3A_FEATURE_AE_EXPOSURE_LEVEL_NORMAL,
MTK_3A_FEATURE_AE_EXPOSURE_LEVEL_LONG,
} mtk_camera_metadata_enum_ae_exposure_level_t;
// MTK_3A_FEATURE_AE_TARGET_MODE
typedef enum mtk_camera_metadata_enum_ae_target_mode {
MTK_3A_FEATURE_AE_TARGET_MODE_NORMAL = 0,
MTK_3A_FEATURE_AE_TARGET_MODE_IVHDR,
MTK_3A_FEATURE_AE_TARGET_MODE_MVHDR,
MTK_3A_FEATURE_AE_TARGET_MODE_ZVHDR,
MTK_3A_FEATURE_AE_TARGET_MODE_LE_FIX,
MTK_3A_FEATURE_AE_TARGET_MODE_SE_FIX,
MTK_3A_FEATURE_AE_TARGET_MODE_4CELL_MVHDR,
MTK_3A_FEATURE_AE_TARGET_MODE_MSTREAM_VHDR,
MTK_3A_FEATURE_AE_TARGET_MODE_MSTREAM_VHDR_RTO1X,
MTK_3A_FEATURE_AE_TARGET_MODE_STAGGER_2EXP,
MTK_3A_FEATURE_AE_TARGET_MODE_STAGGER_3EXP,
} mtk_camera_metadata_enum_ae_target_mode_t;
//MTK_3A_FEATURE_AE_VALID_EXPOSURE_NUM
typedef enum mtk_camera_metadata_enum_stagger_valid_exposure_num {
MTK_STAGGER_VALID_EXPOSURE_NON = 0,
MTK_STAGGER_VALID_EXPOSURE_1 = 1,
MTK_STAGGER_VALID_EXPOSURE_2 = 2,
MTK_STAGGER_VALID_EXPOSURE_3 = 3
} mtk_camera_metadata_enum_stagger_valid_exposure_num_t;
//MTK_3A_ISP_FUS_NUM
typedef enum mtk_camera_metadata_enum_3a_isp_fus_num {
MTK_3A_ISP_FUS_NUM_NON = 0,
MTK_3A_ISP_FUS_NUM_1 = 1,
MTK_3A_ISP_FUS_NUM_2 = 2,
MTK_3A_ISP_FUS_NUM_3 = 3,
} mtk_camera_metadata_enum_3a_isp_fus_num_t;
/******************************************************************************
*
******************************************************************************/
typedef enum mtk_platform_metadata_enum_nr_mode {
MTK_NR_MODE_OFF = 0,
MTK_NR_MODE_MNR,
MTK_NR_MODE_SWNR,
MTK_NR_MODE_AUTO
} mtk_platform_metadata_enum_nr_mode_t;
typedef enum mtk_platform_metadata_enum_mfb_mode {
MTK_MFB_MODE_OFF = 0,
MTK_MFB_MODE_MFLL,
MTK_MFB_MODE_AIS,
MTK_MFB_MODE_NUM,
} mtk_platform_metadata_enum_mfb_mode_t;
typedef enum mtk_platform_metadata_enum_custom_hint {
MTK_CUSTOM_HINT_0 = 0,
MTK_CUSTOM_HINT_1,
MTK_CUSTOM_HINT_2,
MTK_CUSTOM_HINT_3,
MTK_CUSTOM_HINT_4,
MTK_CUSTOM_HINT_NUM,
} mtk_platform_metadata_enum_custom_hint_t;
typedef enum mtk_platform_metadata_enum_plugin_mode {
MTK_PLUGIN_MODE_COMBINATION = 1 << 0,
MTK_PLUGIN_MODE_NR = 1 << 1,
MTK_PLUGIN_MODE_HDR = 1 << 2,
MTK_PLUGIN_MODE_MFNR = 1 << 3,
MTK_PLUGIN_MODE_COPY = 1 << 4,
MTK_PLUGIN_MODE_TEST_PRV = 1 << 5,
MTK_PLUGIN_MODE_BMDN = 1 << 6,
MTK_PLUGIN_MODE_MFHR = 1 << 7,
MTK_PLUGIN_MODE_BMDN_3rdParty = 1 << 8,
MTK_PLUGIN_MODE_MFHR_3rdParty = 1 << 9,
MTK_PLUGIN_MODE_FUSION_3rdParty = 1 << 10,
MTK_PLUGIN_MODE_VSDOF_3rdParty = 1 << 11,
MTK_PLUGIN_MODE_COLLECT = 1 << 12,
MTK_PLUGIN_MODE_HDR_3RD_PARTY = 1 << 13,
MTK_PLUGIN_MODE_MFNR_3RD_PARTY = 1 << 14,
MTK_PLUGIN_MODE_BOKEH_3RD_PARTY = 1 << 15,
MTK_PLUGIN_MODE_DCMF_3RD_PARTY = 1 << 16,
} mtk_platform_metadata_enum_plugin_mode_t;
typedef enum mtk_platform_metadata_enum_p2_plugin_combination {
MTK_P2_RAW_PROCESSOR = 1 << 0,
MTK_P2_ISP_PROCESSOR = 1 << 1,
MTK_P2_YUV_PROCESSOR = 1 << 2,
MTK_P2_MDP_PROCESSOR = 1 << 3,
MTK_P2_CAPTURE_REQUEST = 1 << 4,
MTK_P2_PREVIEW_REQUEST = 1 << 5
} mtk_platform_metadata_enum_p2_plugin_combination;
typedef enum mtk_platform_metadata_enum_isp_color_space {
MTK_ISP_COLOR_SPACE_SRGB = 0 ,
MTK_ISP_COLOR_SPACE_DISPLAY_P3 = 1 ,
MTK_ISP_COLOR_SPACE_CUSTOM_1 = 2
} mtk_platform_metadata_enum_isp_color_space;
typedef enum mtk_platform_metadata_enum_dualzoom_drop_req {
MTK_DUALZOOM_DROP_NEVER_DROP = 0,
MTK_DUALZOOM_DROP_NONE = 1,
MTK_DUALZOOM_DROP_DIRECTLY = 2,
MTK_DUALZOOM_DROP_NEED_P1,
MTK_DUALZOOM_DROP_NEED_SYNCMGR,
MTK_DUALZOOM_DROP_NEED_SYNCMGR_NEED_STREAM_F_PIPE,
} mtk_platform_metadata_enum_dualzoom_drop_req_t;
typedef enum mtk_platform_metadata_enum_p1_sensor_status {
MTK_P1_SENSOR_STATUS_NONE = 0,
MTK_P1_SENSOR_STATUS_STREAMING = 1,
MTK_P1_SENSOR_STATUS_SW_STANDBY = 2,
MTK_P1_SENSOR_STATUS_HW_STANDBY = 3,
} mtk_platform_metadata_enum_p1_sensor_status_t;
typedef enum mtk_platform_metadata_enum_p1_twin_switch {
MTK_P1_TWIN_SWITCH_NONE = 0,
MTK_P1_TWIN_SWITCH_ONE_TG = 1,
MTK_P1_TWIN_SWITCH_TWO_TG = 2
} mtk_platform_metadata_enum_p1_twin_switch_t;
typedef enum mtk_platform_metadata_enum_p1_twin_status {
MTK_P1_TWIN_STATUS_NONE = 0,
MTK_P1_TWIN_STATUS_TG_MODE_1 = 1,
MTK_P1_TWIN_STATUS_TG_MODE_2 = 2,
MTK_P1_TWIN_STATUS_TG_MODE_3 = 3,
} mtk_platform_metadata_enum_p1_twin_status_t;
typedef enum mtk_platform_metadata_enum_p1_resize_quality_switch {
MTK_P1_RESIZE_QUALITY_SWITCH_NONE = 0,
MTK_P1_RESIZE_QUALITY_SWITCH_L_L = 1,
MTK_P1_RESIZE_QUALITY_SWITCH_L_H = 2,
MTK_P1_RESIZE_QUALITY_SWITCH_H_L = 3,
MTK_P1_RESIZE_QUALITY_SWITCH_H_H = 4,
} mtk_platform_metadata_enum_p1_resize_quality_switch_t;
typedef enum mtk_platform_metadata_enum_p1_resize_quality_status {
MTK_P1_RESIZE_QUALITY_STATUS_NONE = 0,
MTK_P1_RESIZE_QUALITY_STATUS_ACCEPT = 1,
MTK_P1_RESIZE_QUALITY_STATUS_IGNORE = 2,
MTK_P1_RESIZE_QUALITY_STATUS_REJECT = 3,
MTK_P1_RESIZE_QUALITY_STATUS_ILLEGAL = 4,
} mtk_platform_metadata_enum_p1_resize_quality_status_t;
typedef enum mtk_platform_metadata_enum_p1_resize_quality_level {
MTK_P1_RESIZE_QUALITY_LEVEL_UNKNOWN = 0,
MTK_P1_RESIZE_QUALITY_LEVEL_L = 1,
MTK_P1_RESIZE_QUALITY_LEVEL_H = 2,
} mtk_platform_metadata_enum_p1_resize_quality_level_t;
typedef enum mtk_platform_metadata_enum_lmv_result {
MTK_LMV_RESULT_OK = 0,
MTK_LMV_RESULT_FAILED,
MTK_LMV_RESULT_SWITCHING
} mtk_platform_metadata_enum_lmv_result_t;
typedef enum mtk_platform_metadata_enum_featurepipe_app_mode {
MTK_FEATUREPIPE_PHOTO_PREVIEW = 0,
MTK_FEATUREPIPE_VIDEO_PREVIEW = 1,
MTK_FEATUREPIPE_VIDEO_RECORD = 2,
MTK_FEATUREPIPE_VIDEO_STOP = 3,
} mtk_platform_metadata_enum_featurepipe_app_mode_t;
typedef enum mtk_platform_metadata_enum_dcmf_feature_mode {
MTK_DCMF_FEATURE_BOKEH = 0,
MTK_DCMF_FEATURE_MFNR_BOKEH = 1,
MTK_DCMF_FEATURE_HDR_BOKEH = 2,
} mtk_platform_metadata_enum_dcmf_feature_mode_t;
typedef enum mtk_platform_metadata_enum_smvr_fps {
MTK_SMVR_FPS_30 = 0,
MTK_SMVR_FPS_120 = 1,
MTK_SMVR_FPS_240 = 2,
MTK_SMVR_FPS_480 = 3,
MTK_SMVR_FPS_960 = 4,
} mtk_platform_metadata_enum_smvr_fps_t;
//MTK_FRAMESYNC_FAILHANDLE
typedef enum mtk_platform_metadata_enum_fremesync_failhandle {
MTK_FRAMESYNC_FAILHANDLE_CONTINUE,
MTK_FRAMESYNC_FAILHANDLE_DROP,
} mtk_platform_metadata_enum_fremesync_failhandle_t;
//MTK_FRAMESYNC_RESULT
typedef enum mtk_platform_metadata_enum_fremesync_result {
MTK_FRAMESYNC_RESULT_PASS,
MTK_FRAMESYNC_RESULT_FAIL_CONTINUE,
MTK_FRAMESYNC_RESULT_FAIL_DROP,
} mtk_platform_metadata_enum_fremesync_result_t;
//MTK_FRAMESYNC_MODE
typedef enum mtk_platform_metadata_enum_fremesync_mode {
MTK_FRAMESYNC_MODE_VSYNC_ALIGNMENT,
MTK_FRAMESYNC_MODE_READOUT_CENTER_ALIGNMENT,
} mtk_platform_metadata_enum_fremesync_mode_t;
//MTK_FEATURE_MULTIFRAMENODE_BYPASSED
typedef enum mtk_platform_metadata_enum_multiframenode_bypassed {
MTK_FEATURE_MULTIFRAMENODE_NOT_BYPASSED = 0,
MTK_FEATURE_MULTIFRAMENODE_TO_BE_BYPASSED = 1
} mtk_platform_metadata_enum_mfllnode_bypassed_t;
//MTK_FEATURE_BSS_PROCESS
typedef enum mtk_platform_metadata_enum_bss_processing {
MTK_FEATURE_BSS_PROCESS_ENABLE = 0,
MTK_FEATURE_BSS_PROCESS_DISABLE = 1
} mtk_platform_metadata_enum_bss_processing_t;
//MTK_FEATURE_BSS_MANUAL_ORDER
typedef enum mtk_platform_metadata_enum_bss_manual_order {
MTK_FEATURE_BSS_MANUAL_ORDER_OFF = 0,
MTK_FEATURE_BSS_MANUAL_ORDER_GOLDEN = 1
} mtk_platform_metadata_enum_bss_manual_order_t;
//MTK_FEATURE_CAP_YUV_PROCESSING
typedef enum mtk_platform_metadata_enum_cap_yuv_processing {
MTK_FEATURE_CAP_YUV_PROCESSING_NOT_NEEDED = 0,
MTK_FEATURE_CAP_YUV_PROCESSING_NEEDED = 1
} mtk_platform_metadata_enum_cap_yuv_processing_t;
//MTK_FEATURE_CAP_PIPE_DCE_CONTROL
typedef enum mtk_platform_metadata_enum_cap_pipe_control {
MTK_FEATURE_CAP_PIPE_DCE_ENABLE_BUT_NOT_APPLY = 2,
MTK_FEATURE_CAP_PIPE_DCE_MANUAL_DISABLE = 1,
MTK_FEATURE_CAP_PIPE_DCE_DEFAULT_APPLY = 0
} mtk_platform_metadata_enum_cap_pipe_dce_control_t;
// MTK_FEATURE_AINR_MDLA_MODE, MTK_ISP_AINR_MDLA_MODE
typedef enum mtk_platform_metadata_enum_ainr_mdla_mode {
MTK_FEATURE_AINR_MDLA_MODE_NONE = 0,
MTK_FEATURE_AINR_MDLA_MODE_DRCOUT_16BIT = 1,
MTK_FEATURE_AINR_MDLA_MODE_NNOUT_12BIT = 2,
MTK_FEATURE_AINR_MDLA_MODE_NNOUT_16BIT = 3,
} mtk_platform_metadata_enum_ainr_mdla_mode_t;
//MTK_ISP_P2_PROCESSED_RAW
typedef enum mtk_platform_metadata_enum_p2_processed_raw {
MTK_ISP_P2_PROCESSED_RAW_NOT_NEEDED = 0,
MTK_ISP_P2_PROCESSED_RAW_NEEDED = 1
} mtk_platform_metadata_enum_p2_processed_raw_t;
//MTK_DUALZOOM_STREAMING_NR
typedef enum mtk_platform_metadata_enum_dualzoom_streaming_nr {
MTK_DUALZOOM_STREAMING_NR_AUTO = 0,
MTK_DUALZOOM_STREAMING_NR_OFF = 1
} mtk_platform_metadata_enum_dualzoom_streaming_nr_t;
//MTK_STAGGER_BLOB_IMGO_ORDER
typedef enum mtk_platform_metadata_enum_stagger_blob_imgo_order {
MTK_STAGGER_IMGO_NONE = 0,
MTK_STAGGER_IMGO_NE = 1,
MTK_STAGGER_IMGO_ME = 2,
MTK_STAGGER_IMGO_SE = 3
} mtk_platform_metadata_enum_stagger_blob_imgo_order_t;
//MTK_3A_EXIF_FLASH_FIRING_STATUS
typedef enum mtk_platform_metadata_enum_3a_exif_flash_firing_status_t {
MTK_3A_EXIF_FLASH_FIRING_STATUS_NOT_FIRED = 0,
MTK_3A_EXIF_FLASH_FIRING_STATUS_FIRED = 1,
} mtk_platform_metadata_enum_3a_exif_flash_firing_status_t;
//MTK_3A_EXIF_FLASH_RETURN_DETECTION
typedef enum mtk_platform_metadata_enum_3a_exif_flash_return_detection_t {
MTK_3A_EXIF_FLASH_RETURN_DETECTION_NOT_SUPPORT = 0,
MTK_3A_EXIF_FLASH_RETURN_DETECTION_RESERVED = 1,
MTK_3A_EXIF_FLASH_RETURN_DETECTION_STROBE_NOT_DETECTED = 2,
MTK_3A_EXIF_FLASH_RETURN_DETECTION_STROBE_DETECTED = 3,
} mtk_platform_metadata_enum_3a_exif_flash_return_detection_t;
//MTK_3A_EXIF_FLASH_MODE
typedef enum mtk_platform_metadata_enum_3a_exif_flash_mode_t {
MTK_3A_EXIF_FLASH_MODE_UNKNOWN = 0,
MTK_3A_EXIF_FLASH_MODE_COMPULSORY_FIRING = 1,
MTK_3A_EXIF_FLASH_MODE_COMPULSORY_SUPPRESSION = 2,
MTK_3A_EXIF_FLASH_MODE_AUTO = 3,
} mtk_platform_metadata_enum_3a_exif_flash_mode_t;
//MTK_3A_EXIF_FLASH_FUNCTION
typedef enum mtk_platform_metadata_enum_3a_exif_flash_function_t {
MTK_3A_EXIF_FLASH_FUNCTION_SUPPORT = 0,
MTK_3A_EXIF_FLASH_FUNCTION_NOT_SUPPORT = 1,
} mtk_platform_metadata_enum_3a_exif_flash_function_t;
//MTK_3A_EXIF_FLASH_REDEYE
typedef enum mtk_platform_metadata_enum_3a_exif_flash_redeye_t {
MTK_3A_EXIF_FLASH_REDEYE_NOT_SUPPORT = 0,
MTK_3A_EXIF_FLASH_REDEYE_SUPPORT = 1,
} mtk_platform_metadata_enum_3a_exif_flash_redeye_t;
//MTK_FEATURE_ABF
typedef enum mtk_platform_metadata_enum_abf_mode {
MTK_ABF_MODE_OFF = 0,
MTK_ABF_MODE_ON,
} mtk_platform_metadata_enum_abf_mode_t;
#endif
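The MTK_PLUGIN_MODE_* values above are bit flags, so several post-processing plugins can be requested in a single metadata entry. A minimal sketch of composing and testing such a mask (the helper names are illustrative, not part of the MTK headers):
#include <cstdint>
// Request noise reduction and multi-frame noise reduction together.
static int32_t makePluginMask() {
return MTK_PLUGIN_MODE_NR | MTK_PLUGIN_MODE_MFNR;
}
// Check whether a given plugin bit is set in the mask.
static bool hasPlugin(int32_t mask, int32_t plugin) {
return (mask & plugin) != 0;
}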

File diff suppressed because it is too large

@ -23,7 +23,6 @@
#include <opencv2/core/core.hpp>
#include "Camera2Helper.h"
#include <mutex>
#include <map>
#include <set>
/**
@ -39,10 +38,8 @@ static const uint64_t kMaxExposureTime = static_cast<uint64_t>(250000000);
#define WAIT_AWB_LOCKED 2
#define WAIT_AF_LOCKED 4
#define PREVIEW_REQUEST_IDX 0
#define CAPTURE_REQUEST_IDX 1
#define DEFAULT_WARMUP_TIME 250 // 250ms
#define EXPECTED_CAPTURE_IDX 0
#define EXPECTED_CAPTURE_IDX 1
class CameraManager
{
@ -85,18 +82,13 @@ public:
unsigned int orientation:3;
unsigned int zoom : 1;
unsigned int wait3ALocked : 3;
unsigned int burstRawCapture : 3;
unsigned int customHdr : 1;
unsigned int hdrStep : 3;
unsigned int minFps : 4;
unsigned int reserved : 7;
unsigned int reserved : 18;
int64_t exposureTime;
unsigned int sensitivity;
int compensation;
float zoomRatio;
uint8_t requestTemplate;
uint8_t awbMode;
uint8_t burstCaptures;
unsigned short focusTimeout; // milli-seconds 65535
};
@ -113,8 +105,8 @@ public:
int32_t compensation;
uint8_t sceneMode;
uint8_t awbMode;
uint16_t avgY;
float zoomRatio;
uint8_t avgY;
uint64_t duration;
int64_t frameDuration;
@ -123,28 +115,6 @@ public:
uint8_t afLockSetted : 1;
};
struct CaptureRequest
{
/* For image capture */
NdkCamera* pThis;
AImageReader* imageReader;
ANativeWindow* imageWindow;
ACameraOutputTarget* imageTarget;
ACaptureSessionOutput* sessionOutput;
ACaptureRequest* request;
ACameraDevice_request_template templateId;
int sessionSequenceId;
};
struct CaptureResult
{
ACameraMetadata* result;
AImage* image;
int sequenceId;
};
NdkCamera(int32_t width, int32_t height, const CAMERA_PARAMS& params);
virtual ~NdkCamera();
@ -153,75 +123,37 @@ public:
void close();
int selfTest(const std::string& cameraId, int32_t& maxResolutionX, int32_t& maxResolutionY);
static void writeJpegFile(AImage *image, const char* path);
static void writeRawFile(AImage *image, ACameraMetadata* characteristics, ACameraMetadata* result, const char* path);
void onAvailabilityCallback(const char* cameraId);
void onUnavailabilityCallback(const char* cameraId);
virtual void onImageAvailable(AImageReader* reader);
virtual int32_t getOutputFormat() const;
virtual int32_t getBurstCaptures() const;
void CreateSession(ANativeWindow* previewWindow, ANativeWindow* jpgWindow, bool manaulPreview, int32_t imageRotation, int32_t width, int32_t height);
void CreateSession(ANativeWindow* previewWindow);
CaptureRequest* CreateRequest(bool isPreviewRequest, int32_t sensitivity = -1);
void DestroyRequest(CaptureRequest* request);
void DestroySession();
virtual bool on_image(cv::Mat rgb);
virtual bool on_image(cv::Mat& rgb);
virtual void on_error(const std::string& msg);
virtual void on_image(const unsigned char* nv21, int nv21_width, int nv21_height);
virtual void onDisconnected(ACameraDevice* device);
virtual bool onOneCapture(std::shared_ptr<ACameraMetadata> characteristics, std::shared_ptr<ACameraMetadata> result, uint32_t ldr, uint32_t duration, cv::Mat rgb);
virtual bool onBurstCapture(std::shared_ptr<ACameraMetadata> characteristics, std::vector<std::shared_ptr<ACameraMetadata> >& results, uint32_t ldr, uint32_t duration, std::vector<std::vector<uint8_t> >& frames);
virtual bool onBurstCapture(std::shared_ptr<ACameraMetadata> characteristics, std::vector<std::shared_ptr<ACameraMetadata> >& results, uint32_t ldr, uint32_t duration, std::vector<std::shared_ptr<AImage> >& frames);
void onCaptureProgressed(ACameraCaptureSession* session, ACaptureRequest* request, const ACameraMetadata* result);
void onCaptureCompleted(ACameraCaptureSession* session, ACaptureRequest* request, const ACameraMetadata* result);
void onCaptureFailed(ACameraCaptureSession* session, ACaptureRequest* request, ACameraCaptureFailure* failure);
void onSessionReady(ACameraCaptureSession *session);
void onError(ACameraDevice* device, int error);
void CopyPreviewRequest(ACaptureRequest* request, const ACameraMetadata* previewResult);
void FireBurstCapture();
void FireOneCapture(uint64_t ts);
uint32_t GetLdr() const
{
return mFinalLdr;
}
bool HasFatalError() const
const CAPTURE_RESULT& getCaptureResult() const
{
return m_fatalError;
return mFinalResult;
}
bool IsCameraAvailable(const std::string& cameraId);
int64_t GetTimestamp(const ACameraMetadata* result);
static bool convertAImageToNv21(AImage* image, uint8_t** nv21, int32_t& width, int32_t& height);
static void EnumCameraResult(ACameraMetadata* result, CAPTURE_RESULT& captureResult);
protected:
void SetupMFNR(ACameraMetadata* characteristics, ACaptureRequest* request, bool ais, int32_t sensitivity);
void Setup3DNR(ACameraMetadata* characteristics, ACaptureRequest* request, int32_t sensitivity);
void SetupHDR(ACameraMetadata* characteristics, ACaptureRequest* request, int32_t sensitivity);
bool SetupTonemapCurve(ACameraMetadata* characteristics, ACaptureRequest* request);
protected:
std::mutex m_locker;
std::set<std::string> m_availableCameras;
protected:
CAMERA_PARAMS m_params;
DisplayDimension foundRes;
int camera_facing;
int camera_orientation;
bool m_firstFrame;
bool m_photoTaken;
int32_t mWidth;
int32_t mHeight;
std::string mCameraId;
@ -232,7 +164,6 @@ protected:
uint8_t awbMode;
bool aeLockAvailable;
bool awbLockAvailable;
bool m_fatalError;
uint64_t numberOfPrecaptures;
unsigned long long m_precaptureStartTime;
@ -247,13 +178,11 @@ protected:
int32_t activeArraySize[2];
int32_t maxRegions[3];
bool mCaptureTriggered;
bool mFocusTriggered;
bool mCaptureDispatched;
uint32_t mStableFrameCount;
unsigned int m_imagesCaptured;
CAPTURE_RESULT mResult;
uint64_t m_startTime;
CAPTURE_RESULT mFinalResult;
unsigned long long m_startTime;
protected:
@ -261,38 +190,15 @@ protected:
CameraManager camera_manager;
ACameraDevice* camera_device;
AImageReader* image_reader;
ANativeWindow* image_reader_surface;
ACameraOutputTarget* image_reader_target;
ACaptureRequest* capture_request;
ACaptureSessionOutputContainer* capture_session_output_container;
AImageReader* mPreviewImageReader;
ANativeWindow* mPreviewImageWindow;
ACameraOutputTarget* mPreviewOutputTarget;
ACaptureSessionOutput* mPreviewSessionOutput;
AImageReader* mImageReader;
ANativeWindow* mImageWindow;
ACameraOutputTarget* mOutputTarget;
ACaptureSessionOutput* mSessionOutput;
std::shared_ptr<ACameraMetadata> mCharacteristics;
std::vector<CaptureRequest*> mCaptureRequests;
ACaptureSessionOutput* capture_session_output;
ACameraCaptureSession* capture_session;
std::shared_ptr<ACameraMetadata> mPreviewResults;
std::vector<std::shared_ptr<ACameraMetadata> > mCaptureResults;
std::map<int64_t, std::shared_ptr<ACameraMetadata> > mCaptureResultMap;
uint32_t mLdr;
uint32_t mFinalLdr;
uint32_t mFinalBurstCaptures;
int32_t mFinalOutputFormat;
std::vector<std::shared_ptr<AImage> > mCaptureFrames;
// cv::Mat mOneFrame;
std::vector<std::pair<int64_t, cv::Mat> > mOneFrame;
std::vector<std::vector<uint8_t> > mRawFrames;
int64_t m_minTimestamp;
int captureSequenceId;
};
#endif // NDKCAMERA_H
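The on_image(const unsigned char* nv21, ...) callback above delivers frames in NV21 layout; turning such a buffer into a BGR cv::Mat is typically one cv::cvtColor call. A minimal sketch, assuming a tightly packed buffer (not code from this class):
#include <opencv2/opencv.hpp>
// NV21 is a full-resolution Y plane followed by interleaved VU at half resolution,
// so the buffer spans height * 3 / 2 rows of `width` bytes.
cv::Mat nv21ToBgr(const unsigned char* nv21, int width, int height) {
cv::Mat yuv(height * 3 / 2, width, CV_8UC1, const_cast<unsigned char*>(nv21));
cv::Mat bgr;
cv::cvtColor(yuv, bgr, cv::COLOR_YUV2BGR_NV21);
return bgr;
}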

@ -1,72 +0,0 @@
#include "hdrplus/hdrplus_pipeline.h"
int main( int argc, char** argv )
{
int rotation = atoi(argv[1]);
bool frontCamera = atoi(argv[2]) != 0;
std::vector<std::string> paths;
for (int idx = 4; idx < argc; idx++)
{
paths.push_back(argv[idx]);
}
cv::Mat mat;
hdrplus::hdrplus_pipeline pipeline;
pipeline.run_pipeline( paths, 0, mat);
if (mat.empty())
{
printf("run_pipeline return empty mat");
}
mat = hdrplus::convert16bit2_8bit_(mat.clone());
if (rotation > 0)
{
if (rotation == 1) // 0
{
cv::Mat tempPic;
cv::transpose(mat, tempPic);
cv::flip(tempPic, mat, 0);
}
else if (rotation == 2) // 90
{
cv::Mat tempPic;
cv::transpose(mat, tempPic);
cv::flip(tempPic, mat, 1);
}
else if (rotation == 3) // 180
{
if (frontCamera)
{
cv::flip(mat, mat, 0);
}
else
{
cv::flip(mat, mat, -1);
}
}
else if (rotation == 4) // 270
{
cv::Mat tempPic;
cv::transpose(mat, tempPic);
cv::flip(tempPic, mat, 0);
}
}
cv::cvtColor(mat, mat, cv::COLOR_RGB2BGR);
if (mat.empty())
{
printf("mat is empty before save");
}
bool res = cv::imwrite(argv[3], mat);
if (!res)
{
printf("Failed to write file %s err=%d", argv[3], errno);
}
return 0;
}
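Given the argument layout above (rotation, front-camera flag, output path, then input DNGs starting at argv[4]), an invocation might look like this (binary name is illustrative):
./hdrplus_demo 2 0 /tmp/out.jpg burst_000.dng burst_001.dng burst_002.dng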

@ -1,38 +0,0 @@
#pragma once
#include <vector>
#include <utility> // std::pair
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/burst.h"
namespace hdrplus
{
class align
{
public:
align() = default;
~align() = default;
/**
* @brief Run alignment on burst of images
*
* @param burst_images collection of burst images
* @param alignments per-tile alignment offsets as pixel pairs.
* Outermost vector is per alternate image.
* Innermost two vectors are for horizontal & vertical tiles
*/
void process( const hdrplus::burst& burst_images, \
std::vector<std::vector<std::vector<std::pair<int, int>>>>& alignments );
private:
// From original image to coarse image
const std::vector<int> inv_scale_factors = { 1, 2, 4, 4 };
const std::vector<int> distances = { 1, 2, 2, 2 }; // L1 / L2 distance
const std::vector<int> grayimg_search_radious = { 1, 4, 4, 4 };
const std::vector<int> grayimg_tile_sizes = { 16, 16, 16, 8 };
const int num_levels = 4;
};
} // namespace hdrplus
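With inv_scale_factors = { 1, 2, 4, 4 }, each pyramid level is built from the previous one, so the cumulative downsampling relative to the full-resolution gray image is 1, 2, 8, 32. A small sketch of that bookkeeping (illustrative only):
#include <cstdio>
#include <vector>
int main() {
const std::vector<int> inv_scale_factors = { 1, 2, 4, 4 };
int cumulative = 1;
for (size_t i = 0; i < inv_scale_factors.size(); ++i) {
cumulative *= inv_scale_factors[i];
std::printf("level %zu: 1/%d of full resolution\n", i, cumulative);
}
return 0;
}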

@ -1,54 +0,0 @@
#pragma once
#include <string>
#include <vector>
#include <utility> // std::pair
#include <memory> // std::shared_ptr
#include <opencv2/opencv.hpp> // all opencv header
#include <libraw/libraw.h>
namespace hdrplus
{
class MemFile
{
public:
std::vector<uint8_t> content;
const std::vector<uint8_t>& GetConstData() const
{
return content;
}
std::vector<uint8_t> GetData()
{
return content;
}
};
class bayer_image
{
public:
explicit bayer_image( const std::string& bayer_image_path );
explicit bayer_image( const std::vector<uint8_t>& bayer_image_content );
explicit bayer_image( std::shared_ptr<MemFile> bayer_image_file );
~bayer_image() = default;
std::pair<double, double> get_noise_params() const;
std::shared_ptr<LibRaw> libraw_processor;
cv::Mat raw_image;
cv::Mat grayscale_image;
int width;
int height;
int white_level;
std::vector<int> black_level_per_channel;
float iso;
private:
float baseline_lambda_shot = 3.24 * pow( 10, -4 );
float baseline_lambda_read = 4.3 * pow( 10, -6 );
};
} // namespace hdrplus
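baseline_lambda_shot and baseline_lambda_read parameterize the usual shot/read noise model, in which per-pixel variance is an affine function of signal level. In the HDR+ paper the baselines are scaled by analog gain; a sketch under that assumption (the gain formula is an assumption, not taken from this implementation):
#include <utility>
// Assumed scaling: gain derived from ISO relative to a base ISO of 100.
std::pair<double, double> noise_params(float iso) {
double gain = iso / 100.0;
double lambda_shot = 3.24e-4 * gain;        // shot noise scales linearly with gain
double lambda_read = 4.3e-6 * gain * gain;  // read noise scales with gain squared
return { lambda_shot, lambda_read };        // variance ~= lambda_shot * signal + lambda_read
}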

@ -1,46 +0,0 @@
#pragma once
#include <vector>
#include <string>
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/bayer_image.h"
namespace hdrplus
{
class burst
{
public:
explicit burst( const std::string& burst_path, const std::string& reference_image_path );
explicit burst(const std::vector<std::string>& burst_paths, int reference_image_index);
explicit burst( const std::vector<std::vector<uint8_t> >& bayer_image_contents, int reference_image_index );
explicit burst( const std::vector<std::shared_ptr<MemFile> >& bayer_image_files, int reference_image_index );
~burst() = default;
// Reference image index in the array
int reference_image_idx;
// Source bayer images & grayscale unpadded image
std::vector<hdrplus::bayer_image> bayer_images;
// Image padded to upper level tile size (16*2)
// Use for alignment, merging, and finishing
std::vector<cv::Mat> bayer_images_pad;
// Padding information
std::vector<int> padding_info_bayer;
// Image padded to upper level tile size (16)
// Use for alignment, merging, and finishing
std::vector<cv::Mat> grayscale_images_pad;
// number of images (including the reference) in the burst
int num_images;
// Bayer image after merging, stored as cv::Mat
cv::Mat merged_bayer_image;
};
} // namespace hdrplus
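Padding "to upper level tile size (16*2)" means rounding each dimension up to a multiple of 32 so that every pyramid level tiles evenly. A one-line sketch of that rounding (illustrative):
// Round `size` up to the next multiple of `tile` (e.g. tile = 32 for 16*2).
inline int pad_to_multiple(int size, int tile) {
return ((size + tile - 1) / tile) * tile;
}
// pad_to_multiple(4032, 32) == 4032, pad_to_multiple(3024, 32) == 3040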

@ -1,251 +0,0 @@
#pragma once
#include <opencv2/opencv.hpp> // all opencv header
#include <string>
#include <fstream>
#include <sstream>
#include <iostream>
#include <unordered_map>
#include <hdrplus/bayer_image.h>
#include <dirent.h>
#include <hdrplus/params.h>
#include <hdrplus/burst.h>
namespace hdrplus
{
uint16_t uGammaCompress_1pix(float x, float threshold,float gainMin,float gainMax,float exponent);
uint16_t uGammaDecompress_1pix(float x, float threshold,float gainMin,float gainMax,float exponent);
cv::Mat uGammaCompress_(cv::Mat m,float threshold,float gainMin,float gainMax,float exponent);
cv::Mat uGammaDecompress_(cv::Mat m,float threshold,float gainMin,float gainMax,float exponent);
cv::Mat gammasRGB(cv::Mat img, bool mode);
class finish
{
public:
cv::Mat mergedBayer; // merged image from Merge Module
std::string burstPath; // path to burst images
std::vector<std::string> rawPathList; // paths to all burst images under burstPath
int refIdx; // index of the reference img
Parameters params;
cv::Mat rawReference;
// LibRaw libraw_processor_finish;
bayer_image* refBayer;
std::string mergedImgPath;
finish()
{
refBayer = NULL;
}
// use this constructor once the merge stage has finished
finish(std::string burstPath, cv::Mat mergedBayer,int refIdx) {
refBayer = NULL;
this->refIdx = refIdx;
this->burstPath = burstPath;
this->mergedBayer = mergedBayer;
}
// for local testing only
finish(std::string burstPath, std::string mergedBayerPath,int refIdx){
this->refIdx = refIdx;
this->burstPath = burstPath;
this->mergedBayer = loadFromCSV(mergedBayerPath, CV_16UC1);
load_rawPathList(burstPath);
refBayer= new bayer_image(this->rawPathList[refIdx]);
this->rawReference = refBayer->raw_image; // alternatively: refBayer->grayscale_image
// initialize parameters in libraw_processor_finish
setLibRawParams();
showParams();
std::cout<<"Finish init() finished!"<<std::endl;
}
~finish()
{
if (refBayer != NULL)
{
delete refBayer;
refBayer = NULL;
}
}
// finish pipeline func
// void process(std::string burstPath, cv::Mat mergedBayer,int refIdx);
void process(const hdrplus::burst& burst_images, cv::Mat& finalOutputImage);
// copy the contents of Mat B into Mat A (16-bit)
void copy_mat_16U(cv::Mat& A, cv::Mat B);
void copy_rawImg2libraw(std::shared_ptr<LibRaw>& libraw_ptr, cv::Mat B);
// postprocess
// cv::Mat postprocess(std::shared_ptr<LibRaw>& libraw_ptr);
void showImg(cv::Mat img)
{
int ch = CV_MAT_CN(CV_8UC1);
// cv::Mat tmp(4208,3120,CV_16UC1);
cv::Mat tmp(img);
// u_int16_t* ptr_tmp = (u_int16_t*)tmp.data;
// u_int16_t* ptr_img = (u_int16_t*)img.data;
// // col major to row major
// for(int r = 0; r < tmp.rows; r++) {
// for(int c = 0; c < tmp.cols; c++) {
// *(ptr_tmp+r*tmp.cols+c) = *(ptr_img+c*tmp.rows+r)/2048.0*255.0;
// }
// }
// std::cout<<"height="<<tmp.rows<<std::endl;
// std::cout<<"width="<<tmp.cols<<std::endl;
// cv::transpose(tmp, tmp);
u_int16_t* ptr = (u_int16_t*)tmp.data;
for(int r = 0; r < tmp.rows; r++) {
for(int c = 0; c < tmp.cols; c++) {
*(ptr+r*tmp.cols+c) = *(ptr+r*tmp.cols+c)/2048.0*255.0;
}
}
tmp = tmp.reshape(ch);
tmp.convertTo(tmp, CV_8UC1);
cv::imshow("test",tmp);
cv::imwrite("test2.jpg", tmp);
cv::waitKey(0);
std::cout<< this->mergedBayer.size()<<std::endl;
}
void showMat(cv::Mat img){
std::cout<<"size="<<img.size()<<std::endl;
std::cout<<"type="<<img.type()<<std::endl;
}
void showParams()
{
std::cout<<"Parameters:"<<std::endl;
std::cout<<"tuning_ltmGain = "<<this->params.tuning.ltmGain<<std::endl;
std::cout<<"tuning_gtmContrast = "<<this->params.tuning.gtmContrast<<std::endl;
for(auto key_val:this->params.flags){
std::cout<<key_val.first<<","<<key_val.second<<std::endl;
}
std::cout<<"demosaic_algorithm = "<<refBayer->libraw_processor->imgdata.params.user_qual<<std::endl;
std::cout<<"half_size = "<<refBayer->libraw_processor->imgdata.params.half_size<<std::endl;
std::cout<<"use_camera_wb = "<<refBayer->libraw_processor->imgdata.params.use_camera_wb<<std::endl;
std::cout<<"use_auto_wb = "<<refBayer->libraw_processor->imgdata.params.use_auto_wb<<std::endl;
std::cout<<"no_auto_bright = "<<refBayer->libraw_processor->imgdata.params.no_auto_bright<<std::endl;
std::cout<<"output_color = "<<refBayer->libraw_processor->imgdata.params.output_color <<std::endl;
std::cout<<"gamma[0] = "<<refBayer->libraw_processor->imgdata.params.gamm[0]<<std::endl;
std::cout<<"gamma[1] = "<<refBayer->libraw_processor->imgdata.params.gamm[1]<<std::endl;
std::cout<<"output_bps = "<<refBayer->libraw_processor->imgdata.params.output_bps<<std::endl;
// std::cout<<"demosaic_algorithm = "<<libraw_processor_finish.imgdata.params.user_qual<<std::endl;
// std::cout<<"half_size = "<<libraw_processor_finish.imgdata.params.half_size<<std::endl;
// std::cout<<"use_camera_wb = "<<libraw_processor_finish.imgdata.params.use_camera_wb<<std::endl;
// std::cout<<"use_auto_wb = "<<libraw_processor_finish.imgdata.params.use_auto_wb<<std::endl;
// std::cout<<"no_auto_bright = "<<libraw_processor_finish.imgdata.params.no_auto_bright<<std::endl;
// std::cout<<"output_color = "<<libraw_processor_finish.imgdata.params.output_color <<std::endl;
// std::cout<<"gamma[0] = "<<libraw_processor_finish.imgdata.params.gamm[0]<<std::endl;
// std::cout<<"gamma[1] = "<<libraw_processor_finish.imgdata.params.gamm[1]<<std::endl;
// std::cout<<"output_bps = "<<libraw_processor_finish.imgdata.params.output_bps<<std::endl;
std::cout<<"===================="<<std::endl;
}
void showRawPathList(){
std::cout<<"RawPathList:"<<std::endl;
for(auto pth:this->rawPathList){
std::cout<<pth<<std::endl;
}
std::cout<<"===================="<<std::endl;
}
private:
cv::Mat loadFromCSV(const std::string& path, int opencv_type)
{
cv::Mat m;
std::ifstream csvFile (path);
std::string line;
while (getline(csvFile, line))
{
std::vector<int> dvals;
std::stringstream ss(line);
std::string val;
// int count=0;
while (getline(ss, val, ','))
{
dvals.push_back(stod(val));//*255.0/2048.0
// count++;
}
// std::cout<<count<<std::endl;
cv::Mat mline(dvals, true);
cv::transpose(mline, mline);
m.push_back(mline);
}
int ch = CV_MAT_CN(opencv_type);
m = m.reshape(ch);
m.convertTo(m, opencv_type);
return m;
}
void load_rawPathList(std::string burstPath){
DIR *pDir; // pointer to root
struct dirent *ptr;
if (!(pDir = opendir(burstPath.c_str()))) {
std::cout<<"root dir not found!"<<std::endl;
return;
}
while ((ptr = readdir(pDir)) != nullptr) {
// ptr will move to the next file automatically
std::string sub_file = burstPath + "/" + ptr->d_name; // current filepath that ptr points to
if (ptr->d_type != DT_REG && ptr->d_type != DT_DIR) { // neither a regular file nor a directory
continue; // skip this entry instead of aborting the whole listing
}
// only need regular files
if (ptr->d_type == DT_REG) {
if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
if (strstr(ptr->d_name, ".dng")) {
rawPathList.emplace_back(sub_file);
}
}
}
}
// close root dir
closedir(pDir);
}
void setLibRawParams(){
refBayer->libraw_processor->imgdata.params.user_qual = params.rawpyArgs.demosaic_algorithm;
refBayer->libraw_processor->imgdata.params.half_size = params.rawpyArgs.half_size;
refBayer->libraw_processor->imgdata.params.use_camera_wb = params.rawpyArgs.use_camera_wb;
refBayer->libraw_processor->imgdata.params.use_auto_wb = params.rawpyArgs.use_auto_wb;
refBayer->libraw_processor->imgdata.params.no_auto_bright = params.rawpyArgs.no_auto_bright;
refBayer->libraw_processor->imgdata.params.output_color = params.rawpyArgs.output_color;
refBayer->libraw_processor->imgdata.params.gamm[0] = params.rawpyArgs.gamma[0];
refBayer->libraw_processor->imgdata.params.gamm[1] = params.rawpyArgs.gamma[1];
refBayer->libraw_processor->imgdata.params.output_bps = params.rawpyArgs.output_bps;
// libraw_processor_finish.imgdata.params.user_qual = params.rawpyArgs.demosaic_algorithm;
// libraw_processor_finish.imgdata.params.half_size = params.rawpyArgs.half_size;
// libraw_processor_finish.imgdata.params.use_camera_wb = params.rawpyArgs.use_camera_wb;
// libraw_processor_finish.imgdata.params.use_auto_wb = params.rawpyArgs.use_auto_wb;
// libraw_processor_finish.imgdata.params.no_auto_bright = params.rawpyArgs.no_auto_bright;
// libraw_processor_finish.imgdata.params.output_color = params.rawpyArgs.output_color;
// libraw_processor_finish.imgdata.params.gamm[0] = params.rawpyArgs.gamma[0];
// libraw_processor_finish.imgdata.params.gamm[1] = params.rawpyArgs.gamma[1];
// libraw_processor_finish.imgdata.params.output_bps = params.rawpyArgs.output_bps;
}
};
} // namespace hdrplus

@ -1,54 +0,0 @@
#pragma once
#include <string>
#include <climits> // USHRT_MAX
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/burst.h"
#include "hdrplus/align.h"
#include "hdrplus/merge.h"
#include "hdrplus/finish.h"
namespace hdrplus
{
inline cv::Mat convert16bit2_8bit_(cv::Mat ans) {
if(ans.type()==CV_16UC3){
cv::MatIterator_<cv::Vec3w> it, end;
for( it = ans.begin<cv::Vec3w>(), end = ans.end<cv::Vec3w>(); it != end; ++it)
{
// std::cout<<sizeof (*it)[0] <<std::endl;
(*it)[0] *=(255.0/USHRT_MAX);
(*it)[1] *=(255.0/USHRT_MAX);
(*it)[2] *=(255.0/USHRT_MAX);
}
ans.convertTo(ans, CV_8UC3);
}else if(ans.type()==CV_16UC1){
u_int16_t* ptr = (u_int16_t*)ans.data;
int end = ans.rows*ans.cols;
for(int i=0;i<end;i++){
*(ptr+i) *=(255.0/USHRT_MAX);
}
ans.convertTo(ans, CV_8UC1);
}else{
// std::cout<<"Unsupported Data Type"<<std::endl;
}
return ans;
}
class hdrplus_pipeline
{
private:
hdrplus::align align_module;
hdrplus::merge merge_module;
hdrplus::finish finish_module;
public:
void run_pipeline( const std::string& burst_path, const std::string& reference_image_path );
bool run_pipeline( const std::vector<std::string>& burst_paths, int reference_image_index, cv::Mat& finalImg );
bool run_pipeline( const std::vector<std::vector<uint8_t> >& burst_contents, int reference_image_index, cv::Mat& finalImg );
bool run_pipeline( const std::vector<std::shared_ptr<MemFile> >& burst_contents, int reference_image_index, cv::Mat& finalImg );
hdrplus_pipeline() = default;
~hdrplus_pipeline() = default;
};
} // namespace hdrplus
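The MemFile overload lets callers run the pipeline on DNG buffers already held in memory (for example, frames captured through AImageReader) rather than files on disk. A hedged sketch, where capturedBuffers (a std::vector<std::vector<uint8_t>>) is assumed:
#include <memory>
#include <vector>
cv::Mat mergeFromMemory(const std::vector<std::vector<uint8_t>>& capturedBuffers) {
std::vector<std::shared_ptr<hdrplus::MemFile>> files;
for (const auto& dngBytes : capturedBuffers) {
auto f = std::make_shared<hdrplus::MemFile>();
f->content = dngBytes;
files.push_back(f);
}
cv::Mat merged;
hdrplus::hdrplus_pipeline pipeline;
pipeline.run_pipeline(files, 0, merged);
return merged;
}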

@ -1,184 +0,0 @@
#pragma once
#include <vector>
#include <opencv2/opencv.hpp> // all opencv header
#include <cmath>
#include "hdrplus/burst.h"
#define TILE_SIZE 16
#define TEMPORAL_FACTOR 75
#define SPATIAL_FACTOR 0.1
namespace hdrplus
{
class merge
{
public:
int offset = TILE_SIZE / 2;
float baseline_lambda_shot = 3.24 * pow( 10, -4 );
float baseline_lambda_read = 4.3 * pow( 10, -6 );
merge() = default;
~merge() = default;
/**
* @brief Run alignment on burst of images
*
* @param burst_images collection of burst images
* @param alignments alignment in pixel value pair.
* Outermost vector is per alternate image.
* Innermost two vectors are for horizontal & vertical tiles
*/
void process( hdrplus::burst& burst_images, \
std::vector<std::vector<std::vector<std::pair<int, int>>>>& alignments);
/*
std::vector<cv::Mat> get_other_tiles(); //return the other tile list T_1 to T_n
std::vector<cv::Mat> vector_math(string operation, reference_tile, other_tile_list); //for loop allowing operations across single element and list
std::vector<cv::Mat> scalar_vector_math(string operation, scalar num, std::vector<cv::Mat> tile_list); //for loop allowing operations across single element and list
std::vector<cv::Mat> average_vector(std::vector<cv::Mat> tile_list); //take average of vector elements
*/
private:
float tileRMS(cv::Mat tile) {
cv::Mat squared;
cv::multiply(tile, tile, squared);
return sqrt(cv::mean(squared)[0]);
}
std::vector<float> getNoiseVariance(std::vector<cv::Mat> tiles, float lambda_shot, float lambda_read) {
std::vector<float> noise_variance;
for (auto tile : tiles) {
noise_variance.push_back(lambda_shot * tileRMS(tile) + lambda_read);
}
return noise_variance;
}
cv::Mat cosineWindow1D(cv::Mat input, int window_size = TILE_SIZE) {
cv::Mat output = input.clone();
for (int i = 0; i < input.cols; ++i) {
output.at<float>(0, i) = 1. / 2. - 1. / 2. * cos(2 * M_PI * (input.at<float>(0, i) + 1 / 2.) / window_size);
}
return output;
}
cv::Mat cosineWindow2D(cv::Mat tile) {
int window_size = tile.rows; // Assuming square tile
cv::Mat output_tile = tile.clone();
cv::Mat window = cv::Mat::zeros(1, window_size, CV_32F);
for(int i = 0; i < window_size; ++i) {
window.at<float>(i) = i;
}
cv::Mat window_x = cosineWindow1D(window, window_size);
window_x = cv::repeat(window_x, window_size, 1);
cv::Mat window_2d = window_x.mul(window_x.t());
cv::Mat window_applied;
cv::multiply(tile, window_2d, window_applied, 1, CV_32F);
return window_applied;
}
cv::Mat cat2Dtiles(std::vector<std::vector<cv::Mat>> tiles) {
std::vector<cv::Mat> rows;
for (auto row_tiles : tiles) {
cv::Mat row;
cv::hconcat(row_tiles, row);
rows.push_back(row);
}
cv::Mat img;
cv::vconcat(rows, img);
return img;
}
void circshift(cv::Mat &out, const cv::Point &delta)
{
cv::Size sz = out.size();
// error checking
assert(sz.height > 0 && sz.width > 0);
// no need to shift
if ((sz.height == 1 && sz.width == 1) || (delta.x == 0 && delta.y == 0))
return;
// delta transform
int x = delta.x;
int y = delta.y;
if (x > 0) x = x % sz.width;
if (y > 0) y = y % sz.height;
if (x < 0) x = x % sz.width + sz.width;
if (y < 0) y = y % sz.height + sz.height;
// in case of multiple dimensions
std::vector<cv::Mat> planes;
split(out, planes);
for (size_t i = 0; i < planes.size(); i++)
{
// vertical
cv::Mat tmp0, tmp1, tmp2, tmp3;
cv::Mat q0(planes[i], cv::Rect(0, 0, sz.width, sz.height - y));
cv::Mat q1(planes[i], cv::Rect(0, sz.height - y, sz.width, y));
q0.copyTo(tmp0);
q1.copyTo(tmp1);
tmp0.copyTo(planes[i](cv::Rect(0, y, sz.width, sz.height - y)));
tmp1.copyTo(planes[i](cv::Rect(0, 0, sz.width, y)));
// horizontal
cv::Mat q2(planes[i], cv::Rect(0, 0, sz.width - x, sz.height));
cv::Mat q3(planes[i], cv::Rect(sz.width - x, 0, x, sz.height));
q2.copyTo(tmp2);
q3.copyTo(tmp3);
tmp2.copyTo(planes[i](cv::Rect(x, 0, sz.width - x, sz.height)));
tmp3.copyTo(planes[i](cv::Rect(0, 0, x, sz.height)));
}
cv::merge(planes, out);
}
void fftshift(cv::Mat &out)
{
cv::Size sz = out.size();
cv::Point pt(0, 0);
pt.x = (int) floor(sz.width / 2.0);
pt.y = (int) floor(sz.height / 2.0);
circshift(out, pt);
}
void ifftshift(cv::Mat &out)
{
cv::Size sz = out.size();
cv::Point pt(0, 0);
pt.x = (int) ceil(sz.width / 2.0);
pt.y = (int) ceil(sz.height / 2.0);
circshift(out, pt);
}
std::vector<cv::Mat> getReferenceTiles(cv::Mat reference_image);
cv::Mat mergeTiles(std::vector<cv::Mat> tiles, int rows, int cols);
cv::Mat processChannel( hdrplus::burst& burst_images, \
std::vector<std::vector<std::vector<std::pair<int, int>>>>& alignments, \
cv::Mat channel_image, \
std::vector<cv::Mat> alternate_channel_i_list,\
float lambda_shot, \
float lambda_read);
//temporal denoise
std::vector<cv::Mat> temporal_denoise(std::vector<cv::Mat> tiles, std::vector<std::vector<cv::Mat>> alt_tiles, std::vector<float> noise_variance, float temporal_factor);
std::vector<cv::Mat> spatial_denoise(std::vector<cv::Mat> tiles, int num_alts, std::vector<float> noise_variance, float spatial_factor);
};
} // namespace hdrplus
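temporal_denoise follows the HDR+-style frequency-domain merge: each alternate tile is blended toward the reference tile with a per-frequency Wiener-like weight driven by the noise variance and the temporal factor. A simplified scalar sketch for a single complex frequency bin (not the exact code here):
#include <complex>
// Blend an alternate-frame coefficient with the reference coefficient.
// `noise_scaled` stands for the tile noise variance times the temporal factor.
std::complex<float> merge_bin(std::complex<float> ref,
std::complex<float> alt,
float noise_scaled) {
std::complex<float> diff = ref - alt;
float d2 = std::norm(diff);              // |ref - alt|^2
float shrink = d2 / (d2 + noise_scaled); // ~1 when frames differ, ~0 when they agree
return alt + shrink * diff;              // keep ref where misaligned, average where aligned
}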

@ -1,69 +0,0 @@
#pragma once
#include <string>
#include <unordered_map>
#include <memory> // std::shared_ptr
#include <opencv2/opencv.hpp> // all opencv header
#include <libraw/libraw.h>
namespace hdrplus
{
class RawpyArgs{
public:
int demosaic_algorithm = 3; // 3 = AHD interpolation (maps to LibRaw's int user_qual)
bool half_size = false;
bool use_camera_wb = true;
bool use_auto_wb = false;
bool no_auto_bright = true;
int output_color = LIBRAW_COLORSPACE_sRGB;
int gamma[2] = {1,1}; // linear gamma (1,1); gamma correction is not applied here, matching rawpy
int output_bps = 16;
};
class Options{
public:
std::string input = "";
std::string output = "";
std::string mode = "full"; //'full' 'align' 'merge' 'finish'
int reference = 0;
float temporalfactor=75.0;
float spatialfactor = 0.1;
int ltmGain=-1;
double gtmContrast=0.075;
int verbose=2; // (0, 1, 2, 3, 4, 5)
};
class Tuning{
public:
std::string ltmGain = "auto";
double gtmContrast = 0.075;
std::vector<float> sharpenAmount{1,0.5,0.5};
std::vector<float> sharpenSigma{1,2,4};
std::vector<float> sharpenThreshold{0.02,0.04,0.06};
};
class Parameters{
public:
std::unordered_map<std::string,bool> flags;
RawpyArgs rawpyArgs;
Options options;
Tuning tuning;
Parameters()
{
const char* keys[] = {"writeReferenceImage", "writeGammaReference", "writeMergedImage", "writeGammaMerged",
"writeShortExposure", "writeLongExposure", "writeFusedExposure", "writeLTMImage",
"writeLTMGamma", "writeGTMImage", "writeReferenceFinal", "writeFinalImage"};
for (int idx = 0; idx < sizeof(keys) / sizeof(const char*); idx++) {
flags[keys[idx]] = true;
}
}
};
cv::Mat postprocess(std::shared_ptr<LibRaw>& libraw_ptr, RawpyArgs rawpyArgs);
void setParams(std::shared_ptr<LibRaw>& libraw_ptr, RawpyArgs rawpyArgs);
} // namespace hdrplus
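Parameters pre-populates every debug-output flag to true, so call sites only need to switch off the stages they don't want dumped. Illustrative only:
hdrplus::Parameters params;
params.flags["writeGammaReference"] = false; // skip one intermediate dump
params.tuning.gtmContrast = 0.1;             // stronger global tone mapping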

@ -1,326 +0,0 @@
#pragma once
#include <string>
#include <stdexcept> // std::runtime_error
#include <opencv2/opencv.hpp> // all opencv header
#include <omp.h>
// https://stackoverflow.com/questions/63404539/portable-loop-unrolling-with-template-parameter-in-c-with-gcc-icc
/// Helper macros for stringification
#define TO_STRING_HELPER(X) #X
#define TO_STRING(X) TO_STRING_HELPER(X)
// Define loop unrolling depending on the compiler
#if defined(__ICC) || defined(__ICL)
#define UNROLL_LOOP(n) _Pragma(TO_STRING(unroll (n)))
#elif defined(__clang__)
#define UNROLL_LOOP(n) _Pragma(TO_STRING(unroll (n)))
#elif defined(__GNUC__) && !defined(__clang__)
#define UNROLL_LOOP(n) _Pragma(TO_STRING(GCC unroll (n)))
#elif defined(_MSC_BUILD)
#pragma message ("Microsoft Visual C++ (MSVC) detected: Loop unrolling not supported!")
#define UNROLL_LOOP(n)
#else
#warning "Unknown compiler: Loop unrolling not supported!"
#define UNROLL_LOOP(n)
#endif
namespace hdrplus
{
template <typename T, int kernel>
cv::Mat box_filter_kxk( const cv::Mat& src_image )
{
const T* src_image_ptr = (T*)src_image.data;
int src_height = src_image.size().height;
int src_width = src_image.size().width;
int src_step = src_image.step1();
if ( kernel <= 0 )
{
#ifdef __ANDROID__
return cv::Mat();
#else
throw std::runtime_error(std::string( __FILE__ ) + "::" + __func__ + " box filter only support kernel size >= 1");
#endif
}
// int(src_height / kernel) = floor(src_height / kernel)
// When the input size is not a multiple of the kernel, take the floor
cv::Mat dst_image( src_height / kernel, src_width / kernel, src_image.type() );
T* dst_image_ptr = (T*)dst_image.data;
int dst_height = dst_image.size().height;
int dst_width = dst_image.size().width;
int dst_step = dst_image.step1();
for ( int row_i = 0; row_i < dst_height; ++row_i )
{
for ( int col_i = 0; col_i < dst_width; col_i++ )
{
// Accumulate the kernel window
T box_sum = T( 0 );
UNROLL_LOOP( kernel )
for ( int kernel_row_i = 0; kernel_row_i < kernel; ++kernel_row_i )
{
UNROLL_LOOP( kernel )
for ( int kernel_col_i = 0; kernel_col_i < kernel; ++kernel_col_i )
{
box_sum += src_image_ptr[ ( row_i * kernel + kernel_row_i ) * src_step + ( col_i * kernel + kernel_col_i ) ];
}
}
// Average over the window (integer division truncates)
T box_avg = box_sum / T( kernel * kernel );
dst_image_ptr[ row_i * dst_step + col_i ] = box_avg;
}
}
return dst_image;
}
template <typename T, int kernel>
cv::Mat downsample_nearest_neighbour( const cv::Mat& src_image )
{
const T* src_image_ptr = (T*)src_image.data;
int src_height = src_image.size().height;
int src_width = src_image.size().width;
int src_step = src_image.step1();
// int(src_height / kernel) = floor(src_height / kernel)
// When the input size is not a multiple of the kernel, take the floor
cv::Mat dst_image = cv::Mat( src_height / kernel, src_width / kernel, src_image.type() );
T* dst_image_ptr = (T*)dst_image.data;
int dst_height = dst_image.size().height;
int dst_width = dst_image.size().width;
int dst_step = dst_image.step1();
// -O3 should be enough to optimize the code below
for ( int row_i = 0; row_i < dst_height; row_i++ )
{
UNROLL_LOOP( 32 )
for ( int col_i = 0; col_i < dst_width; col_i++ )
{
dst_image_ptr[ row_i * dst_step + col_i ] = \
src_image_ptr[ (row_i * kernel) * src_step + (col_i * kernel) ];
}
}
return dst_image;
}
template< typename T >
void print_cvmat( cv::Mat image )
{
const T* img_ptr = (const T*)image.data;
int height = image.size().height;
int width = image.size().width;
int step = image.step1();
int chns = image.channels();
printf("print_cvmat()::Image of size height = %d, width = %d, step = %d\n", \
height, width, step );
if ( chns == 1 )
{
for ( int row_i = 0; row_i < height; ++row_i )
{
int row_i_offset = row_i * step;
for ( int col_i = 0; col_i < width; ++col_i )
{
printf("%3.d ", img_ptr[ row_i_offset + col_i ]);
//printf("%3.d ", int( image.at<T>( row_i, col_i ) ) );
}
printf("\n");
}
}
else if ( chns == 3 )
{
for ( int row_i = 0; row_i < height; ++row_i )
{
int row_i_offset = row_i * step;
for ( int col_i = 0; col_i < width; ++col_i )
{
printf("[%3.d, %3.d, %3.d] ", img_ptr[ row_i_offset + col_i * 3 + 0 ], \
img_ptr[ row_i_offset + col_i * 3 + 1 ], \
img_ptr[ row_i_offset + col_i * 3 + 2 ] );
}
printf("\n");
}
}
else
{
#ifdef __ANDROID__
#else
throw std::runtime_error("cv::Mat number of channel currently not support to print\n");
#endif
}
}
/**
* @brief Extract the four Bayer channels separately from a Bayer image
*
* @tparam T data type of the Bayer image.
* The four output Mats are filled in place. OpenCV internally maintains reference counts,
* so this step won't create deep-copy overhead.
*
* @example extract_rgb_from_bayer<uint16_t>( bayer_img, ch1, ch2, ch3, ch4 );
*/
template <typename T>
void extract_rgb_from_bayer( const cv::Mat& bayer_img, \
cv::Mat& img_ch1, cv::Mat& img_ch2, cv::Mat& img_ch3, cv::Mat& img_ch4 )
{
const T* bayer_img_ptr = (const T*)bayer_img.data;
int bayer_width = bayer_img.size().width;
int bayer_height = bayer_img.size().height;
int bayer_step = bayer_img.step1();
if ( bayer_width % 2 != 0 || bayer_height % 2 != 0 )
{
#ifdef __ANDROID__
#else
throw std::runtime_error("Bayer image data size incorrect, must be multiplier of 2\n");
#endif
}
// RGB image is half the size of bayer image
int rgb_width = bayer_width / 2;
int rgb_height = bayer_height / 2;
img_ch1.create( rgb_height, rgb_width, bayer_img.type() );
img_ch2.create( rgb_height, rgb_width, bayer_img.type() );
img_ch3.create( rgb_height, rgb_width, bayer_img.type() );
img_ch4.create( rgb_height, rgb_width, bayer_img.type() );
int rgb_step = img_ch1.step1();
T* img_ch1_ptr = (T*)img_ch1.data;
T* img_ch2_ptr = (T*)img_ch2.data;
T* img_ch3_ptr = (T*)img_ch3.data;
T* img_ch4_ptr = (T*)img_ch4.data;
#pragma omp parallel for
for ( int rgb_row_i = 0; rgb_row_i < rgb_height; rgb_row_i++ )
{
int rgb_row_i_offset = rgb_row_i * rgb_step;
// Every RGB row corresponds to two Bayer image rows
int bayer_row_i_offset0 = ( rgb_row_i * 2 + 0 ) * bayer_step; // For RG
int bayer_row_i_offset1 = ( rgb_row_i * 2 + 1 ) * bayer_step; // For GB
for ( int rgb_col_j = 0; rgb_col_j < rgb_width; rgb_col_j++ )
{
// img_ch1/2/3/4 : (0,0), (1,0), (0,1), (1,1)
int bayer_col_i_offset0 = rgb_col_j * 2 + 0;
int bayer_col_i_offset1 = rgb_col_j * 2 + 1;
img_ch1_ptr[ rgb_row_i_offset + rgb_col_j ] = bayer_img_ptr[ bayer_row_i_offset0 + bayer_col_i_offset0 ];
img_ch3_ptr[ rgb_row_i_offset + rgb_col_j ] = bayer_img_ptr[ bayer_row_i_offset0 + bayer_col_i_offset1 ];
img_ch2_ptr[ rgb_row_i_offset + rgb_col_j ] = bayer_img_ptr[ bayer_row_i_offset1 + bayer_col_i_offset0 ];
img_ch4_ptr[ rgb_row_i_offset + rgb_col_j ] = bayer_img_ptr[ bayer_row_i_offset1 + bayer_col_i_offset1 ];
}
}
}
/**
* @brief Convert an RGB image to gray through an equal-weight linear combination.
* Also supports implicit data type conversion.
*
* @tparam RGB_DTYPE rgb image type (e.g. uint16_t)
* @tparam GRAY_DTYPE gray image type (e.g. uint16_t)
* @tparam GRAY_CVTYPE opencv gray image type
*/
template< typename RGB_DTYPE, typename GRAY_DTYPE, int GRAY_CVTYPE >
cv::Mat rgb_2_gray( const cv::Mat& rgb_img )
{
const RGB_DTYPE* rgb_img_ptr = (const RGB_DTYPE*)rgb_img.data;
int img_width = rgb_img.size().width;
int img_height = rgb_img.size().height;
int rgb_img_step = rgb_img.step1();
// Create output gray cv::Mat
cv::Mat gray_img( img_height, img_width, GRAY_CVTYPE );
GRAY_DTYPE* gray_img_ptr = (GRAY_DTYPE*)gray_img.data;
int gray_img_step = gray_img.step1();
#pragma omp parallel for
for ( int row_i = 0; row_i < img_height; row_i++ )
{
int rgb_row_i_offset = row_i * rgb_img_step;
int gray_row_i_offset = row_i * gray_img_step;
UNROLL_LOOP( 32 ) // multiplier of cache line size
for ( int col_j = 0; col_j < img_width; col_j++ )
{
GRAY_DTYPE avg_ij(0);
avg_ij += rgb_img_ptr[ rgb_row_i_offset + (col_j * 3 + 0) ];
avg_ij += rgb_img_ptr[ rgb_row_i_offset + (col_j * 3 + 1) ];
avg_ij += rgb_img_ptr[ rgb_row_i_offset + (col_j * 3 + 2) ];
avg_ij /= 3;
gray_img_ptr[ gray_row_i_offset + col_j ] = avg_ij;
}
}
// OpenCV uses reference counting, so returning won't create a deep copy
return gray_img;
}
template <typename T>
void print_tile( const cv::Mat& img, int tile_size, int start_idx_row, int start_idx_col )
{
const T* img_ptr = (T*)img.data;
int src_step = img.step1();
for ( int row = start_idx_row; row < tile_size + start_idx_row; ++row )
{
const T* img_ptr_row = img_ptr + row * src_step;
for ( int col = start_idx_col; col < tile_size + start_idx_col; ++col )
{
printf("%u ", img_ptr_row[ col ] );
}
printf("\n");
}
printf("\n");
}
template< typename T>
void print_img( const cv::Mat& img, int img_height = -1, int img_width = -1 )
{
const T* img_ptr = (T*)img.data;
if ( img_height == -1 && img_width == -1 )
{
img_height = img.size().height;
img_width = img.size().width;
}
else
{
img_height = std::min( img.size().height, img_height );
img_width = std::min( img.size().width, img_width );
}
printf("Image size (h=%d, w=%d), Print range (h=0-%d, w=0-%d)]\n", \
img.size().height, img.size().width, img_height, img_width );
int img_step = img.step1();
for ( int row = 0; row < img_height; ++row )
{
const T* img_ptr_row = img_ptr + row * img_step;
for ( int col = 0; col < img_width; ++col )
{
printf("%u ", img_ptr_row[ col ]);
}
printf("\n");
}
printf("\n");
}
} // namespace hdrplus
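The kernel size of box_filter_kxk is a template argument so the inner loops can be fully unrolled; call sites therefore fix it at compile time. A minimal usage sketch (input image is assumed to be 16-bit grayscale):
#include <opencv2/opencv.hpp>
#include "hdrplus/utility.h"
// Downsample by 2x via box averaging, then by another 4x via nearest-neighbour sampling.
cv::Mat coarse(const cv::Mat& gray16) {
cv::Mat half = hdrplus::box_filter_kxk<uint16_t, 2>(gray16);
return hdrplus::downsample_nearest_neighbour<uint16_t, 4>(half);
}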

@ -1,994 +0,0 @@
#include <vector>
#include <string>
#include <limits>
#include <cstdio>
#include <utility> // std::make_pair
#include <stdexcept> // std::runtime_error
#include <opencv2/opencv.hpp> // all opencv header
#include <omp.h>
#include "hdrplus/align.h"
#include "hdrplus/burst.h"
#include "hdrplus/utility.h"
namespace hdrplus
{
// Function declarations
static void build_per_grayimg_pyramid( \
std::vector<cv::Mat>& images_pyramid, \
const cv::Mat& src_image, \
const std::vector<int>& inv_scale_factors );
template< int pyramid_scale_factor_prev_curr, int tilesize_scale_factor_prev_curr, int tile_size >
static void build_upsampled_prev_aligement( \
const std::vector<std::vector<std::pair<int, int>>>& src_alignment, \
std::vector<std::vector<std::pair<int, int>>>& dst_alignment, \
int num_tiles_h, int num_tiles_w, \
const cv::Mat& ref_img, const cv::Mat& alt_img, \
bool consider_nbr = false );
template< typename data_type, typename return_type, int tile_size >
static unsigned long long l1_distance( const cv::Mat& img1, const cv::Mat& img2, \
int img1_tile_row_start_idx, int img1_tile_col_start_idx, \
int img2_tile_row_start_idx, int img2_tile_col_start_idx );
template< typename data_type, typename return_type, int tile_size >
static return_type l2_distance( const cv::Mat& img1, const cv::Mat& img2, \
int img1_tile_row_start_idx, int img1_tile_col_start_idx, \
int img2_tile_row_start_idx, int img2_tile_col_start_idx );
static void align_image_level( \
const cv::Mat& ref_img, \
const cv::Mat& alt_img, \
std::vector<std::vector<std::pair<int, int>>>& prev_aligement, \
std::vector<std::vector<std::pair<int, int>>>& curr_alignment, \
int scale_factor_prev_curr, \
int curr_tile_size, \
int prev_tile_size, \
int search_radiou, \
int distance_type );
// Function Implementations
// static functions are only visible within this file
static void build_per_grayimg_pyramid( \
std::vector<cv::Mat>& images_pyramid, \
const cv::Mat& src_image, \
const std::vector<int>& inv_scale_factors )
{
// #ifndef NDEBUG
// printf("%s::%s build_per_grayimg_pyramid start with scale factor : ", __FILE__, __func__ );
// for ( int i = 0; i < inv_scale_factors.size(); ++i )
// {
// printf("%d ", inv_scale_factors.at( i ));
// }
// printf("\n");
// #endif
images_pyramid.resize( inv_scale_factors.size() );
for ( size_t i = 0; i < inv_scale_factors.size(); ++i )
{
cv::Mat blur_image;
cv::Mat downsample_image;
switch ( inv_scale_factors[ i ] )
{
case 1:
images_pyramid[ i ] = src_image.clone();
// cv::Mat uses reference counting; this does not create a deep copy
downsample_image = src_image;
break;
case 2:
// printf("(2) downsample with gaussian sigma %.2f", inv_scale_factors[ i ] * 0.5 );
// // Gaussian blur
cv::GaussianBlur( images_pyramid.at( i-1 ), blur_image, cv::Size(0, 0), inv_scale_factors[ i ] * 0.5 );
// // Downsample
downsample_image = downsample_nearest_neighbour<uint16_t, 2>( blur_image );
// downsample_image = downsample_nearest_neighbour<uint16_t, 2>( images_pyramid.at( i-1 ) );
// Add
images_pyramid.at( i ) = downsample_image.clone();
break;
case 4:
// printf("(4) downsample with gaussian sigma %.2f", inv_scale_factors[ i ] * 0.5 );
cv::GaussianBlur( images_pyramid.at( i-1 ), blur_image, cv::Size(0, 0), inv_scale_factors[ i ] * 0.5 );
downsample_image = downsample_nearest_neighbour<uint16_t, 4>( blur_image );
// downsample_image = downsample_nearest_neighbour<uint16_t, 4>( images_pyramid.at( i-1 ) );
images_pyramid.at( i ) = downsample_image.clone();
break;
default:
#ifdef __ANDROID__
break;
#else
throw std::runtime_error("inv scale factor " + std::to_string( inv_scale_factors[ i ]) + "invalid" );
#endif
}
}
}
static bool operator!=( const std::pair<int, int>& lhs, const std::pair<int, int>& rhs )
{
return lhs.first != rhs.first || lhs.second != rhs.second;
}
template< int pyramid_scale_factor_prev_curr, int tilesize_scale_factor_prev_curr, int tile_size >
static void build_upsampled_prev_aligement( \
const std::vector<std::vector<std::pair<int, int>>>& src_alignment, \
std::vector<std::vector<std::pair<int, int>>>& dst_alignment, \
int num_tiles_h, int num_tiles_w, \
const cv::Mat& ref_img, const cv::Mat& alt_img, \
bool consider_nbr )
{
int src_num_tiles_h = src_alignment.size();
int src_num_tiles_w = src_alignment[ 0 ].size();
constexpr int repeat_factor = pyramid_scale_factor_prev_curr / tilesize_scale_factor_prev_curr;
// printf("build_upsampled_prev_aligement with scale factor %d, repeat factor %d, tile size factor %d\n", \
// pyramid_scale_factor_prev_curr, repeat_factor, tilesize_scale_factor_prev_curr );
int dst_num_tiles_main_h = src_num_tiles_h * repeat_factor;
int dst_num_tiles_main_w = src_num_tiles_w * repeat_factor;
if ( dst_num_tiles_main_h > num_tiles_h || dst_num_tiles_main_w > num_tiles_w )
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("current level number of tiles smaller than upsampled tiles\n");
#endif
}
// Allocate data for dst_alignment
// NOTE: number of tiles h, number of tiles w might be different from dst_num_tiles_main_h, dst_num_tiles_main_w
// For tiles between num_tile_h and dst_num_tiles_main_h, use (0,0)
dst_alignment.resize( num_tiles_h, std::vector<std::pair<int, int>>( num_tiles_w, std::pair<int, int>(0, 0) ) );
// Upsample alignment
#pragma omp parallel for collapse(2)
for ( int row_i = 0; row_i < src_num_tiles_h; row_i++ )
{
for ( int col_i = 0; col_i < src_num_tiles_w; col_i++ )
{
// Scale alignment
std::pair<int, int> align_i = src_alignment[ row_i ][ col_i ];
align_i.first *= pyramid_scale_factor_prev_curr;
align_i.second *= pyramid_scale_factor_prev_curr;
// repeat
UNROLL_LOOP( repeat_factor )
for ( int repeat_row_i = 0; repeat_row_i < repeat_factor; ++repeat_row_i )
{
int repeat_row_i_offset = row_i * repeat_factor + repeat_row_i;
UNROLL_LOOP( repeat_factor )
for ( int repeat_col_i = 0; repeat_col_i < repeat_factor; ++repeat_col_i )
{
int repeat_col_i_offset = col_i * repeat_factor + repeat_col_i;
dst_alignment[ repeat_row_i_offset ][ repeat_col_i_offset ] = align_i;
}
}
}
}
if ( consider_nbr )
{
// Copy constructor
std::vector<std::vector<std::pair<int, int>>> upsampled_alignment{ dst_alignment };
// Distance function
unsigned long long (*distance_func_ptr)(const cv::Mat&, const cv::Mat&, int, int, int, int) = \
&l1_distance<uint16_t, unsigned long long, tile_size>;
#pragma omp parallel for collapse(2)
for ( int tile_row_i = 0; tile_row_i < num_tiles_h; tile_row_i++ )
{
for ( int tile_col_i = 0; tile_col_i < num_tiles_w; tile_col_i++ )
{
const auto& curr_align_i = upsampled_alignment[ tile_row_i ][ tile_col_i ];
// Container for nbr alignment pair
std::vector<std::pair<int, int>> nbrs_align_i;
// Consider 4 neighbour's alignment
// Only compute distance if alignment is different
if ( tile_col_i > 0 )
{
const auto& nbr1_align_i = upsampled_alignment[ tile_row_i + 0 ][ tile_col_i - 1 ];
if ( curr_align_i != nbr1_align_i ) nbrs_align_i.emplace_back( nbr1_align_i );
}
if ( tile_col_i < num_tiles_w - 1 )
{
const auto& nbr2_align_i = upsampled_alignment[ tile_row_i + 0 ][ tile_col_i + 1 ];
if ( curr_align_i != nbr2_align_i ) nbrs_align_i.emplace_back( nbr2_align_i );
}
if ( tile_row_i > 0 )
{
const auto& nbr3_align_i = upsampled_alignment[ tile_row_i - 1 ][ tile_col_i + 0 ];
if ( curr_align_i != nbr3_align_i ) nbrs_align_i.emplace_back( nbr3_align_i );
}
if ( tile_row_i < num_tiles_h - 1 )
{
const auto& nbr4_align_i = upsampled_alignment[ tile_row_i + 1 ][ tile_col_i + 0 ];
if ( curr_align_i != nbr4_align_i ) nbrs_align_i.emplace_back( nbr4_align_i );
}
// If there are neighbour alignments to consider, compute distances
if ( ! nbrs_align_i.empty() )
{
int ref_tile_row_start_idx_i = tile_row_i * tile_size / 2;
int ref_tile_col_start_idx_i = tile_col_i * tile_size / 2;
// curr_align_i's distance
auto curr_align_i_distance = distance_func_ptr(
ref_img, alt_img, \
ref_tile_row_start_idx_i, \
ref_tile_col_start_idx_i, \
ref_tile_row_start_idx_i + curr_align_i.first, \
ref_tile_col_start_idx_i + curr_align_i.second );
for ( const auto& nbr_align_i : nbrs_align_i )
{
auto nbr_align_i_distance = distance_func_ptr(
ref_img, alt_img, \
ref_tile_row_start_idx_i, \
ref_tile_col_start_idx_i, \
ref_tile_row_start_idx_i + nbr_align_i.first, \
ref_tile_col_start_idx_i + nbr_align_i.second );
if ( nbr_align_i_distance < curr_align_i_distance )
{
#ifndef NDEBUG
printf("tile [%d, %d] update align, prev align (%d, %d) curr align (%d, %d), prev distance %d curr distance %d\n", \
tile_row_i, tile_col_i, \
curr_align_i.first, curr_align_i.second, \
nbr_align_i.first, nbr_align_i.second, \
int(curr_align_i_distance), int(nbr_align_i_distance) );
#endif
dst_alignment[ tile_row_i ][ tile_col_i ] = nbr_align_i;
curr_align_i_distance = nbr_align_i_distance;
}
}
}
}
}
}
}
// Set tile size as a template argument for better compiler optimization.
template< typename data_type, typename return_type, int tile_size >
static unsigned long long l1_distance( const cv::Mat& img1, const cv::Mat& img2, \
int img1_tile_row_start_idx, int img1_tile_col_start_idx, \
int img2_tile_row_start_idx, int img2_tile_col_start_idx )
{
#define CUSTOME_ABS( x ) ( ( x ) > 0 ? ( x ) : - ( x ) )
const data_type* img1_ptr = (const data_type*)img1.data;
const data_type* img2_ptr = (const data_type*)img2.data;
int img1_step = img1.step1();
int img2_step = img2.step1();
int img1_width = img1.size().width;
int img1_height = img1.size().height;
int img2_width = img2.size().width;
int img2_height = img2.size().height;
// Range check for safety
if ( img1_tile_row_start_idx < 0 || img1_tile_row_start_idx > img1_height - tile_size )
{
#ifdef __ANDROID__
return 0;
#else
throw std::runtime_error("l1 distance img1_tile_row_start_idx" + std::to_string( img1_tile_row_start_idx ) + \
" out of valid range (0, " + std::to_string( img1_height - tile_size ) + ")\n" );
#endif
}
if ( img1_tile_col_start_idx < 0 || img1_tile_col_start_idx > img1_width - tile_size )
{
#ifdef __ANDROID__
return 0;
#else
throw std::runtime_error("l1 distance img1_tile_col_start_idx" + std::to_string( img1_tile_col_start_idx ) + \
" out of valid range (0, " + std::to_string( img1_width - tile_size ) + ")\n" );
#endif
}
if ( img2_tile_row_start_idx < 0 || img2_tile_row_start_idx > img2_height - tile_size )
{
#ifdef __ANDROID__
return 0;
#else
throw std::runtime_error("l1 distance img2_tile_row_start_idx out of valid range\n");
#endif
}
if ( img2_tile_col_start_idx < 0 || img2_tile_col_start_idx > img2_width - tile_size )
{
#ifdef __ANDROID__
return 0;
#else
throw std::runtime_error("l1 distance img2_tile_col_start_idx out of valid range\n");
#endif
}
return_type sum(0);
UNROLL_LOOP( tile_size )
for ( int row_i = 0; row_i < tile_size; ++row_i )
{
const data_type* img1_ptr_row_i = img1_ptr + (img1_tile_row_start_idx + row_i) * img1_step + img1_tile_col_start_idx;
const data_type* img2_ptr_row_i = img2_ptr + (img2_tile_row_start_idx + row_i) * img2_step + img2_tile_col_start_idx;
UNROLL_LOOP( tile_size )
for ( int col_i = 0; col_i < tile_size; ++col_i )
{
data_type l1 = CUSTOME_ABS( img1_ptr_row_i[ col_i ] - img2_ptr_row_i[ col_i ] );
sum += l1;
}
}
#undef CUSTOME_ABS
return sum;
}
template< typename data_type, typename return_type, int tile_size >
static return_type l2_distance( const cv::Mat& img1, const cv::Mat& img2, \
int img1_tile_row_start_idx, int img1_tile_col_start_idx, \
int img2_tile_row_start_idx, int img2_tile_col_start_idx )
{
#define CUSTOME_ABS( x ) ( ( x ) > 0 ? ( x ) : - ( x ) )
const data_type* img1_ptr = (const data_type*)img1.data;
const data_type* img2_ptr = (const data_type*)img2.data;
int img1_step = img1.step1();
int img2_step = img2.step1();
int img1_width = img1.size().width;
int img1_height = img1.size().height;
int img2_width = img2.size().width;
int img2_height = img2.size().height;
// Range check for safety
if ( img1_tile_row_start_idx < 0 || img1_tile_row_start_idx > img1_height - tile_size )
{
#ifdef __ANDROID__
return 0;
#else
throw std::runtime_error("l2 distance img1_tile_row_start_idx" + std::to_string( img1_tile_row_start_idx ) + \
" out of valid range (0, " + std::to_string( img1_height - tile_size ) + ")\n" );
#endif
}
if ( img1_tile_col_start_idx < 0 || img1_tile_col_start_idx > img1_width - tile_size )
{
#ifdef __ANDROID__
return 0;
#else
throw std::runtime_error("l2 distance img1_tile_col_start_idx" + std::to_string( img1_tile_col_start_idx ) + \
" out of valid range (0, " + std::to_string( img1_width - tile_size ) + ")\n" );
#endif
}
if ( img2_tile_row_start_idx < 0 || img2_tile_row_start_idx > img2_height - tile_size )
{
#ifdef __ANDROID__
return 0;
#else
throw std::runtime_error("l2 distance img2_tile_row_start_idx out of valid range\n");
#endif
}
if ( img2_tile_col_start_idx < 0 || img2_tile_col_start_idx > img2_width - tile_size )
{
#ifdef __ANDROID__
return 0;
#else
throw std::runtime_error("l2 distance img2_tile_col_start_idx out of valid range\n");
#endif
}
// printf("Search two tile with ref : \n");
// print_tile<data_type>( img1, tile_size, img1_tile_row_start_idx, img1_tile_col_start_idx );
// printf("Search two tile with alt :\n");
// print_tile<data_type>( img2, tile_size, img2_tile_row_start_idx, img2_tile_col_start_idx );
return_type sum(0);
UNROLL_LOOP( tile_size )
for ( int row_i = 0; row_i < tile_size; ++row_i )
{
const data_type* img1_ptr_row_i = img1_ptr + (img1_tile_row_start_idx + row_i) * img1_step + img1_tile_col_start_idx;
const data_type* img2_ptr_row_i = img2_ptr + (img2_tile_row_start_idx + row_i) * img2_step + img2_tile_col_start_idx;
UNROLL_LOOP( tile_size )
for ( int col_i = 0; col_i < tile_size; ++col_i )
{
data_type l1 = CUSTOME_ABS( img1_ptr_row_i[ col_i ] - img2_ptr_row_i[ col_i ] );
sum += ( l1 * l1 );
}
}
#undef CUSTOME_ABS
return sum;
}
template<typename T, int tile_size>
static cv::Mat extract_img_tile( const cv::Mat& img, int img_tile_row_start_idx, int img_tile_col_start_idx )
{
const T* img_ptr = (const T*)img.data;
int img_width = img.size().width;
int img_height = img.size().height;
int img_step = img.step1();
if ( img_tile_row_start_idx < 0 || img_tile_row_start_idx > img_height - tile_size )
{
#ifdef __ANDROID__
return cv::Mat();
#else
throw std::runtime_error("extract_img_tile img_tile_row_start_idx " + std::to_string( img_tile_row_start_idx ) + \
" out of valid range (0, " + std::to_string( img_height - tile_size ) + ")\n" );
#endif
}
if ( img_tile_col_start_idx < 0 || img_tile_col_start_idx > img_width - tile_size )
{
#ifdef __ANDROID__
return cv::Mat();
#else
throw std::runtime_error("extract_img_tile img_tile_col_start_idx " + std::to_string( img_tile_col_start_idx ) + \
" out of valid range (0, " + std::to_string( img_width - tile_size ) + ")\n" );
#endif
}
cv::Mat img_tile( tile_size, tile_size, img.type() );
T* img_tile_ptr = (T*)img_tile.data;
int img_tile_step = img_tile.step1();
UNROLL_LOOP( tile_size )
for ( int row_i = 0; row_i < tile_size; ++row_i )
{
const T* img_ptr_row_i = img_ptr + img_step * ( img_tile_row_start_idx + row_i );
T* img_tile_ptr_row_i = img_tile_ptr + img_tile_step * row_i;
UNROLL_LOOP( tile_size )
for ( int col_i = 0; col_i < tile_size; ++col_i )
{
img_tile_ptr_row_i[ col_i ] = img_ptr_row_i[ img_tile_col_start_idx + col_i ];
}
}
return img_tile;
}
void align_image_level( \
const cv::Mat& ref_img, \
const cv::Mat& alt_img, \
std::vector<std::vector<std::pair<int, int>>>& prev_aligement, \
std::vector<std::vector<std::pair<int, int>>>& curr_alignment, \
int scale_factor_prev_curr, \
int curr_tile_size, \
int prev_tile_size, \
int search_radiou, \
int distance_type )
{
// Every align image level shares the same distance function.
// Use function ptr to reduce if else overhead inside for loop
unsigned long long (*distance_func_ptr)(const cv::Mat&, const cv::Mat&, int, int, int, int) = nullptr;
if ( distance_type == 1 ) // l1 distance
{
if ( curr_tile_size == 8 )
{
distance_func_ptr = &l1_distance<uint16_t, unsigned long long, 8>;
}
else if ( curr_tile_size == 16 )
{
distance_func_ptr = &l1_distance<uint16_t, unsigned long long, 16>;
}
}
else if ( distance_type == 2 ) // l2 distance
{
if ( curr_tile_size == 8 )
{
distance_func_ptr = &l2_distance<uint16_t, unsigned long long, 8>;
}
else if ( curr_tile_size == 16 )
{
distance_func_ptr = &l2_distance<uint16_t, unsigned long long, 16>;
}
}
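// Defensive guard (a sketch added here; the original selection has no such check):
// the branches above only cover tile sizes 8 and 16, so distance_func_ptr can
// still be nullptr and would crash the search loop below.
if ( distance_func_ptr == nullptr )
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Unsupported tile size / distance type in align_image_level\n");
#endif
}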
// Every level shares the same upsample function
void (*upsample_alignment_func_ptr)(const std::vector<std::vector<std::pair<int, int>>>&, \
std::vector<std::vector<std::pair<int, int>>>&, \
int, int, const cv::Mat&, const cv::Mat&, bool) = nullptr;
if ( scale_factor_prev_curr == 2 )
{
if ( curr_tile_size / prev_tile_size == 2 )
{
if ( curr_tile_size == 8 )
{
upsample_alignment_func_ptr = &build_upsampled_prev_aligement<2, 2, 8>;
}
else if ( curr_tile_size == 16 )
{
upsample_alignment_func_ptr = &build_upsampled_prev_aligement<2, 2, 16>;
}
else
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Something wrong with upsampling function setting\n");
#endif
}
}
else if ( curr_tile_size / prev_tile_size == 1 )
{
if ( curr_tile_size == 8 )
{
upsample_alignment_func_ptr = &build_upsampled_prev_aligement<2, 1, 8>;
}
else if ( curr_tile_size == 16 )
{
upsample_alignment_func_ptr = &build_upsampled_prev_aligement<2, 1, 16>;
}
else
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Something wrong with upsampling function setting\n");
#endif
}
}
else
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Something wrong with upsampling function setting\n");
#endif
}
}
else if ( scale_factor_prev_curr == 4 )
{
if ( curr_tile_size / prev_tile_size == 2 )
{
if ( curr_tile_size == 8 )
{
upsample_alignment_func_ptr = &build_upsampled_prev_aligement<4, 2, 8>;
}
else if ( curr_tile_size == 16 )
{
upsample_alignment_func_ptr = &build_upsampled_prev_aligement<4, 2, 16>;
}
else
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Something wrong with upsampling function setting\n");
#endif
}
}
else if ( curr_tile_size / prev_tile_size == 1 )
{
if ( curr_tile_size == 8 )
{
upsample_alignment_func_ptr = &build_upsampled_prev_aligement<4, 1, 8>;
}
else if ( curr_tile_size == 16 )
{
upsample_alignment_func_ptr = &build_upsampled_prev_aligement<4, 1, 16>;
}
else
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Something wrong with upsampling function setting\n");
#endif
}
}
else
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Something wrong with upsampling function setting\n");
#endif
}
}
// Function to extract reference image tile for memory cache
cv::Mat (*extract_ref_img_tile)(const cv::Mat&, int, int) = nullptr;
if ( curr_tile_size == 8 )
{
extract_ref_img_tile = &extract_img_tile<uint16_t, 8>;
}
else if ( curr_tile_size == 16 )
{
extract_ref_img_tile = &extract_img_tile<uint16_t, 16>;
}
// Function to extract search image tile for memory cache
cv::Mat (*extract_alt_img_search)(const cv::Mat&, int, int) = nullptr;
if ( curr_tile_size == 8 )
{
if ( search_radiou == 1 )
{
extract_alt_img_search = &extract_img_tile<uint16_t, 8+1*2>;
}
else if ( search_radiou == 4 )
{
extract_alt_img_search = &extract_img_tile<uint16_t, 8+4*2>;
}
}
else if ( curr_tile_size == 16 )
{
if ( search_radiou == 1 )
{
extract_alt_img_search = &extract_img_tile<uint16_t, 16+1*2>;
}
else if ( search_radiou == 4 )
{
extract_alt_img_search = &extract_img_tile<uint16_t, 16+4*2>;
}
}
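// Same defensive sketch for the tile extractors: unsupported tile size /
// search radius combinations leave these function pointers null.
if ( extract_ref_img_tile == nullptr || extract_alt_img_search == nullptr )
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Unsupported tile size / search radius in align_image_level\n");
#endif
}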
int num_tiles_h = ref_img.size().height / (curr_tile_size / 2) - 1;
int num_tiles_w = ref_img.size().width / (curr_tile_size / 2 ) - 1;
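// Tiles overlap by half a tile in each direction, hence size / (tile_size / 2) - 1:
// e.g. a 64-pixel-high level with 16-pixel tiles gives 64 / 8 - 1 = 7 tile rows.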
/* Upsample previous layer alignment */
std::vector<std::vector<std::pair<int, int>>> upsampled_prev_aligement;
// Coarsest level
// prev_alignment is invalid / empty, construct alignment as (0,0)
if ( prev_tile_size == -1 )
{
upsampled_prev_aligement.resize( num_tiles_h, \
std::vector<std::pair<int, int>>( num_tiles_w, std::pair<int, int>(0, 0) ) );
}
// Upsample previous level alignment
else
{
upsample_alignment_func_ptr( prev_aligement, upsampled_prev_aligement, \
num_tiles_h, num_tiles_w, ref_img, alt_img, false );
// printf("\n!!!!!Upsampled previous alignment\n");
// for ( int tile_row = 0; tile_row < int(upsampled_prev_aligement.size()); tile_row++ )
// {
// for ( int tile_col = 0; tile_col < int(upsampled_prev_aligement.at(0).size()); tile_col++ )
// {
// const auto tile_start = upsampled_prev_aligement.at( tile_row ).at( tile_col );
// printf("up tile (%d, %d) -> start idx (%d, %d)\n", \
// tile_row, tile_col, tile_start.first, tile_start.second);
// }
// }
}
#ifndef NDEBUG
printf("%s::%s start: \n", __FILE__, __func__ );
printf(" scale_factor_prev_curr %d, tile_size %d, prev_tile_size %d, search_radiou %d, distance L%d, \n", \
scale_factor_prev_curr, curr_tile_size, prev_tile_size, search_radiou, distance_type );
printf(" ref img size h=%d w=%d, alt img size h=%d w=%d, \n", \
ref_img.size().height, ref_img.size().width, alt_img.size().height, alt_img.size().width );
printf(" num tile h (upsampled) %d, num tile w (upsampled) %d\n", num_tiles_h, num_tiles_w);
#endif
// allocate memory for current alignment
curr_alignment.resize( num_tiles_h, std::vector<std::pair<int, int>>( num_tiles_w, std::pair<int, int>(0, 0) ) );
/* Pad alternative image */
cv::Mat alt_img_pad;
cv::copyMakeBorder( alt_img, \
alt_img_pad, \
search_radiou, search_radiou, search_radiou, search_radiou, \
cv::BORDER_CONSTANT, cv::Scalar( UINT_LEAST16_MAX ) );
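// Padding with the maximum uint16 value (presumably the intent of
// cv::Scalar( UINT_LEAST16_MAX )) makes out-of-image search positions maximally
// dissimilar, so the distance minimization below never selects them.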
// printf("Reference image h=%d, w=%d: \n", ref_img.size().height, ref_img.size().width );
// print_img<uint16_t>( ref_img );
// printf("Alter image pad h=%d, w=%d: \n", alt_img_pad.size().height, alt_img_pad.size().width );
// print_img<uint16_t>( alt_img_pad );
// printf("!! enlarged tile size %d\n", curr_tile_size + 2 * search_radiou );
int alt_tile_row_idx_max = alt_img_pad.size().height - ( curr_tile_size + 2 * search_radiou );
int alt_tile_col_idx_max = alt_img_pad.size().width - ( curr_tile_size + 2 * search_radiou );
// Delete the distance vector below; it is for debug only
std::vector<std::vector<uint16_t>> distances( num_tiles_h, std::vector<uint16_t>( num_tiles_w, 0 ));
/* Iterate through all reference tile & compute distance */
#pragma omp parallel for collapse(2)
for ( int ref_tile_row_i = 0; ref_tile_row_i < num_tiles_h; ref_tile_row_i++ )
{
for ( int ref_tile_col_i = 0; ref_tile_col_i < num_tiles_w; ref_tile_col_i++ )
{
// Upper left index of reference tile
int ref_tile_row_start_idx_i = ref_tile_row_i * curr_tile_size / 2;
int ref_tile_col_start_idx_i = ref_tile_col_i * curr_tile_size / 2;
// printf("\nRef img tile [%d, %d] -> start idx [%d, %d] (row, col)\n", \
// ref_tile_row_i, ref_tile_col_i, ref_tile_row_start_idx_i, ref_tile_col_start_idx_i );
// printf("\nRef img tile [%d, %d]\n", ref_tile_row_i, ref_tile_col_i );
// print_tile<uint16_t>( ref_img, curr_tile_size, ref_tile_row_start_idx_i, ref_tile_col_start_idx_i );
// Upsampled alignment at this tile
// Alignments are relative displacements in pixels
int prev_alignment_row_i = upsampled_prev_aligement.at( ref_tile_row_i ).at( ref_tile_col_i ).first;
int prev_alignment_col_i = upsampled_prev_aligement.at( ref_tile_row_i ).at( ref_tile_col_i ).second;
// Alternative image tile start idx
int alt_tile_row_start_idx_i = ref_tile_row_start_idx_i + prev_alignment_row_i;
int alt_tile_col_start_idx_i = ref_tile_col_start_idx_i + prev_alignment_col_i;
// Ensure alternative image tile within range
if ( alt_tile_row_start_idx_i < 0 )
alt_tile_row_start_idx_i = 0;
if ( alt_tile_col_start_idx_i < 0 )
alt_tile_col_start_idx_i = 0;
if ( alt_tile_row_start_idx_i > alt_tile_row_idx_max )
{
// int before = alt_tile_row_start_idx_i;
alt_tile_row_start_idx_i = alt_tile_row_idx_max;
// printf("@@ change start x from %d to %d\n", before, alt_tile_row_idx_max);
}
if ( alt_tile_col_start_idx_i > alt_tile_col_idx_max )
{
// int before = alt_tile_col_start_idx_i;
alt_tile_col_start_idx_i = alt_tile_col_idx_max;
// printf("@@ change start y from %d to %d\n", before, alt_tile_col_idx_max );
}
// Explicitly caching reference image tile
cv::Mat ref_img_tile_i = extract_ref_img_tile( ref_img, ref_tile_row_start_idx_i, ref_tile_col_start_idx_i );
cv::Mat alt_img_search_i = extract_alt_img_search( alt_img_pad, alt_tile_row_start_idx_i, alt_tile_col_start_idx_i );
// Because the alternative image is padded by the search radius,
// using the same coordinates as the reference image automatically covers the 2 * search radius window
// printf("Alt image tile [%d, %d]-> start idx [%d, %d]\n", \
// ref_tile_row_i, ref_tile_col_i, alt_tile_row_start_idx_i, alt_tile_col_start_idx_i );
// printf("\nAlt image tile [%d, %d]\n", ref_tile_row_i, ref_tile_col_i );
// print_tile<uint16_t>( alt_img_pad, curr_tile_size + 2 * search_radiou, alt_tile_row_start_idx_i, alt_tile_col_start_idx_i );
// Search based on L1/L2 distance
unsigned long long min_distance_i = ULLONG_MAX; // standard <climits> macro; ULONG_LONG_MAX is a non-portable glibc alias
int min_distance_row_i = -1;
int min_distance_col_i = -1;
for ( int search_row_j = 0; search_row_j < ( search_radiou * 2 + 1 ); search_row_j++ )
{
for ( int search_col_j = 0; search_col_j < ( search_radiou * 2 + 1 ); search_col_j++ )
{
// printf("\n--->tile at [%d, %d] search (%d, %d)\n", \
// ref_tile_row_i, ref_tile_col_i, search_row_j - search_radiou, search_col_j - search_radiou );
// unsigned long long distance_j = distance_func_ptr( ref_img, alt_img_pad, \
// ref_tile_row_start_idx_i, ref_tile_col_start_idx_i, \
// alt_tile_row_start_idx_i + search_row_j, alt_tile_col_start_idx_i + search_col_j );
// unsigned long long distance_j = distance_func_ptr( ref_img_tile_i, alt_img_pad, \
// 0, 0, \
// alt_tile_row_start_idx_i + search_row_j, alt_tile_col_start_idx_i + search_col_j );
unsigned long long distance_j = distance_func_ptr( ref_img_tile_i, alt_img_search_i, \
0, 0, \
search_row_j, search_col_j );
// printf("<---tile at [%d, %d] search (%d, %d), new dis %llu, old dis %llu\n", \
// ref_tile_row_i, ref_tile_col_i, search_row_j - search_radiou, search_col_j - search_radiou, distance_j, min_distance_i );
// If this is smaller distance
if ( distance_j < min_distance_i )
{
min_distance_i = distance_j;
min_distance_col_i = search_col_j;
min_distance_row_i = search_row_j;
}
// If equally distant, choose the candidate closer to the original tile location
else if ( distance_j == min_distance_i && min_distance_row_i != -1 && min_distance_col_i != -1 )
{
int prev_distance_row_2_ref = min_distance_row_i - search_radiou;
int prev_distance_col_2_ref = min_distance_col_i - search_radiou;
int curr_distance_row_2_ref = search_row_j - search_radiou;
int curr_distance_col_2_ref = search_col_j - search_radiou;
int prev_distance_2_ref_sqr = prev_distance_row_2_ref * prev_distance_row_2_ref + prev_distance_col_2_ref * prev_distance_col_2_ref;
int curr_distance_2_ref_sqr = curr_distance_row_2_ref * curr_distance_row_2_ref + curr_distance_col_2_ref * curr_distance_col_2_ref;
// previous min distance idx is farther away from ref tile start location
if ( prev_distance_2_ref_sqr > curr_distance_2_ref_sqr )
{
// printf("@@@ Same distance %d, choose closer one (%d, %d) instead of (%d, %d)\n", \
// distance_j, search_row_j, search_col_j, min_distance_row_i, min_distance_col_i);
min_distance_col_i = search_col_j;
min_distance_row_i = search_row_j;
}
}
}
}
// printf("tile at (%d, %d) alignment (%d, %d)\n", \
// ref_tile_row_i, ref_tile_col_i, min_distance_row_i, min_distance_col_i );
int alignment_row_i = prev_alignment_row_i + min_distance_row_i - search_radiou;
int alignment_col_i = prev_alignment_col_i + min_distance_col_i - search_radiou;
std::pair<int, int> alignment_i( alignment_row_i, alignment_col_i );
// Record min_distance_i's corresponding idx as the alignment
curr_alignment.at( ref_tile_row_i ).at( ref_tile_col_i ) = alignment_i;
distances.at( ref_tile_row_i ).at( ref_tile_col_i ) = min_distance_i;
}
}
// printf("\n!!!!!Min distance for each tile \n");
// for ( int tile_row = 0; tile_row < num_tiles_h; tile_row++ )
// {
// for ( int tile_col = 0; tile_col < num_tiles_w; ++tile_col )
// {
// printf("tile (%d, %d) distance %u\n", \
// tile_row, tile_col, distances.at( tile_row).at(tile_col ) );
// }
// }
// printf("\n!!!!!Alignment at current level\n");
// for ( int tile_row = 0; tile_row < num_tiles_h; tile_row++ )
// {
// for ( int tile_col = 0; tile_col < num_tiles_w; tile_col++ )
// {
// const auto tile_start = curr_alignment.at( tile_row ).at( tile_col );
// printf("tile (%d, %d) -> start idx (%d, %d)\n", \
// tile_row, tile_col, tile_start.first, tile_start.second);
// }
// }
}
void align::process( const hdrplus::burst& burst_images, \
std::vector<std::vector<std::vector<std::pair<int, int>>>>& images_alignment )
{
#ifndef NDEBUG
printf("%s::%s align::process start\n", __FILE__, __func__ ); fflush(stdout);
#endif
images_alignment.clear();
images_alignment.resize( burst_images.num_images );
// image pyramid per image, per pyramid level
std::vector<std::vector<cv::Mat>> per_grayimg_pyramid;
// printf("!!!!! ref bayer padded\n");
// print_img<uint16_t>( burst_images.bayer_images_pad.at( burst_images.reference_image_idx) );
// exit(1);
// printf("!!!!! ref gray padded\n");
// print_img<uint16_t>( burst_images.grayscale_images_pad.at( burst_images.reference_image_idx) );
// exit(1);
per_grayimg_pyramid.resize( burst_images.num_images );
#pragma omp parallel for
for ( int img_idx = 0; img_idx < burst_images.num_images; ++img_idx )
{
// per_grayimg_pyramid[ img_idx ][ 0 ] is the original image
// per_grayimg_pyramid[ img_idx ][ 3 ] is the coarsest image
build_per_grayimg_pyramid( per_grayimg_pyramid.at( img_idx ), \
burst_images.grayscale_images_pad.at( img_idx ), \
this->inv_scale_factors );
}
// #ifndef NDEBUG
// printf("%s::%s build image pyramid of size : ", __FILE__, __func__ );
// for ( int level_i = 0; level_i < num_levels; ++level_i )
// {
// printf("(%d, %d) ", per_grayimg_pyramid[ 0 ][ level_i ].size().height,
// per_grayimg_pyramid[ 0 ][ level_i ].size().width );
// }
// printf("\n"); fflush(stdout);
// #endif
// print image pyramid
// for ( int level_i; level_i < num_levels; ++level_i )
// {
// printf("\n\n!!!!! ref gray pyramid level %d img : \n" , level_i );
// print_img<uint16_t>( per_grayimg_pyramid[ burst_images.reference_image_idx ][ level_i ] );
// }
// exit(-1);
// Align every image
const std::vector<cv::Mat>& ref_grayimg_pyramid = per_grayimg_pyramid[ burst_images.reference_image_idx ];
std::vector<std::vector<std::pair<int, int>>> curr_alignment;
std::vector<std::vector<std::pair<int, int>>> prev_alignment;
for ( int img_idx = 0; img_idx < burst_images.num_images; ++img_idx )
{
// Do not align with reference image
if ( img_idx == burst_images.reference_image_idx )
continue;
const std::vector<cv::Mat>& alt_grayimg_pyramid = per_grayimg_pyramid[ img_idx ];
// Align every level from coarse to fine
// level 0 : finest level, the original image
// level 3 : coarsest level
curr_alignment.clear();
prev_alignment.clear();
for ( int level_i = num_levels - 1; level_i >= 0; level_i-- ) // 3,2,1,0
{
// make curr alignment as previous alignment
prev_alignment.swap( curr_alignment );
curr_alignment.clear();
// printf("\n\n########################align level %d\n", level_i );
align_image_level(
ref_grayimg_pyramid[ level_i ], // reference image at current level
alt_grayimg_pyramid[ level_i ], // alternative image at current level
prev_alignment, // previous layer alignment
curr_alignment, // current layer alignment
( level_i == ( num_levels - 1 ) ? -1 : inv_scale_factors[ level_i + 1 ] ), // scale factor between previous layer and current layer. -1 if current layer is the coarsest layer, [-1, 4, 4, 2]
grayimg_tile_sizes[ level_i ], // current level tile size
( level_i == ( num_levels - 1 ) ? -1 : grayimg_tile_sizes[ level_i + 1 ] ), // previous level tile size
grayimg_search_radious[ level_i ], // search radius
distances[ level_i ] ); // L1/L2 distance
// printf("@@@Alignment at level %d is h=%d, w=%d", level_i, curr_alignment.size(), curr_alignment.at(0).size() );
} // for pyramid level
// Alignment at grayscale image
images_alignment.at( img_idx ).swap( curr_alignment );
// printf("\n!!!!!Alternative Image Alignment\n");
// for ( int tile_row = 0; tile_row < images_alignment.at( img_idx ).size(); tile_row++ )
// {
// for ( int tile_col = 0; tile_col < images_alignment.at( img_idx ).at(0).size(); tile_col++ )
// {
// const auto tile_start = images_alignment.at( img_idx ).at( tile_row ).at( tile_col );
// printf("tile (%d, %d) -> start idx (%d, %d)\n", \
// tile_row, tile_col, tile_start.first, tile_start.second);
// }
// }
} // for alternative image
per_grayimg_pyramid.clear();
}
} // namespace hdrplus

@ -1,234 +0,0 @@
#include <string>
#include <cstdio>
#include <iostream>
#include <utility> // std::pair, std::make_pair
#include <memory> // std::shared_ptr
#include <stdexcept> // std::runtime_error
#include <opencv2/opencv.hpp> // all opencv header
#include <libraw/libraw.h>
#include <exiv2/exiv2.hpp> // exiv2
#include "hdrplus/bayer_image.h"
#include "hdrplus/utility.h" // box_filter_kxk
namespace hdrplus
{
bayer_image::bayer_image( const std::string& bayer_image_path )
{
libraw_processor = std::make_shared<LibRaw>();
// Open RAW image file
int return_code;
if ( ( return_code = libraw_processor->open_file( bayer_image_path.c_str() ) ) != LIBRAW_SUCCESS )
{
libraw_processor->recycle();
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Error opening file " + bayer_image_path + " " + libraw_strerror( return_code ));
#endif
}
// Unpack the raw image
if ( ( return_code = libraw_processor->unpack() ) != LIBRAW_SUCCESS )
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Error unpack file " + bayer_image_path + " " + libraw_strerror( return_code ));
#endif
}
// Get image basic info
width = int( libraw_processor->imgdata.rawdata.sizes.raw_width );
height = int( libraw_processor->imgdata.rawdata.sizes.raw_height );
// Read exif tags
Exiv2::Image::AutoPtr image = Exiv2::ImageFactory::open(bayer_image_path);
assert(image.get() != 0);
image->readMetadata();
Exiv2::ExifData &exifData = image->exifData();
if (exifData.empty()) {
std::string error(bayer_image_path);
error += ": No Exif data found in the file";
std::cout << error << std::endl;
}
white_level = exifData["Exif.Image.WhiteLevel"].toLong();
black_level_per_channel.resize( 4 );
black_level_per_channel.at(0) = exifData["Exif.Image.BlackLevel"].toLong(0);
black_level_per_channel.at(1) = exifData["Exif.Image.BlackLevel"].toLong(1);
black_level_per_channel.at(2) = exifData["Exif.Image.BlackLevel"].toLong(2);
black_level_per_channel.at(3) = exifData["Exif.Image.BlackLevel"].toLong(3);
iso = exifData["Exif.Image.ISOSpeedRatings"].toLong();
// Create CV mat
// https://answers.opencv.org/question/105972/de-bayering-a-cr2-image/
// https://www.libraw.org/node/2141
raw_image = cv::Mat( height, width, CV_16U, libraw_processor->imgdata.rawdata.raw_image ).clone(); // changed the order of width and height
// 2x2 box filter
grayscale_image = box_filter_kxk<uint16_t, 2>( raw_image );
#ifndef NDEBUG
printf("%s::%s read bayer image %s with\n width %zu\n height %zu\n iso %.3f\n white level %d\n black level %d %d %d %d\n", \
__FILE__, __func__, bayer_image_path.c_str(), width, height, iso, white_level, \
black_level_per_channel[0], black_level_per_channel[1], black_level_per_channel[2], black_level_per_channel[3] );
fflush( stdout );
#endif
}
bayer_image::bayer_image( const std::vector<uint8_t>& bayer_image_content )
{
libraw_processor = std::make_shared<LibRaw>();
// Open RAW image file
int return_code;
if ( ( return_code = libraw_processor->open_buffer( (void *)(&bayer_image_content[0]), bayer_image_content.size() ) ) != LIBRAW_SUCCESS )
{
libraw_processor->recycle();
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Error opening file " + bayer_image_path + " " + libraw_strerror( return_code ));
#endif
}
// Unpack the raw image
if ( ( return_code = libraw_processor->unpack() ) != LIBRAW_SUCCESS )
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Error unpack file " + bayer_image_path + " " + libraw_strerror( return_code ));
#endif
}
// Get image basic info
width = int( libraw_processor->imgdata.rawdata.sizes.raw_width );
height = int( libraw_processor->imgdata.rawdata.sizes.raw_height );
// Read exif tags
Exiv2::Image::AutoPtr image = Exiv2::ImageFactory::open(&bayer_image_content[0], bayer_image_content.size());
assert(image.get() != 0);
image->readMetadata();
Exiv2::ExifData &exifData = image->exifData();
if (exifData.empty()) {
std::string error = "No Exif data found in the file";
std::cout << error << std::endl;
}
white_level = exifData["Exif.Image.WhiteLevel"].toLong();
black_level_per_channel.resize( 4 );
black_level_per_channel.at(0) = exifData["Exif.Image.BlackLevel"].toLong(0);
black_level_per_channel.at(1) = exifData["Exif.Image.BlackLevel"].toLong(1);
black_level_per_channel.at(2) = exifData["Exif.Image.BlackLevel"].toLong(2);
black_level_per_channel.at(3) = exifData["Exif.Image.BlackLevel"].toLong(3);
iso = exifData["Exif.Image.ISOSpeedRatings"].toLong();
// Create CV mat
// https://answers.opencv.org/question/105972/de-bayering-a-cr2-image/
// https://www.libraw.org/node/2141
raw_image = cv::Mat( height, width, CV_16U, libraw_processor->imgdata.rawdata.raw_image ).clone(); // changed the order of width and height
// 2x2 box filter
grayscale_image = box_filter_kxk<uint16_t, 2>( raw_image );
#ifndef NDEBUG
printf("%s::%s read bayer image with\n width %zu\n height %zu\n iso %.3f\n white level %d\n black level %d %d %d %d\n", \
__FILE__, __func__, width, height, iso, white_level, \
black_level_per_channel[0], black_level_per_channel[1], black_level_per_channel[2], black_level_per_channel[3] );
fflush( stdout );
#endif
}
bayer_image::bayer_image( std::shared_ptr<MemFile> bayer_image_file )
{
libraw_processor = std::make_shared<LibRaw>();
// Open RAW image file
int return_code;
{
std::vector<uint8_t>& fileData = bayer_image_file->content;
if ( ( return_code = libraw_processor->open_buffer( (void *)(&fileData[0]), fileData.size() ) ) != LIBRAW_SUCCESS )
{
libraw_processor->recycle();
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Error opening file " + bayer_image_path + " " + libraw_strerror( return_code ));
#endif
}
}
// Unpack the raw image
if ( ( return_code = libraw_processor->unpack() ) != LIBRAW_SUCCESS )
{
#ifdef __ANDROID__
return;
#else
throw std::runtime_error("Error unpack file " + bayer_image_path + " " + libraw_strerror( return_code ));
#endif
}
// Get image basic info
width = int( libraw_processor->imgdata.rawdata.sizes.raw_width );
height = int( libraw_processor->imgdata.rawdata.sizes.raw_height );
// Read exif tags
Exiv2::Image::AutoPtr image = Exiv2::ImageFactory::open(&bayer_image_file->content[0], bayer_image_file->content.size());
assert(image.get() != 0);
image->readMetadata();
Exiv2::ExifData &exifData = image->exifData();
if (exifData.empty()) {
std::string error = "No Exif data found in the file";
std::cout << error << std::endl;
}
white_level = exifData["Exif.Image.WhiteLevel"].toLong();
black_level_per_channel.resize( 4 );
black_level_per_channel.at(0) = exifData["Exif.Image.BlackLevel"].toLong(0);
black_level_per_channel.at(1) = exifData["Exif.Image.BlackLevel"].toLong(1);
black_level_per_channel.at(2) = exifData["Exif.Image.BlackLevel"].toLong(2);
black_level_per_channel.at(3) = exifData["Exif.Image.BlackLevel"].toLong(3);
iso = exifData["Exif.Image.ISOSpeedRatings"].toLong();
// Create CV mat
// https://answers.opencv.org/question/105972/de-bayering-a-cr2-image/
// https://www.libraw.org/node/2141
raw_image = cv::Mat( height, width, CV_16U, libraw_processor->imgdata.rawdata.raw_image ).clone(); // changed the order of width and height
// 2x2 box filter
grayscale_image = box_filter_kxk<uint16_t, 2>( raw_image );
#ifndef NDEBUG
printf("%s::%s read bayer image with\n width %zu\n height %zu\n iso %.3f\n white level %d\n black level %d %d %d %d\n", \
__FILE__, __func__, width, height, iso, white_level, \
black_level_per_channel[0], black_level_per_channel[1], black_level_per_channel[2], black_level_per_channel[3] );
fflush( stdout );
#endif
}
std::pair<double, double> bayer_image::get_noise_params() const
{
// Set ISO to 100 if not positive
double iso_ = iso <= 0 ? 100 : iso;
// Calculate shot noise and read noise parameters w.r.t ISO 100
double lambda_shot_p = iso_ / 100.0f * baseline_lambda_shot;
double lambda_read_p = (iso_ / 100.0f) * (iso_ / 100.0f) * baseline_lambda_read;
double black_level = (black_level_per_channel[0] + \
black_level_per_channel[1] + \
black_level_per_channel[2] + \
black_level_per_channel[3]) / 4.0;
// Rescale shot and read noise to normal range
double lambda_shot = lambda_shot_p * (white_level - black_level);
double lambda_read = lambda_read_p * (white_level - black_level) * (white_level - black_level);
// return pair
return std::make_pair(lambda_shot, lambda_read);
}
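// In the HDR+-style noise model these parameters give a signal-dependent variance,
// roughly variance(signal) = lambda_shot * signal + lambda_read. Illustrative
// caller sketch (`ref_bayer` and `pixel_value` are placeholders, not from this file):
//   std::pair<double, double> p = ref_bayer.get_noise_params();
//   double noise_variance = p.first * pixel_value + p.second;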
}

@ -1,321 +0,0 @@
#include <cstdio>
#include <string>
#include <omp.h>
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/burst.h"
#include "hdrplus/utility.h"
namespace hdrplus
{
burst::burst( const std::string& burst_path, const std::string& reference_image_path )
{
std::vector<cv::String> bayer_image_paths;
// Search through the input path directory to get all input image path
if ( burst_path.at( burst_path.size() - 1) == '/')
cv::glob( burst_path + "*.dng", bayer_image_paths, false );
else
cv::glob( burst_path + "/*.dng", bayer_image_paths, false );
#ifndef NDEBUG
for ( const auto& bayer_img_path_i : bayer_image_paths )
{
printf("img i path %s\n", bayer_img_path_i.c_str()); fflush(stdout);
}
printf("ref img path %s\n", reference_image_path.c_str()); fflush(stdout);
#endif
// Number of images
num_images = bayer_image_paths.size();
// Find reference image path in input directory
// the reference image path needs to be an absolute path
reference_image_idx = -1;
for ( size_t i = 0; i < bayer_image_paths.size(); ++i )
{
if ( bayer_image_paths[ i ] == reference_image_path )
{
reference_image_idx = i;
}
}
if ( reference_image_idx == -1 )
{
return;
// throw std::runtime_error("Error unable to locate reference image " + reference_image_path );
}
#ifndef NDEBUG
for ( const auto& bayer_image_path_i : bayer_image_paths )
{
printf("%s::%s Find image %s\n", \
__FILE__, __func__, bayer_image_path_i.c_str());
}
printf("%s::%s reference image idx %d\n", \
__FILE__, __func__, reference_image_idx );
#endif
// Get source bayer image
// Downsample original bayer image by 2x2 box filter
for ( const auto& bayer_image_path_i : bayer_image_paths )
{
bayer_images.emplace_back( bayer_image_path_i );
}
// Pad information
int tile_size_bayer = 32;
int padding_top = tile_size_bayer / 2;
int padding_bottom = tile_size_bayer / 2 + \
( (bayer_images[ 0 ].height % tile_size_bayer) == 0 ? \
0 : tile_size_bayer - bayer_images[ 0 ].height % tile_size_bayer );
int padding_left = tile_size_bayer / 2;
int padding_right = tile_size_bayer / 2 + \
( (bayer_images[ 0 ].width % tile_size_bayer) == 0 ? \
0 : tile_size_bayer - bayer_images[ 0 ].width % tile_size_bayer );
padding_info_bayer = std::vector<int>{ padding_top, padding_bottom, padding_left, padding_right };
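// Worked example (illustrative): with height 3024 and tile_size_bayer 32,
// 3024 % 32 == 16, so padding_bottom = 16 + (32 - 16) = 32 and the padded
// height 3024 + 16 + 32 = 3072 is an exact multiple of the bayer tile size.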
// Pad bayer image
for ( const auto& bayer_image_i : bayer_images )
{
cv::Mat bayer_image_pad_i;
cv::copyMakeBorder( bayer_image_i.raw_image, \
bayer_image_pad_i, \
padding_top, padding_bottom, padding_left, padding_right, \
cv::BORDER_REFLECT );
// cv::Mat use internal reference count
bayer_images_pad.emplace_back( bayer_image_pad_i );
grayscale_images_pad.emplace_back( box_filter_kxk<uint16_t, 2>( bayer_image_pad_i ) );
}
#ifndef NDEBUG
printf("%s::%s Pad bayer image from (%d, %d) -> (%d, %d)\n", \
__FILE__, __func__, \
bayer_images[ 0 ].height, \
bayer_images[ 0 ].width, \
bayer_images_pad[ 0 ].size().height, \
bayer_images_pad[ 0 ].size().width );
printf("%s::%s pad top %d, buttom %d, left %d, right %d\n", \
__FILE__, __func__, \
padding_top, padding_bottom, padding_left, padding_right );
#endif
}
burst::burst( const std::vector<std::string>& bayer_image_paths, int reference_image_index )
{
// Number of images
num_images = bayer_image_paths.size();
// Validate the reference image index
reference_image_idx = -1;
if ( reference_image_index >= 0 && reference_image_index < bayer_image_paths.size() )
{
reference_image_idx = reference_image_index;
}
if ( reference_image_idx == -1 )
{
return;
// throw std::runtime_error("Error reference image index is out of range " );
}
#ifndef NDEBUG
for ( const auto& bayer_image_path_i : bayer_image_paths )
{
printf("%s::%s Find image %s\n", \
__FILE__, __func__, bayer_image_path_i.c_str());
}
printf("%s::%s reference image idx %d\n", \
__FILE__, __func__, reference_image_idx );
#endif
// Get source bayer image
// Downsample original bayer image by 2x2 box filter
for ( const auto& bayer_image_path_i : bayer_image_paths )
{
bayer_images.emplace_back( bayer_image_path_i );
}
// Pad information
int tile_size_bayer = 32;
int padding_top = tile_size_bayer / 2;
int padding_bottom = tile_size_bayer / 2 + \
( (bayer_images[ 0 ].height % tile_size_bayer) == 0 ? \
0 : tile_size_bayer - bayer_images[ 0 ].height % tile_size_bayer );
int padding_left = tile_size_bayer / 2;
int padding_right = tile_size_bayer / 2 + \
( (bayer_images[ 0 ].width % tile_size_bayer) == 0 ? \
0 : tile_size_bayer - bayer_images[ 0 ].width % tile_size_bayer );
padding_info_bayer = std::vector<int>{ padding_top, padding_bottom, padding_left, padding_right };
// Pad bayer image
for ( const auto& bayer_image_i : bayer_images )
{
cv::Mat bayer_image_pad_i;
cv::copyMakeBorder( bayer_image_i.raw_image, \
bayer_image_pad_i, \
padding_top, padding_bottom, padding_left, padding_right, \
cv::BORDER_REFLECT );
// cv::Mat use internal reference count
bayer_images_pad.emplace_back( bayer_image_pad_i );
grayscale_images_pad.emplace_back( box_filter_kxk<uint16_t, 2>( bayer_image_pad_i ) );
}
#ifndef NDEBUG
printf("%s::%s Pad bayer image from (%d, %d) -> (%d, %d)\n", \
__FILE__, __func__, \
bayer_images[ 0 ].height, \
bayer_images[ 0 ].width, \
bayer_images_pad[ 0 ].size().height, \
bayer_images_pad[ 0 ].size().width );
printf("%s::%s pad top %d, buttom %d, left %d, right %d\n", \
__FILE__, __func__, \
padding_top, padding_bottom, padding_left, padding_right );
#endif
}
burst::burst( const std::vector<std::vector<uint8_t> >& bayer_image_contents, int reference_image_index )
{
// Number of images
num_images = bayer_image_contents.size();
// Validate the reference image index
reference_image_idx = -1;
if ( reference_image_index >= 0 && reference_image_index < bayer_image_contents.size() )
{
reference_image_idx = reference_image_index;
}
if ( reference_image_idx == -1 )
{
return;
// throw std::runtime_error("Error reference image index is out of range " );
}
#ifndef NDEBUG
printf("%s::%s reference image idx %d\n", \
__FILE__, __func__, reference_image_idx );
#endif
// Get source bayer image
// Downsample original bayer image by 2x2 box filter
for ( const auto& bayer_image_content : bayer_image_contents )
{
bayer_images.emplace_back( bayer_image_content );
}
// Pad information
int tile_size_bayer = 32;
int padding_top = tile_size_bayer / 2;
int padding_bottom = tile_size_bayer / 2 + \
( (bayer_images[ 0 ].height % tile_size_bayer) == 0 ? \
0 : tile_size_bayer - bayer_images[ 0 ].height % tile_size_bayer );
int padding_left = tile_size_bayer / 2;
int padding_right = tile_size_bayer / 2 + \
( (bayer_images[ 0 ].width % tile_size_bayer) == 0 ? \
0 : tile_size_bayer - bayer_images[ 0 ].width % tile_size_bayer );
padding_info_bayer = std::vector<int>{ padding_top, padding_bottom, padding_left, padding_right };
// Pad bayer image
for ( const auto& bayer_image_i : bayer_images )
{
cv::Mat bayer_image_pad_i;
cv::copyMakeBorder( bayer_image_i.raw_image, \
bayer_image_pad_i, \
padding_top, padding_bottom, padding_left, padding_right, \
cv::BORDER_REFLECT );
// cv::Mat use internal reference count
bayer_images_pad.emplace_back( bayer_image_pad_i );
grayscale_images_pad.emplace_back( box_filter_kxk<uint16_t, 2>( bayer_image_pad_i ) );
}
#ifndef NDEBUG
printf("%s::%s Pad bayer image from (%d, %d) -> (%d, %d)\n", \
__FILE__, __func__, \
bayer_images[ 0 ].height, \
bayer_images[ 0 ].width, \
bayer_images_pad[ 0 ].size().height, \
bayer_images_pad[ 0 ].size().width );
printf("%s::%s pad top %d, buttom %d, left %d, right %d\n", \
__FILE__, __func__, \
padding_top, padding_bottom, padding_left, padding_right );
#endif
}
burst::burst( const std::vector<std::shared_ptr<MemFile> >& bayer_image_files, int reference_image_index )
{
// Number of images
num_images = bayer_image_files.size();
// Validate the reference image index
reference_image_idx = -1;
if ( reference_image_index >= 0 && reference_image_index < bayer_image_files.size() )
{
reference_image_idx = reference_image_index;
}
if ( reference_image_idx == -1 )
{
return;
// throw std::runtime_error("Error reference image index is out of range " );
}
#ifndef NDEBUG
printf("%s::%s reference image idx %d\n", \
__FILE__, __func__, reference_image_idx );
#endif
// Get source bayer image
// Downsample original bayer image by 2x2 box filter
for ( const auto& bayer_image_file : bayer_image_files )
{
bayer_images.emplace_back( bayer_image_file );
}
// Pad information
int tile_size_bayer = 32;
int padding_top = tile_size_bayer / 2;
int padding_bottom = tile_size_bayer / 2 + \
( (bayer_images[ 0 ].height % tile_size_bayer) == 0 ? \
0 : tile_size_bayer - bayer_images[ 0 ].height % tile_size_bayer );
int padding_left = tile_size_bayer / 2;
int padding_right = tile_size_bayer / 2 + \
( (bayer_images[ 0 ].width % tile_size_bayer) == 0 ? \
0 : tile_size_bayer - bayer_images[ 0 ].width % tile_size_bayer );
padding_info_bayer = std::vector<int>{ padding_top, padding_bottom, padding_left, padding_right };
// Pad bayer image
for ( const auto& bayer_image_i : bayer_images )
{
cv::Mat bayer_image_pad_i;
cv::copyMakeBorder( bayer_image_i.raw_image, \
bayer_image_pad_i, \
padding_top, padding_bottom, padding_left, padding_right, \
cv::BORDER_REFLECT );
// cv::Mat use internal reference count
bayer_images_pad.emplace_back( bayer_image_pad_i );
grayscale_images_pad.emplace_back( box_filter_kxk<uint16_t, 2>( bayer_image_pad_i ) );
}
#ifndef NDEBUG
printf("%s::%s Pad bayer image from (%d, %d) -> (%d, %d)\n", \
__FILE__, __func__, \
bayer_images[ 0 ].height, \
bayer_images[ 0 ].width, \
bayer_images_pad[ 0 ].size().height, \
bayer_images_pad[ 0 ].size().width );
printf("%s::%s pad top %d, buttom %d, left %d, right %d\n", \
__FILE__, __func__, \
padding_top, padding_bottom, padding_left, padding_right );
#endif
}
} // namespace hdrplus

@ -1,786 +0,0 @@
#include <iostream>
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/finish.h"
#include "hdrplus/utility.h"
#include <cmath>
#ifdef __ANDROID__
#define DBG_OUTPUT_ROOT "/sdcard/com.xypower.mpapp/tmp/"
#else
#define DBG_OUTPUT_ROOT ""
#endif
// #include <type_traits>
namespace hdrplus
{
cv::Mat convert16bit2_8bit_(cv::Mat ans){
if(ans.type()==CV_16UC3){
cv::MatIterator_<cv::Vec3w> it, end;
for( it = ans.begin<cv::Vec3w>(), end = ans.end<cv::Vec3w>(); it != end; ++it)
{
// std::cout<<sizeof (*it)[0] <<std::endl;
(*it)[0] *=(255.0/USHRT_MAX);
(*it)[1] *=(255.0/USHRT_MAX);
(*it)[2] *=(255.0/USHRT_MAX);
}
ans.convertTo(ans, CV_8UC3);
}else if(ans.type()==CV_16UC1){
u_int16_t* ptr = (u_int16_t*)ans.data;
int end = ans.rows*ans.cols;
for(int i=0;i<end;i++){
*(ptr+i) *=(255.0/USHRT_MAX);
}
ans.convertTo(ans, CV_8UC1);
}else{
std::cout<<"Unsupported Data Type"<<std::endl;
}
return ans;
}
cv::Mat convert8bit2_16bit_(cv::Mat ans){
if(ans.type()==CV_8UC3){
ans.convertTo(ans, CV_16UC3);
cv::MatIterator_<cv::Vec3w> it, end;
for( it = ans.begin<cv::Vec3w>(), end = ans.end<cv::Vec3w>(); it != end; ++it)
{
// std::cout<<sizeof (*it)[0] <<std::endl;
(*it)[0] *=(65535.0/255.0);
(*it)[1] *=(65535.0/255.0);
(*it)[2] *=(65535.0/255.0);
}
}else if(ans.type()==CV_8UC1){
ans.convertTo(ans, CV_16UC1);
u_int16_t* ptr = (u_int16_t*)ans.data;
int end = ans.rows*ans.cols;
for(int i=0;i<end;i++){
*(ptr+i) *=(65535.0/255.0);
}
}else{
std::cout<<"Unsupported Data Type"<<std::endl;
}
return ans;
}
cv::Mat convert8bit2_12bit_(cv::Mat ans){
// Convert to 16-bit storage first so the Vec3w iterator matches the underlying
// data (mirrors convert8bit2_16bit_ above; iterating Vec3w over 8-bit data reads garbage)
ans.convertTo(ans, CV_16UC3);
cv::MatIterator_<cv::Vec3w> it, end;
for( it = ans.begin<cv::Vec3w>(), end = ans.end<cv::Vec3w>(); it != end; ++it)
{
(*it)[0] *=(2048.0/255.0);
(*it)[1] *=(2048.0/255.0);
(*it)[2] *=(2048.0/255.0);
}
return ans;
}
uint16_t uGammaCompress_1pix(float x, float threshold,float gainMin,float gainMax,float exponent){
// Normalize pixel val
x/=USHRT_MAX;
// check the val against the threshold
if(x<=threshold){
x =gainMin*x;
}else{
x = gainMax* pow(x,exponent)-gainMax+1;
}
// clip
if(x<0){
x=0;
}else{
if(x>1){
x = 1;
}
}
x*=USHRT_MAX;
return (uint16_t)x;
}
uint16_t uGammaDecompress_1pix(float x, float threshold,float gainMin,float gainMax,float exponent){
// Normalize pixel val
x/=65535.0;
// check the val against the threshold
if(x<=threshold){
x = x/gainMin;
}else{
x = pow((x+gainMax-1)/gainMax,exponent);
}
// clip
if(x<0){
x=0;
}else{
if(x>1){
x = 1;
}
}
x*=65535;
return (uint16_t)x;
}
cv::Mat uGammaCompress_(cv::Mat m,float threshold,float gainMin,float gainMax,float exponent){
if(m.type()==CV_16UC3){
cv::MatIterator_<cv::Vec3w> it, end;
for( it = m.begin<cv::Vec3w>(), end = m.end<cv::Vec3w>(); it != end; ++it)
{
(*it)[0] =uGammaCompress_1pix((*it)[0],threshold,gainMin,gainMax,exponent);
(*it)[1] =uGammaCompress_1pix((*it)[1],threshold,gainMin,gainMax,exponent);
(*it)[2] =uGammaCompress_1pix((*it)[2],threshold,gainMin,gainMax,exponent);
}
}else if(m.type()==CV_16UC1){
u_int16_t* ptr = (u_int16_t*)m.data;
int end = m.rows*m.cols;
for(int i=0;i<end;i++){
*(ptr+i) = uGammaCompress_1pix(*(ptr+i),threshold,gainMin,gainMax,exponent);
}
}else{
std::cout<<"Unsupported Data Type"<<std::endl;
}
return m;
}
cv::Mat uGammaDecompress_(cv::Mat m,float threshold,float gainMin,float gainMax,float exponent){
if(m.type()==CV_16UC3){
cv::MatIterator_<cv::Vec3w> it, end;
for( it = m.begin<cv::Vec3w>(), end = m.end<cv::Vec3w>(); it != end; ++it)
{
(*it)[0] =uGammaDecompress_1pix((*it)[0],threshold,gainMin,gainMax,exponent);
(*it)[1] =uGammaDecompress_1pix((*it)[1],threshold,gainMin,gainMax,exponent);
(*it)[2] =uGammaDecompress_1pix((*it)[2],threshold,gainMin,gainMax,exponent);
}
}else if(m.type()==CV_16UC1){
u_int16_t* ptr = (u_int16_t*)m.data;
int end = m.rows*m.cols;
for(int i=0;i<end;i++){
*(ptr+i) = uGammaDecompress_1pix(*(ptr+i),threshold,gainMin,gainMax,exponent);
}
}else{
std::cout<<"Unsupported Data Type"<<std::endl;
}
return m;
}
cv::Mat gammasRGB(cv::Mat img, bool mode){
if(mode){// compress
return uGammaCompress_(img,0.0031308, 12.92, 1.055, 1. / 2.4);
}else{ // decompress
return uGammaDecompress_(img, 0.04045, 12.92, 1.055, 2.4);
}
}
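// The constants above encode the standard sRGB transfer function:
//   compress:   y = 12.92 * x                     for x <= 0.0031308
//               y = 1.055 * x^(1/2.4) - 0.055     otherwise
// which matches gainMax * pow(x, exponent) - gainMax + 1 with gainMax = 1.055.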
void copy_mat_16U_2(u_int16_t* ptr_A, cv::Mat B){
// u_int16_t* ptr_A = (u_int16_t*)A.data;
u_int16_t* ptr_B = (u_int16_t*)B.data;
for(int r = 0; r < B.rows; r++) {
for(int c = 0; c < B.cols; c++) {
*(ptr_A+r*B.cols+c) = *(ptr_B+r*B.cols+c);
}
}
}
cv::Mat mean_(cv::Mat img){
// initialize processedImg
int H = img.rows;
int W = img.cols;
cv::Mat processedImg = cv::Mat(H,W,CV_16UC1);
u_int16_t* ptr = (u_int16_t*)processedImg.data;
// traverse img
int idx = 0;
cv::MatIterator_<cv::Vec3w> it, end;
for( it = img.begin<cv::Vec3w>(), end = img.end<cv::Vec3w>(); it != end; ++it)
{
uint32_t tmp = (*it)[0]+(*it)[1]+(*it)[2];
uint16_t avg_val = tmp/3;
*(ptr+idx) = avg_val;
idx++;
}
return processedImg;
}
double getMean(cv::Mat img){
u_int16_t* ptr = (u_int16_t*)img.data;
int max_idx = img.rows*img.cols*img.channels();
double sum=0;
for(int i=0;i<max_idx;i++){
sum += *(ptr+i);
}
sum/=max_idx;
sum/=USHRT_MAX;
return sum;
}
cv::Mat matMultiply_scalar(cv::Mat img,float gain){
u_int16_t* ptr = (u_int16_t*)img.data;
int max_idx = img.rows*img.cols*img.channels();
for(int i=0;i<max_idx;i++){
double tmp = *(ptr+i)*gain;
if(tmp<0){
*(ptr+i)=0;
}else if(tmp>USHRT_MAX){
*(ptr+i) = USHRT_MAX;
}else{
*(ptr+i)=(u_int16_t)tmp;
}
}
return img;
}
double getSaturated(cv::Mat img, double threshold){
threshold *= USHRT_MAX;
double count=0;
u_int16_t* ptr = (u_int16_t*)img.data;
int max_idx = img.rows*img.cols*img.channels();
for(int i=0;i<max_idx;i++){
if(*(ptr+i)>threshold){
count++;
}
}
return count/(double)max_idx;
}
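// getSaturated returns the fraction of samples above `threshold`, where the
// threshold is given as a fraction of USHRT_MAX (e.g. 0.95 -> ~62258).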
cv::Mat meanGain_(cv::Mat img,int gain){
if(img.channels()!=3){
std::cout<<"unsupport img type in meanGain_()"<<std::endl;
return cv::Mat();
}else{ // RGB img
int H = img.rows;
int W = img.cols;
cv::Mat processedImg = cv::Mat(H,W,CV_16UC1);
u_int16_t* ptr = (u_int16_t*)processedImg.data;
int idx=0;
cv::MatIterator_<cv::Vec3w> it, end;
for( it = img.begin<cv::Vec3w>(), end = img.end<cv::Vec3w>(); it != end; ++it)
{
double sum = 0;
// R
double tmp = (*it)[0]*gain;
if(tmp<0) tmp=0;
if(tmp>USHRT_MAX) tmp = USHRT_MAX;
sum+=tmp;
// G
tmp = (*it)[1]*gain;
if(tmp<0) tmp=0;
if(tmp>USHRT_MAX) tmp = USHRT_MAX;
sum+=tmp;
// B
tmp = (*it)[2]*gain;
if(tmp<0) tmp=0;
if(tmp>USHRT_MAX) tmp = USHRT_MAX;
sum+=tmp;
// put into processedImg
uint16_t avg_val = sum/3;
*(ptr+idx) = avg_val;
idx++;
}
return processedImg;
}
}
cv::Mat applyScaling_(cv::Mat mergedImage, cv::Mat shortGray, cv::Mat fusedGray){
cv::Mat result = mergedImage.clone();
u_int16_t* ptr_shortg = (u_int16_t*)shortGray.data;
u_int16_t* ptr_fusedg = (u_int16_t*)fusedGray.data;
int count = 0;
cv::MatIterator_<cv::Vec3w> it, end;
for( it = result.begin<cv::Vec3w>(), end = result.end<cv::Vec3w>(); it != end; ++it)
{
double s = 1;
if(*(ptr_shortg+count)!=0){
s = *(ptr_fusedg+count);
s/=*(ptr_shortg+count);
}
for(int c=0;c<mergedImage.channels();c++){
double tmp = (*it)[c]*s;
if(tmp<0){
(*it)[c] = 0;
}else if(tmp>USHRT_MAX){
(*it)[c] = USHRT_MAX;
}else{
(*it)[c] = tmp;
}
}
}
return result;
}
void localToneMap(cv::Mat& mergedImage, Options options, cv::Mat& shortg,
cv::Mat& longg, cv::Mat& fusedg, int& gain){
std::cout<<"HDR Tone Mapping..."<<std::endl;
// # Work with grayscale images
cv::Mat shortGray = rgb_2_gray<uint16_t, uint16_t, CV_16U>(mergedImage); //mean_(mergedImage);
std::cout<<"--- Compute grayscale image"<<std::endl;
// compute gain
gain = 0;
if(options.ltmGain==-1){
double dsFactor = 25;
int down_height = round(shortGray.rows/dsFactor);
int down_width = round(shortGray.cols/dsFactor);
cv::Mat shortS;
// cv::Size is (width, height); interpolation is the 6th argument of cv::resize
cv::resize(shortGray,shortS,cv::Size(down_width,down_height),0,0,cv::INTER_LINEAR);
shortS = shortS.reshape(1,1);
bool bestGain = false;
double compression = 1.0;
double saturated = 0.0;
cv::Mat shortSg = gammasRGB(shortS.clone(), true);
double sSMean = getMean(shortSg);
while((compression < 1.9 && saturated < .95)||((!bestGain) && (compression < 6) && (gain < 30) && (saturated < 0.33))){
gain += 2;
cv::Mat longSg = gammasRGB(shortS.clone()*gain, true);
double lSMean = getMean(longSg);
compression = lSMean / sSMean;
bestGain = lSMean > (1 - sSMean) / 2; // only works if burst underexposed
saturated = getSaturated(longSg,0.95);
}
}else{
if(options.ltmGain>0){
gain = options.ltmGain;
}
}
std::cout<<"--- Compute gain"<<std::endl;
// create a synthetic long exposure
cv::Mat longGray = meanGain_(mergedImage.clone(),gain);
std::cout<<"--- Synthetic long expo"<<std::endl;
// apply gamma correction to both
longg = gammasRGB(longGray.clone(), true);
shortg = gammasRGB(shortGray.clone(),true);
std::cout<<"--- Apply Gamma correction"<<std::endl;
// perform tone mapping by exposure fusion in grayscale
cv::Ptr<cv::MergeMertens> mergeMertens = cv::createMergeMertens();
std::cout<<"--- Create Mertens"<<std::endl;
// hack: cv2 mergeMertens expects inputs between 0 and 255
// but the result is scaled between 0 and 1 (some values can actually be greater than 1!)
std::vector<cv::Mat> src_expos;
src_expos.push_back(convert16bit2_8bit_(shortg.clone()));
src_expos.push_back(convert16bit2_8bit_(longg.clone()));
mergeMertens->process(src_expos, fusedg);
fusedg = fusedg*USHRT_MAX;
fusedg.convertTo(fusedg, CV_16UC1);
std::cout<<"--- Apply Mertens"<<std::endl;
// undo gamma correction
cv::Mat fusedGray = gammasRGB(fusedg.clone(), false);
// cv::imwrite("fusedg_degamma.png", fusedGray);
std::cout<<"--- Un-apply Gamma correction"<<std::endl;
// scale each RGB channel of the short exposure accordingly
mergedImage = applyScaling_(mergedImage, shortGray, fusedGray);
std::cout<<"--- Scale channels"<<std::endl;
}
u_int16_t enhanceContrast_1pix(u_int16_t pix_val,double gain){
double x = pix_val;
x/=USHRT_MAX;
x = x - gain*sin(2*M_PI*x);
if(x<0){
x = 0;
}else if(x>1){
x = 1;
}
u_int16_t result = x*USHRT_MAX;
return result;
}
cv::Mat enhanceContrast(cv::Mat image, Options options){
if(options.gtmContrast>=0 && options.gtmContrast<=1){
u_int16_t* ptr = (u_int16_t*)image.data;
int end = image.rows*image.cols*image.channels();
for(int idx = 0;idx<end;idx++){
*(ptr+idx) = enhanceContrast_1pix(*(ptr+idx),options.gtmContrast);
}
}else{
std::cout<<"GTM ignored, expected a contrast enhancement ratio between 0 and 1"<<std::endl;
}
return image;
}
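// The curve x - gain * sin(2*pi*x) fixes 0, 0.5 and 1, darkens values below
// mid-gray (sin positive on (0, 0.5)) and brightens values above it (sin
// negative on (0.5, 1)), i.e. it steepens mid-tone contrast.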
cv::Mat distL1_(cv::Mat X, cv::Mat Y){
int end_x = X.rows*X.cols*X.channels();
int end_y = Y.rows*Y.cols*Y.channels();
cv::Mat result = cv::Mat(X.rows,X.cols,X.type());
if(end_x==end_y){
u_int16_t* ptr_x = (u_int16_t*)X.data;
u_int16_t* ptr_y = (u_int16_t*)Y.data;
u_int16_t* ptr_r = (u_int16_t*)result.data;
for(int i=0;i<end_x;i++){
if(*(ptr_x+i)<*(ptr_y+i)){
*(ptr_r+i) = *(ptr_y+i) - *(ptr_x+i);
}else{
*(ptr_r+i) = *(ptr_x+i) - *(ptr_y+i);
}
}
}else{
std::cout<<"Mat size not match. distL1_ failed!"<<std::endl;
}
return result;
}
cv::Mat sharpenTriple_(cv::Mat image,
cv::Mat blur0, cv::Mat low0, float th0, float k0,
cv::Mat blur1, cv::Mat low1, float th1, float k1,
cv::Mat blur2, cv::Mat low2, float th2, float k2){
// create result mat
cv::Mat result = cv::Mat(image.rows,image.cols,image.type());
// initialize iteraters
u_int16_t* ptr_r = (u_int16_t*)result.data;
u_int16_t* ptr_img = (u_int16_t*)image.data;
u_int16_t* ptr_blur0 = (u_int16_t*)blur0.data;
u_int16_t* ptr_low0 = (u_int16_t*)low0.data;
u_int16_t* ptr_blur1 = (u_int16_t*)blur1.data;
u_int16_t* ptr_low1 = (u_int16_t*)low1.data;
u_int16_t* ptr_blur2 = (u_int16_t*)blur2.data;
u_int16_t* ptr_low2 = (u_int16_t*)low2.data;
int n_channels = image.channels();
int end = image.rows*image.cols*n_channels;
// traverse Image
for(int idx = 0;idx<end;idx++){
double r, r0, r1, r2;
double x = *(ptr_img+idx);
double l0 = *(ptr_low0+idx)/(double)USHRT_MAX;
double l1 = *(ptr_low1+idx)/(double)USHRT_MAX;
double l2 = *(ptr_low2+idx)/(double)USHRT_MAX;
double b0 = *(ptr_blur0+idx);
double b1 = *(ptr_blur1+idx);
double b2 = *(ptr_blur2+idx);
r0 = l0<th0? x:x+k0*(x-b0);
r1 = l1<th1? x:x+k1*(x-b1);
r2 = l2<th2? x:x+k2*(x-b2);
r = (r0+r1+r2)/3.0;
if(r<0) r=0;
if(r>USHRT_MAX) r = USHRT_MAX;
*(ptr_r+idx) = (u_int16_t)r;
}
return result;
}
cv::Mat sharpenTriple(cv::Mat image, Tuning tuning, Options options){
// sharpen the image using unsharp masking
std::vector<float> amounts = tuning.sharpenAmount;
std::vector<float> sigmas = tuning.sharpenSigma;
std::vector<float> thresholds = tuning.sharpenThreshold;
// Compute all Gaussian blur
cv::Mat blur0,blur1,blur2;
cv::GaussianBlur(image,blur0,cv::Size(0,0),sigmas[0]);
cv::GaussianBlur(image,blur1,cv::Size(0,0),sigmas[1]);
cv::GaussianBlur(image,blur2,cv::Size(0,0),sigmas[2]);
std::cout<<" --- gaussian blur"<<std::endl;
// cv::imwrite("blur2.png", blur2);
// Compute all low contrast images
cv::Mat low0 = distL1_(blur0, image);
cv::Mat low1 = distL1_(blur1, image);
cv::Mat low2 = distL1_(blur2, image);
std::cout<<" --- low contrast"<<std::endl;
// cv::imwrite("low2.png", low2);
// Compute the triple sharpen
cv::Mat sharpImage = sharpenTriple_(image,
blur0, low0, thresholds[0], amounts[0],
blur1, low1, thresholds[1], amounts[1],
blur2, low2, thresholds[2], amounts[2]);
std::cout<<" --- sharpen"<<std::endl;
return sharpImage;
}
void copy_mat_16U_3(u_int16_t* ptr_A, cv::Mat B){
// u_int16_t* ptr_A = (u_int16_t*)A.data;
u_int16_t* ptr_B = (u_int16_t*)B.data;
int H = B.rows;
int W = B.cols;
int end = H*W;
for(int i=0;i<end;i++){
*(ptr_A+i) = *(ptr_B+i);
}
}
// void copy_mat_16U_3(u_int16_t* ptr_A, cv::Mat B){
// // u_int16_t* ptr_A = (u_int16_t*)A.data;
// u_int16_t* ptr_B = (u_int16_t*)B.data;
// for(int r = 0; r < B.rows; r++) {
// for(int c = 0; c < B.cols; c++) {
// *(ptr_A+r*B.cols+c) = *(ptr_B+r*B.cols+c);
// }
// }
// }
cv::Mat processMergedMat(cv::Mat mergedImg, int opencv_type){
cv::Mat m;
#if 0
uint16_t* ptr = (uint16_t*)mergedImg.data;
for(int r = 0; r < mergedImg.rows; r++) {
std::vector<int> dvals;
for(int c = 0; c < mergedImg.cols; c++) {
dvals.push_back(*(ptr+r*mergedImg.cols+c));
}
cv::Mat mline(dvals, true);
cv::transpose(mline, mline);
m.push_back(mline);
}
#endif
int ch = CV_MAT_CN(opencv_type);
m = mergedImg.clone();
m = m.reshape(ch);
m.convertTo(m, opencv_type);
return m;
}
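// Note: reshape(ch) reinterprets the single-channel merged bayer data as `ch`
// interleaved channels without copying; convertTo then sets the target depth.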
void show20_20(cv::Mat m){
u_int16_t* ptr = (u_int16_t*)m.data;
for(int i=0;i<20;i++){
for(int j=0;j<20;j++){
std::cout<<*(ptr+i*m.cols+j)<<", ";
}
std::cout<<std::endl;
}
}
void writeCSV(std::string filename, cv::Mat m)
{
std::ofstream myfile;
myfile.open(filename.c_str());
myfile<< cv::format(m, cv::Formatter::FMT_CSV) << std::endl;
myfile.close();
}
void finish::process(const hdrplus::burst& burst_images, cv::Mat& finalOutputImage){
// copy mergedBayer to rawReference
std::cout<<"finish pipeline start ..."<<std::endl;
// save merged Image value
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
writeCSV(DBG_OUTPUT_ROOT "merged.csv",burst_images.merged_bayer_image);
#endif
this->refIdx = burst_images.reference_image_idx;
// this->burstPath = burstPath;
// std::cout<<"processMerged:"<<std::endl;
// show20_20(mergedB);
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
this->mergedBayer = loadFromCSV(DBG_OUTPUT_ROOT "merged.csv", CV_16UC1);
// this->mergedBayer = processMergedMat(mergedB,CV_16UC1);//loadFromCSV("merged.csv", CV_16UC1);
// std::cout<<"processMerged:"<<std::endl;
// show20_20(this->mergedBayer);
// this->mergedBayer = loadFromCSV(DBG_OUTPUT_ROOT "merged.csv", CV_16UC1);
// this->mergedBayer = processMergedMat(burst_images.merged_bayer_image, CV_16UC1);
#else
// this->mergedBayer = loadFromCSV(DBG_OUTPUT_ROOT "merged.csv", CV_16UC1);
this->mergedBayer = processMergedMat(burst_images.merged_bayer_image, CV_16UC1);
// std::cout<<"processMerged:"<<std::endl;
#endif
// std::cout<<"csv:"<<std::endl;
// show20_20(this->mergedBayer);
// load_rawPathList(burstPath);
// read in ref img
// bayer_image* ref = new bayer_image(rawPathList[refIdx]);
bayer_image* ref = new bayer_image(burst_images.bayer_images[burst_images.reference_image_idx]);
cv::Mat processedRefImage = postprocess(ref->libraw_processor,params.rawpyArgs);
std::cout<<"size ref: "<<processedRefImage.rows<<"*"<<processedRefImage.cols<<std::endl;
// write reference image
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeReferenceImage"]){
std::cout<<"writing reference img ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(processedRefImage.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
// cv::imshow("test",processedImage);
cv::imwrite(DBG_OUTPUT_ROOT "processedRef.jpg", outputImg);
// cv::waitKey(0);
}
#endif
// write gamma reference
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeGammaReference"]){
std::cout<<"writing Gamma reference img ..."<<std::endl;
cv::Mat outputImg = gammasRGB(processedRefImage.clone(),true);
outputImg = convert16bit2_8bit_(outputImg);
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "processedRefGamma.jpg", outputImg);
}
#endif
// get the bayer_image of the merged image
// bayer_image* mergedImg = new bayer_image(rawPathList[refIdx]);
bayer_image* mergedImg = new bayer_image(burst_images.bayer_images[this->refIdx]);
mergedImg->libraw_processor->imgdata.rawdata.raw_image = (uint16_t*)this->mergedBayer.data;
// copy_mat_16U_3(mergedImg->libraw_processor->imgdata.rawdata.raw_image,this->mergedBayer);
cv::Mat processedMerge = postprocess(mergedImg->libraw_processor,params.rawpyArgs);
// write merged image
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeMergedImage"]){
std::cout<<"writing Merged img ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(processedMerge.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "mergedImg.jpg", outputImg);
}
#endif
// write gamma merged image
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeMergedImage"]){
std::cout<<"writing Gamma Merged img ..."<<std::endl;
cv::Mat outputImg = gammasRGB(processedMerge.clone(),true);
outputImg = convert16bit2_8bit_(outputImg);
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "mergedImgGamma.jpg", outputImg);
}
#endif
// step 5. HDR tone mapping
// processedImage, gain, shortExposure, longExposure, fusedExposure = localToneMap(burstPath, processedImage, options)
int gain;
if(params.options.ltmGain){
cv::Mat shortExposure, longExposure, fusedExposure;
localToneMap(processedMerge, params.options,shortExposure,longExposure,fusedExposure,gain);
std::cout<<"gain="<< gain<<std::endl;
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeShortExposure"]){
std::cout<<"writing ShortExposure img ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(shortExposure);
cv::imwrite(DBG_OUTPUT_ROOT "shortg.jpg", outputImg);
}
#endif
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeLongExposure"]){
std::cout<<"writing LongExposure img ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(longExposure);
cv::imwrite(DBG_OUTPUT_ROOT "longg.jpg", outputImg);
}
#endif
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeFusedExposure"]){
std::cout<<"writing FusedExposure img ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(fusedExposure);
cv::imwrite(DBG_OUTPUT_ROOT "fusedg.jpg", outputImg);
}
#endif
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeLTMImage"]){
std::cout<<"writing LTMImage ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(processedMerge.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "ltmGain.jpg", outputImg);
}
#endif
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeLTMGamma"]){
std::cout<<"writing LTMImage Gamma ..."<<std::endl;
cv::Mat outputImg = gammasRGB(processedMerge.clone(),true);
outputImg = convert16bit2_8bit_(outputImg);
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "ltmGain_gamma.jpg", outputImg);
}
#endif
}
// step 6 GTM: contrast enhancement / global tone mapping
if(params.options.gtmContrast){
processedMerge = enhanceContrast(processedMerge, params.options);
std::cout<<"STEP 6 -- Apply GTM"<<std::endl;
}
// apply the final sRGB gamma curve
processedMerge = gammasRGB(processedMerge.clone(),true);
std::cout<<"-- Apply Gamma"<<std::endl;
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeGTMImage"]) {
std::cout<<"writing GTMImage ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(processedMerge.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "GTM_gamma.jpg", outputImg);
}
#endif
// Step 7: sharpen
finalOutputImage = sharpenTriple(processedMerge.clone(), params.tuning, params.options);
cv::Mat& processedImage = finalOutputImage;
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeFinalImage"]){
std::cout<<"writing FinalImage ..."<<std::endl;
cv::Mat outputImg = convert16bit2_8bit_(processedImage.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "FinalImage.jpg", outputImg);
}
#endif
// write final ref
#ifndef HDRPLUS_NO_DETAILED_OUTPUT
if(params.flags["writeReferenceFinal"]){
std::cout<<"writing Final Ref Image ..."<<std::endl;
if(params.options.ltmGain){
params.options.ltmGain = gain;
}
cv::Mat shortExposureRef, longExposureRef, fusedExposureRef;
localToneMap(processedRefImage, params.options,shortExposureRef,longExposureRef,fusedExposureRef,gain);
if(params.options.gtmContrast){ // contrast enhancement / global tone mapping
processedRefImage = enhanceContrast(processedRefImage, params.options);
}
processedRefImage = gammasRGB(processedRefImage.clone(),true);
// sharpen
processedRefImage = sharpenTriple(processedRefImage.clone(), params.tuning, params.options);
cv::Mat outputImg = convert16bit2_8bit_(processedRefImage.clone());
cv::cvtColor(outputImg, outputImg, cv::COLOR_RGB2BGR);
cv::imwrite(DBG_OUTPUT_ROOT "FinalReference.jpg", outputImg);
}
#endif
// End of finishing
}
void finish::copy_mat_16U(cv::Mat& A, cv::Mat B){
u_int16_t* ptr_A = (u_int16_t*)A.data;
u_int16_t* ptr_B = (u_int16_t*)B.data;
for(int r = 0; r < A.rows; r++) {
for(int c = 0; c < A.cols; c++) {
*(ptr_A+r*A.cols+c) = *(ptr_B+r*B.cols+c);
}
}
}
void finish::copy_rawImg2libraw(std::shared_ptr<LibRaw>& libraw_ptr, cv::Mat B){
u_int16_t* ptr_A = (u_int16_t*)libraw_ptr->imgdata.rawdata.raw_image;
u_int16_t* ptr_B = (u_int16_t*)B.data;
for(int r = 0; r < B.rows; r++) {
for(int c = 0; c < B.cols; c++) {
*(ptr_A+r*B.cols+c) = *(ptr_B+r*B.cols+c);
}
}
}
} // namespace hdrplus
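For orientation, the finishing stage above reduces to a fixed chain over the merged bayer image. A condensed sketch of that flow, reusing the helper signatures from this file (debug writes stripped; this restates the control flow above, it is not new behavior):

// finish::process, condensed (sketch).
cv::Mat img = postprocess(mergedImg->libraw_processor, params.rawpyArgs); // demosaic to 16-bit RGB
int gain = 0;
if (params.options.ltmGain) {
    cv::Mat shortExp, longExp, fusedExp;
    localToneMap(img, params.options, shortExp, longExp, fusedExp, gain); // step 5: local tone mapping
}
if (params.options.gtmContrast) {
    img = enhanceContrast(img, params.options); // step 6: global tone mapping
}
img = gammasRGB(img.clone(), true); // final sRGB gamma curve
finalOutputImage = sharpenTriple(img.clone(), params.tuning, params.options); // step 7: sharpen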

@ -1,138 +0,0 @@
#include <cstdio>
#include <string>
#include <vector>
#include <utility> // std::pair
#include <opencv2/opencv.hpp> // all opencv header
#include "hdrplus/hdrplus_pipeline.h"
#include "hdrplus/burst.h"
#include "hdrplus/align.h"
#include "hdrplus/merge.h"
#include "hdrplus/finish.h"
#include <fstream>
#ifdef __ANDROID__
// #include <AndroidHelper.h>
#endif
namespace hdrplus
{
void hdrplus_pipeline::run_pipeline( \
const std::string& burst_path, \
const std::string& reference_image_path )
{
// Create burst of images
burst burst_images( burst_path, reference_image_path );
std::vector<std::vector<std::vector<std::pair<int, int>>>> alignments;
// Run align
align_module.process( burst_images, alignments );
// Run merging
merge_module.process( burst_images, alignments );
// Run finishing
cv::Mat finalImg;
finish_module.process( burst_images, finalImg);
}
bool hdrplus_pipeline::run_pipeline( \
const std::vector<std::string>& burst_paths, \
int reference_image_index, cv::Mat& finalImg )
{
// Create burst of images
burst burst_images( burst_paths, reference_image_index );
std::vector<std::vector<std::vector<std::pair<int, int>>>> alignments;
#ifdef __ANDROID__
// ALOGI("Finish loading images");
#endif
// Run align
align_module.process( burst_images, alignments );
#ifdef __ANDROID__
// ALOGI("Finish align");
#endif
// Run merging
merge_module.process( burst_images, alignments );
#ifdef __ANDROID__
// ALOGI("Finish merging");
#endif
// Run finishing
finish_module.process( burst_images, finalImg);
#ifdef __ANDROID__
// ALOGI("Finish process");
#endif
return true;
}
bool hdrplus_pipeline::run_pipeline( \
const std::vector<std::vector<uint8_t> >& burst_contents, \
int reference_image_index, cv::Mat& finalImg )
{
// Create burst of images
burst burst_images( burst_contents, reference_image_index );
std::vector<std::vector<std::vector<std::pair<int, int>>>> alignments;
#ifdef __ANDROID__
// ALOGI("Finish loading images");
#endif
// Run align
align_module.process( burst_images, alignments );
#ifdef __ANDROID__
// ALOGI("Finish align");
#endif
// Run merging
merge_module.process( burst_images, alignments );
#ifdef __ANDROID__
// ALOGI("Finish merging");
#endif
// Run finishing
finish_module.process( burst_images, finalImg);
#ifdef __ANDROID__
// ALOGI("Finish process");
#endif
return true;
}
bool hdrplus_pipeline::run_pipeline( \
const std::vector<std::shared_ptr<MemFile> >& burst_files, \
int reference_image_index, cv::Mat& finalImg )
{
// Create burst of images
burst burst_images( burst_files, reference_image_index );
std::vector<std::vector<std::vector<std::pair<int, int>>>> alignments;
#ifdef __ANDROID__
// ALOGI("Finish loading images");
#endif
// Run align
align_module.process( burst_images, alignments );
#ifdef __ANDROID__
// ALOGI("Finish align");
#endif
// Run merging
merge_module.process( burst_images, alignments );
#ifdef __ANDROID__
// ALOGI("Finish merging");
#endif
// Run finishing
finish_module.process( burst_images, finalImg);
#ifdef __ANDROID__
// ALOGI("Finish process");
#endif
return true;
}
} // namespace hdrplus
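The run_pipeline overloads above differ only in how the burst is constructed (directory, path list, raw byte buffers, or MemFile handles); align, merge, and finish are identical. A minimal caller sketch against the path-list overload (paths are illustrative, not from this repo):

#include <opencv2/opencv.hpp>
#include "hdrplus/hdrplus_pipeline.h"

int main() {
    hdrplus::hdrplus_pipeline pipeline;
    std::vector<std::string> burst_paths = {
        "/sdcard/burst/frame0.dng", // hypothetical input files
        "/sdcard/burst/frame1.dng",
        "/sdcard/burst/frame2.dng",
    };
    cv::Mat finalImg;
    if (pipeline.run_pipeline(burst_paths, /*reference_image_index=*/0, finalImg)) {
        cv::imwrite("/sdcard/burst/hdrplus_out.jpg", finalImg);
    }
    return 0;
}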

@ -1,340 +0,0 @@
#include <opencv2/opencv.hpp> // all opencv header
#include <vector>
#include <utility>
#include "hdrplus/merge.h"
#include "hdrplus/burst.h"
#include "hdrplus/utility.h"
namespace hdrplus
{
void merge::process(hdrplus::burst& burst_images, \
std::vector<std::vector<std::vector<std::pair<int, int>>>>& alignments)
{
// 4.1 Noise Parameters and RMS
// Noise parameters calculated from baseline ISO noise parameters
double lambda_shot, lambda_read;
std::tie(lambda_shot, lambda_read) = burst_images.bayer_images[burst_images.reference_image_idx].get_noise_params();
// 4.2-4.4 Denoising and Merging
// Get padded bayer image
cv::Mat reference_image = burst_images.bayer_images_pad[burst_images.reference_image_idx];
#ifndef NDEBUG
// cv::imwrite("ref.jpg", reference_image);
#endif
// Get raw channels
std::vector<cv::Mat> channels(4);
hdrplus::extract_rgb_from_bayer<uint16_t>(reference_image, channels[0], channels[1], channels[2], channels[3]);
std::vector<cv::Mat> processed_channels(4);
// For each channel, perform denoising and merge
for (int i = 0; i < 4; ++i) {
// Get channel mat
cv::Mat channel_i = channels[i];
// cv::imwrite("ref" + std::to_string(i) + ".jpg", channel_i);
//The same channel has to be extracted from every alternate frame here, so
//that processChannel can denoise the reference channel against its aligned
//counterparts in the other images.
//create list of channel_i of alternate images:
std::vector<cv::Mat> alternate_channel_i_list;
for (int j = 0; j < burst_images.num_images; j++) {
if (j != burst_images.reference_image_idx) {
//get alternate image
cv::Mat alt_image = burst_images.bayer_images_pad[j];
std::vector<cv::Mat> alt_channels(4);
hdrplus::extract_rgb_from_bayer<uint16_t>(alt_image, alt_channels[0], alt_channels[1], alt_channels[2], alt_channels[3]);
alternate_channel_i_list.push_back(alt_channels[i]);
}
}
// Apply merging on the channel
cv::Mat merged_channel = processChannel(burst_images, alignments, channel_i, alternate_channel_i_list, lambda_shot, lambda_read);
// cv::imwrite("merged" + std::to_string(i) + ".jpg", merged_channel);
// Put channel raw data back to channels
merged_channel.convertTo(processed_channels[i], CV_16U);
}
// Write all channels back to a bayer mat
cv::Mat merged(reference_image.rows, reference_image.cols, CV_16U);
int x, y;
for (y = 0; y < reference_image.rows; ++y){
uint16_t* row = merged.ptr<uint16_t>(y);
if (y % 2 == 0){
uint16_t* i0 = processed_channels[0].ptr<uint16_t>(y / 2);
uint16_t* i1 = processed_channels[1].ptr<uint16_t>(y / 2);
for (x = 0; x < reference_image.cols;){
//R
row[x] = i0[x / 2];
x++;
//G1
row[x] = i1[x / 2];
x++;
}
}
else {
uint16_t* i2 = processed_channels[2].ptr<uint16_t>(y / 2);
uint16_t* i3 = processed_channels[3].ptr<uint16_t>(y / 2);
for(x = 0; x < reference_image.cols;){
//G2
row[x] = i2[x / 2];
x++;
//B
row[x] = i3[x / 2];
x++;
}
}
}
// Remove padding
std::vector<int> padding = burst_images.padding_info_bayer;
cv::Range horizontal = cv::Range(padding[2], reference_image.cols - padding[3]);
cv::Range vertical = cv::Range(padding[0], reference_image.rows - padding[1]);
burst_images.merged_bayer_image = merged(vertical, horizontal);
// cv::imwrite("merged.jpg", burst_images.merged_bayer_image);
}
std::vector<cv::Mat> merge::getReferenceTiles(cv::Mat reference_image) {
std::vector<cv::Mat> reference_tiles;
for (int y = 0; y < reference_image.rows - offset; y += offset) {
for (int x = 0; x < reference_image.cols - offset; x += offset) {
cv::Mat tile = reference_image(cv::Rect(x, y, TILE_SIZE, TILE_SIZE));
reference_tiles.push_back(tile);
}
}
return reference_tiles;
}
cv::Mat merge::mergeTiles(std::vector<cv::Mat> tiles, int num_rows, int num_cols) {
// 1. get all four subsets: original (evenly split), horizontal overlapped,
// vertical overlapped, 2D overlapped
std::vector<std::vector<cv::Mat>> tiles_original;
std::vector<cv::Mat> row;
for (int y = 0; y < num_rows / offset - 1; y += 2) {
row.clear();
for (int x = 0; x < num_cols / offset - 1; x += 2) {
row.push_back(tiles[y * (num_cols / offset - 1) + x]);
}
tiles_original.push_back(row);
}
std::vector<std::vector<cv::Mat>> tiles_horizontal;
// std::vector<cv::Mat> row;
for (int y = 0; y < num_rows / offset - 1; y += 2) {
row.clear();
for (int x = 1; x < num_cols / offset - 1; x += 2) {
row.push_back(tiles[y * (num_cols / offset - 1) + x]);
}
tiles_horizontal.push_back(row);
}
std::vector<std::vector<cv::Mat>> tiles_vertical;
// std::vector<cv::Mat> row;
for (int y = 1; y < num_rows / offset - 1; y += 2) {
row.clear();
for (int x = 0; x < num_cols / offset - 1; x += 2) {
row.push_back(tiles[y * (num_cols / offset - 1) + x]);
}
tiles_vertical.push_back(row);
}
std::vector<std::vector<cv::Mat>> tiles_2d;
// std::vector<cv::Mat> row;
for (int y = 1; y < num_rows / offset - 1; y += 2) {
row.clear();
for (int x = 1; x < num_cols / offset - 1; x += 2) {
row.push_back(tiles[y * (num_cols / offset - 1) + x]);
}
tiles_2d.push_back(row);
}
// 2. Concatenate the four subsets
cv::Mat img_original = cat2Dtiles(tiles_original);
cv::Mat img_horizontal = cat2Dtiles(tiles_horizontal);
cv::Mat img_vertical = cat2Dtiles(tiles_vertical);
cv::Mat img_2d = cat2Dtiles(tiles_2d);
// 3. Add the four subsets together
img_original(cv::Rect(offset, 0, num_cols - TILE_SIZE, num_rows)) += img_horizontal;
img_original(cv::Rect(0, offset, num_cols, num_rows - TILE_SIZE)) += img_vertical;
img_original(cv::Rect(offset, offset, num_cols - TILE_SIZE, num_rows - TILE_SIZE)) += img_2d;
return img_original;
}
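// Worked example of the tiling arithmetic above, assuming TILE_SIZE = 16 and
// offset = TILE_SIZE / 2 = 8 (the half-tile stride used throughout): a 64x64
// channel yields 64/8 - 1 = 7 tiles per dimension. Even-indexed tiles form
// img_original; the odd-indexed subsets are the half-tile-shifted mosaics
// added back at (8, 0), (0, 8) and (8, 8), so every interior pixel is covered
// by exactly four cosine-windowed tiles whose window weights sum to one.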
cv::Mat merge::processChannel(hdrplus::burst& burst_images, \
std::vector<std::vector<std::vector<std::pair<int, int>>>>& alignments, \
cv::Mat channel_image, \
std::vector<cv::Mat> alternate_channel_i_list,\
float lambda_shot, \
float lambda_read) {
// Get tiles of the reference image
std::vector<cv::Mat> reference_tiles = getReferenceTiles(channel_image);
// Get noise variance (sigma**2 = lambda_shot * tileRMS + lambda_read)
std::vector<float> noise_variance = getNoiseVariance(reference_tiles, lambda_shot, lambda_read);
// Apply FFT on reference tiles (spatial to frequency)
std::vector<cv::Mat> reference_tiles_DFT;
for (auto ref_tile : reference_tiles) {
cv::Mat ref_tile_DFT;
ref_tile.convertTo(ref_tile_DFT, CV_32F);
cv::dft(ref_tile_DFT, ref_tile_DFT, cv::DFT_COMPLEX_OUTPUT);
reference_tiles_DFT.push_back(ref_tile_DFT);
}
// Acquire alternate tiles and apply FFT on them as well
std::vector<std::vector<cv::Mat>> alt_tiles_list(reference_tiles.size());
int num_tiles_row = alternate_channel_i_list[0].rows / offset - 1;
int num_tiles_col = alternate_channel_i_list[0].cols / offset - 1;
std::vector<cv::Mat> alt_tiles;
for (int y = 0; y < num_tiles_row; ++y) {
for (int x = 0; x < num_tiles_col; ++x) {
alt_tiles.clear();
// Get reference tile location
int top_left_y = y * offset;
int top_left_x = x * offset;
for (int i = 0; i < alternate_channel_i_list.size(); ++i) {
// Get alignment displacement
int displacement_y, displacement_x;
std::tie(displacement_y, displacement_x) = alignments[i + 1][y][x];
// Get tile
cv::Mat alt_tile = alternate_channel_i_list[i](cv::Rect(top_left_x + displacement_x, top_left_y + displacement_y, TILE_SIZE, TILE_SIZE));
// Apply FFT
cv::Mat alt_tile_DFT;
alt_tile.convertTo(alt_tile_DFT, CV_32F);
cv::dft(alt_tile_DFT, alt_tile_DFT, cv::DFT_COMPLEX_OUTPUT);
alt_tiles.push_back(alt_tile_DFT);
}
alt_tiles_list[y * num_tiles_col + x] = alt_tiles;
}
}
// 4.2 Temporal Denoising
reference_tiles_DFT = temporal_denoise(reference_tiles_DFT, alt_tiles_list, noise_variance, TEMPORAL_FACTOR);
// 4.3 Spatial Denoising
reference_tiles_DFT = spatial_denoise(reference_tiles_DFT, alternate_channel_i_list.size(), noise_variance, SPATIAL_FACTOR);
//now reference tiles are temporally and spatially denoised
// Apply IFFT on reference tiles (frequency to spatial)
std::vector<cv::Mat> denoised_tiles;
for (auto dft_tile : reference_tiles_DFT) {
cv::Mat denoised_tile;
cv::divide(dft_tile, TILE_SIZE * TILE_SIZE, dft_tile);
cv::dft(dft_tile, denoised_tile, cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT);
denoised_tiles.push_back(denoised_tile);
}
reference_tiles = denoised_tiles;
// 4.4 Cosine Window Merging
// Process tiles through 2D cosine window
std::vector<cv::Mat> windowed_tiles;
for (auto tile : reference_tiles) {
windowed_tiles.push_back(cosineWindow2D(tile));
}
// Merge tiles
return mergeTiles(windowed_tiles, channel_image.rows, channel_image.cols);
}
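// Note on the inverse transform above: cv::dft is invoked without
// cv::DFT_SCALE, so the forward/inverse round trip multiplies every value by
// TILE_SIZE * TILE_SIZE; the explicit cv::divide before the inverse DFT is
// what restores the original magnitudes.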
std::vector<cv::Mat> merge::temporal_denoise(std::vector<cv::Mat> tiles, std::vector<std::vector<cv::Mat>> alt_tiles, std::vector<float> noise_variance, float temporal_factor) {
// goal: temporally denoise using the Wiener filter
// input:
// 1. array of 2D dft tiles of the reference image
// 2. array of 2D dft tiles of the aligned alternate image
// 3. estimated noise variance
// 4. temporal factor
// return: merged image patches dft
// calculate noise scaling
double temporal_noise_scaling = (TILE_SIZE * TILE_SIZE * (2.0 / 16)) * temporal_factor; // use the parameter (the caller passes TEMPORAL_FACTOR)
// loop across tiles
std::vector<cv::Mat> denoised;
for (int i = 0; i < tiles.size(); ++i) {
// sum of pairwise denoising
cv::Mat tile_sum = tiles[i].clone();
double coeff = temporal_noise_scaling * noise_variance[i];
// Ref tile
cv::Mat tile = tiles[i];
// Alt tiles
std::vector<cv::Mat> alt_tiles_i = alt_tiles[i];
for (int j = 0; j < alt_tiles_i.size(); ++j) {
// Alt tile
cv::Mat alt_tile = alt_tiles_i[j];
// Tile difference
cv::Mat diff = tile - alt_tile;
// Calculate absolute difference
cv::Mat complexMats[2];
cv::split(diff, complexMats); // complexMats[0] = Re(D), complexMats[1] = Im(D)
cv::magnitude(complexMats[0], complexMats[1], complexMats[0]); // complexMats[0] = |D|
cv::Mat absolute_diff = complexMats[0].mul(complexMats[0]); // |D|^2
// find shrinkage operator A
cv::Mat shrinkage;
cv::divide(absolute_diff, absolute_diff + coeff, shrinkage);
cv::merge(std::vector<cv::Mat>{shrinkage, shrinkage}, shrinkage);
// Interpolation
tile_sum += alt_tile + diff.mul(shrinkage);
}
// Average by num of frames
cv::divide(tile_sum, alt_tiles_i.size() + 1, tile_sum);
denoised.push_back(tile_sum);
}
return denoised;
}
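// Worked example of the pairwise Wiener shrinkage above:
// A = |D|^2 / (|D|^2 + c), with c = temporal_noise_scaling * noise_variance[i],
// and each pair contributes alt + A * (ref - alt). For a frequency bin where
// |D|^2 = 3c, A = 0.75 and the merged bin stays close to the reference (the
// tiles disagree, likely motion); where |D|^2 = c / 3, A = 0.25 and the
// alternate frame dominates, averaging the noise down.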
std::vector<cv::Mat> merge::spatial_denoise(std::vector<cv::Mat> tiles, int num_alts, std::vector<float> noise_variance, float spatial_factor) {
double spatial_noise_scaling = (TILE_SIZE * TILE_SIZE * (1.0 / 16)) * spatial_factor;
// Calculate |w| using ifftshift
cv::Mat row_distances = cv::Mat::zeros(1, TILE_SIZE, CV_32F);
for(int i = 0; i < TILE_SIZE; ++i) {
row_distances.at<float>(i) = i - offset;
}
row_distances = cv::repeat(row_distances.t(), 1, TILE_SIZE);
cv::Mat col_distances = row_distances.t();
cv::Mat distances;
cv::sqrt(row_distances.mul(row_distances) + col_distances.mul(col_distances), distances);
ifftshift(distances);
std::vector<cv::Mat> denoised;
// Loop through all tiles
for (int i = 0; i < tiles.size(); ++i) {
cv::Mat tile = tiles[i];
float coeff = noise_variance[i] / (num_alts + 1) * spatial_noise_scaling;
// Calculate absolute difference
cv::Mat complexMats[2];
cv::split(tile, complexMats); // complexMats[0] = Re(T), complexMats[1] = Im(T)
cv::magnitude(complexMats[0], complexMats[1], complexMats[0]); // complexMats[0] = |T|
cv::Mat absolute_diff = complexMats[0].mul(complexMats[0]); // |T|^2
// Division
cv::Mat scale;
cv::divide(absolute_diff, absolute_diff + distances * coeff, scale);
cv::merge(std::vector<cv::Mat>{scale, scale}, scale);
denoised.push_back(tile.mul(scale));
}
return denoised;
}
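// The |w| map above acts as a frequency-dependent noise floor: after
// ifftshift the DC bin has distance 0 and passes through unchanged, while
// distance grows toward the Nyquist bins, so high-frequency content is
// shrunk by |T|^2 / (|T|^2 + |w| * coeff) -- strongest where the merged
// frame count (num_alts + 1) left the most residual noise.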
} // namespace hdrplus

@ -1,53 +0,0 @@
#include <iostream>
#include <opencv2/opencv.hpp> // all opencv header
#include <hdrplus/params.h>
namespace hdrplus
{
void setParams(std::shared_ptr<LibRaw>& libraw_ptr, RawpyArgs rawpyArgs){
libraw_ptr->imgdata.params.user_qual = rawpyArgs.demosaic_algorithm;
libraw_ptr->imgdata.params.half_size = rawpyArgs.half_size;
libraw_ptr->imgdata.params.use_camera_wb = rawpyArgs.use_camera_wb;
libraw_ptr->imgdata.params.use_auto_wb = rawpyArgs.use_auto_wb;
libraw_ptr->imgdata.params.no_auto_bright = rawpyArgs.no_auto_bright;
libraw_ptr->imgdata.params.output_color = rawpyArgs.output_color;
libraw_ptr->imgdata.params.gamm[0] = rawpyArgs.gamma[0];
libraw_ptr->imgdata.params.gamm[1] = rawpyArgs.gamma[1];
libraw_ptr->imgdata.params.output_bps = rawpyArgs.output_bps;
}
cv::Mat postprocess(std::shared_ptr<LibRaw>& libraw_ptr, RawpyArgs rawpyArgs){
std::cout<<"postprocessing..."<<std::endl;
// set parameters
setParams(libraw_ptr,rawpyArgs);
std::cout<<"conversion to 16 bit using black and white levels, demosaicking, white balance, color correction..."<<std::endl;
libraw_ptr->dcraw_process();
int errorcode;
libraw_processed_image_t *ret_img = libraw_ptr->dcraw_make_mem_image(&errorcode);
int opencv_type = CV_16UC3; // 16bit RGB
if(ret_img->colors==1){ // grayscale
if(ret_img->bits == 8){ // uint8
opencv_type = CV_8UC1;
}else{ // uint16
opencv_type = CV_16UC1;
}
}else{// RGB
if(ret_img->bits == 8){ //8bit
opencv_type = CV_8UC3;
}else{ // 16bit
opencv_type = CV_16UC3;
}
}
cv::Mat processedImg(ret_img->height,ret_img->width,opencv_type,ret_img->data);
std::cout<<"postprocess finished!"<<std::endl;
return processedImg;
}
}
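A hypothetical standalone use of the two helpers above, demosaicing one raw file outside the pipeline (the file name is illustrative, and RawpyArgs is assumed to carry usable defaults from hdrplus/params.h):

std::shared_ptr<LibRaw> proc = std::make_shared<LibRaw>();
if (proc->open_file("frame.dng") == LIBRAW_SUCCESS && proc->unpack() == LIBRAW_SUCCESS) {
    hdrplus::RawpyArgs args; // default 16-bit sRGB output assumed
    cv::Mat rgb = hdrplus::postprocess(proc, args); // demosaic + WB + color correction
    cv::Mat owned = rgb.clone(); // rgb wraps LibRaw's buffer; clone before proc is released
}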

File diff suppressed because it is too large

@ -1,36 +0,0 @@
// MACHINE GENERATED -- DO NOT EDIT
extern "C" {
struct halide_filter_metadata_t;
void halide_register_argv_and_metadata(
int (*filter_argv_call)(void **),
const struct halide_filter_metadata_t *filter_metadata,
const char * const *extra_key_value_pairs
);
}
extern "C" {
extern int hdrplus_pipeline_argv(void **args);
extern const struct halide_filter_metadata_t *hdrplus_pipeline_metadata();
}
#ifdef HALIDE_REGISTER_EXTRA_KEY_VALUE_PAIRS_FUNC
extern "C" const char * const *HALIDE_REGISTER_EXTRA_KEY_VALUE_PAIRS_FUNC();
#endif // HALIDE_REGISTER_EXTRA_KEY_VALUE_PAIRS_FUNC
namespace halide_nsreg_hdrplus_pipeline {
namespace {
struct Registerer {
Registerer() {
#ifdef HALIDE_REGISTER_EXTRA_KEY_VALUE_PAIRS_FUNC
halide_register_argv_and_metadata(::hdrplus_pipeline_argv, ::hdrplus_pipeline_metadata(), HALIDE_REGISTER_EXTRA_KEY_VALUE_PAIRS_FUNC());
#else
halide_register_argv_and_metadata(::hdrplus_pipeline_argv, ::hdrplus_pipeline_metadata(), nullptr);
#endif // HALIDE_REGISTER_EXTRA_KEY_VALUE_PAIRS_FUNC
}
};
static Registerer registerer;
} // namespace
} // halide_nsreg_hdrplus_pipeline

File diff suppressed because it is too large

@ -1,36 +0,0 @@
// MACHINE GENERATED -- DO NOT EDIT
extern "C" {
struct halide_filter_metadata_t;
void halide_register_argv_and_metadata(
int (*filter_argv_call)(void **),
const struct halide_filter_metadata_t *filter_metadata,
const char * const *extra_key_value_pairs
);
}
extern "C" {
extern int hdrplus_pipeline_argv(void **args);
extern const struct halide_filter_metadata_t *hdrplus_pipeline_metadata();
}
#ifdef HALIDE_REGISTER_EXTRA_KEY_VALUE_PAIRS_FUNC
extern "C" const char * const *HALIDE_REGISTER_EXTRA_KEY_VALUE_PAIRS_FUNC();
#endif // HALIDE_REGISTER_EXTRA_KEY_VALUE_PAIRS_FUNC
namespace halide_nsreg_hdrplus_pipeline {
namespace {
struct Registerer {
Registerer() {
#ifdef HALIDE_REGISTER_EXTRA_KEY_VALUE_PAIRS_FUNC
halide_register_argv_and_metadata(::hdrplus_pipeline_argv, ::hdrplus_pipeline_metadata(), HALIDE_REGISTER_EXTRA_KEY_VALUE_PAIRS_FUNC());
#else
halide_register_argv_and_metadata(::hdrplus_pipeline_argv, ::hdrplus_pipeline_metadata(), nullptr);
#endif // HALIDE_REGISTER_EXTRA_KEY_VALUE_PAIRS_FUNC
}
};
static Registerer registerer;
} // namespace
} // halide_nsreg_hdrplus_pipeline

@ -1,13 +0,0 @@
#ifndef __HDRPLUS__
#define __HDRPLUS__
#include <string>
#include <vector>
#include <opencv2/opencv.hpp> // all opencv header
int doHdrPlus(const std::string& dir_path, const std::string& out_name, const std::vector<std::string>& in_names);
bool doHdrPlus(const std::vector< std::vector<uint8_t> >& images, cv::Mat& mat);
#endif // __HDRPLUS__

File diff suppressed because it is too large

@ -1,39 +0,0 @@
#include "Burst.h"
Halide::Runtime::Buffer<uint16_t> Burst::ToBuffer() const {
if (Raws.empty()) {
return Halide::Runtime::Buffer<uint16_t>();
}
Halide::Runtime::Buffer<uint16_t> result(GetWidth(), GetHeight(),
Raws.size());
for (int i = 0; i < Raws.size(); ++i) {
auto resultSlice = result.sliced(2, i);
Raws[i].CopyToBuffer(resultSlice);
}
return result;
}
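// sliced(2, i) yields a 2D width x height view aliasing frame i of the 3D
// result buffer, so each raw frame is copied straight into its slot without
// a per-frame staging allocation.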
void Burst::CopyToBuffer(Halide::Runtime::Buffer<uint16_t> &buffer) const {
buffer.copy_from(ToBuffer());
}
std::vector<RawImage> Burst::LoadRaws(const std::vector< std::vector<uint8_t> >& images) {
std::vector<RawImage> result;
for (const auto &img : images) {
result.emplace_back(&img[0], img.size());
}
return result;
}
std::vector<RawImage> Burst::LoadRaws(const std::string &dirPath,
std::vector<std::string> &inputs) {
std::vector<RawImage> result;
for (const auto &input : inputs) {
const std::string img_path = dirPath + "/" + input;
result.emplace_back(img_path);
}
return result;
}
const RawImage &Burst::GetRaw(const size_t i) const { return this->Raws[i]; }

@ -1,76 +0,0 @@
#pragma once
#include "InputSource.h"
#include <hdrplus_pipeline.h>
#include <string>
#include <vector>
class Burst {
public:
Burst(std::string dir_path, std::vector<std::string> inputs)
: Dir(std::move(dir_path)), Inputs(std::move(inputs)),
Raws(LoadRaws(Dir, Inputs))
{
}
Burst(const std::vector< std::vector<uint8_t> >& images)
: Raws(LoadRaws(images))
{
}
~Burst() = default;
Burst(const Burst& src)
{
this->Dir = src.Dir;
this->Inputs = src.Inputs;
this->Raws = src.Raws;
}
int GetWidth() const { return Raws.empty() ? -1 : Raws[0].GetWidth(); }
int GetHeight() const { return Raws.empty() ? -1 : Raws[0].GetHeight(); }
int GetBlackLevel() const
{
return Raws.empty() ? -1 : Raws[0].GetScalarBlackLevel();
}
int GetWhiteLevel() const {
return Raws.empty() ? -1 : Raws[0].GetWhiteLevel();
}
WhiteBalance GetWhiteBalance() const {
return Raws.empty() ? WhiteBalance{-1, -1, -1, -1}
: Raws[0].GetWhiteBalance();
}
CfaPattern GetCfaPattern() const {
return Raws.empty() ? CfaPattern::CFA_UNKNOWN : Raws[0].GetCfaPattern();
}
Halide::Runtime::Buffer<float> GetColorCorrectionMatrix() const {
return Raws.empty() ? Halide::Runtime::Buffer<float>()
: Raws[0].GetColorCorrectionMatrix();
}
Halide::Runtime::Buffer<uint16_t> ToBuffer() const;
void CopyToBuffer(Halide::Runtime::Buffer<uint16_t> &buffer) const;
const RawImage &GetRaw(const size_t i) const;
private:
std::string Dir;
std::vector<std::string> Inputs;
std::vector<RawImage> Raws;
private:
static std::vector<RawImage> LoadRaws(const std::string &dirPath,
std::vector<std::string> &inputs);
static std::vector<RawImage> LoadRaws(const std::vector< std::vector<uint8_t> >& images);
};

@ -1,147 +0,0 @@
#include <fstream>
#include <iostream>
#include <stdio.h>
#ifdef _DEBUG
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include <include/stb_image_write.h>
#endif
#include <hdrplus_pipeline.h>
#include "Burst.h"
#include <include/HDRPlus.h>
extern "C" void halide_register_argv_and_metadata(
int (*filter_argv_call)(void **),
const struct halide_filter_metadata_t *filter_metadata,
const char *const *extra_key_value_pairs) {
}
/*
* HDRPlus Class -- Houses file I/O, defines pipeline attributes, and calls
* the main stages of the pipeline.
*/
class HDRPlus {
const Burst &burst;
public:
const Compression c;
const Gain g;
HDRPlus(Burst& burst, const Compression c, const Gain g)
: burst(burst), c(c), g(g)
{
}
Halide::Runtime::Buffer<uint8_t> process() {
const int width = burst.GetWidth();
const int height = burst.GetHeight();
Halide::Runtime::Buffer<uint8_t> output_img(3, width, height);
#ifdef _DEBUG
std::cerr << "Black point: " << burst.GetBlackLevel() << std::endl;
std::cerr << "White point: " << burst.GetWhiteLevel() << std::endl;
#endif
const WhiteBalance wb = burst.GetWhiteBalance();
std::cerr << "RGGB: " << wb.r << " " << wb.g0 << " " << wb.g1 << " " << wb.b
<< std::endl;
Halide::Runtime::Buffer<uint16_t> imgs = burst.ToBuffer();
if (imgs.dimensions() != 3 || imgs.extent(2) < 2) {
return output_img;
#if 0
throw std::invalid_argument(
"The input of HDRPlus must be a 3-dimensional buffer with at least "
"two channels.");
#endif
}
const int cfa_pattern = static_cast<int>(burst.GetCfaPattern());
auto ccm = burst.GetColorCorrectionMatrix();
hdrplus_pipeline(imgs, burst.GetBlackLevel(), burst.GetWhiteLevel(), wb.r,
wb.g0, wb.g1, wb.b, cfa_pattern, ccm, c, g, output_img);
// transpose to account for interleaved layout
output_img.transpose(0, 1);
output_img.transpose(1, 2);
return output_img;
}
#ifdef _DEBUG
static bool save_png(const std::string &dir_path, const std::string &img_name,
const Halide::Runtime::Buffer<uint8_t> &img) {
const std::string img_path = dir_path + "/" + img_name;
const int stride_in_bytes = img.width() * img.channels();
if (!stbi_write_png(img_path.c_str(), img.width(), img.height(),
img.channels(), img.data(), stride_in_bytes)) {
std::cerr << "Unable to write output image '" << img_name << "'"
<< std::endl;
return false;
}
return true;
}
#endif
};
bool doHdrPlus(const std::vector< std::vector<uint8_t> >& images, cv::Mat& mat)
{
Compression c = 3.8f;
Gain g = 1.1f;
Burst burst(images);
HDRPlus hdr_plus(burst, c, g);
Halide::Runtime::Buffer<uint8_t> outputHdr = hdr_plus.process();
#ifdef _DEBUG
HDRPlus::save_png("/sdcard/com.xypower.mpapp/tmp", "2.png", outputHdr);
#endif
int width = outputHdr.width();
int height = outputHdr.height();
int channels = outputHdr.channels();
int jch = 0;
mat = cv::Mat::zeros(height, width, CV_8UC3);
for (int i = 0; i < height; ++i)
{
jch = 0;
for (int j = 0; j < width; ++j)
{
for (int n = 0; n < channels; ++n)
{
mat.at<uchar>(i, jch + n) = (uchar)outputHdr(j, i, n);
}
jch += channels;
}
}
// if (!HDRPlus::save_png(dir_path, out_name, output)) {
return true;
}
#if 0
int doHdrPlus(const std::string& dir_path, const std::string& out_name, const std::vector<std::string>& in_names) {
Compression c = 3.8f;
Gain g = 1.1f;
Burst burst(dir_path, in_names);
HDRPlus hdr_plus(burst, c, g);
Halide::Runtime::Buffer<uint8_t> output = hdr_plus.process();
if (!HDRPlus::save_png(dir_path, out_name, output)) {
return EXIT_FAILURE;
}
return 0;
}
#endif
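A minimal sketch of driving the in-memory entry point above (file names are illustrative; error handling elided):

#include <fstream>
#include <iterator>
#include <include/HDRPlus.h>

std::vector<std::vector<uint8_t>> images;
for (const char* path : {"b0.dng", "b1.dng", "b2.dng"}) { // hypothetical raw files
    std::ifstream in(path, std::ios::binary);
    images.emplace_back(std::istreambuf_iterator<char>(in),
                        std::istreambuf_iterator<char>());
}
cv::Mat out;
if (doHdrPlus(images, out)) {
    cv::imwrite("hdrplus_out.png", out);
}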

@ -1,154 +0,0 @@
#include "InputSource.h"
#include <algorithm>
#include <unordered_map>
#include "LibRaw2DngConverter.h"
RawImage::RawImage(const std::string &path)
: Path(path), RawProcessor(std::make_shared<LibRaw>()) {
// TODO: Check LibRaw parameters.
// RawProcessor->imgdata.params.X = Y;
std::cerr << "Opening " << path << std::endl;
if (int err = RawProcessor->open_file(path.c_str())) {
std::cerr << "Cannot open file " << path
<< " error: " << libraw_strerror(err) << std::endl;
#if 0
throw std::runtime_error("Error opening " + path);
#endif
}
if (int err = RawProcessor->unpack()) {
std::cerr << "Cannot unpack file " << path
<< " error: " << libraw_strerror(err) << std::endl;
#if 0
throw std::runtime_error("Error opening " + path);
#endif
}
if (int ret = RawProcessor->raw2image()) {
std::cerr << "Cannot do raw2image on " << path
<< " error: " << libraw_strerror(ret) << std::endl;
#if 0
throw std::runtime_error("Error opening " + path);
#endif
}
}
RawImage::RawImage(const uint8_t* data, size_t length)
: RawProcessor(std::make_shared<LibRaw>())
{
std::cerr << "Opening raw from memory" << std::endl;
if (int err = RawProcessor->open_buffer((void *)data, length)) {
std::cerr << "Cannot open raw from memory" << " error: " << libraw_strerror(err) << std::endl;
#if 0
throw std::runtime_error("Error opening raw");
#endif
}
if (int err = RawProcessor->unpack()) {
std::cerr << "Cannot unpack raw from memory " << " error: " << libraw_strerror(err) << std::endl;
#if 0
throw std::runtime_error("Error opening " + path);
#endif
}
if (int ret = RawProcessor->raw2image()) {
std::cerr << "Cannot do raw2image" << " error: " << libraw_strerror(ret) << std::endl;
#if 0
throw std::runtime_error("Error opening " + path);
#endif
}
}
WhiteBalance RawImage::GetWhiteBalance() const {
const auto coeffs = RawProcessor->imgdata.color.cam_mul;
// Scale multipliers to green channel
const float r = coeffs[0] / coeffs[1];
const float g0 = 1.f; // same as coeffs[1] / coeffs[1];
const float g1 = 1.f;
const float b = coeffs[2] / coeffs[1];
return WhiteBalance{r, g0, g1, b};
}
void RawImage::CopyToBuffer(Halide::Runtime::Buffer<uint16_t> &buffer) const {
const auto image_data = (uint16_t *)RawProcessor->imgdata.rawdata.raw_image;
const auto raw_width = RawProcessor->imgdata.rawdata.sizes.raw_width;
const auto raw_height = RawProcessor->imgdata.rawdata.sizes.raw_height;
const auto top = RawProcessor->imgdata.rawdata.sizes.top_margin;
const auto left = RawProcessor->imgdata.rawdata.sizes.left_margin;
Halide::Runtime::Buffer<uint16_t> raw_buffer(image_data, raw_width,
raw_height);
buffer.copy_from(raw_buffer.translated({-left, -top}));
}
void RawImage::WriteDng(const std::string &output_path,
const Halide::Runtime::Buffer<uint16_t> &buffer) const {
LibRaw2DngConverter converter(*this);
converter.SetBuffer(buffer);
converter.Write(output_path);
}
std::array<float, 4> RawImage::GetBlackLevel() const {
// See https://www.libraw.org/node/2471
const auto raw_color = RawProcessor->imgdata.color;
const auto base_black_level = static_cast<float>(raw_color.black);
std::array<float, 4> black_level = {
base_black_level + static_cast<float>(raw_color.cblack[0]),
base_black_level + static_cast<float>(raw_color.cblack[1]),
base_black_level + static_cast<float>(raw_color.cblack[2]),
base_black_level + static_cast<float>(raw_color.cblack[3])};
if (raw_color.cblack[4] == 2 && raw_color.cblack[5] == 2) {
for (int x = 0; x < raw_color.cblack[4]; ++x) {
for (int y = 0; y < raw_color.cblack[5]; ++y) {
const auto index = y * 2 + x;
black_level[index] = raw_color.cblack[6 + index];
}
}
}
return black_level;
}
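// Example of the override above: with color.black = 64 and
// cblack = {0, 0, 0, 0, 2, 2, 63, 64, 64, 65}, the 2x2 branch replaces the
// per-channel levels with the pattern values {63, 64, 64, 65} (they are
// assigned, not added to the base); otherwise each channel is
// black + cblack[i].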
int RawImage::GetScalarBlackLevel() const {
const auto black_level = GetBlackLevel();
return static_cast<int>(
*std::min_element(black_level.begin(), black_level.end()));
}
std::string RawImage::GetCfaPatternString() const {
static const std::unordered_map<char, char> CDESC_TO_CFA = {
{'R', 0}, {'G', 1}, {'B', 2}, {'r', 0}, {'g', 1}, {'b', 2}};
const auto &cdesc = RawProcessor->imgdata.idata.cdesc;
return {CDESC_TO_CFA.at(cdesc[RawProcessor->COLOR(0, 0)]),
CDESC_TO_CFA.at(cdesc[RawProcessor->COLOR(0, 1)]),
CDESC_TO_CFA.at(cdesc[RawProcessor->COLOR(1, 0)]),
CDESC_TO_CFA.at(cdesc[RawProcessor->COLOR(1, 1)])};
}
CfaPattern RawImage::GetCfaPattern() const {
const auto cfa_pattern = GetCfaPatternString();
if (cfa_pattern == std::string{0, 1, 1, 2}) {
return CfaPattern::CFA_RGGB;
} else if (cfa_pattern == std::string{1, 0, 2, 1}) {
return CfaPattern::CFA_GRBG;
} else if (cfa_pattern == std::string{2, 1, 1, 0}) {
return CfaPattern::CFA_BGGR;
} else if (cfa_pattern == std::string{1, 2, 0, 1}) {
return CfaPattern::CFA_GBRG;
}
#if 0
throw std::invalid_argument("Unsupported CFA pattern: " + cfa_pattern);
#endif
return CfaPattern::CFA_UNKNOWN;
}
Halide::Runtime::Buffer<float> RawImage::GetColorCorrectionMatrix() const {
const auto raw_color = RawProcessor->imgdata.color;
Halide::Runtime::Buffer<float> ccm(3, 3);
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
ccm(i, j) = raw_color.rgb_cam[j][i];
}
}
return ccm;
}

@ -1,47 +0,0 @@
#pragma once
#include <array>
#include <string>
#include <libraw/libraw.h>
#include "finish.h"
#include <HalideBuffer.h>
class RawImage {
public:
explicit RawImage(const std::string &path);
explicit RawImage(const uint8_t* data, size_t length);
~RawImage() = default;
int GetWidth() const { return RawProcessor->imgdata.rawdata.sizes.width; }
int GetHeight() const { return RawProcessor->imgdata.rawdata.sizes.height; }
int GetScalarBlackLevel() const;
std::array<float, 4> GetBlackLevel() const;
int GetWhiteLevel() const { return RawProcessor->imgdata.color.maximum; }
WhiteBalance GetWhiteBalance() const;
std::string GetCfaPatternString() const;
CfaPattern GetCfaPattern() const;
Halide::Runtime::Buffer<float> GetColorCorrectionMatrix() const;
void CopyToBuffer(Halide::Runtime::Buffer<uint16_t> &buffer) const;
// Writes current RawImage as DNG. If buffer was provided, then use it instead
// of internal buffer.
void WriteDng(const std::string &path,
const Halide::Runtime::Buffer<uint16_t> &buffer = {}) const;
std::shared_ptr<LibRaw> GetRawProcessor() const { return RawProcessor; }
private:
std::string Path;
std::shared_ptr<LibRaw> RawProcessor;
};

@ -1,95 +0,0 @@
#include "LibRaw2DngConverter.h"
#include <unordered_map>
#include <libraw/libraw.h>
#include "InputSource.h"
LibRaw2DngConverter::LibRaw2DngConverter(const RawImage &raw)
: OutputStream(), Raw(raw),
Tiff(SetTiffFields(
TiffPtr(TIFFStreamOpen("", &OutputStream), TIFFClose))) {}
LibRaw2DngConverter::TiffPtr
LibRaw2DngConverter::SetTiffFields(LibRaw2DngConverter::TiffPtr tiff_ptr) {
const auto RawProcessor = Raw.GetRawProcessor();
const auto raw_color = RawProcessor->imgdata.color;
const uint16_t bayer_pattern_dimensions[] = {2, 2};
const auto tiff = tiff_ptr.get();
TIFFSetField(tiff, TIFFTAG_DNGVERSION, "\01\04\00\00");
TIFFSetField(tiff, TIFFTAG_DNGBACKWARDVERSION, "\01\04\00\00");
TIFFSetField(tiff, TIFFTAG_SUBFILETYPE, 0);
TIFFSetField(tiff, TIFFTAG_COMPRESSION, COMPRESSION_NONE);
TIFFSetField(tiff, TIFFTAG_BITSPERSAMPLE, 16);
TIFFSetField(tiff, TIFFTAG_ROWSPERSTRIP, 1);
TIFFSetField(tiff, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);
TIFFSetField(tiff, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_CFA);
TIFFSetField(tiff, TIFFTAG_SAMPLESPERPIXEL, 1);
TIFFSetField(tiff, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
TIFFSetField(tiff, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT);
TIFFSetField(tiff, TIFFTAG_CFAREPEATPATTERNDIM, &bayer_pattern_dimensions);
const std::string cfa = Raw.GetCfaPatternString();
TIFFSetField(tiff, TIFFTAG_CFAPATTERN, cfa.c_str());
TIFFSetField(tiff, TIFFTAG_MAKE, "hdr-plus");
TIFFSetField(tiff, TIFFTAG_UNIQUECAMERAMODEL, "hdr-plus");
const std::array<float, 9> color_matrix = {
raw_color.cam_xyz[0][0], raw_color.cam_xyz[0][1], raw_color.cam_xyz[0][2],
raw_color.cam_xyz[1][0], raw_color.cam_xyz[1][1], raw_color.cam_xyz[1][2],
raw_color.cam_xyz[2][0], raw_color.cam_xyz[2][1], raw_color.cam_xyz[2][2],
};
TIFFSetField(tiff, TIFFTAG_COLORMATRIX1, 9, &color_matrix);
TIFFSetField(tiff, TIFFTAG_CALIBRATIONILLUMINANT1, 21); // D65
const std::array<float, 3> as_shot_neutral = {
1.f / (raw_color.cam_mul[0] / raw_color.cam_mul[1]), 1.f,
1.f / (raw_color.cam_mul[2] / raw_color.cam_mul[1])};
TIFFSetField(tiff, TIFFTAG_ASSHOTNEUTRAL, 3, &as_shot_neutral);
TIFFSetField(tiff, TIFFTAG_CFALAYOUT, 1); // Rectangular (or square) layout
TIFFSetField(
tiff, TIFFTAG_CFAPLANECOLOR, 3,
"\00\01\02"); // RGB
// https://www.awaresystems.be/imaging/tiff/tifftags/cfaplanecolor.html
const std::array<float, 4> black_level = Raw.GetBlackLevel();
TIFFSetField(tiff, TIFFTAG_BLACKLEVEL, 4, &black_level);
static const uint32_t white_level = raw_color.maximum;
TIFFSetField(tiff, TIFFTAG_WHITELEVEL, 1, &white_level);
if (RawProcessor->imgdata.sizes.flip > 0) {
// Seems that LibRaw uses LibTIFF notation.
TIFFSetField(tiff, TIFFTAG_ORIENTATION, RawProcessor->imgdata.sizes.flip);
} else {
TIFFSetField(tiff, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);
}
return tiff_ptr;
}
void LibRaw2DngConverter::SetBuffer(
const Halide::Runtime::Buffer<uint16_t> &buffer) const {
const auto width = buffer.width();
const auto height = buffer.height();
const auto tiff = Tiff.get();
TIFFSetField(tiff, TIFFTAG_IMAGEWIDTH, width);
TIFFSetField(tiff, TIFFTAG_IMAGELENGTH, height);
uint16_t *row_pointer = buffer.data();
for (int row = 0; row < height; row++) {
TIFFWriteScanline(tiff, row_pointer, row, 0);
row_pointer += width;
}
}
void LibRaw2DngConverter::Write(const std::string &path) const {
TIFFCheckpointDirectory(Tiff.get());
TIFFFlush(Tiff.get());
std::ofstream output(path, std::ofstream::binary);
output << OutputStream.str();
}
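A hypothetical round trip through the converter above: load a frame, copy its mosaic into a Halide buffer (or fill the buffer with pipeline output), and write it back out as a DNG (paths are illustrative):

RawImage raw("/sdcard/burst/frame0.dng"); // hypothetical input
Halide::Runtime::Buffer<uint16_t> buf(raw.GetWidth(), raw.GetHeight());
raw.CopyToBuffer(buf); // or overwrite with merged pipeline output
raw.WriteDng("/sdcard/burst/frame0_out.dng", buf); // builds a LibRaw2DngConverter internally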

@ -1,26 +0,0 @@
#pragma once
#include <sstream>
#include <tiffio.h>
#include <tiffio.hxx>
#include <HalideBuffer.h>
class RawImage;
class LibRaw2DngConverter {
using TiffPtr = std::shared_ptr<TIFF>;
TiffPtr SetTiffFields(TiffPtr tiff_ptr);
public:
explicit LibRaw2DngConverter(const RawImage &raw);
void SetBuffer(const Halide::Runtime::Buffer<uint16_t> &buffer) const;
void Write(const std::string &path) const;
private:
std::ostringstream OutputStream;
const RawImage &Raw;
std::shared_ptr<TIFF> Tiff;
};

@ -1,36 +0,0 @@
#ifndef HDRPLUS_FINISH_H_
#define HDRPLUS_FINISH_H_
#include <hdrplus_pipeline.h>
template <class T = float> struct TypedWhiteBalance {
template <class TT>
explicit TypedWhiteBalance(const TypedWhiteBalance<TT> &other)
: r(other.r), g0(other.g0), g1(other.g1), b(other.b) {}
TypedWhiteBalance(T r, T g0, T g1, T b) : r(r), g0(g0), g1(g1), b(b) {}
T r;
T g0;
T g1;
T b;
};
using WhiteBalance = TypedWhiteBalance<float>;
typedef uint16_t BlackPoint;
typedef uint16_t WhitePoint;
typedef float Compression;
typedef float Gain;
enum class CfaPattern : int {
CFA_UNKNOWN = 0,
CFA_RGGB = 1,
CFA_GRBG = 2,
CFA_BGGR = 3,
CFA_GBRG = 4
};
#endif

@ -1,60 +0,0 @@
// Copyright 2014 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
cc_library_shared {
name: "libimg_utils",
srcs: [
"src/EndianUtils.cpp",
"src/FileInput.cpp",
"src/FileOutput.cpp",
"src/SortedEntryVector.cpp",
"src/Input.cpp",
"src/Output.cpp",
"src/Orderable.cpp",
"src/TiffIfd.cpp",
"src/TiffWritable.cpp",
"src/TiffWriter.cpp",
"src/TiffEntry.cpp",
"src/TiffEntryImpl.cpp",
"src/ByteArrayOutput.cpp",
"src/DngUtils.cpp",
"src/StripSource.cpp",
],
shared_libs: [
"liblog",
"libutils",
"libcutils",
],
cflags: [
"-Wall",
"-Wextra",
"-Werror",
"-fvisibility=hidden",
],
product_variables: {
debuggable: {
// Enable assert() in eng builds
cflags: [
"-UNDEBUG",
"-DLOG_NDEBUG=1",
],
},
},
export_include_dirs: ["include"],
}

@ -1,83 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_BYTE_ARRAY_OUTPUT_H
#define IMG_UTILS_BYTE_ARRAY_OUTPUT_H
#include <img_utils/Output.h>
#include <utils/Errors.h>
// #include <utils/Vector.h>
#include <cutils/compiler.h>
#include <stdint.h>
#include <vector>
namespace android {
namespace img_utils {
/**
* Utility class that accumulates written bytes into a buffer.
*/
class ANDROID_API ByteArrayOutput : public Output {
public:
ByteArrayOutput();
virtual ~ByteArrayOutput();
/**
* Open this ByteArrayOutput.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t open();
/**
* Write bytes from the given buffer. The number of bytes given in the count
* argument will be written. Bytes will be written from the given buffer starting
* at the index given in the offset argument.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t write(const uint8_t* buf, size_t offset, size_t count);
/**
* Close this ByteArrayOutput.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t close();
/**
* Get current size of the array of bytes written.
*/
virtual size_t getSize() const;
/**
* Get pointer to array of bytes written. It is not valid to use this pointer if
* open, write, or close is called after this method.
*/
virtual const uint8_t* getArray() const;
protected:
std::vector<uint8_t> mByteArray;
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_BYTE_ARRAY_OUTPUT_H*/

@ -1,232 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_DNG_UTILS_H
#define IMG_UTILS_DNG_UTILS_H
#include <img_utils/ByteArrayOutput.h>
#include <img_utils/EndianUtils.h>
#include <utils/Errors.h>
#include <utils/Log.h>
#include <utils/RefBase.h>
#include <cutils/compiler.h>
#include <stdint.h>
namespace android {
namespace img_utils {
#define NELEMS(x) ((int) (sizeof(x) / sizeof((x)[0])))
#define CLAMP(x, low, high) (((x) > (high)) ? (high) : (((x) < (low)) ? (low) : (x)))
/**
* Utility class for building values for the OpcodeList tags specified
* in the Adobe DNG 1.4 spec.
*/
class ANDROID_API OpcodeListBuilder : public LightRefBase<OpcodeListBuilder> {
public:
// Note that the Adobe DNG 1.4 spec for Bayer phase (defined for the
// FixBadPixelsConstant and FixBadPixelsList opcodes) is incorrect. It's
// inconsistent with the DNG SDK (cf. dng_negative::SetBayerMosaic and
// dng_opcode_FixBadPixelsList::IsGreen), and Adobe confirms that the
// spec should be updated to match the SDK.
enum CfaLayout {
CFA_GRBG = 0,
CFA_RGGB,
CFA_BGGR,
CFA_GBRG,
CFA_NONE,
};
OpcodeListBuilder();
virtual ~OpcodeListBuilder();
/**
* Get the total size of this opcode list in bytes.
*/
virtual size_t getSize() const;
/**
* Get the number of opcodes defined in this list.
*/
virtual uint32_t getCount() const;
/**
* Write the opcode list into the given buffer. This buffer
* must be able to hold at least as many elements as returned
* by calling the getSize() method.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t buildOpList(/*out*/ uint8_t* buf) const;
/**
* Add GainMap opcode(s) for the given metadata parameters. The given
* CFA layout must match the layout of the shading map passed into the
* lensShadingMap parameter.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t addGainMapsForMetadata(uint32_t lsmWidth,
uint32_t lsmHeight,
uint32_t activeAreaTop,
uint32_t activeAreaLeft,
uint32_t activeAreaBottom,
uint32_t activeAreaRight,
CfaLayout cfa,
const float* lensShadingMap);
/**
* Add a GainMap opcode with the given fields. The mapGains array
* must have mapPointsV * mapPointsH * mapPlanes elements.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t addGainMap(uint32_t top,
uint32_t left,
uint32_t bottom,
uint32_t right,
uint32_t plane,
uint32_t planes,
uint32_t rowPitch,
uint32_t colPitch,
uint32_t mapPointsV,
uint32_t mapPointsH,
double mapSpacingV,
double mapSpacingH,
double mapOriginV,
double mapOriginH,
uint32_t mapPlanes,
const float* mapGains);
/**
* Add WarpRectilinear opcode for the given metadata parameters.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t addWarpRectilinearForMetadata(const float* kCoeffs,
uint32_t activeArrayWidth,
uint32_t activeArrayHeight,
float opticalCenterX,
float opticalCenterY);
/**
* Add a WarpRectilinear opcode.
*
* numPlanes - Number of planes included in this opcode.
* opticalCenterX, opticalCenterY - Normalized x,y coordinates of the sensor optical
* center relative to the top,left pixel of the produced images (e.g. [0.5, 0.5]
* gives a sensor optical center in the image center).
* kCoeffs - A list of coefficients for the polynomial equation representing the distortion
* correction. For each plane, 6 coefficients must be included:
* {k_r0, k_r1, k_r2, k_r3, k_t0, k_t1}. See the DNG 1.4 specification for an
* outline of the polynomial used here.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t addWarpRectilinear(uint32_t numPlanes,
double opticalCenterX,
double opticalCenterY,
const double* kCoeffs);
/**
* Add FixBadPixelsList opcode for the given metadata parameters.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t addBadPixelListForMetadata(const uint32_t* hotPixels,
uint32_t xyPairCount,
uint32_t colorFilterArrangement);
/**
* Add FixBadPixelsList opcode.
*
* bayerPhase - 0=top-left of image is red, 1=top-left of image is green pixel in red row,
* 2=top-left of image is green pixel in blue row, 3=top-left of image is
* blue.
* badPointCount - number of (x,y) pairs of bad pixels given in badPointRowColPairs.
* badRectCount - number of (top, left, bottom, right) tuples given in
* badRectTopLeftBottomRightTuples
*
* Returns OK on success, or a negative error code.
*/
virtual status_t addBadPixelList(uint32_t bayerPhase,
uint32_t badPointCount,
uint32_t badRectCount,
const uint32_t* badPointRowColPairs,
const uint32_t* badRectTopLeftBottomRightTuples);
// TODO: Add other Opcode methods
protected:
static const uint32_t FLAG_OPTIONAL = 0x1u;
static const uint32_t FLAG_OPTIONAL_FOR_PREVIEW = 0x2u;
// Opcode IDs
enum {
WARP_RECTILINEAR_ID = 1,
FIX_BAD_PIXELS_LIST = 5,
GAIN_MAP_ID = 9,
};
// LSM mosaic indices
enum {
LSM_R_IND = 0,
LSM_GE_IND = 1,
LSM_GO_IND = 2,
LSM_B_IND = 3,
};
uint32_t mCount;
ByteArrayOutput mOpList;
EndianOutput mEndianOut;
status_t addOpcodePreamble(uint32_t opcodeId);
private:
/**
* Add Bayer GainMap opcode(s) for the given metadata parameters.
* CFA layout must match the layout of the shading map passed into the
* lensShadingMap parameter.
*
* Returns OK on success, or a negative error code.
*/
status_t addBayerGainMapsForMetadata(uint32_t lsmWidth,
uint32_t lsmHeight,
uint32_t activeAreaWidth,
uint32_t activeAreaHeight,
CfaLayout cfa,
const float* lensShadingMap);
/**
* Add Bayer GainMap opcode(s) for the given metadata parameters.
* CFA layout must match the layout of the shading map passed into the
* lensShadingMap parameter.
*
* Returns OK on success, or a negative error code.
*/
status_t addMonochromeGainMapsForMetadata(uint32_t lsmWidth,
uint32_t lsmHeight,
uint32_t activeAreaWidth,
uint32_t activeAreaHeight,
const float* lensShadingMap);
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_DNG_UTILS_H*/

@ -1,250 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_ENDIAN_UTILS
#define IMG_UTILS_ENDIAN_UTILS
#include <img_utils/Output.h>
#include <cutils/compiler.h>
#include <utils/Errors.h>
#include <stdint.h>
#include <endian.h>
#include <assert.h>
namespace android {
namespace img_utils {
/**
* Endianness types supported.
*/
enum ANDROID_API Endianness {
UNDEFINED_ENDIAN, // Default endianness will be used.
BIG,
LITTLE
};
/**
* Convert from the native device endianness to big endian.
*/
template<typename T>
T convertToBigEndian(T in);
/**
* Convert from the native device endianness to little endian.
*/
template<typename T>
T convertToLittleEndian(T in);
/**
* A utility class for writing to an Output with the given endianness.
*/
class ANDROID_API EndianOutput : public Output {
public:
/**
* Wrap the given Output. Calling write methods will result in
* writes to this output.
*/
explicit EndianOutput(Output* out, Endianness end=LITTLE);
virtual ~EndianOutput();
/**
* Call open on the wrapped output.
*/
virtual status_t open();
/**
* Call close on the wrapped output.
*/
virtual status_t close();
/**
* Set the endianness to use when writing.
*/
virtual void setEndianness(Endianness end);
/**
* Get the currently configured endianness.
*/
virtual Endianness getEndianness() const;
/**
* Get the current number of bytes written by this EndianOutput.
*/
virtual uint32_t getCurrentOffset() const;
// TODO: switch write methods to uint32_t instead of size_t,
// the max size of a TIFF file is bounded
/**
* The following methods will write elements from given input buffer to the output.
* Count elements in the buffer will be written with the endianness set for this
* EndianOutput. If the given offset is greater than zero, that many elements will
* be skipped in the buffer before writing.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t write(const uint8_t* buf, size_t offset, size_t count);
virtual status_t write(const int8_t* buf, size_t offset, size_t count);
virtual status_t write(const uint16_t* buf, size_t offset, size_t count);
virtual status_t write(const int16_t* buf, size_t offset, size_t count);
virtual status_t write(const uint32_t* buf, size_t offset, size_t count);
virtual status_t write(const int32_t* buf, size_t offset, size_t count);
virtual status_t write(const uint64_t* buf, size_t offset, size_t count);
virtual status_t write(const int64_t* buf, size_t offset, size_t count);
virtual status_t write(const float* buf, size_t offset, size_t count);
virtual status_t write(const double* buf, size_t offset, size_t count);
protected:
template<typename T>
inline status_t writeHelper(const T* buf, size_t offset, size_t count);
uint32_t mOffset;
Output* mOutput;
Endianness mEndian;
};
template<typename T>
inline status_t EndianOutput::writeHelper(const T* buf, size_t offset, size_t count) {
assert(offset <= count);
status_t res = OK;
size_t size = sizeof(T);
switch(mEndian) {
case BIG: {
for (size_t i = offset; i < count; ++i) {
T tmp = convertToBigEndian<T>(buf[offset + i]);
if ((res = mOutput->write(reinterpret_cast<uint8_t*>(&tmp), 0, size))
!= OK) {
return res;
}
mOffset += size;
}
break;
}
case LITTLE: {
for (size_t i = offset; i < count; ++i) {
T tmp = convertToLittleEndian<T>(buf[offset + i]);
if ((res = mOutput->write(reinterpret_cast<uint8_t*>(&tmp), 0, size))
!= OK) {
return res;
}
mOffset += size;
}
break;
}
default: {
return BAD_VALUE;
}
}
return res;
}
template<>
inline uint8_t convertToBigEndian(uint8_t in) {
return in;
}
template<>
inline int8_t convertToBigEndian(int8_t in) {
return in;
}
template<>
inline uint16_t convertToBigEndian(uint16_t in) {
return htobe16(in);
}
template<>
inline int16_t convertToBigEndian(int16_t in) {
return htobe16(in);
}
template<>
inline uint32_t convertToBigEndian(uint32_t in) {
return htobe32(in);
}
template<>
inline int32_t convertToBigEndian(int32_t in) {
return htobe32(in);
}
template<>
inline uint64_t convertToBigEndian(uint64_t in) {
return htobe64(in);
}
template<>
inline int64_t convertToBigEndian(int64_t in) {
return htobe64(in);
}
template<>
inline uint8_t convertToLittleEndian(uint8_t in) {
return in;
}
template<>
inline int8_t convertToLittleEndian(int8_t in) {
return in;
}
template<>
inline uint16_t convertToLittleEndian(uint16_t in) {
return htole16(in);
}
template<>
inline int16_t convertToLittleEndian(int16_t in) {
return htole16(in);
}
template<>
inline uint32_t convertToLittleEndian(uint32_t in) {
return htole32(in);
}
template<>
inline int32_t convertToLittleEndian(int32_t in) {
return htole32(in);
}
template<>
inline uint64_t convertToLittleEndian(uint64_t in) {
return htole64(in);
}
template<>
inline int64_t convertToLittleEndian(int64_t in) {
return htole64(in);
}
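// Example: on a little-endian device, convertToBigEndian<uint16_t>(0x1234)
// returns 0x3412 via htobe16, while convertToLittleEndian<uint16_t> is a
// byte-wise identity (htole16 is a no-op there).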
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_ENDIAN_UTILS*/

@ -1,76 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_FILE_INPUT_H
#define IMG_UTILS_FILE_INPUT_H
#include <img_utils/Input.h>
#include <cutils/compiler.h>
#include <utils/Errors.h>
#include <utils/String8.h>
#include <stdio.h>
#include <stdint.h>
namespace android {
namespace img_utils {
/**
* Utility class for reading from a file.
*/
class ANDROID_API FileInput : public Input {
public:
/**
* Create a file input for the given path.
*/
explicit FileInput(String8 path);
virtual ~FileInput();
/**
* Open a file descriptor to the path given in the constructor.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t open();
/**
* Read bytes from the file into the given buffer. At most, the number
* of bytes given in the count argument will be read. Bytes will be written
* into the given buffer starting at the index given in the offset argument.
*
* Returns the number of bytes read, or NOT_ENOUGH_DATA if at the end of the file. If an
* error has occurred, this will return a negative error code other than NOT_ENOUGH_DATA.
*/
virtual ssize_t read(uint8_t* buf, size_t offset, size_t count);
/**
* Close the file descriptor to the path given in the constructor.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t close();
private:
FILE *mFp;
String8 mPath;
bool mOpen;
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_INPUT_H*/

@ -1,46 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_FILE_OUTPUT_H
#define IMG_UTILS_FILE_OUTPUT_H
#include <img_utils/Output.h>
#include <cutils/compiler.h>
#include <utils/Errors.h>
#include <utils/String8.h>
#include <stdio.h>
#include <stdint.h>
namespace android {
namespace img_utils {
class ANDROID_API FileOutput : public Output {
public:
explicit FileOutput(String8 path);
virtual ~FileOutput();
virtual status_t open();
virtual status_t write(const uint8_t* buf, size_t offset, size_t count);
virtual status_t close();
private:
FILE *mFp;
String8 mPath;
bool mOpen;
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_FILE_OUTPUT_H*/

@ -1,71 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_INPUT_H
#define IMG_UTILS_INPUT_H
#include <cutils/compiler.h>
#include <utils/Errors.h>
#include <stdint.h>
namespace android {
namespace img_utils {
/**
* Utility class used as a source of bytes.
*/
class ANDROID_API Input {
public:
virtual ~Input();
/**
* Open this Input.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t open();
/**
* Read bytes into the given buffer. At most, the number of bytes given in the
* count argument will be read. Bytes will be written into the given buffer starting
* at the index given in the offset argument.
*
* Returns the number of bytes read, or NOT_ENOUGH_DATA if at the end of the file. If an
* error has occurred, this will return a negative error code other than NOT_ENOUGH_DATA.
*/
virtual ssize_t read(uint8_t* buf, size_t offset, size_t count) = 0;
/**
* Skips bytes in the input.
*
* Returns the number of bytes skipped, or NOT_ENOUGH_DATA if at the end of the file. If an
* error has occurred, this will return a negative error code other than NOT_ENOUGH_DATA.
*/
virtual ssize_t skip(size_t count);
/**
* Close the Input. It is not valid to call open on a previously closed Input.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t close();
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_INPUT_H*/
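// Editor's sketch (hypothetical subclass, not in the original sources): a
// memory-backed Input that illustrates the read() contract documented above,
// including NOT_ENOUGH_DATA at end of input. Assumes <string.h> and
// <algorithm> for memcpy and std::min.
class MemoryInput : public android::img_utils::Input {
public:
    MemoryInput(const uint8_t* data, size_t size) : mData(data), mSize(size), mPos(0) {}
    virtual ssize_t read(uint8_t* buf, size_t offset, size_t count) {
        if (mPos >= mSize) return android::NOT_ENOUGH_DATA;  // at end of input
        size_t n = std::min(count, mSize - mPos);
        memcpy(buf + offset, mData + mPos, n);
        mPos += n;
        return n;  // number of bytes actually read
    }
private:
    const uint8_t* mData;
    size_t mSize;
    size_t mPos;
};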

@ -1,57 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_ORDERABLE
#define IMG_UTILS_ORDERABLE
#include <cutils/compiler.h>
#include <stdint.h>
namespace android {
namespace img_utils {
#define COMPARE_DEF(op) \
inline bool operator op (const Orderable& orderable) const;
/**
* Subclasses of Orderable can be compared and sorted. This is
* intended to be used to create sorted arrays of TIFF entries
* and IFDs.
*/
class ANDROID_API Orderable {
public:
virtual ~Orderable();
/**
* Comparison operators are based on the value returned
* from this method.
*/
virtual uint32_t getComparableValue() const = 0;
COMPARE_DEF(>)
COMPARE_DEF(<)
COMPARE_DEF(>=)
COMPARE_DEF(<=)
COMPARE_DEF(==)
COMPARE_DEF(!=)
};
#undef COMPARE_DEF
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_ORDERABLE*/
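// Editor's sketch (hypothetical, not in the original sources): every operator
// declared by COMPARE_DEF reduces to getComparableValue(), so a subclass only
// needs to supply that one value to become sortable.
class TaggedItem : public android::img_utils::Orderable {
public:
    explicit TaggedItem(uint16_t tag) : mTag(tag) {}
    virtual uint32_t getComparableValue() const { return mTag; }
private:
    uint16_t mTag;
};
// TaggedItem(0x0100) < TaggedItem(0x0112) then compares the tag IDs.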

@ -1,61 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_OUTPUT_H
#define IMG_UTILS_OUTPUT_H
#include <cutils/compiler.h>
#include <utils/Errors.h>
#include <stdint.h>
namespace android {
namespace img_utils {
/**
* Utility class used to output bytes.
*/
class ANDROID_API Output {
public:
virtual ~Output();
/**
* Open this Output.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t open();
/**
* Write bytes from the given buffer. The number of bytes given in the count
* argument will be written. Bytes will be written from the given buffer starting
* at the index given in the offset argument.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t write(const uint8_t* buf, size_t offset, size_t count) = 0;
/**
* Close this Output. It is not valid to call open on a previously closed Output.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t close();
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_OUTPUT_H*/

@ -1,44 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_PAIR_H
#define IMG_UTILS_PAIR_H
#include <cutils/compiler.h>
namespace android {
namespace img_utils {
/**
* Generic pair utility class. Nothing special here.
*/
template<typename F, typename S>
class ANDROID_API Pair {
public:
F first;
S second;
Pair() {}
Pair(const Pair& o) : first(o.first), second(o.second) {}
Pair(const F& f, const S& s) : first(f), second(s) {}
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_PAIR_H*/

@ -1,53 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_SORTED_ENTRY_VECTOR_H
#define IMG_UTILS_SORTED_ENTRY_VECTOR_H
#include <img_utils/TiffEntry.h>
#include <utils/StrongPointer.h>
#include <utils/SortedVector.h>
namespace android {
namespace img_utils {
/**
* Subclass of SortedVector that has been extended to
* do comparisons/lookups based on the tag ID of the entries.
*/
class SortedEntryVector : public SortedVector<sp<TiffEntry> > {
public:
virtual ~SortedEntryVector();
/**
* Returns the index of the entry with the given tag ID, or
* -1 if none exists.
*/
ssize_t indexOfTag(uint16_t tag) const;
protected:
/**
* Compare tag ID.
*/
virtual int do_compare(const void* lhs, const void* rhs) const;
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_SORTED_ENTRY_VECTOR_H*/

@ -1,53 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_STRIP_SOURCE_H
#define IMG_UTILS_STRIP_SOURCE_H
#include <img_utils/Output.h>
#include <cutils/compiler.h>
#include <utils/Errors.h>
#include <stdint.h>
namespace android {
namespace img_utils {
/**
* This class acts as a data source for strips set in a TiffIfd.
*/
class ANDROID_API StripSource {
public:
virtual ~StripSource();
/**
* Write count bytes to the stream.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t writeToStream(Output& stream, uint32_t count) = 0;
/**
* Return the source IFD.
*/
virtual uint32_t getIfd() const = 0;
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_STRIP_SOURCE_H*/
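// Editor's sketch (hypothetical, not in the original sources): a StripSource
// backed by an in-memory buffer, of the kind handed to TiffWriter::write().
class BufferStripSource : public android::img_utils::StripSource {
public:
    BufferStripSource(uint32_t ifd, const uint8_t* data) : mIfd(ifd), mData(data) {}
    virtual android::status_t writeToStream(android::img_utils::Output& stream,
            uint32_t count) {
        return stream.write(mData, 0, count);  // stream exactly count bytes
    }
    virtual uint32_t getIfd() const { return mIfd; }
private:
    uint32_t mIfd;
    const uint8_t* mData;
};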

@ -1,130 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_TIFF_ENTRY
#define IMG_UTILS_TIFF_ENTRY
#include <img_utils/TiffWritable.h>
#include <img_utils/TiffHelpers.h>
#include <img_utils/EndianUtils.h>
#include <cutils/compiler.h>
// #include <utils/String8.h>
#include <utils/Errors.h>
#include <stdint.h>
namespace android {
namespace img_utils {
#define COMPARE_DEF(op) \
inline bool operator op (const TiffEntry& entry) const;
/**
* This class holds a single TIFF IFD entry.
*
* Subclasses are expected to support assignment and copying operations.
*/
class ANDROID_API TiffEntry : public TiffWritable {
public:
virtual ~TiffEntry();
/**
* Write the 12-byte IFD entry to the output. The given offset will be
* set as the tag value if the size of the tag value exceeds the max
* size for the TIFF Value field (4 bytes), and should be word aligned.
*
* Returns OK on success, or a negative error code on failure.
*/
virtual status_t writeTagInfo(uint32_t offset, /*out*/EndianOutput* out) const = 0;
/**
* Get the count set for this entry. This corresponds to the TIFF Count
* field.
*/
virtual uint32_t getCount() const = 0;
/**
* Get the tag id set for this entry. This corresponds to the TIFF Tag
* field.
*/
virtual uint16_t getTag() const = 0;
/**
* Get the type set for this entry. This corresponds to the TIFF Type
* field.
*/
virtual TagType getType() const = 0;
/**
* Get the defined endianness for this entry. If this is defined,
* the tag value will be written with the given byte order.
*/
virtual Endianness getEndianness() const = 0;
/**
* Get the value for this entry. This corresponds to the TIFF Value
* field.
*
* Returns NULL if the value is NULL, or if the type used does not
* match the type of this tag.
*/
template<typename T>
const T* getData() const;
virtual std::string toString() const;
/**
* Force the type used here to be a valid TIFF type.
*
* Returns NULL if the given value is NULL, or if the type given does
* not match the type of the value given.
*/
template<typename T>
static const T* forceValidType(TagType type, const T* value);
virtual const void* getDataHelper() const = 0;
COMPARE_DEF(>)
COMPARE_DEF(<)
protected:
enum {
MAX_PRINT_STRING_LENGTH = 256
};
};
#define COMPARE(op) \
bool TiffEntry::operator op (const TiffEntry& entry) const { \
return getComparableValue() op entry.getComparableValue(); \
}
COMPARE(>)
COMPARE(<)
template<typename T>
const T* TiffEntry::getData() const {
const T* value = reinterpret_cast<const T*>(getDataHelper());
return forceValidType<T>(getType(), value);
}
#undef COMPARE
#undef COMPARE_DEF
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_TIFF_ENTRY*/
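// Editor's sketch (hedged, not in the original sources): getData<T>() returns
// NULL unless T matches the entry's TIFF type, so callers check the pointer
// before indexing up to getCount(). Here uint32_t is assumed to map to LONG.
static void demoReadEntry(const android::sp<android::img_utils::TiffEntry>& entry) {
    const uint32_t* vals = entry->getData<uint32_t>();  // NULL on type mismatch
    if (vals != NULL) {
        for (uint32_t i = 0; i < entry->getCount(); ++i) {
            // use vals[i]
        }
    }
}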

@ -1,219 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_TIFF_ENTRY_IMPL
#define IMG_UTILS_TIFF_ENTRY_IMPL
#include <img_utils/TiffIfd.h>
#include <img_utils/TiffEntry.h>
#include <img_utils/TiffHelpers.h>
#include <img_utils/Output.h>
#include <img_utils/EndianUtils.h>
#include <utils/Log.h>
#include <utils/Errors.h>
// #include <utils/Vector.h>
#include <utils/StrongPointer.h>
#include <assert.h>  // for assert() in writeTagInfo below
#include <stdint.h>
#include <vector>
namespace android {
namespace img_utils {
template<typename T>
class TiffEntryImpl : public TiffEntry {
public:
TiffEntryImpl(uint16_t tag, TagType type, uint32_t count, Endianness end, const T* data);
virtual ~TiffEntryImpl();
status_t writeData(uint32_t offset, /*out*/EndianOutput* out) const;
status_t writeTagInfo(uint32_t offset, /*out*/EndianOutput* out) const;
uint32_t getCount() const;
uint16_t getTag() const;
TagType getType() const;
Endianness getEndianness() const;
size_t getSize() const;
uint32_t getComparableValue() const;
protected:
const void* getDataHelper() const;
uint32_t getActualSize() const;
uint16_t mTag;
uint16_t mType;
uint32_t mCount;
Endianness mEnd;
std::vector<T> mData;
};
template<typename T>
TiffEntryImpl<T>::TiffEntryImpl(uint16_t tag, TagType type, uint32_t count, Endianness end,
const T* data)
: mTag(tag), mType(static_cast<uint16_t>(type)), mCount(count), mEnd(end) {
count = (type == RATIONAL || type == SRATIONAL) ? count * 2 : count;
mData.insert(mData.end(), data, data + count);
}
template<typename T>
TiffEntryImpl<T>::~TiffEntryImpl() {}
template<typename T>
uint32_t TiffEntryImpl<T>::getCount() const {
return mCount;
}
template<typename T>
uint16_t TiffEntryImpl<T>::getTag() const {
return mTag;
}
template<typename T>
TagType TiffEntryImpl<T>::getType() const {
return static_cast<TagType>(mType);
}
template<typename T>
const void* TiffEntryImpl<T>::getDataHelper() const {
return reinterpret_cast<const void*>(&mData[0]);
}
template<typename T>
size_t TiffEntryImpl<T>::getSize() const {
uint32_t total = getActualSize();
WORD_ALIGN(total)
return (total <= OFFSET_SIZE) ? 0 : total;
}
template<typename T>
uint32_t TiffEntryImpl<T>::getActualSize() const {
uint32_t total = sizeof(T) * mCount;
if (getType() == RATIONAL || getType() == SRATIONAL) {
// 2 ints stored for each rational, multiply by 2
total <<= 1;
}
return total;
}
template<typename T>
Endianness TiffEntryImpl<T>::getEndianness() const {
return mEnd;
}
template<typename T>
uint32_t TiffEntryImpl<T>::getComparableValue() const {
return mTag;
}
template<typename T>
status_t TiffEntryImpl<T>::writeTagInfo(uint32_t offset, /*out*/EndianOutput* out) const {
assert((offset % TIFF_WORD_SIZE) == 0);
status_t ret = OK;
BAIL_ON_FAIL(out->write(&mTag, 0, 1), ret);
BAIL_ON_FAIL(out->write(&mType, 0, 1), ret);
BAIL_ON_FAIL(out->write(&mCount, 0, 1), ret);
uint32_t dataSize = getActualSize();
if (dataSize > OFFSET_SIZE) {
BAIL_ON_FAIL(out->write(&offset, 0, 1), ret);
} else {
uint32_t count = mCount;
if (getType() == RATIONAL || getType() == SRATIONAL) {
/**
* Rationals are stored as an array of ints. Each
* rational is represented by 2 ints. To recover the
* size of the array here, multiply the count by 2.
*/
count <<= 1;
}
BAIL_ON_FAIL(out->write(&mData[0], 0, count), ret);
ZERO_TILL_WORD(out, dataSize, ret);
}
return ret;
}
template<typename T>
status_t TiffEntryImpl<T>::writeData(uint32_t /*offset*/, EndianOutput* out) const {
status_t ret = OK;
// Some tags have fixed-endian value output
Endianness tmp = UNDEFINED_ENDIAN;
if (mEnd != UNDEFINED_ENDIAN) {
tmp = out->getEndianness();
out->setEndianness(mEnd);
}
uint32_t count = mCount;
if (getType() == RATIONAL || getType() == SRATIONAL) {
/**
* Rationals are stored as an array of ints. Each
* rational is represented by 2 ints. To recover the
* size of the array here, multiply the count by 2.
*/
count <<= 1;
}
BAIL_ON_FAIL(out->write(&mData[0], 0, count), ret);
if (mEnd != UNDEFINED_ENDIAN) {
out->setEndianness(tmp);
}
// Write to next word alignment
ZERO_TILL_WORD(out, sizeof(T) * count, ret);
return ret;
}
template<>
inline status_t TiffEntryImpl<sp<TiffIfd> >::writeTagInfo(uint32_t offset,
/*out*/EndianOutput* out) const {
assert((offset % TIFF_WORD_SIZE) == 0);
status_t ret = OK;
BAIL_ON_FAIL(out->write(&mTag, 0, 1), ret);
BAIL_ON_FAIL(out->write(&mType, 0, 1), ret);
BAIL_ON_FAIL(out->write(&mCount, 0, 1), ret);
BAIL_ON_FAIL(out->write(&offset, 0, 1), ret);
return ret;
}
template<>
inline uint32_t TiffEntryImpl<sp<TiffIfd> >::getActualSize() const {
uint32_t total = 0;
for (size_t i = 0; i < mData.size(); ++i) {
total += mData[i]->getSize();
}
return total;
}
template<>
inline status_t TiffEntryImpl<sp<TiffIfd> >::writeData(uint32_t offset, EndianOutput* out) const {
status_t ret = OK;
for (uint32_t i = 0; i < mCount; ++i) {
BAIL_ON_FAIL(mData[i]->writeData(offset, out), ret);
offset += mData[i]->getSize();
}
return ret;
}
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_TIFF_ENTRY_IMPL*/
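// Editor's note (worked example, not in the original sources): a RATIONAL
// entry with count = 3 stores three numerator/denominator pairs, i.e. six
// uint32_t values, so getActualSize() = sizeof(uint32_t) * 3 * 2 = 24 bytes.
// Since 24 > OFFSET_SIZE (4), writeTagInfo() emits a 4-byte offset and the
// values themselves are emitted later by writeData().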

@ -1,132 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_TIFF_HELPERS_H
#define IMG_UTILS_TIFF_HELPERS_H
#include <stdint.h>
namespace android {
namespace img_utils {
const uint8_t ZERO_WORD[] = {0, 0, 0, 0};
#define BAIL_ON_FAIL(x, flag) \
if (((flag) = (x)) != OK) return flag;
#define BYTES_TILL_WORD(index) \
((TIFF_WORD_SIZE - ((index) % TIFF_WORD_SIZE)) % TIFF_WORD_SIZE)
#define WORD_ALIGN(count) \
count += BYTES_TILL_WORD(count);
#define ZERO_TILL_WORD(output, index, ret) \
{ \
size_t remaining = BYTES_TILL_WORD(index); \
if (remaining > 0) { \
BAIL_ON_FAIL((output)->write(ZERO_WORD, 0, remaining), ret); \
} \
}
/**
* Basic TIFF header constants.
*/
enum {
BAD_OFFSET = 0,
TIFF_WORD_SIZE = 4, // Size in bytes
IFD_HEADER_SIZE = 2, // Size in bytes
IFD_FOOTER_SIZE = 4, // Size in bytes
TIFF_ENTRY_SIZE = 12, // Size in bytes
MAX_IFD_ENTRIES = UINT16_MAX,
FILE_HEADER_SIZE = 8, // Size in bytes
ENDIAN_MARKER_SIZE = 2, // Size in bytes
TIFF_MARKER_SIZE = 2, // Size in bytes
OFFSET_MARKER_SIZE = 4, // Size in bytes
TIFF_FILE_MARKER = 42,
BIG_ENDIAN_MARKER = 0x4D4Du,
LITTLE_ENDIAN_MARKER = 0x4949u
};
/**
* Constants for the TIFF tag types.
*/
enum TagType {
UNKNOWN_TAGTYPE = 0,
BYTE=1,
ASCII,
SHORT,
LONG,
RATIONAL,
SBYTE,
UNDEFINED,
SSHORT,
SLONG,
SRATIONAL,
FLOAT,
DOUBLE
};
/**
* Sizes of the TIFF entry fields (in bytes).
*/
enum {
TAG_SIZE = 2,
TYPE_SIZE = 2,
COUNT_SIZE = 4,
OFFSET_SIZE = 4
};
/**
* Convenience IFD id constants.
*/
enum {
IFD_0 = 0,
RAW_IFD,
PROFILE_IFD,
PREVIEW_IFD
};
inline size_t getTypeSize(TagType type) {
switch(type) {
case UNDEFINED:
case ASCII:
case BYTE:
case SBYTE:
return 1;
case SHORT:
case SSHORT:
return 2;
case LONG:
case SLONG:
case FLOAT:
return 4;
case RATIONAL:
case SRATIONAL:
case DOUBLE:
return 8;
default:
return 0;
}
}
inline uint32_t calculateIfdSize(size_t numberOfEntries) {
return IFD_HEADER_SIZE + IFD_FOOTER_SIZE + TIFF_ENTRY_SIZE * numberOfEntries;
}
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_TIFF_HELPERS_H*/
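// Editor's note (worked example, not in the original sources): for an IFD
// with 10 entries, calculateIfdSize(10) = IFD_HEADER_SIZE + IFD_FOOTER_SIZE +
// TIFF_ENTRY_SIZE * 10 = 2 + 4 + 120 = 126 bytes. BYTES_TILL_WORD(126) = 2,
// so ZERO_TILL_WORD would pad two zero bytes to reach the next 4-byte word
// boundary.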

@ -1,164 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_TIFF_IFD_H
#define IMG_UTILS_TIFF_IFD_H
#include <img_utils/TiffWritable.h>
#include <img_utils/TiffEntry.h>
#include <img_utils/Output.h>
#include <cutils/compiler.h>
#include <utils/Errors.h>
#include <utils/StrongPointer.h>
#include <stdint.h>
#include <map>
namespace android {
namespace img_utils {
/**
* This class holds a single TIFF Image File Directory (IFD) structure.
*
* This maps to the TIFF IFD structure that is logically composed of:
* - A 2-byte field listing the number of entries.
* - A list of 12-byte TIFF entries.
* - A 4-byte offset to the next IFD.
*/
class ANDROID_API TiffIfd : public TiffWritable {
public:
explicit TiffIfd(uint32_t ifdId);
virtual ~TiffIfd();
/**
* Add a TiffEntry to this IFD or replace an existing entry with the
* same tag ID. No validation is done.
*
* Returns OK on success, or a negative error code on failure.
*/
virtual status_t addEntry(const sp<TiffEntry>& entry);
/**
* Set the pointer to the next IFD. This is used to create a linked
* list of IFDs as defined by the TIFF 6.0 spec., and is not included
* when calculating the size of IFD and entries for the getSize()
* method (unlike SubIFDs).
*/
virtual void setNextIfd(const sp<TiffIfd>& ifd);
/**
* Get the pointer to the next IFD, or NULL if none exists.
*/
virtual sp<TiffIfd> getNextIfd() const;
/**
* Write the IFD data. This includes the IFD header, entries, footer,
* and the corresponding values for each entry (recursively including
* sub-IFDs). The written amount should end on a word boundary, and
* the given offset should be word aligned.
*
* Returns OK on success, or a negative error code on failure.
*/
virtual status_t writeData(uint32_t offset, /*out*/EndianOutput* out) const;
/**
* Get the size of the IFD. This includes the IFD header, entries, footer,
* and the corresponding values for each entry (recursively including
* any sub-IFDs).
*/
virtual size_t getSize() const;
/**
* Get the id of this IFD.
*/
virtual uint32_t getId() const;
/**
* Get an entry with the given tag ID.
*
* Returns a strong pointer to the entry if it exists, or an empty strong
* pointer.
*/
virtual sp<TiffEntry> getEntry(uint16_t tag) const;
/**
* Remove the entry with the given tag ID if it exists.
*/
virtual void removeEntry(uint16_t tag);
/**
* Convenience method to validate and set strip-related image tags.
*
* This sets all strip-related tags, but leaves offset values uninitialized.
* setStripOffset must be called with the desired offset before writing.
* The strip tag values are calculated from the existing tags for image
* dimensions and pixel type set in the IFD.
*
* Does not handle planar image configurations (PlanarConfiguration != 1).
*
* Returns OK on success, or a negative error code.
*/
virtual status_t validateAndSetStripTags();
/**
* Returns true if validateAndSetStripTags has been called, but not setStripOffset.
*/
virtual bool uninitializedOffsets() const;
/**
* Convenience method to set beginning offset for strips.
*
* Call this to update the strip offsets before calling writeData.
*
* Returns OK on success, or a negative error code.
*/
virtual status_t setStripOffset(uint32_t offset);
/**
* Get the total size of the strips in bytes.
*
* This sums the byte count at each strip offset, and returns
* the total count of bytes stored in strips for this IFD.
*/
virtual uint32_t getStripSize() const;
/**
* Get a formatted string representing this IFD.
*/
virtual std::string toString() const;
/**
* Print a formatted string representing this IFD to logcat.
*/
void log() const;
/**
* Get value used to determine sort order.
*/
virtual uint32_t getComparableValue() const;
protected:
virtual uint32_t checkAndGetOffset(uint32_t offset) const;
std::map<uint16_t, sp<TiffEntry> > mEntries;
sp<TiffIfd> mNextIfd;
uint32_t mIfdId;
bool mStripOffsetsInitialized;
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_TIFF_IFD_H*/
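// Editor's sketch (hedged, not in the original sources): assembling an IFD by
// hand; in practice TiffWriter manages this. 'entry' is an sp<TiffEntry>
// produced elsewhere (e.g. by TiffWriter::buildEntry).
static void demoIfd(const android::sp<android::img_utils::TiffEntry>& entry) {
    android::sp<android::img_utils::TiffIfd> ifd =
            new android::img_utils::TiffIfd(android::img_utils::IFD_0);
    ifd->addEntry(entry);                  // replaces any entry with the same tag
    size_t totalBytes = ifd->getSize();    // header + entries + footer + values
    (void) totalBytes;
}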

@ -1,60 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_TIFF_WRITABLE
#define IMG_UTILS_TIFF_WRITABLE
#include <img_utils/Orderable.h>
#include <img_utils/EndianUtils.h>
#include <img_utils/Output.h>
#include <cutils/compiler.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
#include <stdint.h>
namespace android {
namespace img_utils {
/**
* TiffWritable subclasses represent TIFF metadata objects that can be written
* to an EndianOutput object. This is used for TIFF entries and IFDs.
*/
class ANDROID_API TiffWritable : public Orderable, public LightRefBase<TiffWritable> {
public:
TiffWritable();
virtual ~TiffWritable();
/**
* Write the data to the output. The given offset is used to calculate
* the header offset for values written. The offset is defined
* relative to the beginning of the TIFF header, and is word aligned.
*
* Returns OK on success, or a negative error code on failure.
*/
virtual status_t writeData(uint32_t offset, /*out*/EndianOutput* out) const = 0;
/**
* Get the size of the data to write.
*/
virtual size_t getSize() const = 0;
};
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_TIFF_WRITABLE*/

@ -1,328 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IMG_UTILS_TIFF_WRITER_H
#define IMG_UTILS_TIFF_WRITER_H
#include <img_utils/EndianUtils.h>
#include <img_utils/StripSource.h>
#include <img_utils/TiffEntryImpl.h>
#include <img_utils/TagDefinitions.h>
#include <img_utils/TiffIfd.h>
#include <utils/Log.h>
#include <utils/Errors.h>
#include <utils/StrongPointer.h>
#include <cutils/compiler.h>
#include <stdint.h>
#include <vector>
#include <map>
namespace android {
namespace img_utils {
class TiffEntry;
class TiffIfd;
class Output;
/**
* This class holds a collection of TIFF IFDs that can be written as a
* complete DNG file header.
*
* This maps to the TIFF header structure that is logically composed of:
* - An 8-byte file header containing an endianness indicator, the TIFF
* file marker, and the offset to the first IFD.
* - A list of TIFF IFD structures.
*/
class ANDROID_API TiffWriter : public LightRefBase<TiffWriter> {
public:
enum SubIfdType {
SUBIFD = 0,
GPSINFO
};
/**
* Constructs a TiffWriter with the default tag mappings. This enables
* all of the tags defined in TagDefinitions.h, and uses the following
* mapping precedence to resolve collisions:
* (highest precedence) TIFF/EP > DNG > EXIF 2.3 > TIFF 6.0
*/
TiffWriter();
/**
* Constructs a TiffWriter with the given tag mappings. The mapping
* precedence will be in the order that the definition maps are given,
* where the lower index map gets precedence.
*
* This can be used with user-defined definitions, or definitions from
* TagDefinitions.h
*
* The enabledDefinitions mapping object is owned by the caller, and must
* stay alive for the lifespan of the constructed TiffWriter object.
*/
TiffWriter(std::map<uint16_t, const TagDefinition_t*>* enabledDefinitions,
size_t length);
virtual ~TiffWriter();
/**
* Write a TIFF header containing each IFD set. This will recursively
* write all SubIFDs and tags.
*
* Any StripSources passed in will be written to the output as image strips
* at the appropriate offsets. The StripByteCounts, RowsPerStrip, and
* StripOffsets tags must be set to use this. To set these tags in a
* given IFD, use the addStrip method.
*
* Returns OK on success, or a negative error code on failure.
*/
virtual status_t write(Output* out, StripSource** sources, size_t sourcesCount,
Endianness end = LITTLE);
/**
* Write a TIFF header containing each IFD set. This will recursively
* write all SubIFDs and tags.
*
* Image data for strips or tiles must be written separately at the
* appropriate offsets. These offsets must not fall within the file
* header written this way. The size of the header written is given
* by the getTotalSize() method.
*
* Returns OK on success, or a negative error code on failure.
*/
virtual status_t write(Output* out, Endianness end = LITTLE);
/**
* Get the total size in bytes of the TIFF header. This includes all
* IFDs, tags, and values set for this TiffWriter.
*/
virtual uint32_t getTotalSize() const;
/**
* Add an entry to the IFD with the given ID.
*
* Returns OK on success, or a negative error code on failure. Valid
* error codes for this method are:
* - BAD_INDEX - The given tag doesn't exist.
* - BAD_VALUE - The given count doesn't match the required count for
* this tag.
* - BAD_TYPE - The type of the given data isn't compatible with the
* type required for this tag.
* - NAME_NOT_FOUND - No ifd exists with the given ID.
*/
virtual status_t addEntry(const sp<TiffEntry>& entry, uint32_t ifd);
/**
* Build an entry for a known tag and add it to the IFD with the given ID.
* This tag must be defined in one of the definition vectors this TIFF writer
* was constructed with. The count and type are validated.
*
* Returns OK on success, or a negative error code on failure. Valid
* error codes for this method are:
* - BAD_INDEX - The given tag doesn't exist.
* - BAD_VALUE - The given count doesn't match the required count for
* this tag.
* - BAD_TYPE - The type of the given data isn't compatible with the
* type required for this tag.
* - NAME_NOT_FOUND - No ifd exists with the given ID.
*/
template<typename T>
status_t addEntry(uint16_t tag, uint32_t count, const T* data, uint32_t ifd);
/**
* Build an entry for a known tag. This tag must be one of the tags
* defined in one of the definition vectors this TIFF writer was constructed
* with. The count and type are validated. If this succeeds, the resulting
* entry will be placed in the outEntry pointer.
*
* Returns OK on success, or a negative error code on failure. Valid
* error codes for this method are:
* - BAD_INDEX - The given tag doesn't exist.
* - BAD_VALUE - The given count doesn't match the required count for
* this tag.
* - BAD_TYPE - The type of the given data isn't compatible with the
* type required for this tag.
*/
template<typename T>
status_t buildEntry(uint16_t tag, uint32_t count, const T* data,
/*out*/sp<TiffEntry>* outEntry) const;
/**
* Convenience function to set the strip related tags for a given IFD.
*
* Call this before using a StripSource as an input to write.
* The following tags must be set before calling this method:
* - ImageWidth
* - ImageLength
* - SamplesPerPixel
* - BitsPerSample
*
* Returns OK on success, or a negative error code.
*/
virtual status_t addStrip(uint32_t ifd);
/**
* Return the TIFF entry with the given tag ID in the IFD with the given ID,
* or an empty pointer if none exists.
*/
virtual sp<TiffEntry> getEntry(uint16_t tag, uint32_t ifd) const;
/**
* Remove the TIFF entry with the given tag ID in the given IFD if it exists.
*/
virtual void removeEntry(uint16_t tag, uint32_t ifd);
/**
* Create an empty IFD with the given ID and add it to the end of the
* list of IFDs.
*/
virtual status_t addIfd(uint32_t ifd);
/**
* Create an empty IFD with the given ID and add it as a SubIfd of the
* parent IFD.
*/
virtual status_t addSubIfd(uint32_t parentIfd, uint32_t ifd, SubIfdType type = SUBIFD);
/**
* Returns the default type for the given tag ID.
*/
virtual TagType getDefaultType(uint16_t tag) const;
/**
* Returns the default count for a given tag ID, or 0 if this
* tag normally has a variable count.
*/
virtual uint32_t getDefaultCount(uint16_t tag) const;
/**
* Returns true if an IFD with the given ID exists.
*/
virtual bool hasIfd(uint32_t ifd) const;
/**
* Returns true if a definition exists for the given tag ID.
*/
virtual bool checkIfDefined(uint16_t tag) const;
/**
* Returns the name of the tag if a definition exists for the given tag
* ID, or null if no definition exists.
*/
virtual const char* getTagName(uint16_t tag) const;
/**
* Print the currently configured IFDs and entries to logcat.
*/
virtual void log() const;
/**
* Build an entry. No validation is done.
*
* WARNING: Using this method can result in creating poorly formatted
* TIFF files.
*
* Returns a TiffEntry with the given tag, type, count, endianness,
* and data.
*/
template<typename T>
static sp<TiffEntry> uncheckedBuildEntry(uint16_t tag, TagType type,
uint32_t count, Endianness end, const T* data);
/**
* Utility function to build a tag-to-definition mapping from a given
* array of tag definitions.
*/
#if 0
static KeyedVector<uint16_t, const TagDefinition_t*> buildTagMap(
const TagDefinition_t* definitions, size_t length);
#endif
protected:
enum {
DEFAULT_NUM_TAG_MAPS = 4,
};
sp<TiffIfd> findLastIfd();
status_t writeFileHeader(EndianOutput& out);
const TagDefinition_t* lookupDefinition(uint16_t tag) const;
status_t calculateOffsets();
sp<TiffIfd> mIfd;
std::map<uint32_t, sp<TiffIfd> > mNamedIfds;
std::vector<std::map<uint16_t, const TagDefinition_t*> > mTagMaps;
size_t mNumTagMaps;
#if 0
static KeyedVector<uint16_t, const TagDefinition_t*> sTagMaps[];
#endif
};
template<typename T>
status_t TiffWriter::buildEntry(uint16_t tag, uint32_t count, const T* data,
/*out*/sp<TiffEntry>* outEntry) const {
const TagDefinition_t* definition = lookupDefinition(tag);
if (definition == NULL) {
ALOGE("%s: No such tag exists for id %x.", __FUNCTION__, tag);
return BAD_INDEX;
}
uint32_t fixedCount = definition->fixedCount;
if (fixedCount > 0 && fixedCount != count) {
ALOGE("%s: Invalid count %d for tag %x (expects %d).", __FUNCTION__, count, tag,
fixedCount);
return BAD_VALUE;
}
TagType fixedType = definition->defaultType;
if (TiffEntry::forceValidType(fixedType, data) == NULL) {
ALOGE("%s: Invalid type used for tag value for tag %x.", __FUNCTION__, tag);
return BAD_TYPE;
}
*outEntry = new TiffEntryImpl<T>(tag, fixedType, count,
definition->fixedEndian, data);
return OK;
}
template<typename T>
status_t TiffWriter::addEntry(uint16_t tag, uint32_t count, const T* data, uint32_t ifd) {
sp<TiffEntry> outEntry;
status_t ret = buildEntry<T>(tag, count, data, &outEntry);
if (ret != OK) {
ALOGE("%s: Could not build entry for tag %x.", __FUNCTION__, tag);
return ret;
}
return addEntry(outEntry, ifd);
}
template<typename T>
sp<TiffEntry> TiffWriter::uncheckedBuildEntry(uint16_t tag, TagType type, uint32_t count,
Endianness end, const T* data) {
TiffEntryImpl<T>* entry = new TiffEntryImpl<T>(tag, type, count, end, data);
return sp<TiffEntry>(entry);
}
} /*namespace img_utils*/
} /*namespace android*/
#endif /*IMG_UTILS_TIFF_WRITER_H*/
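// Editor's sketch (hedged, not in the original sources): the flow described by
// the comments above. Tag IDs 0x0100/0x0101 (ImageWidth/ImageLength) and the
// dimensions are illustrative; error checks are abbreviated, and uint32_t is
// assumed to match the tags' default LONG type.
static android::status_t demoWriteTiff(android::img_utils::Output* out) {
    android::sp<android::img_utils::TiffWriter> writer =
            new android::img_utils::TiffWriter();
    if (writer->addIfd(android::img_utils::IFD_0) != android::OK) {
        return android::BAD_VALUE;
    }
    uint32_t width = 640, height = 480;
    writer->addEntry(/*tag*/0x0100, /*count*/1, &width, android::img_utils::IFD_0);
    writer->addEntry(/*tag*/0x0101, /*count*/1, &height, android::img_utils::IFD_0);
    return writer->write(out);  // defaults to LITTLE endian output
}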

@ -1,54 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <img_utils/ByteArrayOutput.h>
#include <utils/Log.h>
namespace android {
namespace img_utils {
ByteArrayOutput::ByteArrayOutput() {}
ByteArrayOutput::~ByteArrayOutput() {}
status_t ByteArrayOutput::open() {
return OK;
}
status_t ByteArrayOutput::write(const uint8_t* buf, size_t offset, size_t count) {
if (count == 0) {
return OK;
}
// vector::insert returns an iterator to the first inserted element; with
// count > 0 it can only equal end() if nothing was inserted.
if (mByteArray.insert(mByteArray.end(), buf + offset, buf + offset + count) == mByteArray.end()) {
ALOGE("%s: Failed to write to ByteArrayOutput.", __FUNCTION__);
return BAD_VALUE;
}
return OK;
}
status_t ByteArrayOutput::close() {
mByteArray.clear();
return OK;
}
size_t ByteArrayOutput::getSize() const {
return mByteArray.size();
}
const uint8_t* ByteArrayOutput::getArray() const {
return &mByteArray[0];
}
} /*namespace img_utils*/
} /*namespace android*/
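// Editor's sketch (hedged, not in the original sources): ByteArrayOutput
// accumulates written bytes in memory; callers read them back via getArray()
// and getSize(). This is how OpcodeListBuilder below buffers its opcode list.
static void demoByteArrayOutput() {
    android::img_utils::ByteArrayOutput out;
    out.open();
    const uint8_t header[] = {0x49, 0x49, 0x2A, 0x00};  // 'II', 42: little-endian TIFF marker
    out.write(header, 0, sizeof(header));
    const uint8_t* contents = out.getArray();  // valid until close() clears the buffer
    size_t size = out.getSize();               // 4
    (void) contents;
    (void) size;
}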

@ -1,496 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <img_utils/DngUtils.h>
#include <inttypes.h>
#include <algorithm>
#include <vector>
#include <math.h>
#include <string.h>  // for memcpy in buildOpList
namespace android {
namespace img_utils {
OpcodeListBuilder::OpcodeListBuilder() : mCount(0), mOpList(), mEndianOut(&mOpList, BIG) {
if(mEndianOut.open() != OK) {
ALOGE("%s: Open failed.", __FUNCTION__);
}
}
OpcodeListBuilder::~OpcodeListBuilder() {
if(mEndianOut.close() != OK) {
ALOGE("%s: Close failed.", __FUNCTION__);
}
}
size_t OpcodeListBuilder::getSize() const {
return mOpList.getSize() + sizeof(mCount);
}
uint32_t OpcodeListBuilder::getCount() const {
return mCount;
}
status_t OpcodeListBuilder::buildOpList(uint8_t* buf) const {
uint32_t count = convertToBigEndian(mCount);
memcpy(buf, &count, sizeof(count));
memcpy(buf + sizeof(count), mOpList.getArray(), mOpList.getSize());
return OK;
}
status_t OpcodeListBuilder::addGainMapsForMetadata(uint32_t lsmWidth,
uint32_t lsmHeight,
uint32_t activeAreaTop,
uint32_t activeAreaLeft,
uint32_t activeAreaBottom,
uint32_t activeAreaRight,
CfaLayout cfa,
const float* lensShadingMap) {
status_t err = OK;
uint32_t activeAreaWidth = activeAreaRight - activeAreaLeft;
uint32_t activeAreaHeight = activeAreaBottom - activeAreaTop;
switch (cfa) {
case CFA_RGGB:
case CFA_GRBG:
case CFA_GBRG:
case CFA_BGGR:
err = addBayerGainMapsForMetadata(lsmWidth, lsmHeight, activeAreaWidth,
activeAreaHeight, cfa, lensShadingMap);
break;
case CFA_NONE:
err = addMonochromeGainMapsForMetadata(lsmWidth, lsmHeight, activeAreaWidth,
activeAreaHeight, lensShadingMap);
break;
default:
ALOGE("%s: Unknown CFA layout %d", __FUNCTION__, cfa);
err = BAD_VALUE;
break;
}
return err;
}
status_t OpcodeListBuilder::addBayerGainMapsForMetadata(uint32_t lsmWidth,
uint32_t lsmHeight,
uint32_t activeAreaWidth,
uint32_t activeAreaHeight,
CfaLayout cfa,
const float* lensShadingMap) {
uint32_t redTop = 0;
uint32_t redLeft = 0;
uint32_t greenEvenTop = 0;
uint32_t greenEvenLeft = 1;
uint32_t greenOddTop = 1;
uint32_t greenOddLeft = 0;
uint32_t blueTop = 1;
uint32_t blueLeft = 1;
switch(cfa) {
case CFA_RGGB:
redTop = 0;
redLeft = 0;
greenEvenTop = 0;
greenEvenLeft = 1;
greenOddTop = 1;
greenOddLeft = 0;
blueTop = 1;
blueLeft = 1;
break;
case CFA_GRBG:
redTop = 0;
redLeft = 1;
greenEvenTop = 0;
greenEvenLeft = 0;
greenOddTop = 1;
greenOddLeft = 1;
blueTop = 1;
blueLeft = 0;
break;
case CFA_GBRG:
redTop = 1;
redLeft = 0;
greenEvenTop = 0;
greenEvenLeft = 0;
greenOddTop = 1;
greenOddLeft = 1;
blueTop = 0;
blueLeft = 1;
break;
case CFA_BGGR:
redTop = 1;
redLeft = 1;
greenEvenTop = 0;
greenEvenLeft = 1;
greenOddTop = 1;
greenOddLeft = 0;
blueTop = 0;
blueLeft = 0;
break;
default:
ALOGE("%s: Unknown CFA layout %d", __FUNCTION__, cfa);
return BAD_VALUE;
}
std::vector<float> redMapVector(lsmWidth * lsmHeight);
float *redMap = redMapVector.data();
std::vector<float> greenEvenMapVector(lsmWidth * lsmHeight);
float *greenEvenMap = greenEvenMapVector.data();
std::vector<float> greenOddMapVector(lsmWidth * lsmHeight);
float *greenOddMap = greenOddMapVector.data();
std::vector<float> blueMapVector(lsmWidth * lsmHeight);
float *blueMap = blueMapVector.data();
double spacingV = 1.0 / std::max(1u, lsmHeight - 1);
double spacingH = 1.0 / std::max(1u, lsmWidth - 1);
size_t lsmMapSize = lsmWidth * lsmHeight * 4;
// Split lens shading map channels into separate arrays
size_t j = 0;
for (size_t i = 0; i < lsmMapSize; i += 4, ++j) {
redMap[j] = lensShadingMap[i + LSM_R_IND];
greenEvenMap[j] = lensShadingMap[i + LSM_GE_IND];
greenOddMap[j] = lensShadingMap[i + LSM_GO_IND];
blueMap[j] = lensShadingMap[i + LSM_B_IND];
}
status_t err = addGainMap(/*top*/redTop,
/*left*/redLeft,
/*bottom*/activeAreaHeight,
/*right*/activeAreaWidth,
/*plane*/0,
/*planes*/1,
/*rowPitch*/2,
/*colPitch*/2,
/*mapPointsV*/lsmHeight,
/*mapPointsH*/lsmWidth,
/*mapSpacingV*/spacingV,
/*mapSpacingH*/spacingH,
/*mapOriginV*/0,
/*mapOriginH*/0,
/*mapPlanes*/1,
/*mapGains*/redMap);
if (err != OK) return err;
err = addGainMap(/*top*/greenEvenTop,
/*left*/greenEvenLeft,
/*bottom*/activeAreaHeight,
/*right*/activeAreaWidth,
/*plane*/0,
/*planes*/1,
/*rowPitch*/2,
/*colPitch*/2,
/*mapPointsV*/lsmHeight,
/*mapPointsH*/lsmWidth,
/*mapSpacingV*/spacingV,
/*mapSpacingH*/spacingH,
/*mapOriginV*/0,
/*mapOriginH*/0,
/*mapPlanes*/1,
/*mapGains*/greenEvenMap);
if (err != OK) return err;
err = addGainMap(/*top*/greenOddTop,
/*left*/greenOddLeft,
/*bottom*/activeAreaHeight,
/*right*/activeAreaWidth,
/*plane*/0,
/*planes*/1,
/*rowPitch*/2,
/*colPitch*/2,
/*mapPointsV*/lsmHeight,
/*mapPointsH*/lsmWidth,
/*mapSpacingV*/spacingV,
/*mapSpacingH*/spacingH,
/*mapOriginV*/0,
/*mapOriginH*/0,
/*mapPlanes*/1,
/*mapGains*/greenOddMap);
if (err != OK) return err;
err = addGainMap(/*top*/blueTop,
/*left*/blueLeft,
/*bottom*/activeAreaHeight,
/*right*/activeAreaWidth,
/*plane*/0,
/*planes*/1,
/*rowPitch*/2,
/*colPitch*/2,
/*mapPointsV*/lsmHeight,
/*mapPointsH*/lsmWidth,
/*mapSpacingV*/spacingV,
/*mapSpacingH*/spacingH,
/*mapOriginV*/0,
/*mapOriginH*/0,
/*mapPlanes*/1,
/*mapGains*/blueMap);
return err;
}
status_t OpcodeListBuilder::addMonochromeGainMapsForMetadata(uint32_t lsmWidth,
uint32_t lsmHeight,
uint32_t activeAreaWidth,
uint32_t activeAreaHeight,
const float* lensShadingMap) {
std::vector<float> mapVector(lsmWidth * lsmHeight);
float *map = mapVector.data();
double spacingV = 1.0 / std::max(1u, lsmHeight - 1);
double spacingH = 1.0 / std::max(1u, lsmWidth - 1);
size_t lsmMapSize = lsmWidth * lsmHeight * 4;
// Split lens shading map channels into separate arrays
size_t j = 0;
for (size_t i = 0; i < lsmMapSize; i += 4, ++j) {
map[j] = lensShadingMap[i];
}
status_t err = addGainMap(/*top*/0,
/*left*/0,
/*bottom*/activeAreaHeight,
/*right*/activeAreaWidth,
/*plane*/0,
/*planes*/1,
/*rowPitch*/1,
/*colPitch*/1,
/*mapPointsV*/lsmHeight,
/*mapPointsH*/lsmWidth,
/*mapSpacingV*/spacingV,
/*mapSpacingH*/spacingH,
/*mapOriginV*/0,
/*mapOriginH*/0,
/*mapPlanes*/1,
/*mapGains*/map);
if (err != OK) return err;
return err;
}
status_t OpcodeListBuilder::addGainMap(uint32_t top,
uint32_t left,
uint32_t bottom,
uint32_t right,
uint32_t plane,
uint32_t planes,
uint32_t rowPitch,
uint32_t colPitch,
uint32_t mapPointsV,
uint32_t mapPointsH,
double mapSpacingV,
double mapSpacingH,
double mapOriginV,
double mapOriginH,
uint32_t mapPlanes,
const float* mapGains) {
status_t err = addOpcodePreamble(GAIN_MAP_ID);
if (err != OK) return err;
// Allow this opcode to be skipped if not supported
uint32_t flags = FLAG_OPTIONAL;
err = mEndianOut.write(&flags, 0, 1);
if (err != OK) return err;
const uint32_t NUMBER_INT_ARGS = 11;
const uint32_t NUMBER_DOUBLE_ARGS = 4;
uint32_t totalSize = NUMBER_INT_ARGS * sizeof(uint32_t) + NUMBER_DOUBLE_ARGS * sizeof(double) +
mapPointsV * mapPointsH * mapPlanes * sizeof(float);
err = mEndianOut.write(&totalSize, 0, 1);
if (err != OK) return err;
// Batch writes as much as possible
uint32_t settings1[] = { top,
left,
bottom,
right,
plane,
planes,
rowPitch,
colPitch,
mapPointsV,
mapPointsH };
err = mEndianOut.write(settings1, 0, NELEMS(settings1));
if (err != OK) return err;
double settings2[] = { mapSpacingV,
mapSpacingH,
mapOriginV,
mapOriginH };
err = mEndianOut.write(settings2, 0, NELEMS(settings2));
if (err != OK) return err;
err = mEndianOut.write(&mapPlanes, 0, 1);
if (err != OK) return err;
err = mEndianOut.write(mapGains, 0, mapPointsV * mapPointsH * mapPlanes);
if (err != OK) return err;
mCount++;
return OK;
}
status_t OpcodeListBuilder::addWarpRectilinearForMetadata(const float* kCoeffs,
uint32_t activeArrayWidth,
uint32_t activeArrayHeight,
float opticalCenterX,
float opticalCenterY) {
if (activeArrayWidth <= 1 || activeArrayHeight <= 1) {
ALOGE("%s: Cannot add opcode for active array with dimensions w=%" PRIu32 ", h=%" PRIu32,
__FUNCTION__, activeArrayWidth, activeArrayHeight);
return BAD_VALUE;
}
double normalizedOCX = opticalCenterX / static_cast<double>(activeArrayWidth);
double normalizedOCY = opticalCenterY / static_cast<double>(activeArrayHeight);
normalizedOCX = CLAMP(normalizedOCX, 0, 1);
normalizedOCY = CLAMP(normalizedOCY, 0, 1);
double coeffs[6] = {
kCoeffs[0],
kCoeffs[1],
kCoeffs[2],
kCoeffs[3],
kCoeffs[4],
kCoeffs[5]
};
return addWarpRectilinear(/*numPlanes*/1,
/*opticalCenterX*/normalizedOCX,
/*opticalCenterY*/normalizedOCY,
coeffs);
}
status_t OpcodeListBuilder::addWarpRectilinear(uint32_t numPlanes,
double opticalCenterX,
double opticalCenterY,
const double* kCoeffs) {
status_t err = addOpcodePreamble(WARP_RECTILINEAR_ID);
if (err != OK) return err;
// Allow this opcode to be skipped if not supported
uint32_t flags = FLAG_OPTIONAL;
err = mEndianOut.write(&flags, 0, 1);
if (err != OK) return err;
const uint32_t NUMBER_CENTER_ARGS = 2;
const uint32_t NUMBER_COEFFS = numPlanes * 6;
uint32_t totalSize = (NUMBER_CENTER_ARGS + NUMBER_COEFFS) * sizeof(double) + sizeof(uint32_t);
err = mEndianOut.write(&totalSize, 0, 1);
if (err != OK) return err;
err = mEndianOut.write(&numPlanes, 0, 1);
if (err != OK) return err;
err = mEndianOut.write(kCoeffs, 0, NUMBER_COEFFS);
if (err != OK) return err;
err = mEndianOut.write(&opticalCenterX, 0, 1);
if (err != OK) return err;
err = mEndianOut.write(&opticalCenterY, 0, 1);
if (err != OK) return err;
mCount++;
return OK;
}
status_t OpcodeListBuilder::addBadPixelListForMetadata(const uint32_t* hotPixels,
uint32_t xyPairCount,
uint32_t colorFilterArrangement) {
if (colorFilterArrangement > 3) {
ALOGE("%s: Unknown color filter arrangement %" PRIu32, __FUNCTION__,
colorFilterArrangement);
return BAD_VALUE;
}
return addBadPixelList(colorFilterArrangement, xyPairCount, 0, hotPixels, nullptr);
}
status_t OpcodeListBuilder::addBadPixelList(uint32_t bayerPhase,
uint32_t badPointCount,
uint32_t badRectCount,
const uint32_t* badPointRowColPairs,
const uint32_t* badRectTopLeftBottomRightTuples) {
status_t err = addOpcodePreamble(FIX_BAD_PIXELS_LIST);
if (err != OK) return err;
// Allow this opcode to be skipped if not supported
uint32_t flags = FLAG_OPTIONAL;
err = mEndianOut.write(&flags, 0, 1);
if (err != OK) return err;
const uint32_t NUM_NON_VARLEN_FIELDS = 3;
const uint32_t SIZE_OF_POINT = 2;
const uint32_t SIZE_OF_RECT = 4;
uint32_t totalSize = (NUM_NON_VARLEN_FIELDS + badPointCount * SIZE_OF_POINT +
badRectCount * SIZE_OF_RECT) * sizeof(uint32_t);
err = mEndianOut.write(&totalSize, 0, 1);
if (err != OK) return err;
err = mEndianOut.write(&bayerPhase, 0, 1);
if (err != OK) return err;
err = mEndianOut.write(&badPointCount, 0, 1);
if (err != OK) return err;
err = mEndianOut.write(&badRectCount, 0, 1);
if (err != OK) return err;
if (badPointCount > 0) {
err = mEndianOut.write(badPointRowColPairs, 0, SIZE_OF_POINT * badPointCount);
if (err != OK) return err;
}
if (badRectCount > 0) {
err = mEndianOut.write(badRectTopLeftBottomRightTuples, 0, SIZE_OF_RECT * badRectCount);
if (err != OK) return err;
}
mCount++;
return OK;
}
status_t OpcodeListBuilder::addOpcodePreamble(uint32_t opcodeId) {
status_t err = mEndianOut.write(&opcodeId, 0, 1);
if (err != OK) return err;
uint8_t version[] = {1, 3, 0, 0};
err = mEndianOut.write(version, 0, NELEMS(version));
if (err != OK) return err;
return OK;
}
} /*namespace img_utils*/
} /*namespace android*/
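// Editor's note (worked example, not in the original sources): for a 17x13
// lens shading map written as a single-plane GainMap, addGainMap() computes
// totalSize = 11 * sizeof(uint32_t) + 4 * sizeof(double)
//           + 13 * 17 * 1 * sizeof(float) = 44 + 32 + 884 = 960 bytes,
// and addGainMapsForMetadata() emits four such opcodes (R, Gr, Gb, B) for a
// Bayer CFA.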

@ -1,83 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <img_utils/EndianUtils.h>
#include <assert.h>  // for the size assertions in the float/double writes
namespace android {
namespace img_utils {
EndianOutput::EndianOutput(Output* out, Endianness end)
: mOffset(0), mOutput(out), mEndian(end) {}
EndianOutput::~EndianOutput() {}
status_t EndianOutput::open() {
mOffset = 0;
return mOutput->open();
}
status_t EndianOutput::close() {
return mOutput->close();
}
void EndianOutput::setEndianness(Endianness end) {
mEndian = end;
}
uint32_t EndianOutput::getCurrentOffset() const {
return mOffset;
}
Endianness EndianOutput::getEndianness() const {
return mEndian;
}
status_t EndianOutput::write(const uint8_t* buf, size_t offset, size_t count) {
status_t res = OK;
if((res = mOutput->write(buf, offset, count)) == OK) {
mOffset += count;
}
return res;
}
status_t EndianOutput::write(const int8_t* buf, size_t offset, size_t count) {
return write(reinterpret_cast<const uint8_t*>(buf), offset, count);
}
#define DEFINE_WRITE(_type_) \
status_t EndianOutput::write(const _type_* buf, size_t offset, size_t count) { \
return writeHelper<_type_>(buf, offset, count); \
}
DEFINE_WRITE(uint16_t)
DEFINE_WRITE(int16_t)
DEFINE_WRITE(uint32_t)
DEFINE_WRITE(int32_t)
DEFINE_WRITE(uint64_t)
DEFINE_WRITE(int64_t)
status_t EndianOutput::write(const float* buf, size_t offset, size_t count) {
assert(sizeof(float) == sizeof(uint32_t));
return writeHelper<uint32_t>(reinterpret_cast<const uint32_t*>(buf), offset, count);
}
status_t EndianOutput::write(const double* buf, size_t offset, size_t count) {
assert(sizeof(double) == sizeof(uint64_t));
return writeHelper<uint64_t>(reinterpret_cast<const uint64_t*>(buf), offset, count);
}
} /*namespace img_utils*/
} /*namespace android*/

Some files were not shown because too many files have changed in this diff.