
Implement point cloud conversion

LiuZe 1 year ago
parent
commit
60288d9ceb

+ 539 - 0
NebulaSDK/x86_64/include/VzenseNebula_api.h

@@ -0,0 +1,539 @@
+#ifndef VZENSEDS_API_H
+#define VZENSEDS_API_H
+
+/**
+* @file VzenseNebula_api.h
+* @brief Vzense API header file.
+* Copyright (c) 2019-2022 Vzense Interactive, Inc.
+*/
+
+/*! \mainpage VzenseDS API Documentation
+*
+* \section intro_sec Introduction
+*
+* Welcome to the VzenseDS API documentation. This documentation helps you quickly get started developing programs that interact with the Vzense CW ToF camera (e.g., DS77).
+*/
+
+#include "VzenseNebula_define.h"
+
+/**
+* @brief         Initializes the API on the device. This function must be invoked before any other Vzense APIs.
+* @return        ::VzRetOK if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_Initialize();
+
+/**
+* @brief         Shuts down the API on the device and clears all resources allocated by the API. After invoking this function, no other Vzense APIs can be invoked.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_Shutdown();
+
+/**
+* @brief         Gets the version of the SDK.
+* @return        The SDK version string.
+*/
+VZENSE_C_API_EXPORT const char* VZ_GetSDKVersion();
+
+/**
+* @brief         Returns the number of camera devices currently connected.
+* @param[out]    pDeviceCount    Pointer to a 32-bit integer variable in which to return the device count.
+* @return        ::VzRetOK       if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetDeviceCount(uint32_t* pDeviceCount);
+
+/**
+* @brief         Returns the information for the camera device at <code>deviceIndex</code>.
+* @param[in]     deviceIndex    The index of the device, in the range 0 to device count - 1.
+* @param[out]    pDevicesInfo   Pointer to a buffer in which to store the device information.
+* @return        ::VzRetOK      if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetDeviceInfo(uint32_t deviceIndex, VzDeviceInfo* pDevicesInfo);
+
+/**
+* @brief         Returns the information list for the first <code>deviceCount</code> camera devices.
+* @param[in]     deviceCount         The number of camera devices.
+* @param[out]    pDevicesInfoList    Pointer to a buffer in which to store the device information list.
+* @return        ::VzRetOK           if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetDeviceInfoList(uint32_t deviceCount, VzDeviceInfo* pDevicesInfoList);
+
+/**
+* @brief         Opens the device specified by <code>pURI</code>. The device must be subsequently closed using VZ_CloseDevice().
+* @param[in]     pURI         The URI of the device. See ::VzDeviceInfo for more information.
+* @param[out]    pDevice      Pointer to the handle of the opened device.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_OpenDeviceByUri(const char* pURI, VzDeviceHandle* pDevice);
+
+/**
+* @brief         Opens the device specified by <code>pAlias</code>. The device must be subsequently closed using VZ_CloseDevice().
+* @param[in]     pAlias       The alias of the device. See ::VzDeviceInfo for more information.
+* @param[out]    pDevice      Pointer to the handle of the opened device.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_OpenDeviceByAlias(const char* pAlias, VzDeviceHandle* pDevice);
+
+/**
+* @brief         Opens the device specified by <code>pIP</code>. The device must be subsequently closed using VZ_CloseDevice().
+* @param[in]     pIP          The IP address of the device. See ::VzDeviceInfo for more information.
+* @param[out]    pDevice      Pointer to the handle of the opened device.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_OpenDeviceByIP(const char* pIP, VzDeviceHandle* pDevice);
+
+/**
+* @brief         Closes the device specified by <code>pDevice</code> that was opened using one of the VZ_OpenDeviceBy* functions.
+* @param[in]     pDevice      Pointer to the handle of the device to close.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_CloseDevice(VzDeviceHandle* pDevice);
+
+/**
+* @brief         Starts capturing the image stream on the device specified by <code>device</code>. Invoke VZ_StopStream() to stop capturing the image stream.
+* @param[in]     device       The handle of the device on which to start capturing the image stream.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_StartStream(VzDeviceHandle device);
+
+/**
+* @brief         Stops capturing the image stream on the device specified by <code>device</code> that was started using VZ_StartStream().
+* @param[in]     device       The handle of the device on which to stop capturing the image stream.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_StopStream(VzDeviceHandle device);
+
+/**
+* @brief         Captures the next image frame from the device specified by <code>device</code>. This API must be invoked before capturing frame data using VZ_GetFrame().
+* @param[in]     device         The handle of the device on which to read the next frame.
+* @param[in]     waitTime       The maximum wait time, in milliseconds, in the range (0, 65535).
+*                               Choose the value according to the frame rate: at 30 FPS the theoretical frame interval is about 33 ms,
+*                               so a value of 20 ms may cause VZ_GetFrameReady to return VzRetGetFrameReadyTimeOut (-11).
+*                               The recommended value is 2 * 1000 / FPS.
+* @param[out]    pFrameReady    Pointer to a buffer in which to store the flags indicating which images are ready to be retrieved.
+* @return        ::VzRetOK      if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetFrameReady(VzDeviceHandle device, uint16_t waitTime, VzFrameReady* pFrameReady);
+
+/**
+* @brief         Returns the image data for the current frame from the device specified by <code>device</code>. Before invoking this API, invoke VZ_GetFrameReady() to capture one image frame from the device.
+* @param[in]     device       The handle of the device to capture an image frame from.
+* @param[in]     frameType    The image frame type.
+* @param[out]    pVzFrame     Pointer to a buffer in which to store the returned image data.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetFrame(VzDeviceHandle device, VzFrameType frameType, VzFrame* pVzFrame);
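Taken together, VZ_Initialize through VZ_GetFrame form the basic capture loop. A minimal sketch (error handling mostly omitted; the 30 FPS figure and the frame count are illustrative):

#include "VzenseNebula_api.h"
#include <cstdio>

int main() {
    VZ_Initialize();

    uint32_t deviceCount = 0;
    VZ_GetDeviceCount(&deviceCount);
    if (deviceCount == 0) { VZ_Shutdown(); return 1; }

    VzDeviceInfo info;
    VZ_GetDeviceInfo(0, &info);

    VzDeviceHandle device = nullptr;
    VZ_OpenDeviceByUri(info.uri, &device);
    VZ_StartStream(device);

    for (int i = 0; i < 30; ++i) {
        VzFrameReady ready = {};
        // Recommended wait time is 2 * 1000 / FPS; 30 FPS assumed here.
        if (VZ_GetFrameReady(device, 2 * 1000 / 30, &ready) != VzRetOK)
            continue;  // e.g. VzRetGetFrameReadyTimeOut (-11)
        if (ready.depth) {
            VzFrame depthFrame = {};
            if (VZ_GetFrame(device, VzDepthFrame, &depthFrame) == VzRetOK)
                std::printf("depth frame %u: %ux%u\n", depthFrame.frameIndex,
                            (unsigned)depthFrame.width, (unsigned)depthFrame.height);
        }
    }

    VZ_StopStream(device);
    VZ_CloseDevice(&device);
    VZ_Shutdown();
    return 0;
}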
+
+/**
+* @brief        Sets the working mode of the camera.
+* @param[in]    device      The handle of the device.
+* @param[in]    mode        The work mode of the camera. Switching to VzActiveMode enables the Time filter by default; switching to a slave (trigger) mode disables it by default.
+* @return       ::VzRetOK   if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetWorkMode(VzDeviceHandle device, VzWorkMode mode);
+
+/**
+* @brief        Gets the working mode of the camera.
+* @param[in]    device      The handle of the device.
+* @param[out]   pMode       Pointer to the returned work mode of the camera.
+* @return       ::VzRetOK   if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetWorkMode(VzDeviceHandle device, VzWorkMode* pMode);
+
+/**
+* @brief        Triggers the capture of one image frame. Effective only when the camera is in VzSoftwareTriggerMode.
+* @param[in]    device      The handle of the device.
+* @return       ::VzRetOK   if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetSoftwareSlaveTrigger(VzDeviceHandle device);
+
+/**
+* @brief         Returns the internal intrinsic and distortion coefficient parameters from the device specified by <code>device</code>.
+* @param[in]     device                        The handle of the device from which to get the internal parameters. 
+* @param[in]     sensorType                    The type of sensor (depth or color) from which to get parameter information. Pass in the applicable value defined by ::VzSensorType.
+* @param[out]    pSensorIntrinsicParameters    Pointer to a VzSensorIntrinsicParameters variable in which to store the parameter values.
+* @return        ::VzRetOK                     if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetSensorIntrinsicParameters(VzDeviceHandle device, VzSensorType sensorType, VzSensorIntrinsicParameters* pSensorIntrinsicParameters);
+
+/**
+* @brief         Returns the camera rotation and translation coefficient parameters from the device specified by <code>device</code>.
+* @param[in]     device                        The handle of the device from which to get the extrinsic parameters. 
+* @param[out]    pSensorExtrinsicParameters    Pointer to a ::VzSensorExtrinsicParameters variable in which to store the parameters.
+* @return        ::VzRetOK                     if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetSensorExtrinsicParameters(VzDeviceHandle device, VzSensorExtrinsicParameters* pSensorExtrinsicParameters);
+
+/**
+* @brief         Gets the firmware version number.
+* @param[in]     device              The handle of the device from which to get the firmware version.
+* @param[out]    pFirmwareVersion    Pointer to a buffer in which to store the returned firmware version string.
+* @param[in]     length              The size of the buffer, which must be at least 64 bytes.
+* @return        ::VzRetOK           if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetFirmwareVersion(VzDeviceHandle device, char* pFirmwareVersion, int length);
+
+/**
+* @brief         Gets the MAC address from the device specified by <code>device</code>.
+* @param[in]     device         The handle of the device.
+* @param[out]    pMACAddress    Pointer to a buffer in which to store the device MAC address. The buffer must be at least 18 bytes; the returned string is null-terminated.
+* @return        ::VzRetOK      if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetDeviceMACAddress(VzDeviceHandle device, char* pMACAddress);
+
+/**
+* @brief        Sets the IR GMM gain on a device.
+* @param[in]    device       The handle of the device on which to set the GMM gain.
+* @param[in]    gmmgain      The GMM gain value to set, in the range [0, 255]. See ::VzGMMGain for more information.
+* @return       ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetIRGMMGain(VzDeviceHandle device, uint8_t gmmgain);
+
+/**
+* @brief        Returns the device's IR GMM gain.
+* @param[in]    device       The handle of the device from which to get the GMM gain.
+* @param[out]   pGmmgain      Pointer to a variable in which to store the returned GMM gain.
+* @return       ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetIRGMMGain(VzDeviceHandle device, uint8_t* pGmmgain);
+
+/**
+* @brief         Sets the color image pixel format on the device specified by <code>device</code>. Currently only RGB and BGR formats are supported.
+* @param[in]     device         The handle of the device to set the pixel format. 
+* @param[in]     pixelFormat    The color pixel format to use. Pass in one of the values defined by ::VzPixelFormat. Currently only <code>VzPixelFormatRGB888</code> and <code>VzPixelFormatBGR888</code> are supported.
+* @return        ::VzRetOK      if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetColorPixelFormat(VzDeviceHandle device, VzPixelFormat pixelFormat);
+
+/**
+* @brief        Sets the color frame resolution.
+* @param[in]    device       The handle of the device.
+* @param[in]    w            The width of the color image.
+* @param[in]    h            The height of the color image.
+* @return       ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetColorResolution(VzDeviceHandle device, int w, int h);
+
+/**
+* @brief        Returns the color frame resolution.
+* @param[in]    device       The handle of the device.
+* @param[out]   pW           The width of the color image.
+* @param[out]   pH           The height of the color image.
+* @return       ::VzRetOK      if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetColorResolution(VzDeviceHandle device, int* pW, int* pH);
+
+/**
+* @brief        Gets the list of image resolutions supported by the sensor.
+* @param[in]    device         The handle of the device.
+* @param[in]    type           The sensor type.
+* @param[out]   pList          Pointer to the returned list of supported resolutions.
+* @return       ::VzRetOK      if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VzGetSupportedResolutionList(VzDeviceHandle device, VzSensorType type, VzResolutionList* pList);
+
+/**
+* @brief         Sets the ToF frame rate. This call is slow, taking about 500 ms.
+* @param[in]     device       The handle of the device on which to set the frame rate.
+* @param[in]     value        The frame rate value, in the range [1, 25].
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetFrameRate(VzDeviceHandle device, int value);
+/**
+* @brief         Gets the ToF frame rate.
+* @param[in]     device       The handle of the device on which to get the frame rate.
+* @param[out]    pValue       Pointer to the returned frame rate value.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetFrameRate(VzDeviceHandle device, int* pValue);
+
+/**
+* @brief        Sets the exposure control mode of the sensor.
+* @param[in]    device          The handle of the device on which to set the exposure control mode.
+* @param[in]    sensorType      The type of sensor (depth or color) on which to set the mode. Pass in the applicable value defined by ::VzSensorType.
+* @param[in]    controlMode     The exposure control mode.
+* @return       ::VzRetOK       if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetExposureControlMode(VzDeviceHandle device, VzSensorType sensorType, VzExposureControlMode controlMode);
+
+/**
+* @brief         Gets the exposure control mode of the sensor.
+* @param[in]     device           The handle of the device on which to get the exposure control mode.
+* @param[in]     sensorType       The type of sensor (depth or color) from which to get the mode. Pass in the applicable value defined by ::VzSensorType.
+* @param[out]    pControlMode     Pointer to the returned exposure control mode.
+* @return        ::VzRetOK        if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetExposureControlMode(VzDeviceHandle device, VzSensorType sensorType, VzExposureControlMode* pControlMode);
+
+/**
+* @brief        Sets the exposure time of the sensor, in microseconds.
+* @param[in]    device          The handle of the device on which to set the exposure time.
+* @param[in]    sensorType      The type of sensor (depth or color) on which to set the exposure time. Pass in the applicable value defined by ::VzSensorType.
+* @param[in]    exposureTime    The exposure time parameters. See ::VzExposureTimeParams.
+* @return       ::VzRetOK       if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetExposureTime(VzDeviceHandle device, VzSensorType sensorType, VzExposureTimeParams exposureTime);
+
+/**
+* @brief         Gets the exposure time of the sensor, in microseconds.
+* @param[in]     device           The handle of the device on which to get the exposure time.
+* @param[in]     sensorType       The type of sensor (depth or color) from which to get the exposure time. Pass in the applicable value defined by ::VzSensorType.
+* @param[out]    pExposureTime    Pointer to the returned exposure time parameters.
+* @return        ::VzRetOK        if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetExposureTime(VzDeviceHandle device, VzSensorType sensorType, VzExposureTimeParams* pExposureTime);
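For example, switching the ToF sensor to manual exposure could look like the following sketch (the 1000 µs value is illustrative, and `device` is an already-opened handle):

VzExposureTimeParams exposure;
exposure.mode = VzExposureControlMode_Manual;
exposure.exposureTime = 1000;  // microseconds; illustrative value

VZ_SetExposureControlMode(device, VzToFSensor, VzExposureControlMode_Manual);
VZ_SetExposureTime(device, VzToFSensor, exposure);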
+
+/**
+* @brief        Sets the parameters of the Time filter.
+* @param[in]    device       The handle of the device.
+* @param[in]    params       The parameter values to set. See ::VzTimeFilterParams.
+* @return       ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/ 
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetTimeFilterParams(VzDeviceHandle device, VzTimeFilterParams params);
+
+/**
+* @brief         Get the parameters of the Time Filter feature.
+* @param[in]     device       The handle of the device
+* @param[out]    pParams      Pointer to a variable in which to store the returned value.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetTimeFilterParams(VzDeviceHandle device, VzTimeFilterParams *pParams);
+
+/**
+* @brief         Sets the parameters of the Confidence filter.
+* @param[in]     device       The handle of the device.
+* @param[in]     params       The parameter values to set. See ::VzConfidenceFilterParams.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetConfidenceFilterParams(VzDeviceHandle device, VzConfidenceFilterParams params);
+
+/**
+* @brief         Get the parameters of the ConfidenceFilter feature.
+* @param[in]     device       The handle of the device
+* @param[out]    pParams      Pointer to a variable in which to store the returned value.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetConfidenceFilterParams(VzDeviceHandle device, VzConfidenceFilterParams *pParams);
+
+/**
+* @brief        Sets the parameters of the FlyingPixel filter.
+* @param[in]    device       The handle of the device.
+* @param[in]    params       The parameter values to set. See ::VzFlyingPixelFilterParams.
+* @return       ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetFlyingPixelFilterParams(VzDeviceHandle device, const VzFlyingPixelFilterParams params);
+
+/**
+* @brief         Gets the parameters of the FlyingPixel filter.
+* @param[in]     device       The handle of the device.
+* @param[out]    params       Pointer to a variable in which to store the returned value.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetFlyingPixelFilterParams(VzDeviceHandle device, VzFlyingPixelFilterParams* params);
+
+/**
+* @brief        Enables or disables the FillHole filter
+* @param[in]    device       The handle of the device.
+* @param[in]    bEnabled     Set to <code>true</code> to enable the feature or <code>false</code> to disable the feature.
+* @return       ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetFillHoleFilterEnabled(VzDeviceHandle device, bool bEnabled);
+
+/**
+* @brief         Returns the Boolean value of whether the FillHole Filter feature is enabled or disabled.
+* @param[in]     device       The handle of the device
+* @param[out]    pEnabled     Pointer to a variable in which to store the returned Boolean value.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetFillHoleFilterEnabled(VzDeviceHandle device, bool *pEnabled);
+
+/**
+* @brief        Enables or disables the Spatial filter
+* @param[in]    device       The handle of the device.
+* @param[in]    bEnabled     Set to <code>true</code> to enable the feature or <code>false</code> to disable the feature.
+* @return       ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetSpatialFilterEnabled(VzDeviceHandle device, bool bEnabled);
+
+/**
+* @brief         Returns the Boolean value of whether the Spatial Filter feature is enabled or disabled.
+* @param[in]     device       The handle of the device
+* @param[out]    pEnabled     Pointer to a variable in which to store the returned Boolean value.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetSpatialFilterEnabled(VzDeviceHandle device, bool *pEnabled);
+
+/**
+* @brief         Enables or disables transforming the color image into the geometry of the depth sensor. When enabled, VZ_GetFrame() can\n
+*                be invoked with ::VzTransformColorImgToDepthSensorFrame as the frame type to get a color image in which each pixel matches the\n
+*                corresponding pixel coordinates of the depth sensor. The resolution of the transformed color frame is the same as that\n
+*                of the depth image.
+* @param[in]     device       The handle of the device on which to enable or disable mapping.
+* @param[in]     bEnabled     Set to <code>true</code> to enable the feature or <code>false</code> to disable the feature.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetTransformColorImgToDepthSensorEnabled(VzDeviceHandle device, bool bEnabled);
+
+/**
+* @brief         Returns whether the color-image-to-depth-sensor-space transform feature is enabled.
+* @param[in]     device       The handle of the device.
+* @param[out]    bEnabled     Pointer to a variable in which to store the returned Boolean value.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetTransformColorImgToDepthSensorEnabled(VzDeviceHandle device, bool *bEnabled);
+
+/**
+* @brief         Enables or disables transforming the depth map into the geometry of the color sensor. When enabled, VZ_GetFrame() can\n
+*                be invoked with ::VzTransformDepthImgToColorSensorFrame as the frame type to get a depth image in which each pixel matches the\n
+*                corresponding pixel coordinates of the color sensor. The resolution of the transformed depth frame is the same as that\n
+*                of the color image.
+* @param[in]     device       The handle of the device on which to enable or disable mapping.
+* @param[in]     bEnabled     Set to <code>true</code> to enable the feature or <code>false</code> to disable the feature.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetTransformDepthImgToColorSensorEnabled(VzDeviceHandle device, bool bEnabled);
+
+/**
+* @brief         Returns whether the depth-image-to-color-sensor-space transform feature is enabled.
+* @param[in]     device       The handle of the device.
+* @param[out]    bEnabled     Pointer to a variable in which to store the returned Boolean value.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetTransformDepthImgToColorSensorEnabled(VzDeviceHandle device, bool *bEnabled);
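A sketch of fetching color-aligned depth, assuming the stream is already running and `device` is an open handle (the 66 ms wait is illustrative):

VZ_SetTransformDepthImgToColorSensorEnabled(device, true);

VzFrameReady ready = {};
if (VZ_GetFrameReady(device, 66, &ready) == VzRetOK && ready.transformedDepth) {
    VzFrame alignedDepth = {};
    // Each pixel now lines up with the corresponding pixel of the color image.
    VZ_GetFrame(device, VzTransformDepthImgToColorSensorFrame, &alignedDepth);
}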
+
+/**
+* @brief         Maps a point in the depth image to the corresponding point in color sensor space.
+* @param[in]     device           The handle of the device on which to perform the operation.
+* @param[in]     depthPoint       The point in the depth frame.
+* @param[in]     colorSize        The size (x = width, y = height) of the color frame.
+* @param[out]    pPointInColor    Pointer to the returned point in the color frame.
+* @return        ::VzRetOK        if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_TransformedDepthPointToColorPoint(const VzDeviceHandle device, const VzDepthVector3 depthPoint, const VzVector2u16 colorSize, VzVector2u16* pPointInColor);
+
+/**
+* @brief         Converts the input points from depth coordinate space to world coordinate space.
+* @param[in]     device          The handle of the device on which to perform the operation.
+* @param[in]     pDepthVector    Pointer to a buffer containing the x, y, and z values of the depth coordinates to be converted. \n
+*                                x and y are measured in pixels, where 0, 0 is located at the top left corner of the image. \n
+*                                z is measured in millimeters, based on the ::VzPixelFormat depth frame.
+* @param[out]    pWorldVector    Pointer to a buffer in which to output the converted x, y, and z values of the world coordinates, measured in millimeters.
+* @param[in]     pointCount      The number of points to convert.
+* @param[in]     pSensorParam    The intrinsic parameters for the depth sensor. See ::VzSensorIntrinsicParameters.
+* @return        ::VzRetOK       if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_ConvertDepthToPointCloud(VzDeviceHandle device, VzDepthVector3* pDepthVector, VzVector3f* pWorldVector, int32_t pointCount, VzSensorIntrinsicParameters* pSensorParam);
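A single-point sketch, assuming `device` is open and `depthMM` holds a valid depth reading:

VzSensorIntrinsicParameters intrinsics;
VZ_GetSensorIntrinsicParameters(device, VzToFSensor, &intrinsics);

VzDepthVector3 depthPoint = {320, 240, depthMM};  // pixel (320, 240); illustrative coordinates
VzVector3f worldPoint;
VZ_ConvertDepthToPointCloud(device, &depthPoint, &worldPoint, 1, &intrinsics);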
+
+/**
+* @brief         Converts an input depth frame from depth coordinate space to world coordinate space on the device. Currently supported depth frame types are VzDepthFrame and VzTransformDepthImgToColorSensorFrame.
+* @param[in]     device          The handle of the device on which to perform the operation.
+* @param[in]     pDepthFrame     The depth frame.
+* @param[out]    pWorldVector    Pointer to a buffer in which to output the converted x, y, and z values of the world coordinates, measured in millimeters. The length of pWorldVector must be (VzFrame.width * VzFrame.height).
+* @return        ::VzRetOK       if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_ConvertDepthFrameToPointCloudVector(VzDeviceHandle device, const VzFrame* pDepthFrame, VzVector3f* pWorldVector);
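This is the conversion exercised by this commit. A minimal sketch, assuming `depthFrame` was fetched via VZ_GetFrame with VzDepthFrame:

#include <vector>

std::vector<VzVector3f> cloud((size_t)depthFrame.width * depthFrame.height);
if (VZ_ConvertDepthFrameToPointCloudVector(device, &depthFrame, cloud.data()) == VzRetOK) {
    for (const VzVector3f& p : cloud) {
        // p.x, p.y, p.z are world coordinates in millimeters.
    }
}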
+/**
+* @brief        Sets hotplug status callback function
+* @param[in]    pCallback    Pointer to the callback function. See ::PtrHotPlugStatusCallback 
+* @param[in]    pUserData    Pointer to the user data. See ::PtrHotPlugStatusCallback
+* @return       ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetHotPlugStatusCallback(PtrHotPlugStatusCallback pCallback, const void* pUserData);
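A matching callback sketch; the signature comes from ::PtrHotPlugStatusCallback in VzenseNebula_types.h:

#include <cstdio>

static void OnHotPlug(const VzDeviceInfo* pInfo, int state, void* pUserData) {
    // state: 0 = device added, 1 = device removed
    std::printf("device %s %s\n", pInfo->alias, state == 0 ? "added" : "removed");
}

// Register once; pUserData may be null.
VZ_SetHotPlugStatusCallback(OnHotPlug, nullptr);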
+
+/**
+* @brief        Reboot the camera.
+* @param[in]    device          The handle of the device
+* @return       ::VzRetOK       if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_RebootDevie(VzDeviceHandle device);
+
+/**
+* @brief        Sets the corresponding property value for the device.
+* @param[in]    device          The handle of the device on which to set the property value.
+* @param[in]    propertyKey     The key of the property to set on the device.
+* @param[in]    pData           Pointer to a buffer containing the property value.
+* @param[in]    dataSize        The size, in bytes, of the property value contained in pData.
+* @return       ::VzRetOK       if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetProperty(VzDeviceHandle device, const char* propertyKey, const void* pData, uint32_t dataSize);
+
+/**
+* @brief         Returns a specific property value from the device.
+* @param[in]     device          The handle of the device from which to get the property value.
+* @param[in]     propertyKey     The key of the property to get from the device.
+* @param[out]    pData           Pointer to a buffer in which to store the returned property value.
+* @param[in]     dataSize        The size, in bytes, of the buffer pointed to by pData.
+* @return        ::VzRetOK       if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetProperty(VzDeviceHandle device, const char* propertyKey, void* pData, uint32_t dataSize);
+
+/**
+* @brief         Enables or disables the HDR mode of the ToF sensor; valid with VzExposureControlMode_Manual. Enabled by default, so to switch to VzExposureControlMode_Auto, disable HDR mode first.
+* @param[in]     device       The handle of the device on which to enable or disable the feature.
+* @param[in]     bEnabled     Set to <code>true</code> to enable the feature or <code>false</code> to disable the feature.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetHDRModeEnabled(VzDeviceHandle device, bool bEnabled);
+/**
+* @brief         Returns whether the HDR mode of the ToF sensor is enabled.
+* @param[in]     device       The handle of the device.
+* @param[out]    bEnabled     Pointer to a variable in which to store the returned Boolean value.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetHDRModeEnabled(VzDeviceHandle device, bool *bEnabled);
+
+/**
+* @brief         Sets the input signal parameters for hardware trigger.
+* @param[in]     device       The handle of the device.
+* @param[in]     params       The parameter values to set. See ::VzInputSignalParamsForHWTrigger.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetInputSignalParamsForHWTrigger(VzDeviceHandle device, VzInputSignalParamsForHWTrigger params);
+
+/**
+* @brief         Gets the input signal parameters for hardware trigger.
+* @param[in]     device       The handle of the device
+* @param[out]    pParams      Pointer to a variable in which to store the returned value.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetInputSignalParamsForHWTrigger(VzDeviceHandle device, VzInputSignalParamsForHWTrigger *pParams);
+
+/**
+* @brief         Sets the output signal parameters.
+* @param[in]     device       The handle of the device.
+* @param[in]     params       The parameter values to set. See ::VzOutputSignalParams.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetOutputSignalParams(VzDeviceHandle device, VzOutputSignalParams params);
+
+/**
+* @brief         Get the output signal parameters.
+* @param[in]     device       The handle of the device
+* @param[out]    pParams      Pointer to a variable in which to store the returned value.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetOutputSignalParams(VzDeviceHandle device, VzOutputSignalParams *pParams);
+
+/**
+* @brief         Sets parameters from a JSON file that can be saved by NebulaGUITool.
+* @param[in]     device       The handle of the device.
+* @param[in]     pfilePath    Pointer to the path of the JSON file.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetParamsByJson(VzDeviceHandle device, char* pfilePath);
+
+/**
+* @brief         Sets the color gain. Effective when the exposure mode of the RGB sensor is VzExposureControlMode_Manual.
+* @param[in]     device       The handle of the device.
+* @param[in]     params       The color gain value, in the range [1.0, 15.5].
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_SetColorGain(VzDeviceHandle device, float params);
+
+/**
+* @brief         Gets the color gain.
+* @param[in]     device       The handle of the device.
+* @param[out]    pParams      Pointer to the returned color gain value.
+* @return        ::VzRetOK    if the function succeeded, or one of the error values defined by ::VzReturnStatus.
+*/
+VZENSE_C_API_EXPORT VzReturnStatus VZ_GetColorGain(VzDeviceHandle device, float *pParams);
+
+#endif /* VZENSEDS_API_H */

+ 28 - 0
NebulaSDK/x86_64/include/VzenseNebula_define.h

@@ -0,0 +1,28 @@
+#ifndef VZENSEDS_DEFINE_H
+#define VZENSEDS_DEFINE_H
+
+#include "VzenseNebula_enums.h"
+#include "VzenseNebula_types.h"
+
+#ifdef PS_EXPORT_ON
+    #ifdef _WIN32
+        #define VZENSE_API_EXPORT __declspec(dllexport)
+    #else
+        #define VZENSE_API_EXPORT __attribute__((visibility("default")))
+    #endif
+#else
+    #ifdef _WIN32
+        #define VZENSE_API_EXPORT __declspec(dllimport)
+    #else
+        #define VZENSE_API_EXPORT __attribute__((visibility("default")))
+    #endif
+#endif
+
+#ifdef __cplusplus
+#define VZENSE_C_API_EXPORT extern "C" VZENSE_API_EXPORT
+#else
+#define VZENSE_C_API_EXPORT VZENSE_API_EXPORT
+#define bool uint8_t
+#endif
+
+#endif /* VZENSEDS_DEFINE_H */

+ 109 - 0
NebulaSDK/x86_64/include/VzenseNebula_enums.h

@@ -0,0 +1,109 @@
+#ifndef VZENSEDS_ENUMS_H
+#define VZENSEDS_ENUMS_H
+
+/**
+ * @brief Specifies the type of image frame.
+ */
+typedef enum{
+	VzDepthFrame = 0,                           //!< Depth frame with 16 bits per pixel in millimeters.
+	VzIRFrame = 1,                              //!< IR frame with 8 bits per pixel.
+	VzColorFrame = 3,                           //!< Color frame with 24 bits per pixel in RGB/BGR format.
+    VzTransformColorImgToDepthSensorFrame = 4,  //!< Color frame with 24 bits per pixel in RGB/BGR format, that is transformed to depth sensor space where the resolution is the same as the depth frame's resolution.\n 
+	                                            //!< This frame type can be enabled using ::VZ_SetTransformColorImgToDepthSensorEnabled().
+	VzTransformDepthImgToColorSensorFrame = 5,  //!< Depth frame with 16 bits per pixel, in millimeters, that is transformed to color sensor space where the resolution is same as the color frame's resolution.\n 
+	                                            //!< This frame type can be enabled using ::VZ_SetTransformDepthImgToColorSensorEnabled().
+	VzConfidenceFrame = 8,                      //!< Confidence frame with 16 bits per pixel.
+}VzFrameType;
+
+/**
+ * @brief Specifies the image pixel format.
+ */
+typedef enum{
+	VzPixelFormatDepthMM16 = 0,        //!< Depth image pixel format, 16 bits per pixel in mm.
+	VzPixelFormatGray8 = 2,            //!< Gray image pixel format, 8 bits per pixel.
+
+	//Color
+	VzPixelFormatRGB888 = 3,           //!< Color image pixel format, 24 bits per pixel RGB format.
+	VzPixelFormatBGR888 = 4           //!< Color image pixel format, 24 bits per pixel BGR format.
+}VzPixelFormat;
+
+/**
+ * @brief Specifies the type of sensor.
+ */
+typedef enum {
+    VzToFSensor = 0x01,          //!< ToF camera.
+    VzColorSensor = 0x02         //!< Color camera.
+}VzSensorType;
+
+/**
+ * @brief Return status codes for all APIs.\n 
+ * 		  <code>VzRetOK = 0</code> means the API successfully completed its operation.\n 
+ * 		  All other codes indicate a device, parameter, or API usage error.
+ */
+typedef enum
+{
+    VzRetOK                         =  0,   //!< The function completed successfully.
+    VzRetNoDeviceConnected          = -1,   //!< There is no depth camera connected or the camera has not been connected correctly. Check the hardware connection or try unplugging and re-plugging the USB cable.
+    VzRetInvalidDeviceIndex         = -2,   //!< The input device index is invalid.
+    VzRetDevicePointerIsNull        = -3,   //!< The device structure pointer is null.
+    VzRetInvalidFrameType           = -4,   //!< The input frame type is invalid.
+    VzRetFramePointerIsNull         = -5,   //!< The output frame buffer is null.
+    VzRetNoPropertyValueGet         = -6,   //!< Cannot get the value for the specified property.
+    VzRetNoPropertyValueSet         = -7,   //!< Cannot set the value for the specified property.
+    VzRetPropertyPointerIsNull      = -8,   //!< The input property value buffer pointer is null.
+    VzRetPropertySizeNotEnough      = -9,   //!< The input property value buffer size is too small to store the specified property value.
+    VzRetInvalidDepthRange          = -10,  //!< The input depth range mode is invalid.
+    VzRetGetFrameReadyTimeOut       = -11,  //!< Timed out while capturing the next image frame.
+    VzRetInputPointerIsNull         = -12,  //!< An input pointer parameter is null.
+    VzRetCameraNotOpened            = -13,  //!< The camera has not been opened.
+    vzRetInvalidCameraType          = -14,  //!< The specified type of camera is invalid.
+    VzRetInvalidParams              = -15,  //!< One or more of the parameter values provided are invalid.
+    VzRetCurrentVersionNotSupport   = -16,  //!< This feature is not supported in the current version.
+    VzRetUpgradeImgError            = -17,  //!< There is an error in the upgrade file.
+    VzRetUpgradeImgPathTooLong      = -18,  //!< The upgrade file path is longer than 260 characters.
+    VzRetUpgradeCallbackNotSet      = -19,  //!< VZ_SetUpgradeStatusCallback is not called.
+    VzRetProductNotSupport          = -20,  //!< The current product does not support this operation.
+    VzRetNoConfigFolder             = -21,  //!< No product profile found.
+    VzRetWebServerStartError        = -22,  //!< WebServer start/restart error (IP or port).
+    VzRetGetOverStayFrame           = -23,  //!< More than 1 s elapsed between frame-ready and frame retrieval.
+    VzRetCreateLogDirError          = -24,  //!< Failed to create the log directory.
+    VzRetCreateLogFileError         = -25,  //!< Failed to create the log file.
+    VzRetNoAdapterConnected         = -100, //!< There is no adapter connected.
+    VzRetReInitialized              = -101, //!< The SDK has already been initialized.
+    VzRetNoInitialized              = -102, //!< The SDK has not been initialized.
+    VzRetCameraOpened               = -103, //!< The camera has already been opened.
+    VzRetCmdError                   = -104, //!< Set/Get command control error.
+    VzRetCmdSyncTimeOut             = -105, //!< The command was set, but the synchronous response timed out.
+    VzRetIPNotMatch                 = -106, //!< IP is not in the same network segment.
+    VzRetNotStopStream              = -107, //!< Please invoke VZ_StopStream first to close the data stream.
+    VzRetNotStartStream             = -108, //!< Please invoke VZ_StartStream first to get the data stream.
+    VzRetNoDriversFolder            = -109, //!< No Drivers folder found.
+
+    VzRetOthers                     = -255, //!< An unknown error occurred.
+}VzReturnStatus;
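Since every API returns a ::VzReturnStatus, a small checking helper (hypothetical, not part of the SDK) keeps call sites readable:

#include <cstdio>
#include <cstdlib>

static void vzCheck(VzReturnStatus status, const char* what) {
    if (status != VzRetOK) {
        std::fprintf(stderr, "%s failed with status %d\n", what, (int)status);
        std::exit(EXIT_FAILURE);
    }
}

// Usage: vzCheck(VZ_Initialize(), "VZ_Initialize");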
+
+typedef enum {
+	VzConnectUNKNOWN = 0,
+    VzUnconnected = 1,
+    VzConnected = 2,
+    VzOpened = 3,
+    VzUpgradeUnconnected = 4,
+    VzUpgradeConnected = 5,
+}VzConnectStatus;
+
+typedef enum
+{
+    VzActiveMode = 0x00,             //enter active mode
+    VzHardwareTriggerMode = 0x01,    //enter hardware slave mode; connect the hardware trigger wire and provide a hardware signal to trigger each image
+    VzSoftwareTriggerMode = 0x02,    //enter software slave mode; invoke VZ_SetSoftwareSlaveTrigger to trigger each image
+}VzWorkMode;
+
+typedef enum
+{
+    VzExposureControlMode_Auto = 0,
+    VzExposureControlMode_Manual = 1,
+}VzExposureControlMode;
+
+
+#endif /* VZENSEDS_ENUMS_H */
+

+ 206 - 0
NebulaSDK/x86_64/include/VzenseNebula_types.h

@@ -0,0 +1,206 @@
+#ifndef VZENSEDS_TYPES_H
+#define VZENSEDS_TYPES_H
+
+#include <stdint.h>
+#include "VzenseNebula_enums.h"
+
+typedef uint16_t VzDepthPixel;  //!< Depth image pixel type in 16-bit
+typedef uint16_t VzGray16Pixel; //!< Gray image pixel type in 16-bit
+typedef uint8_t VzGray8Pixel;   //!< Gray image pixel type in 8-bit
+
+#pragma pack (push, 1)
+/**
+ * @brief Color image pixel type in 24-bit RGB format.
+ */
+typedef struct
+{
+	uint8_t r;	//!< Red
+	uint8_t g;	//!< Green
+	uint8_t b;	//!< Blue
+} VzRGB888Pixel;
+
+/**
+ * @brief Color image pixel type in 24-bit BGR format.
+ */
+typedef struct
+{
+	uint8_t b;	//!< Blue
+	uint8_t g;	//!< Green
+	uint8_t r;	//!< Red
+} VzBGR888Pixel;
+
+/**
+ * @brief Stores the x, y, and z components of a 3D vector.
+ */
+typedef struct  
+{
+	float x, y, z;	//!< The x, y, and z components of the vector.
+}VzVector3f;
+
+/**
+ * @brief Stores the x and y components of a 2D vector.
+ */
+typedef struct
+{
+	uint16_t x;
+	uint16_t y;
+}VzVector2u16;
+
+/**
+ * @brief Contains depth information for a given pixel.
+ */
+typedef struct
+{
+	int          depthX;    //!< The x coordinate of the pixel.
+	int          depthY;    //!< The y coordinate of the pixel.
+	VzDepthPixel depthZ;    //!< The depth of the pixel, in millimeters.
+}VzDepthVector3;
+
+/**
+ * @brief image resolution
+ */
+typedef struct {
+    int width;
+    int height;
+}VzResolution;
+
+/**
+ * @brief Supported resolutions.
+ */
+typedef struct
+{
+    int count;
+    VzResolution resolution[6];
+}VzResolutionList;
+
+/**
+ * @brief Camera intrinsic parameters and distortion coefficients.
+ */
+typedef struct
+{
+	double	fx;  //!< Focal length x (pixel)
+	double	fy;  //!< Focal length y (pixel)
+	double	cx;  //!< Principal point x (pixel)
+	double	cy;  //!< Principal point y (pixel)
+	double	k1;  //!< Radial distortion coefficient, 1st-order
+	double	k2;  //!< Radial distortion coefficient, 2nd-order
+	double	p1;  //!< Tangential distortion coefficient
+	double	p2;  //!< Tangential distortion coefficient
+	double	k3;  //!< Radial distortion coefficient, 3rd-order
+	double	k4;  //!< Radial distortion coefficient, 4th-order
+	double	k5;  //!< Radial distortion coefficient, 5th-order
+	double	k6;  //!< Radial distortion coefficient, 6th-order
+}VzSensorIntrinsicParameters;
+
+/** 
+ * @brief Extrinsic parameters define the physical relationship from the ToF sensor to the color sensor.
+ */
+typedef struct
+{
+	double rotation[9];     //!< Orientation stored as an array of 9 doubles representing a 3x3 rotation matrix.
+	double translation[3];  //!< Location stored as an array of 3 doubles representing a 3D translation vector.
+}VzSensorExtrinsicParameters;
+
+/**
+ * @brief Depth/IR/Color image frame data.
+ */
+typedef struct
+{
+	uint32_t       frameIndex;          //!< The index of the frame.
+	VzFrameType    frameType;           //!< The type of frame. See ::VzFrameType for more information.
+	VzPixelFormat  pixelFormat;         //!< The pixel format used by a frame. See ::VzPixelFormat for more information.
+	uint8_t*       pFrameData;          //!< A buffer containing the frame’s image data.
+	uint32_t       dataLen;             //!< The length of pFrameData, in bytes.
+	float          exposureTime;        //!< The exposure time, in milliseconds.
+    uint8_t        depthRange;          //!< The depth range mode of the current frame. Used only for depth frames.
+	uint16_t       width;               //!< The width of the frame, in pixels.
+	uint16_t       height;              //!< The height of the frame, in pixels.
+    uint64_t       deviceTimestamp;     //!< The timestamp of the frame from the device.
+}VzFrame;
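For a VzDepthFrame, pFrameData is a row-major array of VzDepthPixel values. Reading the depth at the image center might look like this (a sketch; `frame` is assumed to be a valid depth frame):

const VzDepthPixel* depth = reinterpret_cast<const VzDepthPixel*>(frame.pFrameData);
VzDepthPixel centerMM = depth[(frame.height / 2) * frame.width + (frame.width / 2)];
// centerMM is the distance at the image center, in millimeters.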
+
+typedef struct
+{
+	uint32_t depth : 1;
+	uint32_t ir : 1;
+	uint32_t color : 1;
+	uint32_t transformedColor : 1;
+	uint32_t transformedDepth : 1;
+	uint32_t confidence : 1;
+	uint32_t reserved : 26;
+}VzFrameReady;
+
+typedef void* VzDeviceHandle;
+
+typedef struct
+{
+	char productName[64];
+    char uri[256];
+	char alias[64];
+    char serialNumber[64];
+    char ip[17];
+	VzConnectStatus status;
+}VzDeviceInfo;
+
+typedef struct
+{
+	int threshold;  //[0, 3]. The larger the value, the stronger the filtering effect and the smaller the point cloud jitter.
+    bool enable;
+} VzTimeFilterParams;
+
+typedef struct
+{
+	int threshold;  //[0, 100]. The larger the value, the stronger the filtering effect and the more points are filtered out.
+    bool enable;
+} VzConfidenceFilterParams;
+
+typedef struct
+{
+    int threshold;  //[0, 49]. The larger the value, the stronger the filtering effect and the more points are filtered out.
+    bool enable;
+} VzFlyingPixelFilterParams;
+
+typedef struct
+{
+    VzExposureControlMode mode;
+    int exposureTime;              //When the control mode is Auto, exposureTime represents the maximum exposure time.
+                                   //When the control mode is Manual, exposureTime represents the current exposure time.
+} VzExposureTimeParams;
+
+
+
+/**
+ * @brief Error information about the device
+ */
+typedef struct
+{
+    int errorCount;                     //The number of error messages; the maximum is 10.
+    char errorMessage[10][64];          //Each error message is at most 64 bytes, including the terminating '\0'.
+} VzDeviceErrorInfo;
+
+typedef struct
+{	
+	uint16_t width;                      //[1, 65535]. The width of the input signal.
+	uint16_t interval;                   //[34000, 65535]. The interval of the input signal.
+	uint8_t polarity;                    //[0, 1]. 0 for active low, 1 for active high.
+}VzInputSignalParamsForHWTrigger;        //Input signal parameters for hardware trigger.
+
+typedef struct
+{
+	uint16_t width;                      //[1, 65535]. The width of the output signal.
+	uint16_t delay;                      //[0, 65535]. The delay time of the output signal.
+	uint8_t polarity;                    //[0, 1]. 0 for active low, 1 for active high.
+}VzOutputSignalParams;                   //Output signal parameters.
+
+#pragma pack (pop)
+
+/**
+* @brief Hotplug status callback function.
+* pInfo     Returns the information of the device. See ::VzDeviceInfo.
+* state     0: device added; 1: device removed.
+* pUserData Pointer to user data; may be null.
+*/
+typedef void(*PtrHotPlugStatusCallback)(const VzDeviceInfo* pInfo, int state, void* pUserData);
+
+typedef void(*PtrUpgradeStatusCallback)(int status, int params, void* pUserData);
+
+#endif /* VZENSEDS_TYPES_H */

+ 13 - 0
NebulaSDK/x86_64/lib/Config/DS77CLite_0F.json

@@ -0,0 +1,13 @@
+{
+    "productName": "DS77CLite",
+    "connectType":"socket",
+    "colorSensor": [
+        {
+            "type": "gc2053",
+            "resolutionList": ["1600_1200","800_600", "640_480"]
+        }    ],
+    "toFSensor": [
+        {
+            "type": "sony_cw_2022"
+        }]
+}

+ 13 - 0
NebulaSDK/x86_64/lib/Config/DS77CPro_0E.json

@@ -0,0 +1,13 @@
+{
+    "productName": "DS77CPro",
+    "connectType":"socket",
+    "colorSensor": [
+        {
+            "type": "gc2053",
+            "resolutionList": ["1600_1200","800_600", "640_480"]
+        }    ],
+    "toFSensor": [
+        {
+            "type": "sony_cw_2022"
+        }]
+}

+ 8 - 0
NebulaSDK/x86_64/lib/Config/DS77Lite_11.json

@@ -0,0 +1,8 @@
+{
+    "productName": "DS77Lite",
+    "connectType":"socket",
+    "toFSensor": [
+        {
+            "type": "sony_cw_2022"
+        }]
+}

+ 8 - 0
NebulaSDK/x86_64/lib/Config/DS77Pro_10.json

@@ -0,0 +1,8 @@
+{
+    "productName": "DS77Pro",
+    "connectType":"socket",
+    "toFSensor": [
+        {
+            "type": "sony_cw_2022"
+        }]
+}

+ 13 - 0
NebulaSDK/x86_64/lib/Config/DS86_12.json

@@ -0,0 +1,13 @@
+{
+    "productName": "DS86",
+    "connectType":"socket",
+    "colorSensor": [
+    {
+      "type": "gc2053",
+      "resolutionList": [ "1600_1200", "800_600", "640_480" ]
+    }],
+    "toFSensor": [
+        {
+            "type": "sony_cw_2022"
+        }]
+}

+ 13 - 0
NebulaSDK/x86_64/lib/Config/DS87_13.json

@@ -0,0 +1,13 @@
+{
+    "productName": "DS87",
+    "connectType":"socket",
+    "colorSensor": [
+    {
+      "type": "gc2053",
+      "resolutionList": [ "1600_1200", "800_600", "640_480" ]
+    }],
+    "toFSensor": [
+        {
+            "type": "sony_cw_2022"
+        }]
+}

+ 183 - 0
detect/onnx/inference.cpp

@@ -0,0 +1,183 @@
+#include "inference.h"
+
+Inference::Inference(const std::string &onnxModelPath, const cv::Size &modelInputShape, const std::string &classesTxtFile, const bool &runWithCuda)
+{
+    modelPath = onnxModelPath;
+    modelShape = modelInputShape;
+    classesPath = classesTxtFile;
+    cudaEnabled = runWithCuda;
+
+    loadOnnxNetwork();
+    // loadClassesFromFile(); The classes are hard-coded for this example
+}
+
+std::vector<Detection> Inference::runInference(const cv::Mat &input)
+{
+    cv::Mat modelInput = input;
+    if (letterBoxForSquare && modelShape.width == modelShape.height)
+        modelInput = formatToSquare(modelInput);
+
+    cv::Mat blob;
+    cv::dnn::blobFromImage(modelInput, blob, 1.0/255.0, modelShape, cv::Scalar(), true, false);
+    net.setInput(blob);
+
+    std::vector<cv::Mat> outputs;
+    net.forward(outputs, net.getUnconnectedOutLayersNames());
+
+    int rows = outputs[0].size[1];
+    int dimensions = outputs[0].size[2];
+
+    bool yolov8 = false;
+    // yolov5 has an output of shape (batchSize, 25200, 85) (Num classes + box[x,y,w,h] + confidence[c])
+    // yolov8 has an output of shape (batchSize, 84,  8400) (Num classes + box[x,y,w,h])
+    if (dimensions > rows) // Check if the shape[2] is more than shape[1] (yolov8)
+    {
+        yolov8 = true;
+        rows = outputs[0].size[2];
+        dimensions = outputs[0].size[1];
+
+        outputs[0] = outputs[0].reshape(1, dimensions);
+        cv::transpose(outputs[0], outputs[0]);
+    }
+    float *data = (float *)outputs[0].data;
+
+    float x_factor = modelInput.cols / modelShape.width;
+    float y_factor = modelInput.rows / modelShape.height;
+
+    std::vector<int> class_ids;
+    std::vector<float> confidences;
+    std::vector<cv::Rect> boxes;
+
+    for (int i = 0; i < rows; ++i)
+    {
+        if (yolov8)
+        {
+            float *classes_scores = data+4;
+
+            cv::Mat scores(1, classes.size(), CV_32FC1, classes_scores);
+            cv::Point class_id;
+            double maxClassScore;
+
+            minMaxLoc(scores, 0, &maxClassScore, 0, &class_id);
+
+            if (maxClassScore > modelScoreThreshold)
+            {
+                confidences.push_back(maxClassScore);
+                class_ids.push_back(class_id.x);
+
+                float x = data[0];
+                float y = data[1];
+                float w = data[2];
+                float h = data[3];
+
+                int left = int((x - 0.5 * w) * x_factor);
+                int top = int((y - 0.5 * h) * y_factor);
+
+                int width = int(w * x_factor);
+                int height = int(h * y_factor);
+
+                boxes.push_back(cv::Rect(left, top, width, height));
+            }
+        }
+        else // yolov5
+        {
+            float confidence = data[4];
+
+            if (confidence >= modelConfidenceThreshold)
+            {
+                float *classes_scores = data+5;
+
+                cv::Mat scores(1, classes.size(), CV_32FC1, classes_scores);
+                cv::Point class_id;
+                double max_class_score;
+
+                minMaxLoc(scores, 0, &max_class_score, 0, &class_id);
+
+                if (max_class_score > modelScoreThreshold)
+                {
+                    confidences.push_back(confidence);
+                    class_ids.push_back(class_id.x);
+
+                    float x = data[0];
+                    float y = data[1];
+                    float w = data[2];
+                    float h = data[3];
+
+                    int left = int((x - 0.5 * w) * x_factor);
+                    int top = int((y - 0.5 * h) * y_factor);
+
+                    int width = int(w * x_factor);
+                    int height = int(h * y_factor);
+
+                    boxes.push_back(cv::Rect(left, top, width, height));
+                }
+            }
+        }
+
+        data += dimensions;
+    }
+
+    std::vector<int> nms_result;
+    cv::dnn::NMSBoxes(boxes, confidences, modelScoreThreshold, modelNMSThreshold, nms_result);
+
+    std::vector<Detection> detections{};
+    for (unsigned long i = 0; i < nms_result.size(); ++i)
+    {
+        int idx = nms_result[i];
+
+        Detection result;
+        result.class_id = class_ids[idx];
+        result.confidence = confidences[idx];
+
+        std::random_device rd;
+        std::mt19937 gen(rd());
+        std::uniform_int_distribution<int> dis(100, 255);
+        result.color = cv::Scalar(dis(gen),
+                                  dis(gen),
+                                  dis(gen));
+
+        result.className = classes[result.class_id];
+        result.box = boxes[idx];
+
+        detections.push_back(result);
+    }
+
+    return detections;
+}
+
+void Inference::loadClassesFromFile()
+{
+    std::ifstream inputFile(classesPath);
+    if (inputFile.is_open())
+    {
+        std::string classLine;
+        while (std::getline(inputFile, classLine))
+            classes.push_back(classLine);
+        inputFile.close();
+    }
+}
+
+void Inference::loadOnnxNetwork()
+{
+    net = cv::dnn::readNetFromONNX(modelPath);
+    if (cudaEnabled)
+    {
+        net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
+        net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);
+    }
+    else
+    {
+        net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
+        net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
+    }
+}
+
+cv::Mat Inference::formatToSquare(const cv::Mat &source)
+{
+    int col = source.cols;
+    int row = source.rows;
+    int _max = MAX(col, row);
+    cv::Mat result = cv::Mat::zeros(_max, _max, CV_8UC3);
+    source.copyTo(result(cv::Rect(0, 0, col, row)));
+    return result;
+}
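A sketch of driving this class end to end (the model and image paths are placeholders):

#include "inference.h"

int main() {
    Inference inf("yolov8n.onnx", cv::Size(640, 640), "", false);  // CUDA disabled

    cv::Mat img = cv::imread("input.jpg");
    for (const Detection& det : inf.runInference(img)) {
        cv::rectangle(img, det.box, det.color, 2);
        cv::putText(img, det.className, det.box.tl(),
                    cv::FONT_HERSHEY_SIMPLEX, 0.6, det.color, 2);
    }
    cv::imwrite("output.jpg", img);
    return 0;
}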

+ 50 - 0
detect/onnx/inference.h

@@ -0,0 +1,50 @@
+#pragma once
+
+// Cpp native
+#include <fstream>
+#include <vector>
+#include <string>
+#include <random>
+
+// OpenCV / DNN / Inference
+#include <opencv2/imgproc.hpp>
+#include <opencv2/opencv.hpp>
+#include <opencv2/dnn.hpp>
+
+struct Detection
+{
+    int class_id{0};
+    std::string className{};
+    float confidence{0.0};
+    cv::Scalar color{};
+    cv::Rect box{};
+};
+
+class Inference
+{
+public:
+    Inference(const std::string &onnxModelPath, const cv::Size &modelInputShape = {640, 640}, const std::string &classesTxtFile = "", const bool &runWithCuda = false);
+    std::vector<Detection> runInference(const cv::Mat &input);
+
+private:
+    void loadClassesFromFile();
+    void loadOnnxNetwork();
+    cv::Mat formatToSquare(const cv::Mat &source);
+
+    std::string modelPath{};
+    std::string classesPath{};
+    bool cudaEnabled{};
+
+    std::vector<std::string> classes{"car", "wheel"};
+
+    cv::Size2f modelShape{};
+
+    float modelConfidenceThreshold {0.25};
+    float modelScoreThreshold      {0.45};
+    float modelNMSThreshold        {0.50};
+
+    bool letterBoxForSquare = true;
+
+    cv::dnn::Net net;
+};
+
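For reference, a minimal usage sketch of the Inference wrapper declared above; the model path, class-list path, and image names are placeholders, and runWithCuda=false keeps it on the OpenCV CPU backend:

#include "inference.h"

int main() {
    Inference inf("yolov8n.onnx", cv::Size(640, 640), "classes.txt", false);
    cv::Mat frame = cv::imread("frame.png");
    if (frame.empty()) return 1;

    std::vector<Detection> dets = inf.runInference(frame);
    for (const Detection &d : dets) {
        // Draw each detection with the random per-object color assigned above.
        cv::rectangle(frame, d.box, d.color, 2);
        cv::putText(frame, d.className, d.box.tl(), cv::FONT_HERSHEY_SIMPLEX,
                    0.5, d.color, 1);
    }
    cv::imwrite("out.png", frame);
    return 0;
}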

+ 52 - 0
detect/onnx/wheel-detector.cpp

@@ -0,0 +1,52 @@
+#include "wheel-detector.h"
+
+TensorrtWheelDetector::TensorrtWheelDetector(const std::string &model_file, const std::string &class_file){
+    yolov8_ = new Inference(model_file, cv::Size{640, 640}, class_file, false);
+}
+
+TensorrtWheelDetector::~TensorrtWheelDetector(){
+    delete yolov8_;
+    yolov8_ = nullptr;
+}
+
+bool TensorrtWheelDetector::detect(cv::Mat& img,std::vector<Object>& objs){
+    std::vector<Detection> rets = yolov8_->runInference(img);
+
+    for (auto &ret: rets) {
+        Object obj;
+        obj.rect = ret.box;
+        obj.label = ret.class_id;
+        obj.prob = ret.confidence;
+        objs.push_back(obj);
+    }
+    return true;
+}
+
+bool TensorrtWheelDetector::detect(cv::Mat& img,std::vector<Object>& objs,cv::Mat& res){
+    std::vector<Detection> rets = yolov8_->runInference(img);
+
+    res = img.clone();
+    for (auto &ret: rets) {
+        Object obj;
+        obj.rect = ret.box;
+        obj.label = ret.class_id;
+        obj.prob = ret.confidence;
+        // The ONNX detection model has no mask head; mark the whole box as foreground.
+        obj.boxMask = cv::Mat(ret.box.size(), CV_8UC1, cv::Scalar(255));
+        objs.push_back(obj);
+        cv::rectangle(res, ret.box, ret.color, 2);
+    }
+    return true;
+}
+
+std::vector<cv::Point> TensorrtWheelDetector::getPointsFromObj(const Object &obj){
+    std::vector<cv::Point> ret;
+
+    int x=int(obj.rect.x+0.5);
+    int y=int(obj.rect.y+0.5);
+    int width=int(obj.rect.width);
+    int height=int(obj.rect.height);
+
+    for(int i=0;i<height;++i){
+        for(int j=0;j<width;++j){
+            ret.emplace_back(x+j,y+i);
+        }
+    }
+
+    return ret;
+}
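A usage sketch for this ONNX-backed detector; the model, class-list, and image paths are placeholders. getPointsFromObj() enumerates every pixel inside the box, which is presumably what gets looked up in the depth frame for the point-cloud conversion this commit adds:

#include <cstdio>
#include "wheel-detector.h"

int main() {
    TensorrtWheelDetector detector("wheel.onnx", "classes.txt");
    cv::Mat img = cv::imread("frame.png");
    if (img.empty()) return 1;

    std::vector<TensorrtWheelDetector::Object> objs;
    if (!detector.detect(img, objs)) return 1;

    for (const auto &obj : objs) {
        std::vector<cv::Point> pts = TensorrtWheelDetector::getPointsFromObj(obj);
        printf("label=%d prob=%.2f pixels=%zu\n", obj.label, obj.prob, pts.size());
    }
    return 0;
}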

+ 29 - 0
detect/onnx/wheel-detector.h

@@ -0,0 +1,29 @@
+#pragma once
+
+#include "inference.h"
+
+class TensorrtWheelDetector{
+public:
+    struct Object {
+        cv::Rect_<float> rect;
+        int              label = 0;
+        float            prob  = 0.0;
+        cv::Mat          boxMask;
+    };
+
+    TensorrtWheelDetector(const std::string &model_file, const std::string &class_file);
+    ~TensorrtWheelDetector();
+
+    bool detect(cv::Mat& img,std::vector<Object>& objs);
+    bool detect(cv::Mat& img,std::vector<Object>& objs,cv::Mat& res);
+
+    static std::vector<cv::Point> getPointsFromObj(const Object &obj);
+
+private:
+    Inference*  yolov8_;
+    cv::Size     imgsz_;
+    // Kept for interface parity with the TensorRT variant; the ONNX detection
+    // backend does not use these segmentation parameters.
+    int      seg_h_        = 120;
+    int      seg_w_        = 160;
+    int      seg_channels_ = 32;
+};
+

+ 142 - 0
detect/tensorrt/common.hpp

@@ -0,0 +1,142 @@
+//
+// Created by ubuntu on 3/16/23.
+//
+
+#ifndef JETSON_SEGMENT_COMMON_HPP
+#define JETSON_SEGMENT_COMMON_HPP
+#include "NvInfer.h"
+#include "opencv2/opencv.hpp"
+#include <sys/stat.h>
+#include <unistd.h>
+
+#define CHECK(call)                                                                                                    \
+    do {                                                                                                               \
+        const cudaError_t error_code = call;                                                                           \
+        if (error_code != cudaSuccess) {                                                                               \
+            printf("CUDA Error:\n");                                                                                   \
+            printf("    File:       %s\n", __FILE__);                                                                  \
+            printf("    Line:       %d\n", __LINE__);                                                                  \
+            printf("    Error code: %d\n", error_code);                                                                \
+            printf("    Error text: %s\n", cudaGetErrorString(error_code));                                            \
+            exit(1);                                                                                                   \
+        }                                                                                                              \
+    } while (0)
+
+class Logger: public nvinfer1::ILogger {
+public:
+    nvinfer1::ILogger::Severity reportableSeverity;
+
+    explicit Logger(nvinfer1::ILogger::Severity severity = nvinfer1::ILogger::Severity::kINFO):
+        reportableSeverity(severity)
+    {
+    }
+
+    void log(nvinfer1::ILogger::Severity severity, const char* msg) noexcept override
+    {
+        if (severity > reportableSeverity) {
+            return;
+        }
+        switch (severity) {
+            case nvinfer1::ILogger::Severity::kINTERNAL_ERROR:
+                std::cerr << "INTERNAL_ERROR: ";
+                break;
+            case nvinfer1::ILogger::Severity::kERROR:
+                std::cerr << "ERROR: ";
+                break;
+            case nvinfer1::ILogger::Severity::kWARNING:
+                std::cerr << "WARNING: ";
+                break;
+            case nvinfer1::ILogger::Severity::kINFO:
+                std::cerr << "INFO: ";
+                break;
+            default:
+                std::cerr << "VERBOSE: ";
+                break;
+        }
+        std::cerr << msg << std::endl;
+    }
+};
+
+inline int get_size_by_dims(const nvinfer1::Dims& dims)
+{
+    int size = 1;
+    for (int i = 0; i < dims.nbDims; i++) {
+        size *= dims.d[i];
+    }
+    return size;
+}
+
+inline int type_to_size(const nvinfer1::DataType& dataType)
+{
+    switch (dataType) {
+        case nvinfer1::DataType::kFLOAT:
+            return 4;
+        case nvinfer1::DataType::kHALF:
+            return 2;
+        case nvinfer1::DataType::kINT32:
+            return 4;
+        case nvinfer1::DataType::kINT8:
+            return 1;
+        case nvinfer1::DataType::kBOOL:
+            return 1;
+        default:
+            return 4;
+    }
+}
+
+inline static float clamp(float val, float min, float max)
+{
+    return val > min ? (val < max ? val : max) : min;
+}
+
+inline bool IsPathExist(const std::string& path)
+{
+    return access(path.c_str(), F_OK) == 0;
+}
+
+inline bool IsFile(const std::string& path)
+{
+    if (!IsPathExist(path)) {
+        printf("%s:%d %s not exist\n", __FILE__, __LINE__, path.c_str());
+        return false;
+    }
+    struct stat buffer;
+    return (stat(path.c_str(), &buffer) == 0 && S_ISREG(buffer.st_mode));
+}
+
+inline bool IsFolder(const std::string& path)
+{
+    if (!IsPathExist(path)) {
+        return false;
+    }
+    struct stat buffer;
+    return (stat(path.c_str(), &buffer) == 0 && S_ISDIR(buffer.st_mode));
+}
+
+namespace seg {
+struct Binding {
+    size_t         size  = 1;
+    size_t         dsize = 1;
+    nvinfer1::Dims dims;
+    std::string    name;
+};
+
+struct Object {
+    cv::Rect_<float> rect;
+    int              label = 0;
+    float            prob  = 0.0;
+    cv::Mat          boxMask;
+};
+
+struct PreParam {
+    float ratio  = 1.0f;
+    float dw     = 0.0f;
+    float dh     = 0.0f;
+    float height = 0;
+    float width  = 0;
+};
+}  // namespace seg
+#endif  // JETSON_SEGMENT_COMMON_HPP
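A minimal sketch of the CHECK macro above guarding CUDA runtime calls, assuming the CUDA toolkit and the TensorRT headers that common.hpp pulls in are available; on any non-cudaSuccess return it prints the file, line, and error text, then exits:

#include <cuda_runtime.h>
#include "common.hpp"

int main() {
    void* buf = nullptr;
    CHECK(cudaMalloc(&buf, 1 << 20)); // 1 MiB device buffer
    CHECK(cudaMemset(buf, 0, 1 << 20));
    CHECK(cudaFree(buf));
    return 0;
}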

+ 69 - 0
detect/tensorrt/wheel-detector.cpp

@@ -0,0 +1,69 @@
+#include "wheel-detector.h"
+
+TensorrtWheelDetector::TensorrtWheelDetector(const std::string &engine_file){
+    yolov8_=new YOLOv8_seg(engine_file);
+    yolov8_->make_pipe(false);
+    imgsz_=cv::Size{640, 480};
+    seg_h_        = 120;
+    seg_w_        = 160;
+    seg_channels_ = 32;
+}
+
+TensorrtWheelDetector::~TensorrtWheelDetector(){
+    if(yolov8_!=nullptr){
+        delete yolov8_;
+        yolov8_=nullptr;
+    }
+}
+
+bool TensorrtWheelDetector::detect(cv::Mat& img,std::vector<Object>& objs){
+    if(yolov8_==nullptr){
+        return false;
+    }
+    if(img.size()!=imgsz_){
+        printf("imgsz required [%d,%d],but input is [%d,%d]\n",imgsz_.height,imgsz_.width,img.rows,img.cols);
+        return false;
+    }
+    yolov8_->copy_from_Mat(img, imgsz_);
+    yolov8_->infer();
+    float score_thres=0.9;
+    float iou_thres=0.65;
+    int topk=10;
+    yolov8_->postprocess(objs, score_thres, iou_thres, topk, seg_channels_, seg_h_, seg_w_);
+    return true;
+}
+
+bool TensorrtWheelDetector::detect(cv::Mat& img,std::vector<Object>& objs,cv::Mat& res){
+    if(detect(img,objs))
+    {
+        const std::vector<std::string> classes={"none","wheel"};
+        const std::vector<std::vector<unsigned int>> colors = {{0, 114, 189},   {0, 255, 0}};
+        const std::vector<std::vector<unsigned int>> mask_colors = {{255, 56, 56},  {255, 0, 0}};
+        yolov8_->draw_objects(img, res, objs, classes, colors, mask_colors);
+        return true;
+    }else{
+        return false;
+    }
+}
+
+std::vector<cv::Point> TensorrtWheelDetector::getPointsFromObj(Object obj){
+    std::vector<cv::Point> ret;
+    int x=int(obj.rect.x+0.5);
+    int y=int(obj.rect.y+0.5);
+    int width=int(obj.rect.width);
+    int height=int(obj.rect.height);
+    
+    printf("mask type:%d\n",obj.boxMask.type());
+    for(int i=0;i<height;++i){
+        for(int j=0;j<width;++j){
+            //printf("%d    ",obj.boxMask.at<uchar>(i,j));
+            if(obj.boxMask.at<uchar>(i,j)!=0){
+                ret.push_back(cv::Point(x+j,y+i));
+            }
+        }
+    }
+    //printf("\n");
+    return ret;
+}
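A usage sketch for the TensorRT path; the engine path and image name are placeholders. detect() enforces the 640x480 input size set in the constructor, and the mask pixels returned by getPointsFromObj() are the image-plane points available for the depth lookup in the point-cloud conversion:

#include <cstdio>
#include "wheel-detector.h"

int main() {
    TensorrtWheelDetector detector("wheel.engine");
    cv::Mat img = cv::imread("frame.png");
    if (img.empty()) return 1;
    cv::resize(img, img, cv::Size(640, 480)); // match the expected input size

    std::vector<Object> objs;
    cv::Mat vis;
    if (detector.detect(img, objs, vis)) {
        for (const auto &obj : objs) {
            auto pts = TensorrtWheelDetector::getPointsFromObj(obj);
            printf("wheel: prob=%.2f mask pixels=%zu\n", obj.prob, pts.size());
        }
        cv::imwrite("vis.png", vis);
    }
    return 0;
}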

+ 25 - 0
detect/tensorrt/wheel-detector.h

@@ -0,0 +1,25 @@
+#ifndef WHEEL_DETECTOR__HH___
+#define WHEEL_DETECTOR__HH___
+#include "yolov8-seg.h"
+
+class TensorrtWheelDetector{
+public:
+    TensorrtWheelDetector(const std::string &engine_file);
+    ~TensorrtWheelDetector();
+
+
+    bool detect(cv::Mat& img,std::vector<Object>& objs);
+    bool detect(cv::Mat& img,std::vector<Object>& objs,cv::Mat& res);
+
+    static std::vector<cv::Point> getPointsFromObj(Object obj);
+
+private:
+    YOLOv8_seg*  yolov8_;
+    cv::Size     imgsz_;
+    int      seg_h_        = 120;
+    int      seg_w_        = 160;
+    int      seg_channels_ = 32;
+
+};
+
+#endif

+ 319 - 0
detect/tensorrt/yolov8-seg.cpp

@@ -0,0 +1,319 @@
+//
+// Created by ubuntu on 3/16/23.
+//
+
+#include "yolov8-seg.h"
+
+
+using namespace seg;
+
+YOLOv8_seg::YOLOv8_seg(const std::string& engine_file_path)
+{
+    std::ifstream file(engine_file_path, std::ios::binary);
+    assert(file.good());
+    file.seekg(0, std::ios::end);
+    auto size = file.tellg();
+    file.seekg(0, std::ios::beg);
+    char* trtModelStream = new char[size];
+    assert(trtModelStream);
+    file.read(trtModelStream, size);
+    file.close();
+    initLibNvInferPlugins(&this->gLogger, "");
+    this->runtime = nvinfer1::createInferRuntime(this->gLogger);
+    assert(this->runtime != nullptr);
+
+    this->engine = this->runtime->deserializeCudaEngine(trtModelStream, size);
+    assert(this->engine != nullptr);
+    delete[] trtModelStream;
+    this->context = this->engine->createExecutionContext();
+
+    assert(this->context != nullptr);
+    cudaStreamCreate(&this->stream);
+    this->num_bindings = this->engine->getNbBindings();
+
+    for (int i = 0; i < this->num_bindings; ++i) {
+        Binding            binding;
+        nvinfer1::Dims     dims;
+        nvinfer1::DataType dtype = this->engine->getBindingDataType(i);
+        std::string        name  = this->engine->getBindingName(i);
+        binding.name             = name;
+        binding.dsize            = type_to_size(dtype);
+
+        bool IsInput = engine->bindingIsInput(i);
+        if (IsInput) {
+            this->num_inputs += 1;
+            dims         = this->engine->getProfileDimensions(i, 0, nvinfer1::OptProfileSelector::kMAX);
+            binding.size = get_size_by_dims(dims);
+            binding.dims = dims;
+            this->input_bindings.push_back(binding);
+            // set max opt shape
+            this->context->setBindingDimensions(i, dims);
+        }
+        else {
+            dims         = this->context->getBindingDimensions(i);
+            binding.size = get_size_by_dims(dims);
+            binding.dims = dims;
+            this->output_bindings.push_back(binding);
+            this->num_outputs += 1;
+        }
+        // printf("name: %s, size: %ld, dims: %d %d %d %d %d\n", 
+        //         name.c_str(), binding.dsize, dims.nbDims, dims.d[0], dims.d[1], dims.d[2], dims.d[3]);
+    }
+}
+
+YOLOv8_seg::~YOLOv8_seg()
+{
+    this->context->destroy();
+    this->engine->destroy();
+    this->runtime->destroy();
+    cudaStreamDestroy(this->stream);
+    for (auto& ptr : this->device_ptrs) {
+        CHECK(cudaFree(ptr));
+    }
+
+    for (auto& ptr : this->host_ptrs) {
+        CHECK(cudaFreeHost(ptr));
+    }
+}
+
+void YOLOv8_seg::make_pipe(bool warmup)
+{
+
+    for (auto& bindings : this->input_bindings) {
+        void* d_ptr;
+        CHECK(cudaMalloc(&d_ptr, bindings.size * bindings.dsize));
+        this->device_ptrs.push_back(d_ptr);
+    }
+
+    for (auto& bindings : this->output_bindings) {
+        void * d_ptr, *h_ptr;
+        size_t size = bindings.size * bindings.dsize;
+        CHECK(cudaMalloc(&d_ptr, size));
+        CHECK(cudaHostAlloc(&h_ptr, size, 0));
+        this->device_ptrs.push_back(d_ptr);
+        this->host_ptrs.push_back(h_ptr);
+    }
+
+    if (warmup) {
+        for (int i = 0; i < 10; i++) {
+            for (auto& bindings : this->input_bindings) {
+                size_t size  = bindings.size * bindings.dsize;
+                void*  h_ptr = malloc(size);
+                memset(h_ptr, 0, size);
+                CHECK(cudaMemcpyAsync(this->device_ptrs[0], h_ptr, size, cudaMemcpyHostToDevice, this->stream));
+                free(h_ptr);
+            }
+            this->infer();
+        }
+        printf("model warmup 10 times\n");
+    }
+}
+
+void YOLOv8_seg::letterbox(const cv::Mat& image, cv::Mat& out, cv::Size& size)
+{
+    const float inp_h  = size.height;
+    const float inp_w  = size.width;
+    float       height = image.rows;
+    float       width  = image.cols;
+
+    float r    = std::min(inp_h / height, inp_w / width);
+    int   padw = std::round(width * r);
+    int   padh = std::round(height * r);
+
+    cv::Mat tmp;
+    if ((int)width != padw || (int)height != padh) {
+        cv::resize(image, tmp, cv::Size(padw, padh));
+    }
+    else {
+        tmp = image.clone();
+    }
+
+    float dw = inp_w - padw;
+    float dh = inp_h - padh;
+
+    dw /= 2.0f;
+    dh /= 2.0f;
+    int top    = int(std::round(dh - 0.1f));
+    int bottom = int(std::round(dh + 0.1f));
+    int left   = int(std::round(dw - 0.1f));
+    int right  = int(std::round(dw + 0.1f));
+
+    cv::copyMakeBorder(tmp, tmp, top, bottom, left, right, cv::BORDER_CONSTANT, {114, 114, 114});
+
+    cv::dnn::blobFromImage(tmp, out, 1 / 255.f, cv::Size(), cv::Scalar(0, 0, 0), true, false, CV_32F);
+    this->pparam.ratio  = 1 / r;
+    this->pparam.dw     = dw;
+    this->pparam.dh     = dh;
+    this->pparam.height = height;
+    this->pparam.width  = width;
+}
+
+void YOLOv8_seg::copy_from_Mat(const cv::Mat& image)
+{
+    cv::Mat  nchw;
+    auto&    in_binding = this->input_bindings[0];
+    auto     width      = in_binding.dims.d[3];
+    auto     height     = in_binding.dims.d[2];
+    cv::Size size{width, height};
+    this->letterbox(image, nchw, size);
+
+    this->context->setBindingDimensions(0, nvinfer1::Dims{4, {1, 3, height, width}});
+
+    CHECK(cudaMemcpyAsync(
+        this->device_ptrs[0], nchw.ptr<float>(), nchw.total() * nchw.elemSize(), cudaMemcpyHostToDevice, this->stream));
+}
+
+void YOLOv8_seg::copy_from_Mat(const cv::Mat& image, cv::Size& size)
+{
+    cv::Mat nchw;
+    this->letterbox(image, nchw, size);
+    this->context->setBindingDimensions(0, nvinfer1::Dims{4, {1, 3, size.height, size.width}});
+    CHECK(cudaMemcpyAsync(
+        this->device_ptrs[0], nchw.ptr<float>(), nchw.total() * nchw.elemSize(), cudaMemcpyHostToDevice, this->stream));
+}
+
+void YOLOv8_seg::infer()
+{
+
+    this->context->enqueueV2(this->device_ptrs.data(), this->stream, nullptr);
+    for (int i = 0; i < this->num_outputs; i++) {
+        size_t osize = this->output_bindings[i].size * this->output_bindings[i].dsize;
+        CHECK(cudaMemcpyAsync(
+            this->host_ptrs[i], this->device_ptrs[i + this->num_inputs], osize, cudaMemcpyDeviceToHost, this->stream));
+    }
+    cudaStreamSynchronize(this->stream);
+}
+
+void YOLOv8_seg::postprocess(
+    std::vector<Object>& objs, float score_thres, float iou_thres, int topk, int seg_channels, int seg_h, int seg_w)
+{
+    objs.clear();
+    auto input_h      = this->input_bindings[0].dims.d[2];
+    auto input_w      = this->input_bindings[0].dims.d[3];
+    auto num_anchors  = this->output_bindings[0].dims.d[1];
+    auto num_channels = this->output_bindings[0].dims.d[2];
+
+    auto& dw     = this->pparam.dw;
+    auto& dh     = this->pparam.dh;
+    auto& width  = this->pparam.width;
+    auto& height = this->pparam.height;
+    auto& ratio  = this->pparam.ratio;
+
+    auto*   output = static_cast<float*>(this->host_ptrs[0]);
+    cv::Mat protos = cv::Mat(seg_channels, seg_h * seg_w, CV_32F, static_cast<float*>(this->host_ptrs[1]));
+
+    std::vector<int>      labels;
+    std::vector<float>    scores;
+    std::vector<cv::Rect> bboxes;
+    std::vector<cv::Mat>  mask_confs;
+    std::vector<int>      indices;
+
+    for (int i = 0; i < num_anchors; i++) {
+        float* ptr   = output + i * num_channels;
+        float  score = *(ptr + 4);
+
+        if (score > score_thres) {
+            float x0 = *ptr++ - dw;
+            float y0 = *ptr++ - dh;
+            float x1 = *ptr++ - dw;
+            float y1 = *ptr++ - dh;
+
+            x0 = clamp(x0 * ratio, 0.f, width);
+            y0 = clamp(y0 * ratio, 0.f, height);
+            x1 = clamp(x1 * ratio, 0.f, width);
+            y1 = clamp(y1 * ratio, 0.f, height);
+
+            int     label     = *(++ptr);
+            cv::Mat mask_conf = cv::Mat(1, seg_channels, CV_32F, ++ptr);
+            mask_confs.push_back(mask_conf);
+            labels.push_back(label);
+            scores.push_back(score);
+            bboxes.push_back(cv::Rect_<float>(x0, y0, x1 - x0, y1 - y0));
+        }
+    }
+
+#if defined(BATCHED_NMS)
+    cv::dnn::NMSBoxesBatched(bboxes, scores, labels, score_thres, iou_thres, indices);
+#else
+    cv::dnn::NMSBoxes(bboxes, scores, score_thres, iou_thres, indices);
+#endif
+
+    cv::Mat masks;
+    int     cnt = 0;
+    for (auto& i : indices) {
+        if (cnt >= topk) {
+            break;
+        }
+        cv::Rect tmp = bboxes[i];
+        Object   obj;
+        obj.label = labels[i];
+        obj.rect  = tmp;
+        obj.prob  = scores[i];
+        masks.push_back(mask_confs[i]);
+        objs.push_back(obj);
+        cnt += 1;
+    }
+    if (!masks.empty()) {
+        cv::Mat matmulRes = (masks * protos).t();
+        // Reshape by the number of kept objects (at most topk): indices.size()
+        // can exceed topk, but `masks` only holds the rows pushed above.
+        cv::Mat maskMat   = matmulRes.reshape((int)objs.size(), {seg_h, seg_w});
+
+        std::vector<cv::Mat> maskChannels;
+        cv::split(maskMat, maskChannels);
+        int scale_dw = dw / input_w * seg_w;
+        int scale_dh = dh / input_h * seg_h;
+
+        cv::Rect roi(scale_dw, scale_dh, seg_w - 2 * scale_dw, seg_h - 2 * scale_dh);
+
+        for (int i = 0; i < indices.size(); i++) {
+            cv::Mat dest, mask;
+            cv::exp(-maskChannels[i], dest);
+            dest = 1.0 / (1.0 + dest);
+            dest = dest(roi);
+            cv::resize(dest, mask, cv::Size((int)width, (int)height), 0, 0, cv::INTER_LINEAR);
+            objs[i].boxMask = mask(objs[i].rect) > 0.5f;
+        }
+    }
+}
+
+void YOLOv8_seg::draw_objects(const cv::Mat&                                image,
+                              cv::Mat&                                      res,
+                              const std::vector<Object>&                    objs,
+                              const std::vector<std::string>&               CLASS_NAMES,
+                              const std::vector<std::vector<unsigned int>>& COLORS,
+                              const std::vector<std::vector<unsigned int>>& MASK_COLORS)
+{
+    res          = image.clone();
+    cv::Mat mask = image.clone();
+    for (auto& obj : objs) {
+        int        idx   = obj.label;
+        cv::Scalar color = cv::Scalar(COLORS[idx][0], COLORS[idx][1], COLORS[idx][2]);
+        cv::Scalar mask_color =
+            cv::Scalar(MASK_COLORS[idx % 20][0], MASK_COLORS[idx % 20][1], MASK_COLORS[idx % 20][2]);
+        cv::rectangle(res, obj.rect, color, 2);
+
+        char text[256];
+        sprintf(text, "%s %.1f%%", CLASS_NAMES[idx].c_str(), obj.prob * 100);
+        mask(obj.rect).setTo(mask_color, obj.boxMask);
+
+        int      baseLine   = 0;
+        cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.4, 1, &baseLine);
+
+        int x = (int)obj.rect.x;
+        int y = (int)obj.rect.y + 1;
+
+        if (y > res.rows)
+            y = res.rows;
+
+        cv::rectangle(res, cv::Rect(x, y, label_size.width, label_size.height + baseLine), {0, 0, 255}, -1);
+
+        cv::putText(res, text, cv::Point(x, y + label_size.height), cv::FONT_HERSHEY_SIMPLEX, 0.4, {255, 255, 255}, 1);
+    }
+    cv::addWeighted(res, 0.5, mask, 0.8, 1, res);
+}
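To make the coordinate bookkeeping in letterbox()/postprocess() concrete: letterbox() records ratio, dw, and dh, and postprocess() maps a model-space value back with (v - dw) * ratio, clamped to the source image. A standalone sketch with illustrative numbers only:

#include <algorithm>
#include <cstdio>

int main() {
    // A 1280x720 frame letterboxed into 640x480: r = min(480/720, 640/1280) = 0.5
    float r = 0.5f, ratio = 1.0f / r;
    float dw = (640 - 1280 * r) / 2.0f; // 0  (no horizontal padding)
    float dh = (480 - 720 * r) / 2.0f;  // 60 (vertical padding per side)
    float width = 1280, height = 720;

    float x_model = 320, y_model = 300; // a point in network input coordinates
    float x = std::min(std::max((x_model - dw) * ratio, 0.f), width);  // 640
    float y = std::min(std::max((y_model - dh) * ratio, 0.f), height); // 480
    printf("(%.0f, %.0f)\n", x, y);
    return 0;
}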

+ 53 - 0
detect/tensorrt/yolov8-seg.h

@@ -0,0 +1,53 @@
+//
+// Created by ubuntu on 3/16/23.
+//
+#ifndef JETSON_SEGMENT_YOLOV8_SEG_HPP
+#define JETSON_SEGMENT_YOLOV8_SEG_HPP
+#include "NvInferPlugin.h"
+#include "common.hpp"
+#include <fstream>
+
+using namespace seg;
+
+class YOLOv8_seg {
+public:
+    explicit YOLOv8_seg(const std::string& engine_file_path);
+    ~YOLOv8_seg();
+
+    void                 make_pipe(bool warmup = true);
+    void                 copy_from_Mat(const cv::Mat& image);
+    void                 copy_from_Mat(const cv::Mat& image, cv::Size& size);
+    void                 letterbox(const cv::Mat& image, cv::Mat& out, cv::Size& size);
+    void                 infer();
+    void                 postprocess(std::vector<Object>& objs,
+                                     float                score_thres  = 0.25f,
+                                     float                iou_thres    = 0.65f,
+                                     int                  topk         = 100,
+                                     int                  seg_channels = 32,
+                                     int                  seg_h        = 160,
+                                     int                  seg_w        = 160);
+    static void          draw_objects(const cv::Mat&                                image,
+                                      cv::Mat&                                      res,
+                                      const std::vector<Object>&                    objs,
+                                      const std::vector<std::string>&               CLASS_NAMES,
+                                      const std::vector<std::vector<unsigned int>>& COLORS,
+                                      const std::vector<std::vector<unsigned int>>& MASK_COLORS);
+    int                  num_bindings;
+    int                  num_inputs  = 0;
+    int                  num_outputs = 0;
+    std::vector<Binding> input_bindings;
+    std::vector<Binding> output_bindings;
+    std::vector<void*>   host_ptrs;
+    std::vector<void*>   device_ptrs;
+
+    PreParam pparam;
+
+private:
+    nvinfer1::ICudaEngine*       engine  = nullptr;
+    nvinfer1::IRuntime*          runtime = nullptr;
+    nvinfer1::IExecutionContext* context = nullptr;
+    cudaStream_t                 stream  = nullptr;
+    Logger                       gLogger{nvinfer1::ILogger::Severity::kERROR};
+};
+
+#endif  // JETSON_SEGMENT_YOLOV8_SEG_HPP