Sfoglia il codice sorgente

1、完善消息格式;

LiuZe 1 anno fa
parent
commit
13db2c6540

+ 0 - 0
Modules/CompareMeasure/XM-Tof&Lidar/compare.py


+ 4 - 12
Modules/Tof3D/main.py

@@ -1,8 +1,8 @@
-import glog, signal
-from tofManager import *
+import glog, signal, os, json
 import SDK.ZX.async_communication as cm
 import SDK.ZX.async_communication_etc_pb2 as rce
 import SDK.ZX.tool as zx_tool
+import receive
 
 
 if __name__ == '__main__':
@@ -20,24 +20,16 @@ if __name__ == '__main__':
     # 初始化
     g_rabbitmq.Init(None, statu_ex_keys)
     g_rabbitmq.start()
-    tofs = TofManager()
-    tofs.searchAllCamera()
-    etc = tofs.getAllCameraEtc()
-    tofs.openAllCamera()
-    tofs.startAllCameraStream()
 
     # 绑定rabbitmq
-    g_rabbitmq.bind_statu_callback(statu_ex_keys[0][0], statu_ex_keys[0][1], tofs.exec)
+    g_rabbitmq.bind_statu_callback(statu_ex_keys[0][0], statu_ex_keys[0][1], receive.ReceivedTofData)
+    g_rabbitmq.bind_statu_callback(statu_ex_keys[1][0], statu_ex_keys[1][1], receive.ReceivedLidarData)
 
     while True:
         message = input("input q or Q exit this progress\n")
         if message == "q" or message == "Q":
             break
 
-    # 释放
-    tofs.stopAllCameraStream()
-    tofs.closeAllCamera()
-
     # 强制结束进程
     pid = os.getpid()
     os.kill(pid, 1)

+ 16 - 0
Modules/CompareMeasure/XM-Tof&Lidar/etc/rabbitmq.json

@@ -0,0 +1,16 @@
+{
+    "ip": "10.211.32.200",
+    "port": 5672,
+    "user": "zx",
+    "password": "123456",
+    "binds": [
+        {
+            "ex": "statu_ex",
+            "route_key": "vzense_tof3d_lite_3219_measure_port"
+        },
+        {
+            "ex": "statu_ex",
+            "route_key": "measure_3219_statu_port"
+        }
+    ]
+}

+ 14 - 0
Modules/CompareMeasure/XM-Tof&Lidar/receive.py

@@ -0,0 +1,14 @@
+import json, glog
+
def ReceivedLidarData(req_data):
    """Status-queue callback for lidar messages.

    Logs the measurement block that was forwarded to the PLC; if reading or
    logging it raises ValueError, dumps the raw statu payload at error level.
    """
    statu = req_data.statu
    try:
        glog.info(statu.measure_info_to_plc_forward)
    except ValueError:
        glog.error(statu)
+
+
def ReceivedTofData(req_data):
    """Status-queue callback for ToF camera messages.

    Logs the whole statu payload at info level; falls back to error-level
    logging of the same payload if a ValueError is raised.
    """
    payload = req_data.statu
    try:
        glog.info(payload)
    except ValueError:
        glog.error(payload)

+ 0 - 0
Modules/DetectPeople/README.md


+ 40 - 0
Modules/Rename/rename.py

@@ -0,0 +1,40 @@
+import os
+import re
+
+index = 0
+
def get_files_in_directory(path):
    """Return the full paths of every file under *path*, recursively.

    Order follows os.walk (top-down); a missing *path* yields an empty list.
    """
    return [os.path.join(root, name)
            for root, _dirs, names in os.walk(path)
            for name in names]
+
+
index = 0  # running file number; module level so numbering continues across calls


def rename_files(path, dir_name, file_pattern):
    """Move every file under *path* whose NAME contains *file_pattern* into
    *dir_name*, renamed to "<file_pattern>_<n><ext>" with n counting up in
    deterministic (sorted) traversal order.

    *dir_name* may be absolute (os.path.join then returns it unchanged) or
    relative to *path*.  Files already inside the target directory are left
    alone so repeated runs do not re-number earlier output.
    """
    global index

    # Collect matches first; sort dirs and names so the numbering is
    # reproducible run-to-run (plain os.walk order is filesystem-dependent).
    matched = []
    for root, dirs, filenames in os.walk(path):
        dirs.sort()
        for filename in sorted(filenames):
            # Match against the file name only — matching the whole path (as
            # before) also fired when the pattern occurred in a directory name.
            if file_pattern in filename:
                matched.append(os.path.join(root, filename))

    # Create the target directory (no-op if it already exists).
    output_dir = os.path.join(path, dir_name)
    os.makedirs(output_dir, exist_ok=True)

    for src in matched:
        if os.path.dirname(src) == output_dir:
            continue  # already-renamed output; skip
        index += 1
        extension = os.path.splitext(src)[1]
        os.rename(src, os.path.join(output_dir, file_pattern + "_" + str(index) + extension))
+
+
# Example: walk a directory tree and its subdirectories, gather every file
# whose name contains the pattern, and renumber them sequentially into a
# single target directory.
# Guarded so importing this module no longer triggers the renaming side effect.
if __name__ == "__main__":
    path = "/home/zx/Tof_3D/saveImage"
    dir_name = "/home/zx/Tof_3D/AllImage"
    file_pattern = "Depth"
    rename_files(path, dir_name, file_pattern)

+ 6 - 0
Modules/Tof3D/etc/command.json

@@ -0,0 +1,6 @@
+{
+    "id": 0,
+    "path": "save/",
+    "depth_name": "Depth",
+    "point_cloud_name": "PointCloud"
+}

+ 3 - 2
Modules/Tof3D/etc/example.txt

@@ -1,2 +1,3 @@
-{"func": "getCameraFrame", "params": {"id":0, "path": "/home/zx/", "depth_name": "111"}}
-{"func": "loopGetCameraFrame", "params": {"id":0, "path": "/home/zx/saveImage/", "times":10, "interval":0}}
+{"func": "getCameraFrame", "params": {"id":0, "path": "/home/zx/", "depth_name": "111", "point_cloud_name": "cloud"}}
+{"func": "loopGetCameraFrame", "params": {"id":0, "path": "/home/zx/saveImage/zl-back-wheels-near/", "times":30, "interval":1}}
+{"func": "stepGetCameraFrame", "params": {"id":0, "path": "/home/zx/saveImage/1/"}}

+ 4 - 0
Modules/Tof3D/rabbitmqSendMessage.py

@@ -2,6 +2,8 @@ import SDK.ZX.async_communication as cm
 import SDK.ZX.async_communication_etc_pb2 as rce
 import SDK.ZX.tool as zx_tool
 import os
+from rabbitmq_ui import *
+
 
 if __name__ == '__main__':
     # 加载配置
@@ -19,6 +21,8 @@ if __name__ == '__main__':
     g_rabbitmq.Init(None, statu_ex_keys)
     g_rabbitmq.start()
 
+    # rabbit_ui = RabbitmqUI()
+    # rabbit_ui.run()
     while True:
         message = input("Input message: ")
         if message == "Q" or message == "q":

+ 49 - 0
Modules/Tof3D/tof3d.py

@@ -0,0 +1,49 @@
+import glog, signal
+from tofManager import *
+import SDK.ZX.async_communication as cm
+import SDK.ZX.async_communication_etc_pb2 as rce
+import SDK.ZX.tool as zx_tool
+
+
if __name__ == '__main__':
    # Load configuration: RabbitMQ connection settings (ip/port/credentials
    # plus exchange/routing-key bindings) from this module's etc/rabbitmq.json.
    rabbitmq_etc = zx_tool.getProtobufJsonConfig(os.path.dirname(os.path.abspath(__file__)) + "/etc/rabbitmq.json", rce.RabbitmqEtc())
    print(rabbitmq_etc)
    g_rabbitmq = cm.RabbitAsyncCommunicator(rabbitmq_etc.ip, rabbitmq_etc.port,
                                            rabbitmq_etc.user, rabbitmq_etc.password)
    # Collect [exchange, routing_key] pairs for every configured binding.
    statu_ex_keys = []
    for bind in rabbitmq_etc.binds:
        key = [bind.ex, bind.route_key]
        statu_ex_keys.append(key)

    glog.info(statu_ex_keys)
    # Initialise the async communicator and start its consumer.
    g_rabbitmq.Init(None, statu_ex_keys)
    g_rabbitmq.start()
    # Discover, open and start streaming on all attached ToF cameras.
    tofs = TofManager()
    tofs.searchAllCamera()
    etc = tofs.getAllCameraEtc()
    tofs.openAllCamera()
    tofs.startAllCameraStream()

    # Bind rabbitmq: route messages from the first configured binding to the
    # camera manager's command dispatcher.
    g_rabbitmq.bind_statu_callback(statu_ex_keys[0][0], statu_ex_keys[0][1], tofs.exec)
    #
    # while True:
    #     message = input("input q or Q exit this progress\n")
    #     if message == "q" or message == "Q":
    #         break
    # One-shot capture: read the capture command from etc/command.json and
    # hand it to the manager.  NOTE: `os` and `json` are not imported here
    # directly — they come into scope via `from tofManager import *`.
    fileName = os.path.dirname(os.path.abspath(__file__)) + "/etc/command.json"
    with open(fileName, 'r') as jsonFile:
        json_etc = json.load(jsonFile)
        print(json_etc)
        tofs.getCameraFrameCloud(json_etc)

    # Release: stop streaming and close every camera.
    tofs.stopAllCameraStream()
    tofs.closeAllCamera()

    # Force-terminate the process; signal 1 is SIGHUP, which also takes down
    # the communicator's background threads.
    pid = os.getpid()
    os.kill(pid, 1)
+

+ 0 - 64
Modules/Tof3D/tofDevice.py

@@ -1,64 +0,0 @@
-import SDK.Vzense.VzenseDS_api as tof
-import etc_pb2 as etc
-import time
-from ctypes import *
-
-
-class Tof3DDevice:
-    def __init__(self, tof_etc=etc.VzenseTofDevices()):
-        self.tof_etc = tof_etc
-        print(self.tof_etc.ipv4)
-        camera = tof.VzenseTofCam()
-        camera_count = camera.VZ_GetDeviceCount()
-        retry_count = 100
-        while camera_count == 0 and retry_count > 0:
-            retry_count = retry_count - 1
-            camera_count = camera.VZ_GetDeviceCount()
-            time.sleep(1)
-            print("scaning......   ", retry_count)
-
-        device_info = tof.VzDeviceInfo()
-
-        if camera_count > 1:
-            ret, device_infolist = camera.VZ_GetDeviceInfoList(camera_count)
-            if ret == 0:
-                device_info = device_infolist[0]
-                for info in device_infolist:
-                    print('cam uri:  ' + str(info.uri))
-            else:
-                print(' failed:' + ret)
-                exit()
-        elif camera_count == 1:
-            ret, device_info = camera.VZ_GetDeviceInfo()
-            if ret == 0:
-                print('cam uri:' + str(device_info.uri))
-            else:
-                print(' failed:' + ret)
-                exit()
-        else:
-            print("there are no camera found")
-            exit()
-
-        if tof.VzConnectStatus.Connected.value != device_info.status:
-            print("connect statu:", device_info.status)
-            print("Call VZ_OpenDeviceByUri with connect status :", tof.VzConnectStatus.Connected.value)
-            exit()
-        else:
-            print("uri: " + str(device_info.uri))
-            print("alias: " + str(device_info.alias))
-            print("connectStatus: " + str(device_info.status))
-
-        print(self.tof_etc.ipv4)
-        # ret = camera.VZ_OpenDeviceByIP(b'192.168.2.101')
-        ret = camera.VZ_OpenDeviceByIP(self.tof_etc.ipv4.encode())
-
-        if ret == 0:
-            print("open device successful")
-        else:
-            print('VZ_OpenDeviceByUri failed: ' + str(ret))
-
-        ret = camera.VZ_StartStream()
-        if ret == 0:
-            print("start stream successful")
-        else:
-            print("VZ_StartStream failed:", ret)

+ 84 - 6
Modules/Tof3D/tofManager.py

@@ -1,7 +1,7 @@
 import etc_pb2 as etc
 import os, glog, numpy, cv2, SDK.ZX.tool, json, time
-from tofDevice import *
 import SDK.Vzense.VzenseDS_api as tof
+from ctypes import *
 
 
 class TofManager:
@@ -32,6 +32,15 @@ class TofManager:
         filename = path + name
         cv2.imwrite(filename, frametmp, [cv2.IMWRITE_JPEG_QUALITY, 100])
 
+    def savePointCloud(self, width, height, path, name, pointlist):
+        file = open(path + name, "w")
+
+        for i in range(width * height):
+            if pointlist[i].z != 0 and pointlist[i].z != 65535:
+                file.write("{0},{1},{2}\n".format(pointlist[i].x, pointlist[i].y, pointlist[i].z))
+
+        file.close()
+
     def __init__(self):
         self.etc_file = os.path.dirname(os.path.abspath(__file__)) + "/etc/device.json"
         self.tofs_etc = {}
@@ -39,6 +48,8 @@ class TofManager:
         self.camera = tof.VzenseTofCam()
         self.camera_list = {}
         self.search_flag = False
+        self.stepPath = ""
+        self.stepIndex = 0
         self.liscenFunc = [["getCameraFrame", self.getCameraFrame],
                            ["updateTofsEtc", self.updateTofsEtc],
                            ["getCameraEtc", self.getCameraEtc],
@@ -54,8 +65,8 @@ class TofManager:
                            ["stopCamera", self.stopCamera],
                            ["stopAllCameraStream", self.stopAllCameraStream],
                            ["getCameraFrame", self.getCameraFrame],
-                           ["test", self.test],
-                           ["loopGetCameraFrame", self.loopGetCameraFrame]
+                           ["loopGetCameraFrame", self.loopGetCameraFrame],
+                           ["stepGetCameraFrame", self.stepGetCameraFrame]
                            ]
 
         # self.t1 = threading(target=func, args=('第一个线程', 1))
@@ -74,8 +85,7 @@ class TofManager:
                 if json_data["func"] == bind_func[0] and type(json_data["params"]) == dict:
                     bind_func[1](json_data["params"])
                     return
-                glog.warning(json_data["params"])
-                    # {"func": "test", "params": {"id": 1}}
+            glog.warning(json_data["params"])
         except ValueError as e:
             glog.error(req_data.statu)
 
@@ -221,7 +231,7 @@ class TofManager:
                 glog.info(self.tofs_etc[id].ipv4 + ' VZ_StopStream failed: ' + str(ret))
 
     def getCameraFrame(self, receive_data=""):
-        id, path, depth_name, ir_name = 0, "", "", ""
+        id, path, depth_name, ir_name, point_cloud_name = 0, "", "", "", ""
         if "id" not in receive_data or "path" not in receive_data:
             return
         id = receive_data["id"]
@@ -230,6 +240,8 @@ class TofManager:
             depth_name = receive_data["depth_name"]
         if "ir_name" in receive_data:
             ir_name = receive_data["ir_name"]
+        if "point_cloud_name" in receive_data:
+            point_cloud_name = receive_data["point_cloud_name"]
 
         if id == "" or path == "":
             return
@@ -245,6 +257,13 @@ class TofManager:
                 if ret == 0:
                     self.saveDepthFrame2Image(path, depth_name + ".jpg", depthframe)
                     glog.info(self.tofs_etc[id].ipv4 + "  depth frameindex: " + str(depthframe.frameIndex))
+                    if point_cloud_name != "":
+                        ret, pointlist = self.camera_list[id].VZ_ConvertDepthFrameToPointCloudVector(depthframe)
+                        if ret == 0:
+                            self.savePointCloud(depthframe.width, depthframe.height, path, point_cloud_name + ".txt",
+                                                pointlist)
+                        else:
+                            glog.warning("VZ_ConvertDepthFrameToPointCloudVector error %d", ret)
                 else:
                     glog.warning("VZ_GetFrame error %d", ret)
             if frameready.ir and ir_name != "":
@@ -267,10 +286,69 @@ class TofManager:
         while times:
             receive_data["depth_name"] = "Depth_" + str(receive_data["times"] - times + 1)
             receive_data["ir_name"] = "Ir_" + str(receive_data["times"] - times + 1)
+            receive_data["point_cloud_name"] = "cloud_" + str(receive_data["times"] - times + 1)
             glog.info(receive_data)
             self.getCameraFrame(receive_data)
             times = times - 1
             time.sleep(interval)
 
+    def stepGetCameraFrame(self, receive_data=""):
+        if "path" not in receive_data or "id" not in receive_data:
+            return
+        if receive_data["path"] != self.stepPath:
+            self.stepIndex = 0
+            self.stepPath = receive_data["path"]
+        receive_data["depth_name"] = "Depth_" + str(self.stepIndex)
+        receive_data["ir_name"] = "Ir_" + str(self.stepIndex)
+        receive_data["point_cloud_name"] = "cloud_" + str(self.stepIndex)
+        self.getCameraFrame(receive_data)
+        self.stepIndex = self.stepIndex + 1
+
+    def getCameraFrameCloud(self, receive_data=""):
+        id, path, depth_name, point_cloud_name = 0, "", "", ""
+        if "id" not in receive_data or "path" not in receive_data:
+            return
+        id = receive_data["id"]
+        path = receive_data["path"]
+        if "depth_name" in receive_data:
+            depth_name = receive_data["depth_name"]
+        if "point_cloud_name" in receive_data:
+            point_cloud_name = receive_data["point_cloud_name"]
+
+        if id == "" or path == "":
+            return
+
+        if id in self.camera_list:
+            ret, frameready = self.camera_list[id].VZ_GetFrameReady(c_uint16(1000))
+            if ret != 0:
+                glog.error("VZ_GetFrameReady failed: %d", ret)
+                return
+
+            if frameready.depth and depth_name != "":
+                ret, depthframe = self.camera_list[id].VZ_GetFrame(tof.VzFrameType.VzDepthFrame)
+                if ret == 0:
+                    self.saveDepthFrame2Image(path, depth_name + ".jpg", depthframe)
+                    glog.info(self.tofs_etc[id].ipv4 + "  depth frameindex: " + str(depthframe.frameIndex))
+                    pointList = []
+                    if point_cloud_name != "":
+                        for h in range(190, 290):
+                            offset = h * depthframe.width
+                            for w in range(270, 370):
+                                depth = tof.VzDepthVector3()
+                                depth.depthX = w
+                                depth.depthY = h
+                                depth.depthZ = depthframe.pFrameData[offset + w]
+                                ret, point = self.camera_list[id].VZ_ConvertDepthToPointCloud(depth)
+                                pointList.append(point)
+                                # depthList.append(pt)
+                        # ret, pointlist = self.camera_list[id].VZ_ConvertDepthFrameToPointCloudVector(depthframe)
+                        if ret == 0:
+                            self.savePointCloud(depthframe.width, depthframe.height, path, point_cloud_name + ".txt",
+                                                pointList)
+                        else:
+                            glog.warning("VZ_ConvertDepthFrameToPointCloudVector error %d", ret)
+                else:
+                    glog.warning("VZ_GetFrame error %d", ret)
+
     def setCameraEtc(self, ip, tof_etc=etc.VzenseTofDevices()):
         glog.info("=======================")

+ 179 - 0
Modules/moreSpecimens/more_specimens.py

@@ -0,0 +1,179 @@
+from copy import deepcopy
+
+import glog
+import os, cv2, json
+
+import numpy as np
+
+
def traverse_folder(folder_path, file_list, end=".jpg", all_floder=False):
    """Append every .jpg/.png file directly inside *folder_path* to *file_list*.

    With all_floder=True the scan also descends into subdirectories at every
    depth.  (The previous recursive call dropped the flag, so recursion
    effectively stopped after one level — fixed by propagating both options.)

    NOTE(review): the *end* parameter is currently unused — the extension
    filter is hard-coded to ".jpg"/".png"; confirm the intent before wiring
    it into the filter.
    """
    for item in os.listdir(folder_path):
        item_path = os.path.join(folder_path, item)
        if os.path.isfile(item_path):
            # str.endswith accepts a tuple: one call covers both extensions.
            if item_path.endswith((".jpg", ".png")):
                file_list.append(item_path)
        elif os.path.isdir(item_path) and all_floder is True:
            # Propagate the options so deep trees are fully traversed.
            traverse_folder(item_path, file_list, end, all_floder)
+
+
def mergeNewImage(img0, etc0, img1, etc1, img2, etc2, img3, etc3):
    """Tile four 480x640x3 quadrant images into one 960x1280 canvas.

    Layout: img0 top-left, img1 top-right, img2 bottom-left, img3 bottom-right.
    Each non-None etc dict has its "points" shifted IN PLACE into canvas
    coordinates (x += 640 for right-hand tiles, y += 480 for bottom tiles;
    etc0 is never shifted).  Returns (canvas, etc0, etc1, etc2, etc3).
    """
    canvas = np.zeros((960, 1280, 3), dtype=np.uint8)
    placements = ((img0, etc0, 0, 0),
                  (img1, etc1, 640, 0),
                  (img2, etc2, 0, 480),
                  (img3, etc3, 640, 480))
    for tile, ann, dx, dy in placements:
        canvas[dy:dy + 480, dx:dx + 640, :] = tile
        if ann is not None and (dx or dy):
            for point in ann["points"]:
                point[0] = point[0] + dx
                point[1] = point[1] + dy
    return canvas, etc0, etc1, etc2, etc3
+
+
def _quadrant_index(shape):
    """Quadrant id of an annotation from its first point: bit0 set for the
    right half (x >= 640), bit1 for the bottom half (y >= 480).
    0 = left-front, 1 = right-front, 2 = left-rear, 3 = right-rear."""
    return int(shape["points"][0][0] // 640) | (int(shape["points"][0][1] // 480) << 1)


def _write_variant(etc, etc_sig_old, quads, order, suffix, save_file, file_name):
    """Compose one shuffled 2x2 layout and write it as
    '<save_file>_convert_test_<suffix>.jpg' plus its .json sidecar."""
    etc['shapes'].clear()
    # Fresh copies each time: mergeNewImage shifts points in place.
    etc_sig = deepcopy(etc_sig_old)
    args = []
    for idx in order:
        args.extend((quads[idx], etc_sig[idx]))
    merge_img, *shifted = mergeNewImage(*args)
    for ann in shifted:
        if ann is not None:
            etc['shapes'].append(ann)
    etc['imagePath'] = file_name + "_convert_test_" + suffix + ".jpg"
    cv2.imwrite(save_file + "_convert_test_" + suffix + ".jpg", merge_img)
    with open(save_file + "_convert_test_" + suffix + ".json", "w") as w_f:
        json.dump(etc, w_f, indent=4)


def convertImg(file, output_path):
    """Generate six shuffled-quadrant variants of a 1280x960 labelled image.

    Reads the image's labelme-style .json sidecar, splits the image into four
    480x640 quadrants, re-bases every annotation to quadrant-local coordinates,
    then writes six recombinations (suffixes a..f) to *output_path*, each with
    an updated .json.  Returns silently when no sidecar exists.
    """
    ext = os.path.splitext(file)  # split the extension off the path
    file_name = ext[0].split("/")[-1]
    save_file = output_path + file_name
    json_file = ext[0] + ".json"
    if not os.path.isfile(json_file):
        return
    # `with` closes the handle (the original leaked the open file object).
    with open(json_file, 'r') as f:
        etc = json.loads(f.read())

    img = cv2.imread(file)
    # Quadrants indexed by _quadrant_index: 0=lf, 1=rf, 2=lr, 3=rr.
    quads = [img[0:480, 0:640, :],
             img[0:480, 640:1280, :],
             img[480:960, 0:640, :],
             img[480:960, 640:1280, :]]

    # Re-base each annotation to its quadrant's local coordinates.
    etc_sig_old = [None, None, None, None]
    for shape in etc['shapes']:
        idx = _quadrant_index(shape)
        for point in shape["points"]:
            point[0] = point[0] % 640
            point[1] = point[1] % 480
        etc_sig_old[idx] = shape

    # Quadrant orders for the six variants, identical to the original
    # hand-unrolled stanzas a..f.
    variants = [("a", [1, 0, 2, 3]),
                ("b", [2, 1, 0, 3]),
                ("c", [3, 1, 2, 0]),
                ("d", [0, 2, 1, 3]),
                ("e", [0, 3, 2, 1]),
                ("f", [0, 1, 3, 2])]
    for suffix, order in variants:
        _write_variant(etc, etc_sig_old, quads, order, suffix, save_file, file_name)
+
+
if __name__ == '__main__':
    # Root folder of labelled images; composites go to its output/ subfolder.
    path = "/home/zx/DataAcquisition-Python/Modules/moreSpecimens/image"
    output_path = path + "/output/"
    if not os.path.exists(output_path):
        os.mkdir(output_path)

    # Load configuration / collect the input images.
    if not os.path.isdir(path):
        print("it's a special file(socket,FIFO,device file)")
        exit(0)
    file_list = []
    traverse_folder(path, file_list)

    for file in file_list:
        print(file)
        if file.endswith(".png"):
            # Re-encode PNGs as JPEGs first so every variant is based on .jpg.
            mat = cv2.imread(file)
            jpg_path = os.path.splitext(file)[0] + ".jpg"
            cv2.imwrite(jpg_path, mat)
            file = jpg_path
        convertImg(file, output_path)

+ 36 - 0
Modules/roration_euler/raration2euler.py

@@ -0,0 +1,36 @@
+import math
+import numpy as np
+
# Row-major 4x4 homogeneous transform pasted as 16 space-separated floats
# (3x3 rotation block plus translation in the last column).
num = "-0.639065146446 0.767805159092 -0.045507796109 -0.792347908020 -0.768585383892 -0.639746963978 -0.000546242518 1.655516386032 -0.029532883316 0.034627545625 0.998963832855 -0.625453591347 0.000000000000 0.000000000000 0.000000000000 1.000000000000"
nums = num.split(" ")

# Parse into a 4x4 numpy matrix.
t = [float(n) for n in nums]
inputRotation = np.array(t).reshape((4, 4))
+
if __name__ == "__main__":
    # Translation is the last column of the homogeneous transform.
    x = inputRotation[0][3]
    y = inputRotation[1][3]
    z = inputRotation[2][3]

    # Euler extraction from the rotation block.  Degenerate when
    # cos(yaw) == 0 (gimbal lock) — no guard here, as in the original.
    yaw = -math.asin(inputRotation[2][0])
    cy = math.cos(yaw)
    roll = math.atan2(inputRotation[2][1] / cy, inputRotation[2][2] / cy)
    pitch = math.atan2(inputRotation[1][0] / cy, inputRotation[0][0] / cy)

    # NOTE(review): under the usual Rz*Ry*Rx convention the asin(r20) angle is
    # the Y rotation and atan2(r10, r00) the Z rotation — the theta_y/theta_z
    # naming below may be swapped; confirm against the consumer of the output.
    theta_z = math.degrees(yaw)
    theta_x = math.degrees(roll)
    theta_y = math.degrees(pitch)

    print(f"{x:.3f}", f"{y:.3f}", f"{z:.3f}")
    print(f"{theta_x:.3f}", f"{theta_y:.3f}", f"{theta_z:.3f}")
+

BIN
SDK/Vzense/Ubuntu18.04/Doc/VZense_NebulaSDK开发者指南_Linux_cn.pdf


+ 137 - 119
SDK/Vzense/VzenseDS_api.py

@@ -1,12 +1,17 @@
+import math
+
 from SDK.Vzense.VzenseDS_define import *
 import platform
 import os
 import ctypes
 from ctypes import *
-gCallbackFuncList=[]
+
+gCallbackFuncList = []
+
 
 class VzenseTofCam():
     device_handle = c_void_p(0)
+
     def __init__(self):
         system_ = platform.system().lower()
         machine_ = platform.machine().lower()
@@ -21,15 +26,15 @@ class VzenseTofCam():
                 system_info = os_info.version
                 # print('version:',system_info)
                 if system_info.find('18.04') != -1 or system_info.find('20.04') != -1:
-                    libpath = os.path.dirname(os.path.abspath(__file__))+"/Ubuntu18.04/Lib/libNebula_api.so"
+                    libpath = os.path.dirname(os.path.abspath(__file__)) + "/Ubuntu18.04/Lib/libNebula_api.so"
                     # print("load lib from ", libpath)
                     self.vz_cam_lib = cdll.LoadLibrary(libpath)
                 else:
-                    libpath = os.path.dirname(os.path.abspath(__file__))+"/Ubuntu16.04/Lib/libNebula_api.so"
+                    libpath = os.path.dirname(os.path.abspath(__file__)) + "/Ubuntu16.04/Lib/libNebula_api.so"
                     # print(libpath)
                     self.vz_cam_lib = cdll.LoadLibrary(libpath)
             elif machine_ == 'aarch64':
-                libpath = os.path.dirname(os.path.abspath(__file__))+"/AArch64/Lib/libNebula_api.so"
+                libpath = os.path.dirname(os.path.abspath(__file__)) + "/AArch64/Lib/libNebula_api.so"
                 # print(libpath)
                 self.vz_cam_lib = cdll.LoadLibrary(libpath)
             else:
@@ -38,11 +43,11 @@ class VzenseTofCam():
         elif platform.system() == 'Windows':
             if machine_ == 'amd64':
                 if architecture_ == '64bit':
-                    libpath = os.path.dirname(os.path.abspath(__file__))+"/Windows/Bin/x64/Nebula_api.dll"
+                    libpath = os.path.dirname(os.path.abspath(__file__)) + "/Windows/Bin/x64/Nebula_api.dll"
                     # print(libpath)
                     self.vz_cam_lib = cdll.LoadLibrary(libpath)
                 else:
-                    libpath = os.path.dirname(os.path.abspath(__file__))+"/Windows/Bin/x86/Nebula_api.dll"
+                    libpath = os.path.dirname(os.path.abspath(__file__)) + "/Windows/Bin/x86/Nebula_api.dll"
                     # print(libpath)
                     self.vz_cam_lib = cdll.LoadLibrary(libpath)
             else:
@@ -51,214 +56,227 @@ class VzenseTofCam():
         else:
             # print('do not supported OS', system_, machine_)
             exit()
-            
+
         self.device_handle = c_void_p(0)
         self.vz_cam_lib.VZ_Initialize()
-        
+
     def __del__(self):
         self.vz_cam_lib.VZ_Shutdown()
-    
-    def VZ_GetSDKVersion(self): 
+
+    def VZ_GetSDKVersion(self):
         tmp = c_char * 64
         version = tmp()
-        return self.vz_cam_lib.VZ_GetSDKVersion(version),version.value
- 
+        return self.vz_cam_lib.VZ_GetSDKVersion(version), version.value
+
     def VZ_GetDeviceCount(self):
         count = c_int()
         self.vz_cam_lib.VZ_GetDeviceCount(byref(count))
         return count.value
-    
-    def VZ_GetDeviceInfoList(self, cam_count = 1):
-        tmp  = VzDeviceInfo* cam_count
-        device_infolist = tmp() 
-        return self.vz_cam_lib.VZ_GetDeviceInfoList(cam_count, device_infolist),device_infolist
-    
-    def VZ_GetDeviceInfo(self, cam_index = 0):
+
+    def VZ_GetDeviceInfoList(self, cam_count=1):
+        tmp = VzDeviceInfo * cam_count
+        device_infolist = tmp()
+        return self.vz_cam_lib.VZ_GetDeviceInfoList(cam_count, device_infolist), device_infolist
+
+    def VZ_GetDeviceInfo(self, cam_index=0):
         device_info = VzDeviceInfo()
-        return self.vz_cam_lib.VZ_GetDeviceInfo(cam_index,byref(device_info)), device_info
-         
-    def VZ_OpenDeviceByUri(self,  uri=c_char_p()):
+        return self.vz_cam_lib.VZ_GetDeviceInfo(cam_index, byref(device_info)), device_info
+
+    def VZ_OpenDeviceByUri(self, uri=c_char_p()):
         if uri:
             return self.vz_cam_lib.VZ_OpenDeviceByUri(uri, byref(self.device_handle))
         else:
             return VzReturnStatus.VzRetInputPointerIsNull
-    
-    def VZ_OpenDeviceByAlias(self,  alias=c_char_p()):
+
+    def VZ_OpenDeviceByAlias(self, alias=c_char_p()):
         if alias:
             return self.vz_cam_lib.VZ_OpenDeviceByAlias(alias, byref(self.device_handle))
         else:
             return VzReturnStatus.VzRetInputPointerIsNull
-    
-    def VZ_OpenDeviceByIP(self,  ip=c_char_p()):
+
+    def VZ_OpenDeviceByIP(self, ip=c_char_p()):
         if ip:
             return self.vz_cam_lib.VZ_OpenDeviceByIP(ip, byref(self.device_handle))
         else:
             return VzReturnStatus.VzRetInputPointerIsNull, 0
-        
+
     def VZ_CloseDevice(self):
         return self.vz_cam_lib.VZ_CloseDevice(byref(self.device_handle))
 
     def VZ_StartStream(self):
         return self.vz_cam_lib.VZ_StartStream(self.device_handle)
-         
+
     def VZ_StopStream(self):
         return self.vz_cam_lib.VZ_StopStream(self.device_handle)
-         
-    def VZ_GetFrameReady(self,waitTime = c_uint16(33)):
+
+    def VZ_GetFrameReady(self, waitTime=c_uint16(33)):
         frameready = VzFrameReady()
         return self.vz_cam_lib.VZ_GetFrameReady(self.device_handle, waitTime, byref(frameready)), frameready
 
-    def VZ_GetFrame(self,  frametype = VzFrameType.VzDepthFrame):   
-        Vzframe = VzFrame() 
+    def VZ_GetFrame(self, frametype=VzFrameType.VzDepthFrame):
+        Vzframe = VzFrame()
         return self.vz_cam_lib.VZ_GetFrame(self.device_handle, frametype.value, byref(Vzframe)), Vzframe
-       
-    def VZ_SetWorkMode(self,  mode = VzWorkMode.ActiveMode):
-        return self.vz_cam_lib.VZ_SetWorkMode(self.device_handle, mode.value) 
-               
+
+    def VZ_SetWorkMode(self, mode=VzWorkMode.ActiveMode):
+        return self.vz_cam_lib.VZ_SetWorkMode(self.device_handle, mode.value)
+
     def VZ_GetWorkMode(self):
         mode = VzWorkMode(0)
         return self.vz_cam_lib.VZ_GetWorkMode(self.device_handle, byref(mode)), mode
 
     def VZ_SetSoftwareSlaveTrigger(self):
         return self.vz_cam_lib.VZ_SetSoftwareSlaveTrigger(self.device_handle)
-    
+
     def VZ_RebootDevie(self):
         return self.vz_cam_lib.VZ_RebootDevie(self.device_handle)
-    
-    def VZ_GetSensorIntrinsicParameters(self, sensorType = VzSensorType.VzToFSensor):
+
+    def VZ_GetSensorIntrinsicParameters(self, sensorType=VzSensorType.VzToFSensor):
         CameraParameters = VzSensorIntrinsicParameters()
-        return self.vz_cam_lib.VZ_GetSensorIntrinsicParameters(self.device_handle, sensorType.value, byref(CameraParameters)), CameraParameters
+        return self.vz_cam_lib.VZ_GetSensorIntrinsicParameters(self.device_handle, sensorType.value,
+                                                               byref(CameraParameters)), CameraParameters
 
     def VZ_GetSensorExtrinsicParameters(self):
         CameraExtrinsicParameters = VzSensorExtrinsicParameters()
-        return self.vz_cam_lib.VZ_GetSensorExtrinsicParameters(self.device_handle, byref(CameraExtrinsicParameters)), CameraExtrinsicParameters
-    
-    def VZ_GetFirmwareVersion(self): 
+        return self.vz_cam_lib.VZ_GetSensorExtrinsicParameters(self.device_handle, byref(
+            CameraExtrinsicParameters)), CameraExtrinsicParameters
+
+    def VZ_GetFirmwareVersion(self):
         tmp = c_char * 64
         fw = tmp()
-        return self.vz_cam_lib.VZ_GetFirmwareVersion(self.device_handle, fw, 63),fw.value
+        return self.vz_cam_lib.VZ_GetFirmwareVersion(self.device_handle, fw, 63), fw.value
 
     def VZ_GetDeviceMACAddress(self):
         tmp = c_char * 18
         mac = tmp()
         return self.vz_cam_lib.VZ_GetDeviceMACAddress(self.device_handle, mac), mac.value
 
-    def VZ_SetIRGMMGain(self, gmmgain = c_uint8(20)):
-        return self.vz_cam_lib.VZ_SetIRGMMGain(self.device_handle, gmmgain) 
-     
+    def VZ_SetIRGMMGain(self, gmmgain=c_uint8(20)):
+        return self.vz_cam_lib.VZ_SetIRGMMGain(self.device_handle, gmmgain)
+
     def VZ_GetIRGMMGain(self):
         gmmgain = c_uint8(1)
         return self.vz_cam_lib.VZ_GetIRGMMGain(self.device_handle, byref(gmmgain)), gmmgain.value
 
-    def VZ_SetColorPixelFormat(self,pixelFormat=VzPixelFormat.VzPixelFormatBGR888):
+    def VZ_SetColorPixelFormat(self, pixelFormat=VzPixelFormat.VzPixelFormatBGR888):
         return self.vz_cam_lib.VZ_SetColorPixelFormat(self.device_handle, pixelFormat)
 
-    def VZ_SetColorResolution(self, w = c_int32(1600), h = c_int32(1200)):
-        return self.vz_cam_lib.VZ_SetColorResolution(self.device_handle, w, h) 
-     
+    def VZ_SetColorResolution(self, w=c_int32(1600), h=c_int32(1200)):
+        return self.vz_cam_lib.VZ_SetColorResolution(self.device_handle, w, h)
+
     def VZ_GetColorResolution(self):
         w = c_int32(1600)
         h = c_int32(1200)
         return self.vz_cam_lib.VZ_GetColorResolution(self.device_handle, byref(w), byref(h)), w, h
-        
-    def VZ_SetFrameRate(self, value = c_uint8(30)):
-        return self.vz_cam_lib.VZ_SetFrameRate(self.device_handle, value) 
-     
+
+    def VZ_SetFrameRate(self, value=c_uint8(30)):
+        return self.vz_cam_lib.VZ_SetFrameRate(self.device_handle, value)
+
     def VZ_GetFrameRate(self):
         value = c_uint8(1)
         return self.vz_cam_lib.VZ_GetFrameRate(self.device_handle, byref(value)), value.value
 
-    def VZ_SetExposureControlMode(self, sensorType = VzSensorType.VzToFSensor, mode = VzExposureControlMode.VzExposureControlMode_Manual):
-        return self.vz_cam_lib.VZ_SetExposureControlMode(self.device_handle, sensorType.value, mode.value) 
-    
-    def VZ_SetExposureTime(self, sensorType = VzSensorType.VzToFSensor, params = c_int32(0)):
-        ExposureTime = VzExposureTimeParams(VzExposureControlMode.VzExposureControlMode_Manual.value,params) 
-        return self.vz_cam_lib.VZ_SetExposureTime(self.device_handle, sensorType.value, ExposureTime) 
-     
-    def VZ_GetExposureTime(self, sensorType = VzSensorType.VzToFSensor):
-        params = VzExposureTimeParams(VzExposureControlMode.VzExposureControlMode_Manual.value,0)
+    def VZ_SetExposureControlMode(self, sensorType=VzSensorType.VzToFSensor,
+                                  mode=VzExposureControlMode.VzExposureControlMode_Manual):
+        return self.vz_cam_lib.VZ_SetExposureControlMode(self.device_handle, sensorType.value, mode.value)
+
+    def VZ_SetExposureTime(self, sensorType=VzSensorType.VzToFSensor, params=c_int32(0)):
+        ExposureTime = VzExposureTimeParams(VzExposureControlMode.VzExposureControlMode_Manual.value, params)
+        return self.vz_cam_lib.VZ_SetExposureTime(self.device_handle, sensorType.value, ExposureTime)
+
+    def VZ_GetExposureTime(self, sensorType=VzSensorType.VzToFSensor):
+        params = VzExposureTimeParams(VzExposureControlMode.VzExposureControlMode_Manual.value, 0)
         return self.vz_cam_lib.VZ_GetExposureTime(self.device_handle, sensorType.value, byref(params)), params
- 
-    def VZ_SetTimeFilterParams(self, params = VzTimeFilterParams()): 
+
+    def VZ_SetTimeFilterParams(self, params=VzTimeFilterParams()):
         return self.vz_cam_lib.VZ_SetTimeFilterParams(self.device_handle, params)
-    
-    def VZ_GetTimeFilterParams(self): 
+
+    def VZ_GetTimeFilterParams(self):
         params = VzTimeFilterParams()
-        return self.vz_cam_lib.VZ_GetTimeFilterParams(self.device_handle, byref(params)),params
+        return self.vz_cam_lib.VZ_GetTimeFilterParams(self.device_handle, byref(params)), params
 
-    def VZ_SetConfidenceFilterParams(self, params = VzConfidenceFilterParams()): 
+    def VZ_SetConfidenceFilterParams(self, params=VzConfidenceFilterParams()):
         return self.vz_cam_lib.VZ_SetConfidenceFilterParams(self.device_handle, params)
-    
-    def VZ_GetConfidenceFilterParams(self): 
+
+    def VZ_GetConfidenceFilterParams(self):
         params = VzConfidenceFilterParams()
-        return self.vz_cam_lib.VZ_GetConfidenceFilterParams(self.device_handle, byref(params)),params
-    
-    def VZ_SetFlyingPixelFilterParams(self, params = VzFlyingPixelFilterParams()): 
+        return self.vz_cam_lib.VZ_GetConfidenceFilterParams(self.device_handle, byref(params)), params
+
+    def VZ_SetFlyingPixelFilterParams(self, params=VzFlyingPixelFilterParams()):
         return self.vz_cam_lib.VZ_SetFlyingPixelFilterParams(self.device_handle, params)
-    
-    def VZ_GetFlyingPixelFilterParams(self): 
+
+    def VZ_GetFlyingPixelFilterParams(self):
         params = VzFlyingPixelFilterParams()
-        return self.vz_cam_lib.VZ_GetFlyingPixelFilterParams(self.device_handle, byref(params)),params
+        return self.vz_cam_lib.VZ_GetFlyingPixelFilterParams(self.device_handle, byref(params)), params
 
-    def VZ_SetFillHoleFilterEnabled(self, enable = c_bool(True)): 
+    def VZ_SetFillHoleFilterEnabled(self, enable=c_bool(True)):
         return self.vz_cam_lib.VZ_SetFillHoleFilterEnabled(self.device_handle, enable)
-    
-    def VZ_GetFillHoleFilterEnabled(self): 
+
+    def VZ_GetFillHoleFilterEnabled(self):
         enable = c_bool(True)
-        return self.vz_cam_lib.VZ_GetFillHoleFilterEnabled(self.device_handle, byref(enable)),enable.value
+        return self.vz_cam_lib.VZ_GetFillHoleFilterEnabled(self.device_handle, byref(enable)), enable.value
 
-    def VZ_SetSpatialFilterEnabled(self, enable = c_bool(True)): 
+    def VZ_SetSpatialFilterEnabled(self, enable=c_bool(True)):
         return self.vz_cam_lib.VZ_SetSpatialFilterEnabled(self.device_handle, enable)
-    
-    def VZ_GetSpatialFilterEnabled(self): 
+
+    def VZ_GetSpatialFilterEnabled(self):
         enable = c_bool(True)
-        return self.vz_cam_lib.VZ_GetSpatialFilterEnabled(self.device_handle, byref(enable)),enable.value
+        return self.vz_cam_lib.VZ_GetSpatialFilterEnabled(self.device_handle, byref(enable)), enable.value
+
+    def VZ_SetTransformColorImgToDepthSensorEnabled(self, enabled=c_bool(True)):
+        return self.vz_cam_lib.VZ_SetTransformColorImgToDepthSensorEnabled(self.device_handle, enabled)
 
-    def VZ_SetTransformColorImgToDepthSensorEnabled(self, enabled = c_bool(True)): 
-        return self.vz_cam_lib.VZ_SetTransformColorImgToDepthSensorEnabled(self.device_handle,  enabled)
-    
-    def VZ_GetTransformColorImgToDepthSensorEnabled(self): 
+    def VZ_GetTransformColorImgToDepthSensorEnabled(self):
         enabled = c_bool(True)
-        return self.vz_cam_lib.VZ_GetTransformColorImgToDepthSensorEnabled(self.device_handle,  byref(enabled)),enabled
+        return self.vz_cam_lib.VZ_GetTransformColorImgToDepthSensorEnabled(self.device_handle, byref(enabled)), enabled
+
+    def VZ_SetTransformDepthImgToColorSensorEnabled(self, enabled=c_bool(True)):
+        return self.vz_cam_lib.VZ_SetTransformDepthImgToColorSensorEnabled(self.device_handle, enabled)
 
-    def VZ_SetTransformDepthImgToColorSensorEnabled(self, enabled = c_bool(True)): 
-        return self.vz_cam_lib.VZ_SetTransformDepthImgToColorSensorEnabled(self.device_handle,  enabled)
-    
-    def VZ_GetTransformDepthImgToColorSensorEnabled(self): 
+    def VZ_GetTransformDepthImgToColorSensorEnabled(self):
         enabled = c_bool(True)
-        return self.vz_cam_lib.VZ_GetTransformDepthImgToColorSensorEnabled(self.device_handle,  byref(enabled)),enabled
+        return self.vz_cam_lib.VZ_GetTransformDepthImgToColorSensorEnabled(self.device_handle, byref(enabled)), enabled
 
-    def VZ_ConvertDepthFrameToPointCloudVector(self, depthFrame = VzFrame()): 
-        len = depthFrame.width*depthFrame.height
-        tmp =VzVector3f*len
+    def VZ_ConvertDepthFrameToPointCloudVector(self, depthFrame=VzFrame()):
+        len = depthFrame.width * depthFrame.height
+        tmp = VzVector3f * len
         pointlist = tmp()
-        return self.vz_cam_lib.VZ_ConvertDepthFrameToPointCloudVector(self.device_handle, byref(depthFrame) ,pointlist),pointlist
- 
-    def VZ_SetHotPlugStatusCallback(self,callbackfunc= c_void_p): 
-        callbackFunc_= ctypes.CFUNCTYPE(c_void_p,POINTER(VzDeviceInfo),c_int32)(callbackfunc)    
+        return self.vz_cam_lib.VZ_ConvertDepthFrameToPointCloudVector(self.device_handle, byref(depthFrame),
+                                                                      pointlist), pointlist
+
+    def VZ_ConvertDepthToPointCloud(self, depth = VzDepthVector3()):
+        point = VzVector3f()
+        ret, params = self.VZ_GetSensorIntrinsicParameters()
+        ret = self.vz_cam_lib.VZ_ConvertDepthToPointCloud(self.device_handle, id(depth), id(point), 1, params)
+
+        print("-------------------------")
+        return ret, point
+
+    def VZ_SetHotPlugStatusCallback(self, callbackfunc=c_void_p):
+        callbackFunc_ = ctypes.CFUNCTYPE(c_void_p, POINTER(VzDeviceInfo), c_int32)(callbackfunc)
         gCallbackFuncList.append(callbackFunc_)
         return self.vz_cam_lib.VZ_SetHotPlugStatusCallback(callbackFunc_)
 
-    def VZ_GetManaulMaxExposureTime(self):     
+    def VZ_GetManaulMaxExposureTime(self):
         tmp = r"Py_ToFExposureTimeMax"
-        propertyKey = (c_char * 100)(*bytes(tmp, 'utf-8')) 
-        maxExposureTime = VzExposureTimeParams(VzExposureControlMode.VzExposureControlMode_Manual.value) 
-        return self.vz_cam_lib.VZ_GetProperty(self.device_handle,   byref(propertyKey),  byref(maxExposureTime),8),maxExposureTime.exposureTime
+        propertyKey = (c_char * 100)(*bytes(tmp, 'utf-8'))
+        maxExposureTime = VzExposureTimeParams(VzExposureControlMode.VzExposureControlMode_Manual.value)
+        return self.vz_cam_lib.VZ_GetProperty(self.device_handle, byref(propertyKey), byref(maxExposureTime),
+                                              8), maxExposureTime.exposureTime
 
     def VZ_SetParamsByJson(self, imgpath):
         pimgpath = (c_char * 1000)(*bytes(imgpath, 'utf-8'))
-        return self.vz_cam_lib.VZ_SetParamsByJson(self.device_handle,  byref(pimgpath))
+        return self.vz_cam_lib.VZ_SetParamsByJson(self.device_handle, byref(pimgpath))
 
-    def VZ_SetColorGain(self, params = c_float(1.0)):
-        return self.vz_cam_lib.VZ_SetColorGain(self.device_handle,  params)
+    def VZ_SetColorGain(self, params=c_float(1.0)):
+        return self.vz_cam_lib.VZ_SetColorGain(self.device_handle, params)
 
     def VZ_GetColorGain(self):
-        tmp = c_float*1
+        tmp = c_float * 1
         params = tmp()
-        return self.vz_cam_lib.VZ_GetColorGain(self.device_handle,  params), params
+        return self.vz_cam_lib.VZ_GetColorGain(self.device_handle, params), params
 
-    def VZ_GetProperty(self, propertyKey=c_char_p(), pData= c_void_p, dataSize = c_int32(8)):
-        return self.vz_cam_lib.VZ_GetProperty(self.device_handle,  propertyKey, pData, dataSize),pData
+    def VZ_GetProperty(self, propertyKey=c_char_p(), pData=c_void_p, dataSize=c_int32(8)):
+        return self.vz_cam_lib.VZ_GetProperty(self.device_handle, propertyKey, pData, dataSize), pData
 
     def VZ_GetAutoMaxExposureTime(self):
         tmp = r"Py_RGBExposureTimeMax"
@@ -274,10 +292,10 @@ class VzenseTofCam():
         return self.vz_cam_lib.VZ_GetProperty(self.device_handle, byref(propertyKey), byref(maxExposureTime),
                                               8), maxExposureTime.exposureTime
 
-    def VZ_SetAutoExposureTime(self, sensorType = VzSensorType.VzColorSensor, params = c_int32(0)):
-        ExposureTime = VzExposureTimeParams(VzExposureControlMode.VzExposureControlMode_Auto.value,params)
+    def VZ_SetAutoExposureTime(self, sensorType=VzSensorType.VzColorSensor, params=c_int32(0)):
+        ExposureTime = VzExposureTimeParams(VzExposureControlMode.VzExposureControlMode_Auto.value, params)
         return self.vz_cam_lib.VZ_SetExposureTime(self.device_handle, sensorType.value, ExposureTime)
 
-    def VZ_GetAutoExposureTime(self, sensorType = VzSensorType.VzColorSensor):
-        params = VzExposureTimeParams(VzExposureControlMode.VzExposureControlMode_Auto.value,0)
-        return self.vz_cam_lib.VZ_GetExposureTime(self.device_handle, sensorType.value, byref(params)), params
+    def VZ_GetAutoExposureTime(self, sensorType=VzSensorType.VzColorSensor):
+        params = VzExposureTimeParams(VzExposureControlMode.VzExposureControlMode_Auto.value, 0)
+        return self.vz_cam_lib.VZ_GetExposureTime(self.device_handle, sensorType.value, byref(params)), params

+ 4 - 1
setup.py

@@ -46,7 +46,10 @@ setup(
         # zx sdk
         'glog',
         'aio-pika >= 9.0.0',
-        'protobuf == 4.23.4'
+        'protobuf == 4.23.4',
+        # 工具
+        'pyqt5',
+        'PyQt5-tools'
     ],
 
     # setup.py 本身要依赖的包,这通常是为一些setuptools的插件准备的配置,这里列出的包,不会自动安装。