Swift - Exporting TrueDepth Point Cloud Data to .PLY and SceneKit


I'm trying to convert the depth data produced by Apple's Streaming Depth Data from the TrueDepth Camera sample. The problem is that when I export the depth and color data to .PLY format, I end up with the 3D model shown under "before". What should I do to make it look like the model under "after"?

Here is the code I wrote for video + depth processing, modified and extended with new functionality for debugging/export.

createPointCloudGeometry and exportGeometryAsPLY

struct ExternalData {
    static var renderingEnabled = true
    static var isSavingFileAsPLY = false
    static var exportPLYData: Data?
    static var pointCloudGeometry: SCNGeometry?
    
    // Function to convert depth and color data into a point cloud geometry
    static func createPointCloudGeometry(depthData: AVDepthData, colorData: UnsafePointer<UInt8>, width: Int, height: Int, bytesPerRow: Int) -> SCNGeometry {
        var vertices: [SCNVector3] = []
        // Flattened, normalized RGBA components for the SceneKit color source
        var colorComponents: [Float] = []
        
        let convertedDepthData = depthData.converting(toDepthDataType: kCVPixelFormatType_DepthFloat32)
        let depthDataMap = convertedDepthData.depthDataMap
        CVPixelBufferLockBaseAddress(depthDataMap, .readOnly)
        defer { CVPixelBufferUnlockBaseAddress(depthDataMap, .readOnly) }
        
        for y in 0..<height {
            for x in 0..<width {
                let depthOffset = y * CVPixelBufferGetBytesPerRow(depthDataMap) + x * MemoryLayout<Float32>.size
                let depthPointer = CVPixelBufferGetBaseAddress(depthDataMap)!.advanced(by: depthOffset).assumingMemoryBound(to: Float32.self)
                let depth = depthPointer.pointee
                
                // NOTE: Using raw (x, y, depth) like this ignores the camera intrinsics,
                // which is what distorts the exported model (see the answer below).
                // Scale and offset the depth as needed to fit your scene.
                let vertex = SCNVector3(x: Float(x), y: Float(y), z: Float(depth))
                
                vertices.append(vertex)
                
                let colorOffset = y * bytesPerRow + x * 4 // Assuming BGRA format
                let bComponent = Float(colorData[colorOffset]) / 255.0
                let gComponent = Float(colorData[colorOffset + 1]) / 255.0
                let rComponent = Float(colorData[colorOffset + 2]) / 255.0
                let aComponent = Float(colorData[colorOffset + 3]) / 255.0
                
                // Append in RGBA order, which SceneKit expects for the .color semantic
                colorComponents += [rComponent, gComponent, bComponent, aComponent]
            }
        }
        
        // Create the geometry source for vertices
        let vertexSource = SCNGeometrySource(vertices: vertices)
        
        // Pack the flattened color components into a Data buffer for the color source.
        // withUnsafeBufferPointer keeps the pointer valid for the whole copy; passing
        // &colorComponents straight into Data(buffer:) is not guaranteed to.
        let colorBytes = colorComponents.withUnsafeBufferPointer { Data(buffer: $0) }
        let colorSource = SCNGeometrySource(data: colorBytes,
                                            semantic: .color,
                                            vectorCount: vertices.count,
                                            usesFloatComponents: true,
                                            componentsPerVector: 4,
                                            bytesPerComponent: MemoryLayout<Float>.size,
                                            dataOffset: 0,
                                            dataStride: MemoryLayout<Float>.stride * 4)
        
        // Create the geometry element
        let indices: [Int32] = Array(0..<Int32(vertices.count))
        let indexData = Data(bytes: indices, count: indices.count * MemoryLayout<Int32>.size)
        let element = SCNGeometryElement(data: indexData,
                                         primitiveType: .point,
                                         primitiveCount: vertices.count,
                                         bytesPerIndex: MemoryLayout<Int32>.size)
        
        // Set the rendered point size on the element. (Driving gl_PointSize from a shader
        // modifier only works with the legacy OpenGL renderer; under Metal, use
        // SCNGeometryElement's point-size properties.)
        element.pointSize = 5.0 // Adjust the point size as necessary
        element.minimumPointScreenSpaceRadius = 1.0
        element.maximumPointScreenSpaceRadius = 5.0
        
        // Create the point cloud geometry
        pointCloudGeometry = SCNGeometry(sources: [vertexSource, colorSource], elements: [element])
        
        // Set the lighting model to constant to ensure the points are fully lit
        pointCloudGeometry!.firstMaterial?.lightingModel = .constant
        
        // Set additional material properties as needed, for example, to make the points more visible
        pointCloudGeometry!.firstMaterial?.isDoubleSided = true
        
        print("Done constructing the 3D object!")
        LogManager.shared.log("Done constructing the 3D object!")
        
        return pointCloudGeometry!
    }
    
    static func exportGeometryAsPLY(to url: URL) {
        guard let geometry = pointCloudGeometry,
              let vertexSource = geometry.sources.first(where: { $0.semantic == .vertex }),
              let colorSource = geometry.sources.first(where: { $0.semantic == .color }) else {
            print("Unable to access vertex or color source from geometry")
            return
        }
        
        // Access vertex and color data (SCNGeometrySource.data is non-optional,
        // so there is nothing to guard here)
        let vertexData = vertexSource.data
        let colorData = colorSource.data
        
        let vertexCount = vertexSource.vectorCount
        let colorStride = colorSource.dataStride / MemoryLayout<Float>.size
        // Assumes both sources are tightly packed, as built in createPointCloudGeometry
        let vertices = vertexData.toArray(type: SCNVector3.self, count: vertexCount)
        let colors = colorData.toArray(type: Float.self, count: vertexCount * colorStride)
        
        var plyString = "ply\n"
        plyString += "format ascii 1.0\n"
        plyString += "element vertex \(vertexCount)\n"
        plyString += "property float x\n"
        plyString += "property float y\n"
        plyString += "property float z\n"
        plyString += "property uchar red\n"
        plyString += "property uchar green\n"
        plyString += "property uchar blue\n"
        plyString += "property uchar alpha\n"
        plyString += "end_header\n"
        
        for i in 0..<vertexCount {
            let vertex = vertices[i]
            let colorIndex = i * colorStride
            
            // Ensure the index is within the bounds of the colors array
            guard colorIndex + 3 < colors.count else {
                print("Color data index out of range for vertex \(i).")
                continue
            }
            
            let color: [UInt8] = (0..<4).compactMap { component -> UInt8? in
                let index = colorIndex + component
                guard index < colors.count else {
                    return nil
                }
                // Clamp before converting so slightly out-of-range floats can't crash UInt8.init
                return UInt8(max(0, min(255, colors[index] * 255)))
            }
            
            // Only proceed if we have all four color components
            guard color.count == 4 else {
                print("Incomplete color data for vertex \(i).")
                continue
            }
            
            plyString += "\(vertex.x) \(vertex.y) \(vertex.z) \(color[0]) \(color[1]) \(color[2]) \(color[3])\n"
        }
        
        do {
            try plyString.write(to: url, atomically: true, encoding: .ascii)
            print("PLY file was successfully saved to: \(url.path)")
        } catch {
            print("Failed to write PLY file: \(error)")
        }
    }
}
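
The exporter above calls a Data.toArray(type:count:) helper that isn't part of Foundation and isn't shown in the snippet. A minimal sketch of what it presumably looks like, assuming the source data is tightly packed as built above:

extension Data {
    // Reinterpret the raw bytes as an array of `count` values of type T.
    // Only safe for trivial (plain-data) types such as Float or SCNVector3.
    func toArray<T>(type: T.Type, count: Int) -> [T] {
        return withUnsafeBytes { rawBuffer in
            Array(rawBuffer.bindMemory(to: T.self).prefix(count))
        }
    }
}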

printDepthData and dataOutputSynchronizer

    func printDepthData(depthData: AVDepthData, imageData: CVImageBuffer) {
        // Convert to 32-bit float depth so each sample can be read as a Float below,
        // regardless of whether the camera delivered DepthFloat16 or DepthFloat32
        let convertedDepthData = depthData.converting(toDepthDataType: kCVPixelFormatType_DepthFloat32)
        let depthPixelBuffer = convertedDepthData.depthDataMap
        let colorPixelBuffer = imageData
        
        CVPixelBufferLockBaseAddress(depthPixelBuffer, .readOnly)
        CVPixelBufferLockBaseAddress(colorPixelBuffer, .readOnly)
        defer {
            CVPixelBufferUnlockBaseAddress(depthPixelBuffer, .readOnly)
            CVPixelBufferUnlockBaseAddress(colorPixelBuffer, .readOnly)
        }
        
        let colorWidth = CVPixelBufferGetWidth(colorPixelBuffer)
        let colorHeight = CVPixelBufferGetHeight(colorPixelBuffer)
        let depthWidth = CVPixelBufferGetWidth(depthPixelBuffer)
        let depthHeight = CVPixelBufferGetHeight(depthPixelBuffer)
        
        print("Image Width: \(colorWidth) | Image Height: \(colorHeight)")
        print("Depth Data Width: \(depthWidth) | Depth Data Height: \(depthHeight)")
        
        guard let colorData = CVPixelBufferGetBaseAddress(colorPixelBuffer) else {
            print("Unable to get image buffer base address.")
            return
        }
        
        let colorBytesPerRow = CVPixelBufferGetBytesPerRow(colorPixelBuffer)
        let colorBytesPerPixel = 4 // BGRA format
        
        guard let depthDataAddress = CVPixelBufferGetBaseAddress(depthPixelBuffer) else {
            print("Unable to get depth buffer base address.")
            return
        }
        
        let depthBytesPerRow = CVPixelBufferGetBytesPerRow(depthPixelBuffer)
        // The buffer was converted to kCVPixelFormatType_DepthFloat32 above,
        // so every depth sample is a 4-byte Float
        let depthBytesPerPixel = MemoryLayout<Float32>.size
        
        // Ensure that you're iterating within the bounds of both buffers
        let commonWidth = min(colorWidth, depthWidth)
        let commonHeight = min(colorHeight, depthHeight)
        
        print("Starting iteration with commonWidth: \(commonWidth), commonHeight: \(commonHeight)")
        
        // Iterate over the image buffer
        for y in stride(from: 0, to: commonHeight, by: 10) {
            for x in stride(from: 0, to: commonWidth, by: 10) {
                let colorPixelOffset = y * colorBytesPerRow + x * colorBytesPerPixel
                let colorPixel = colorData.advanced(by: colorPixelOffset).assumingMemoryBound(to: UInt8.self)
                
                // Extract BGRA components
                let blue = colorPixel[0]
                let green = colorPixel[1]
                let red = colorPixel[2]
                let alpha = colorPixel[3]
                
                // Print the (x, y) coordinates and color value in BGRA
                print("Color at (\(x), \(y)): B:\(blue) G:\(green) R:\(red) A:\(alpha)")
                
                // Calculate the depth data's corresponding pixel offset
                let depthPixelOffset = y * depthBytesPerRow + x * depthBytesPerPixel
                let depthPixel = depthDataAddress.advanced(by: depthPixelOffset).assumingMemoryBound(to: Float.self)
                let depthValue = depthPixel.pointee
                
                // Print the (x, y) coordinates and depth value
                print("Depth at (\(x), \(y)): \(depthValue)")
            }
        }
        
        print("Completed iteration")
        
        // Assuming colorData is the base address for the BGRA image buffer
        let colorBaseAddress = CVPixelBufferGetBaseAddress(colorPixelBuffer)!.assumingMemoryBound(to: UInt8.self)
        
        // Build the point cloud; the result is cached in ExternalData.pointCloudGeometry.
        // NOTE: If the color and depth buffers differ in resolution, sampling color at
        // depth-pixel coordinates only reads the top-left corner of the color image;
        // scale the coordinates if the colors need to line up.
        _ = ExternalData.createPointCloudGeometry(
            depthData: depthData,
            colorData: colorBaseAddress,
            width: commonWidth,
            height: commonHeight,
            bytesPerRow: colorBytesPerRow // Use the correct bytes per row for color data
        )
        
        // Synchronize access to the shared resource
        DispatchQueue.main.async {
            ExternalData.renderingEnabled.toggle()
        }
    }
    
    // MARK: - Video + Depth Frame Processing
    
    func dataOutputSynchronizer(_ synchronizer: AVCaptureDataOutputSynchronizer,
                                didOutput synchronizedDataCollection: AVCaptureSynchronizedDataCollection) {
        // Only work on synced pairs, and only while rendering is enabled
        guard ExternalData.renderingEnabled,
              let syncedDepthData: AVCaptureSynchronizedDepthData =
                synchronizedDataCollection.synchronizedData(for: depthDataOutput) as? AVCaptureSynchronizedDepthData,
              let syncedVideoData: AVCaptureSynchronizedSampleBufferData =
                synchronizedDataCollection.synchronizedData(for: videoDataOutput) as? AVCaptureSynchronizedSampleBufferData else {
            return
        }
        
        if syncedDepthData.depthDataWasDropped || syncedVideoData.sampleBufferWasDropped {
            return
        }
        
        let depthData = syncedDepthData.depthData
        let sampleBuffer = syncedVideoData.sampleBuffer
        guard let videoPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
            return
        }
        
        print("ExternalData.isSavingFileAsPLY: \(ExternalData.isSavingFileAsPLY)")
        
        if ExternalData.isSavingFileAsPLY {
            printDepthData(depthData: depthData, imageData: videoPixelBuffer)
            
            // Set cloudView to empty depth data and texture
            // cloudView?.setDepthFrame(nil, withTexture: nil)
            
            ExternalData.isSavingFileAsPLY = false
        }
        
        globalDepthData = depthData
        globalVideoPixelBuffer = videoPixelBuffer
        
        cloudView?.setDepthFrame(depthData, withTexture: videoPixelBuffer)
    }
    
    @IBSegueAction func embedSwiftUIView(_ coder: NSCoder) -> UIViewController? {
        // Upon Scan Completion...
        let hostingController = UIHostingController(coder: coder, rootView: SwiftUIView())!
        hostingController.view.backgroundColor = .clear
        return hostingController
    }
}
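
Nothing in the snippets actually flips isSavingFileAsPLY or calls exportGeometryAsPLY; a hypothetical trigger (the button handler, delay, and file name here are illustrative, not from the original project) could look like:

// Hypothetical export trigger, e.g. wired to a UIButton
@objc func savePLYTapped() {
    // Ask the capture pipeline to snapshot the next synced frame as a point cloud
    ExternalData.isSavingFileAsPLY = true
    
    // Give dataOutputSynchronizer a moment to build the geometry, then write the file.
    // A production version should signal completion rather than rely on a fixed delay.
    DispatchQueue.main.asyncAfter(deadline: .now() + 1.0) {
        let url = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
            .appendingPathComponent("pointcloud.ply")
        ExternalData.exportGeometryAsPLY(to: url)
    }
}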

1 Answer

To convert a depth image into a 3D point cloud, you have to use additional data from the AVCameraCalibrationData class when computing the XYZ coordinates. The most important fields are:

intrinsicMatrix
intrinsicMatrixReferenceDimensions

The class also provides lens-distortion data, but it won't change the result much unless you need very high accuracy.
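
As a sketch of what that unprojection looks like (assuming depthData.cameraCalibrationData is non-nil, which requires calibration data delivery to be enabled on the capture connection), the pinhole model gives X = (x - cx) * z / fx and Y = (y - cy) * z / fy. Something along these lines could replace the raw SCNVector3(x:y:z:) construction in createPointCloudGeometry:

// Minimal sketch: unproject a depth pixel to camera-space XYZ using the intrinsics
func unprojectedPoint(x: Int, y: Int, depth: Float,
                      calibration: AVCameraCalibrationData,
                      depthMapWidth: Int) -> SCNVector3 {
    let intrinsics = calibration.intrinsicMatrix
    let referenceWidth = Float(calibration.intrinsicMatrixReferenceDimensions.width)
    
    // The intrinsics are expressed for the full-resolution sensor image;
    // scale them down to the (smaller) depth map's resolution
    let scale = Float(depthMapWidth) / referenceWidth
    let fx = intrinsics.columns.0.x * scale
    let fy = intrinsics.columns.1.y * scale
    let cx = intrinsics.columns.2.x * scale
    let cy = intrinsics.columns.2.y * scale
    
    // Back-project the pixel through the camera center
    let xCamera = (Float(x) - cx) * depth / fx
    let yCamera = (Float(y) - cy) * depth / fy
    return SCNVector3(x: xCamera, y: yCamera, z: depth)
}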
