AVFoundation: quickly switching between the front and back cameras causes an error

Problem description

I have a video recorder built in Swift with AVFoundation. The recorder can capture multiple clips and concatenate them into a single video. For example, you can start recording, stop, then start and stop again; the final video is the two clips merged together. If I record the first clip with the front camera, stop that clip, switch to the back camera, and record another clip, the final video is produced successfully. But if I switch between the front and back cameras within a single clip, the video fails and the clip can't be processed. Why does switching cameras during a recording cause this problem?

import SwiftUI
import AVKit
import AVFoundation

class CameraViewModel: NSObject, ObservableObject, AVCaptureFileOutputRecordingDelegate {
   @Published var session = AVCaptureSession()
   @Published var alert = false
   @Published var output = AVCaptureMovieFileOutput()
   @Published var preview: AVCaptureVideoPreviewLayer!
   @Published var isRecording: Bool = false
   @Published var recordedURLs: [URL] = []
   @Published var previewURL: URL?
   @Published var showPreview: Bool = false
   @Published var recordedDuration: CGFloat = 0
   @Published var maxDuration: CGFloat = 20
   var currentCameraPosition: AVCaptureDevice.Position = .back
   
   override init() {
       super.init()
       self.checkPermission()
       self.preview = AVCaptureVideoPreviewLayer(session: session)
       self.preview.videoGravity = .resizeAspectFill
   }

   func flipCamera() {
       // Create a discovery session to find all available video devices
       let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: .video, position: .unspecified)

       // Get all available video devices
       let videoDevices = discoverySession.devices

       // Check if there is more than one video device
       guard videoDevices.count > 1 else {
           return // If not, return early
       }

       // Get the current input
       guard let currentVideoInput = session.inputs.first as? AVCaptureDeviceInput else {
           return
       }

       // Get the new camera position
       let newCameraPosition: AVCaptureDevice.Position = (currentCameraPosition == .back) ? .front : .back

       // Find the new camera device
       if let newCamera = videoDevices.first(where: { $0.position == newCameraPosition }) {
           // Create a new video input
           do {
               let newVideoInput = try AVCaptureDeviceInput(device: newCamera)

               // Remove the current input
               session.removeInput(currentVideoInput)

               // Add the new input
               if session.canAddInput(newVideoInput) {
                   session.addInput(newVideoInput)
                   currentCameraPosition = newCameraPosition
               } else {
                   // Handle the case where adding the new input fails
                   print("Failed to add new camera input")
               }
           } catch {
               // Handle any errors that occur while creating the new input
               print("Error creating new camera input: \(error.localizedDescription)")
           }
       }
   }
   
   func checkPermission(){
       switch AVCaptureDevice.authorizationStatus(for: .video) {
       case .authorized:
           checkAudioPermission()
           return
       case .notDetermined:
           AVCaptureDevice.requestAccess(for: .video) { (status) in
               if status {
                   self.checkAudioPermission()
               }
           }
       case .denied:
           self.alert.toggle()
           return
       default:
           return
       }
   }
   
   func checkAudioPermission() {
       switch AVCaptureDevice.authorizationStatus(for: .audio) {
       case .authorized:
           setUp()
           return
       case .notDetermined:
           AVCaptureDevice.requestAccess(for: .audio) { (audioStatus) in
               if audioStatus {
                   self.setUp()
               }
           }
       case .denied:
           self.alert.toggle()
           return
       default:
           return
       }
   }
   
   func setUp(){
       do {
           self.session.beginConfiguration()
           let cameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
           let videoInput = try AVCaptureDeviceInput(device: cameraDevice!)
           let audioDevice = AVCaptureDevice.default(for: .audio)
           let audioInput = try AVCaptureDeviceInput(device: audioDevice!)

           if self.session.canAddInput(videoInput) && self.session.canAddInput(audioInput){
               self.session.addInput(videoInput)
               self.session.addInput(audioInput)
           }

           if self.session.canAddOutput(self.output){
               self.session.addOutput(self.output)
           }
           self.session.commitConfiguration()
       }
       catch{
           print(error.localizedDescription)
       }
   }
   
   func startRecording(){
       // MARK: Temporary URL for recording Video
       let tempURL = NSTemporaryDirectory() + "\(Date()).mov"
       output.startRecording(to: URL(fileURLWithPath: tempURL), recordingDelegate: self)
       isRecording = true
   }
   
   func stopRecording(){
       output.stopRecording()
       isRecording = false
   }
   
   func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
       if let error = error {
           print(error.localizedDescription)
           return
       }
       
       // CREATED SUCCESSFULLY
       print(outputFileURL)
       self.recordedURLs.append(outputFileURL)
       if self.recordedURLs.count == 1{
           self.previewURL = outputFileURL
           return
       }
       
       // CONVERTING URLs TO ASSETS
       let assets = recordedURLs.compactMap { url -> AVURLAsset in
           return AVURLAsset(url: url)
       }
       
       self.previewURL = nil
       // MERGING VIDEOS
       Task {
           await mergeVideos(assets: assets) { exporter in
               exporter.exportAsynchronously {
                   if exporter.status == .failed{
                       // HANDLE ERROR
                       print(exporter.error!)
                   }
                   else{
                       if let finalURL = exporter.outputURL{
                           print(finalURL)
                           DispatchQueue.main.async {
                               self.previewURL = finalURL
                           }
                       }
                   }
               }
           }
       }
   }
   
   func mergeVideos(assets: [AVURLAsset],completion: @escaping (_ exporter: AVAssetExportSession)->()) async {
       
        let composition = AVMutableComposition()
        var lastTime: CMTime = .zero
        
        guard let videoTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else{return}
        guard let audioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else{return}
       
       for asset in assets {
           // Linking Audio and Video
           do {
               try await videoTrack.insertTimeRange(CMTimeRange(start: .zero, duration: asset.load(.duration)), of: asset.loadTracks(withMediaType: .video)[0], at: lastTime)
               // Safe Check if Video has Audio
               if try await !asset.loadTracks(withMediaType: .audio).isEmpty {
                   try await audioTrack.insertTimeRange(CMTimeRange(start: .zero, duration: asset.load(.duration)), of: asset.loadTracks(withMediaType: .audio)[0], at: lastTime)
               }
           }
           catch {
               print(error.localizedDescription)
           }
           
           // Updating Last Time
           do {
               lastTime = try await CMTimeAdd(lastTime, asset.load(.duration))
           } catch {
               print(error.localizedDescription)
           }
       }
       
       // MARK: Temp Output URL
       let tempURL = URL(fileURLWithPath: NSTemporaryDirectory() + "Reel-\(Date()).mp4")
       
        // VIDEO IS ROTATED
        // BRINGING BACK TO ORIGINAL TRANSFORM
       
       let layerInstructions = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
       
       // MARK: Transform
       var transform = CGAffineTransform.identity
       transform = transform.rotated(by: 90 * (.pi / 180))
       transform = transform.translatedBy(x: 0, y: -videoTrack.naturalSize.height)
       layerInstructions.setTransform(transform, at: .zero)
       
       let instructions = AVMutableVideoCompositionInstruction()
       instructions.timeRange = CMTimeRange(start: .zero, duration: lastTime)
       instructions.layerInstructions = [layerInstructions]
       
       let videoComposition = AVMutableVideoComposition()
       videoComposition.renderSize = CGSize(width: videoTrack.naturalSize.height, height: videoTrack.naturalSize.width)
       videoComposition.instructions = [instructions]
       videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
       
        guard let exporter = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality) else{return}
       exporter.outputFileType = .mp4
       exporter.outputURL = tempURL
       exporter.videoComposition = videoComposition
       completion(exporter)
   }
}


//IGNORE NOT IMPORTANT TO QUESTION
struct HomeStory: View {
   @StateObject var cameraModel = CameraViewModel()
   
   var body: some View {
       ZStack(alignment: .bottom) {
           CameraStoryView()
               .environmentObject(cameraModel)
               .clipShape(RoundedRectangle(cornerRadius: 30, style: .continuous))
               .padding(.top,10)
               .padding(.bottom,30)
           
           ZStack {
               Button {
                   if cameraModel.isRecording {
                       cameraModel.stopRecording()
                   } else {
                       cameraModel.startRecording()
                   }
               } label: {
                   if cameraModel.isRecording {
                       Circle().frame(width: 95, height: 95).foregroundStyle(.red).opacity(0.7)
                   } else {
                       ZStack {
                           Color.gray.opacity(0.001)
                           Circle().stroke(.white, lineWidth: 7).frame(width: 80, height: 80)
                       }.frame(width: 95, height: 95)
                   }
               }
               
               Button {
                   cameraModel.flipCamera()
               } label: {
                   Image(systemName: "arrow.triangle.2.circlepath.camera")
                       .font(.title)
                       .foregroundColor(.white)
                       .padding()
                       .background(Circle().fill(Color.black.opacity(0.7)))
               }.offset(x: -100)
               
               Button {
                   if let _ = cameraModel.previewURL {
                       cameraModel.showPreview.toggle()
                   }
               } label: {
                   if cameraModel.previewURL == nil && !cameraModel.recordedURLs.isEmpty {
                       ProgressView().tint(.black)
                   } else {
                       HStack {
                           Text("Preview")
                           Image(systemName: "chevron.right")
                       }
                       .padding()
                       .foregroundColor(.black).font(.body)
                       .background {
                           Capsule().foregroundStyle(.ultraThinMaterial)
                       }
                   }
               }
               .padding(.horizontal,20)
               .padding(.vertical,8)
               .frame(maxWidth: .infinity,alignment: .trailing)
               .padding(.trailing)
               .opacity((cameraModel.previewURL == nil && cameraModel.recordedURLs.isEmpty) || cameraModel.isRecording ? 0 : 1)
           }
           .frame(maxHeight: .infinity,alignment: .bottom)
           .padding(.bottom,10)
           .padding(.bottom,30)
           
           Button {
               cameraModel.recordedDuration = 0
               cameraModel.previewURL = nil
               cameraModel.recordedURLs.removeAll()
           } label: {
               Image(systemName: "xmark")
                   .font(.title)
                   .foregroundColor(.white)
           }
           .frame(maxWidth: .infinity,maxHeight: .infinity,alignment: .topLeading)
           .padding()
           .padding(.top)
           .opacity(!cameraModel.recordedURLs.isEmpty && cameraModel.previewURL != nil && !cameraModel.isRecording ? 1 : 0)
       }
       .overlay(content: {
           if let url = cameraModel.previewURL, cameraModel.showPreview {
               FinalPreview(url: url, showPreview: $cameraModel.showPreview)
                   .transition(.move(edge: .trailing))
           }
       })
       .animation(.easeInOut, value: cameraModel.showPreview)
       .preferredColorScheme(.dark)
   }
}
struct FinalPreview: View {
   var url: URL
   @Binding var showPreview: Bool
   
   var body: some View {
       GeometryReader { proxy in
           let size = proxy.size
           
           VideoPlayer(player: AVPlayer(url: url))
               .aspectRatio(contentMode: .fill)
               .frame(width: size.width, height: size.height)
               .clipShape(RoundedRectangle(cornerRadius: 30, style: .continuous))
               .overlay(alignment: .topLeading) {
                   Button {
                       showPreview.toggle()
                   } label: {
                       Label {
                           Text("Back")
                       } icon: {
                           Image(systemName: "chevron.left")
                       }
                       .foregroundColor(.white)
                   }
                   .padding(.leading)
                   .padding(.top,22)
               }
       }
   }
}

struct CameraStoryView: View {
   @EnvironmentObject var cameraModel: CameraViewModel
   var body: some View {
       
       GeometryReader { proxy in
           let size = proxy.size
           
           CameraPreview(size: size).environmentObject(cameraModel)
          
       }
       .onReceive(Timer.publish(every: 0.01, on: .main, in: .common).autoconnect()) { _ in
           if cameraModel.recordedDuration <= cameraModel.maxDuration && cameraModel.isRecording{
               cameraModel.recordedDuration += 0.01
           }
           
           if cameraModel.recordedDuration >= cameraModel.maxDuration && cameraModel.isRecording{
               cameraModel.stopRecording()
               cameraModel.isRecording = false
           }
       }
   }
}

struct CameraPreview: UIViewRepresentable {
   @EnvironmentObject var cameraModel : CameraViewModel
   var size: CGSize
   
   func makeUIView(context: Context) -> UIView {
       let view = UIView(frame: CGRect(origin: .zero, size: size))
       guard let preview = cameraModel.preview else { return view }

       preview.frame = view.bounds
       preview.videoGravity = .resizeAspectFill
       view.layer.addSublayer(preview)

       DispatchQueue.global(qos: .userInitiated).async {
           if !self.cameraModel.session.isRunning {
               self.cameraModel.session.startRunning()
           }
       }
       
       return view
   }
   
   func updateUIView(_ uiView: UIView, context: Context) { }
}
ios swift swiftui video avfoundation
1 Answer

Have you tried this with the stock Camera app? I don't think it's supported there.

The standard camera app on most smartphones, including iPhones, generally doesn't support switching between the front and back cameras during an ongoing video recording session. Users typically have to choose the desired camera (front or back) before starting the recording.

In your flipCamera() function you switch the camera input correctly, but extra steps may be needed to handle the transition between inputs seamlessly during an ongoing recording session.

If you want to explore whether this is feasible at all, you should make sure the session configuration is updated atomically, to reduce the chance of inconsistencies while the camera is being switched.
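If you would rather keep the multi-clip design, one workaround is to end the current clip before flipping and start a new clip right after, so the inputs never change while a file is being written; your existing merge step then joins the clips. A minimal sketch (flipCameraWhileRecording is a hypothetical helper, and the fixed delay stands in for proper delegate-driven sequencing):

func flipCameraWhileRecording() {
    guard isRecording else {
        flipCamera()
        return
    }
    // Finish the current clip so AVCaptureMovieFileOutput is not writing
    // while the session's inputs change.
    stopRecording()
    flipCamera()
    // Sketch only: a robust version would restart recording from the
    // fileOutput(_:didFinishRecordingTo:from:error:) callback instead
    // of relying on a fixed delay.
    DispatchQueue.main.asyncAfter(deadline: .now() + 0.5) {
        self.startRecording()
    }
}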

Also make sure the video-processing logic in mergeVideos(assets:completion:) handles video orientation and metadata correctly. Different camera inputs can have different default orientations or metadata, and these must be normalized during the merge.
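As a sketch of that normalization (makeLayerInstruction is a hypothetical helper, assuming each source track's preferredTransform encodes its camera's orientation), you could apply each clip's own transform at the time it starts, instead of the single fixed 90° rotation used above:

func makeLayerInstruction(for videoTrack: AVMutableCompositionTrack,
                          from assets: [AVURLAsset]) async throws -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
    var cursor = CMTime.zero
    for asset in assets {
        // Front-camera clips are often mirrored and oriented differently
        // from back-camera clips; preferredTransform captures that.
        let sourceTrack = try await asset.loadTracks(withMediaType: .video)[0]
        let transform = try await sourceTrack.load(.preferredTransform)
        instruction.setTransform(transform, at: cursor)
        cursor = CMTimeAdd(cursor, try await asset.load(.duration))
    }
    return instruction
}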

Your flipCamera() method would then look like this (with error handling that catches any failures, which may also help confirm whether the feature is simply unsupported):

func flipCamera() {
    guard let currentVideoInput = session.inputs.first as? AVCaptureDeviceInput else {
        return
    }

    session.beginConfiguration()
    session.removeInput(currentVideoInput)

    let newCameraPosition: AVCaptureDevice.Position = (currentCameraPosition == .back) ? .front : .back
    guard let newCamera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: newCameraPosition) else {
        session.commitConfiguration()
        return
    }

    do {
        let newVideoInput = try AVCaptureDeviceInput(device: newCamera)

        if session.canAddInput(newVideoInput) {
            session.addInput(newVideoInput)
            currentCameraPosition = newCameraPosition
        } else {
            session.addInput(currentVideoInput) // Re-add the old input if new input fails
        }
    } catch {
        print("Error creating new camera input: \(error)")
        session.addInput(currentVideoInput) // Re-add the old input in case of error
    }

    session.commitConfiguration()
}

The session changes are now wrapped in session.beginConfiguration() and session.commitConfiguration(). This ensures all changes to the session are applied together as one atomic batch, reducing the chance that an intermediate state causes problems.

Instead of iterating over all available devices to find the new camera, I use AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: newCameraPosition) directly to get the camera device for the new position (front or back). This simplifies finding and selecting the right camera.

As discussed in "Why does AVCaptureDevice.default return nil in SwiftUI?", AVCaptureDevice.default() returns nil when the specified device type isn't available on the hardware, so you need to request a camera type that actually exists on the user's device. For most general purposes, .builtInWideAngleCamera is the safer choice because it is more universally available across iPhone models.
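If you want to guard against that nil, a small fallback sketch (bestCamera is a hypothetical helper) could prefer the wide-angle camera and otherwise take whatever video device exists at the requested position:

func bestCamera(at position: AVCaptureDevice.Position) -> AVCaptureDevice? {
    // The wide-angle camera exists on virtually every iPhone model.
    if let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: position) {
        return device
    }
    // Otherwise fall back to any video device found at that position.
    let discovery = AVCaptureDevice.DiscoverySession(
        deviceTypes: [.builtInDualCamera, .builtInTrueDepthCamera, .builtInWideAngleCamera],
        mediaType: .video,
        position: position)
    return discovery.devices.first
}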

If an error occurs while creating the new input, or the new input can't be added to the session, the original input is re-added. This fallback ensures the session keeps running with its previous configuration when the new one fails.

The current input is removed from the session before the new one is attempted. This matters so the session doesn't end up with a wrong or conflicting set of inputs.
