Video gets cropped more than photos on single-lens devices


I have an app that takes photos and videos at a predetermined zoom amount. It does this by simply cropping and rescaling the raw photo/video output. To crop a photo I just create a new view with the cropped dimensions and put the photo inside it; for video I use AVMutableComposition (I also need to apply a watermark, so any alternative solution needs to take that into account).
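
For context, the crop-and-rescale idea boils down to something like this simplified sketch. `centerCrop` and its `zoom` parameter are stand-ins for illustration, not my real code, and the watermark step is omitted:

    import UIKit

    // Illustrative only: center-crop a UIImage by a zoom factor.
    func centerCrop(_ image: UIImage, zoom: CGFloat) -> UIImage? {
        guard zoom > 1, let cgImage = image.cgImage else { return image }
        // The cropped region is 1/zoom of the original, centered.
        let cropWidth = CGFloat(cgImage.width) / zoom
        let cropHeight = CGFloat(cgImage.height) / zoom
        let cropRect = CGRect(
            x: (CGFloat(cgImage.width) - cropWidth) / 2,
            y: (CGFloat(cgImage.height) - cropHeight) / 2,
            width: cropWidth,
            height: cropHeight)
        guard let cropped = cgImage.cropping(to: cropRect) else { return nil }
        return UIImage(cgImage: cropped, scale: image.scale,
                       orientation: image.imageOrientation)
    }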

On multi-lens devices the crop works correctly and looks the same on photos and videos, but on single-lens devices the video ends up with extra cropping. For example, here is an iPhone 13:

And here is an iPhone SE:

There is a lot of code leading up to the final crop, some of which differs between single-lens and multi-lens devices, but I believe the problem is in the crop function itself, because when I turn it off I get this:

Here is the photo crop function:

    let image = UIImage(data: data)!
    
    var height = image.size.height
    var width = image.size.width
    var sensor = SensorSize(width: 0, height: 0, name: "")
    if showExtractionsAndCropsAsOverlays {
        sensor = currentSensorSizeWithAccessoriesExceptFramelinesAndCrops
    } else {
        sensor = currentSensorSizeWithAccessories
    }
    if currentLens.anamorphic { sensor.width *= currentSqueezeFactor }
    let aspect = sensor.aspect
    // Constrain one dimension so the output matches the sensor's aspect ratio.
    if sensorBackgroundLeading.isActive {
        height = width / aspect
    } else {
        width = height * aspect
    }
    let frame = CGRect(x: 0, y: 0, width: width, height: height)
    let croppedImageView = UIView(frame: frame)
    let imageView = UIImageView(frame: frame)
    
    imageView.image = image
    croppedImageView.insertSubview(imageView, at: 0)
    imageView.center = croppedImageView.center
    imageView.clipsToBounds = true
    imageView.contentMode = .scaleAspectFill
    croppedImageView.clipsToBounds = true
    
    // Outside the optical zoom range, scale the image up so the crop
    // matches what the on-screen preview showed.
    if currentZoomRange != .withinRange {
        let factor = currentActiveImageWidth / sensorViewForBackground.bounds.width
        imageView.transform = imageView.transform.scaledBy(x: factor, y: factor)
    }
    
    // Render the view hierarchy into a bitmap at scale 1.
    UIGraphicsBeginImageContextWithOptions(croppedImageView.frame.size, true, 1.0)
    croppedImageView.layer.render(in: UIGraphicsGetCurrentContext()!)
    let croppedImageWithBackground = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext() // balance the begin call so the context isn't leaked
    return croppedImageWithBackground!
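
(As an aside, the same render step can also be written with UIGraphicsImageRenderer, which manages the bitmap context itself. A sketch, assuming the same `croppedImageView`:)

    // The same render step with UIGraphicsImageRenderer, which handles
    // context setup and teardown automatically (no explicit begin/end calls).
    let format = UIGraphicsImageRendererFormat()
    format.scale = 1.0
    format.opaque = true
    let renderer = UIGraphicsImageRenderer(size: croppedImageView.frame.size,
                                           format: format)
    let croppedImageWithBackground = renderer.image { context in
        croppedImageView.layer.render(in: context.cgContext)
    }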

And here is the video one:

    let asset = videoData.getAVAsset()
    let composition = AVMutableComposition()
    // Build a composition with one video track (and one audio track below).
    guard
        let compositionTrack = composition.addMutableTrack(
            withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid),
        let assetTrack = asset.tracks(withMediaType: .video).first
    else {
        print("^^Something is wrong with the asset.")
        return nil
    }
    
    do {
        // Copy the entire source video (and audio, if present) into the composition.
        let timeRange = CMTimeRange(start: .zero, duration: asset.duration)
        try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
        
        if let audioAssetTrack = asset.tracks(withMediaType: .audio).first,
           let compositionAudioTrack = composition.addMutableTrack(
            withMediaType: .audio,
            preferredTrackID: kCMPersistentTrackID_Invalid) {
            try compositionAudioTrack.insertTimeRange(
                timeRange,
                of: audioAssetTrack,
                at: .zero)
        }
    } catch {
        print(error)
    }
    
    // Carry over the source track's orientation transform.
    compositionTrack.preferredTransform = assetTrack.preferredTransform
    
    // Same sensor/aspect math as the photo path, but starting from the
    // video track's naturalSize rather than the photo's dimensions.
    var height = assetTrack.naturalSize.height
    var width = assetTrack.naturalSize.width
    var sensor = SensorSize(width: 0, height: 0, name: "")
    if showExtractionsAndCropsAsOverlays {
        sensor = currentSensorSizeWithAccessoriesExceptFramelinesAndCrops
    } else {
        sensor = currentSensorSizeWithAccessories
    }
    if currentLens.anamorphic { sensor.width *= currentSqueezeFactor }
    let aspect = sensor.aspect
    if sensorBackgroundLeading.isActive {
        height = width / aspect
    } else {
        width = height * aspect
    }
    let videoSize = CGSize(width: width, height: height)
    
    let backgroundLayer = CALayer()
    backgroundLayer.frame = CGRect(origin: .zero, size: videoSize)
    let videoLayer = CALayer()
    videoLayer.frame = CGRect(origin: .zero, size: videoSize) // replaced below
    videoLayer.contentsGravity = .resizeAspectFill
    let overlayLayer = CALayer()
    overlayLayer.frame = CGRect(origin: .zero, size: videoSize)
    
    // Shrink the video layer to apply the extra crop. Note that, unlike the
    // photo path, this is applied regardless of currentZoomRange, and that
    // `difference` (computed from the width) is also subtracted from the height.
    let widthMultiplier = currentActiveImageWidth / sensorViewForBackground.bounds.width
    let newWidth = videoSize.width * widthMultiplier
    let difference = videoSize.width - newWidth
    videoLayer.frame = CGRect(
        x: difference/2,
        y: difference/2,
        width: videoSize.width - difference,
        height: videoSize.height - difference)
    let outputLayer = CALayer()
    outputLayer.frame = CGRect(origin: .zero, size: videoSize)
    outputLayer.addSublayer(backgroundLayer)
    outputLayer.addSublayer(videoLayer)
    outputLayer.addSublayer(overlayLayer)
    
    let videoComposition = AVMutableVideoComposition()
    videoComposition.renderSize = videoSize
    videoComposition.frameDuration = CMTime(value: 1, timescale: 30) // 30 fps
    // Composite the Core Animation layers over the rendered video frames.
    videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
        postProcessingAsVideoLayer: videoLayer,
        in: outputLayer)
    
    func compositionLayerInstruction(for track: AVCompositionTrack, assetTrack: AVAssetTrack) -> AVMutableVideoCompositionLayerInstruction {
        let transformInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: assetTrack)

        let compWidth = width
        let compHeight = height
        var videoWidth = assetTrack.naturalSize.width
        var videoHeight = assetTrack.naturalSize.height

        // If the source video is smaller than the composition in either
        // dimension, scale it up to fill.
        var scaleOffset = 1.0
        if videoWidth < compWidth {
            scaleOffset = compWidth/videoWidth
        } else if videoHeight < compHeight {
            scaleOffset = compHeight/videoHeight
        }

        videoWidth *= scaleOffset
        videoHeight *= scaleOffset

        // Center the (scaled) video within the composition.
        let compCenterX = compWidth/2
        let videoCenterX = videoWidth/2
        let xOffset = compCenterX - videoCenterX
        let compCenterY = compHeight/2
        let videoCenterY = videoHeight/2
        let yOffset = compCenterY - videoCenterY

        let translate = CGAffineTransform(translationX: xOffset, y: yOffset)
        let rotate = translate.rotated(by: 2 * .pi) // full rotation; effectively a no-op
        let scale = rotate.scaledBy(x: scaleOffset, y: scaleOffset)
        transformInstruction.setTransform(scale, at: .zero)
        return transformInstruction
    }
    
    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRange(
        start: .zero,
        duration: composition.duration)
    videoComposition.instructions = [instruction]
    let layerInstruction = compositionLayerInstruction(
        for: compositionTrack,
        assetTrack: assetTrack)
    instruction.layerInstructions = [layerInstruction]
    
    guard let export1 = AVAssetExportSession(
        asset: composition,
        presetName: AVAssetExportPresetHighestQuality)
    else {
        print("Cannot create export session.")
        return nil
    }
    
    // Export to a unique .mov in the temporary directory.
    let videoName = UUID().uuidString
    let exportURL = URL(fileURLWithPath: NSTemporaryDirectory())
        .appendingPathComponent(videoName)
        .appendingPathExtension("mov")
    
    export1.videoComposition = videoComposition
    export1.outputFileType = .mov
    export1.outputURL = exportURL
    
    // Run the export, then read its status on the main queue.
    await export1.export()
    return await withCheckedContinuation { continuation in
        DispatchQueue.main.async {
            switch export1.status {
            case .completed:
                continuation.resume(returning: exportURL)
            default:
                print("Something went wrong during export.")
                print(export1.error ?? "unknown error")
                continuation.resume(returning: nil)
            }
        }
    }
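
For what it's worth, this is how the video layer math would look if it mirrored the photo path exactly (scaling only when currentZoomRange != .withinRange, and deriving the vertical inset from the height instead of reusing the width-based difference). This is only a sketch of where the two paths diverge, not a claim that it fixes the issue:

    // Hypothetical variant that mirrors the photo path's conditional,
    // uniform scaling; shown for comparison only.
    if currentZoomRange != .withinRange {
        let newWidth = videoSize.width * widthMultiplier
        let newHeight = videoSize.height * widthMultiplier
        videoLayer.frame = CGRect(
            x: (videoSize.width - newWidth) / 2,
            y: (videoSize.height - newHeight) / 2,
            width: newWidth,
            height: newHeight)
    }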

(Obviously there are some variables in there whose computation I haven't shown, but since those are computed before the crop functions run, and the photo and video functions use the same ones, they shouldn't be the source of the problem. That said, I'm happy to post more code if anyone would like to see it.)

swift avcapturesession avmutablecomposition