I'm building an app that shows the camera with a precise field of view. When setting up my AVCaptureSession I use the FOV reported by device.activeFormat.videoFieldOfView as the basis for my calculations, and the app then zooms in by a user-specified amount using videoZoomFactor.
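Roughly, the relationship I'm relying on looks like this (a simplified sketch just to illustrate the approach, not my exact code; targetFocal stands in for whatever focal length the user asks for):

import Foundation
import AVFoundation

// Treat the reported horizontal FOV as a full-frame-equivalent focal length
// (18 = half of a 36mm-wide full-frame sensor), then zoom by the ratio of the
// requested focal length to that baseline.
func zoomFactor(forTargetFocal targetFocal: Double, on device: AVCaptureDevice) -> Double {
    let hfovRadians = Double(device.activeFormat.videoFieldOfView) * .pi / 180
    let baseFocal = 18.0 / tan(hfovRadians / 2)
    return targetFocal / baseFocal // > 1 means the device needs to zoom in
}

In the actual class the baseline FOV is stored in phoneCameraHAOV and the rest of the math lives in updateUI() below.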
I've been testing on a 13 Pro, a 13, and an SE 2022, and all three give me slightly different fields of view, differing from one another by 5-10%. I decided to concentrate on just the 13 and the 13 Pro, since they should have exactly the same wide-angle lens.
I printed the FOV the system reports from device.activeFormat.videoFieldOfView: 102º on the 13 Pro and 101º on the 13. I don't think that difference is enough to be the cause, but just in case I tried manually setting the baseline of my calculation to 100º on both devices. It made no difference.
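For scale: plugging those values into the 18 / tan(HFOV/2) baseline from the sketch above, 102º works out to about 14.6mm and 101º to about 14.8mm of full-frame-equivalent horizontal focal length, a difference of a bit under 2%, which is why I doubt the reported FOV by itself explains a 5-10% discrepancy.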
I tested with the stock Photos app and the photos come out the same across devices, so I know the problem is somewhere in my app.
Here is all the code that might be relevant; sorry there's so much of it. I can post the whole class if anyone wants it.
func setup() {
Bundle.main.loadNibNamed("CameraView", owner: self, options: nil)
addSubview(contentView)
contentView.frame = self.bounds
contentView.autoresizingMask = [.flexibleWidth, .flexibleHeight]
imageOutputView.backgroundColor = Colors.specLabel
imageOutputView.clipsToBounds = true
currentActiveImageWidth = imageOutputWidth.constant
currentMaskWidth = aspectMaskWidth.constant
sensorViewForBackground.backgroundColor = Colors.specLabel
sensorViewForBackground.layer.cornerRadius = imageCornerRadius
imageOutputView.layer.addSublayer(previewLayer)
checkCameraPermissionsAndSetupCamera()
previewLayer.frame = imageOutputView.bounds
if let previewLayerConnection = previewLayer.connection, previewLayerConnection.isVideoOrientationSupported {
previewLayerConnection.videoOrientation = .landscapeRight
}
if let captureSessionConnection = session?.connections.first, captureSessionConnection.isVideoOrientationSupported {
captureSessionConnection.videoOrientation = .landscapeRight
}
}
func updateUI() {
var sensor = currentSensorSizeWithAccessories!
if currentLens.anamorphic == true { sensor.width *= currentSqueezeFactor }
let sensorAspect = sensor.aspect
let viewWidth = contentView.bounds.width
let viewHeight = contentView.bounds.height
viewAspect = Double(viewWidth / viewHeight)
// SET BACKGROUND SENSOR VIEW
if viewAspect > sensorAspect {
// the contentView is wider than the camera sensor. Pin the sensor to the top and bottom.
sensorBackgroundTop.isActive = true
sensorBackgroundBottom.isActive = true
sensorBackgroundLeading.isActive = false
sensorBackgroundTrailing.isActive = false
gridHeight.isActive = true
gridWidth.isActive = false
} else {
// the contentView is taller than the camera sensor. Pin the sensor to the sides.
sensorBackgroundTop.isActive = false
sensorBackgroundBottom.isActive = false
sensorBackgroundLeading.isActive = true
sensorBackgroundTrailing.isActive = true
gridHeight.isActive = false
gridWidth.isActive = true
}
let newMultiplier = sensorBackgroundAspectConstraint.constraintWithMultiplier(sensorAspect)
contentView.removeConstraint(sensorBackgroundAspectConstraint)
contentView.addConstraint(newMultiplier)
sensorBackgroundAspectConstraint = newMultiplier
// SET ASPECT MASK RATIO
if (imageOutputWidth.constant/imageOutputHeight.constant) > sensorAspect {
// the phone sensor is wider than the camera sensor
let previousWidth = aspectMaskWidth.constant
aspectMaskWidth.constant = imageOutputHeight.constant*sensorAspect
aspectMaskHeight.constant = imageOutputHeight.constant
let newWidth = aspectMaskWidth.constant
let multiplier = newWidth / previousWidth
currentActiveImageWidth *= multiplier
currentMaskWidth *= multiplier
} else {
// the phone sensor is taller than the camera sensor
let previousWidth = aspectMaskWidth.constant
aspectMaskHeight.constant = imageOutputWidth.constant/sensorAspect
aspectMaskWidth.constant = imageOutputWidth.constant
let newWidth = aspectMaskWidth.constant
let multiplier = newWidth / previousWidth
currentActiveImageWidth *= multiplier
currentMaskWidth *= multiplier
}
// SET SIZE
DispatchQueue.main.async { [self] in
let radians = phoneCameraHAOV * Float.pi/180.0
let phoneFocalHorizontal = Double(18.0 / tan(radians/2)) // 18 = half of a 36mm full-frame sensor width, so this is a full-frame-equivalent horizontal focal length
var activePhoneFocalHorizontal = phoneFocalHorizontal * (Double(aspectMaskWidth.constant)/initialBaseWidth) // this will be a full frame equivalent
if (imageOutputWidth.constant/imageOutputHeight.constant) > sensorAspect {
activePhoneFocalHorizontal /= (sensorAspect / (initialBaseWidth/initialBaseHeight))
}
let activePhoneSensorWidth = 36 * (Double(aspectMaskWidth.constant)/initialBaseWidth) // this will be a full frame equivalent
let sensorWidthScaleFactor = activePhoneSensorWidth / sensor.width
let cineCameraHorizontalFocalEquivalent = currentFocalLength * sensorWidthScaleFactor
let focalScaleFactor = cineCameraHorizontalFocalEquivalent / activePhoneFocalHorizontal
var requiredImageWidth = 0.0
if focalScaleFactor < 1 {
// transform smaller than backgroundView
if useVirtualCamera == true && availableVirtualCamera != nil { // it's a multi-lens device and the user wants to use the dual or triple (virtual) camera.
if let device = AVCaptureDevice.default(availableVirtualCamera!, for: .video, position: .back), (try? device.lockForConfiguration()) != nil {
device.videoZoomFactor = 1
device.unlockForConfiguration() // release the configuration lock
}
}
requiredImageWidth = sensorViewForBackground.bounds.width * focalScaleFactor
currentZoomRange = .belowMinimum
} else if focalScaleFactor >= 1 && focalScaleFactor <= deviceMaximumZoomFactor {
// transform to backgroundView width and use zoomFactor
if useVirtualCamera == true && availableVirtualCamera != nil {
if let device = AVCaptureDevice.default(availableVirtualCamera!, for: .video, position: .back), (try? device.lockForConfiguration()) != nil {
device.videoZoomFactor = focalScaleFactor
device.unlockForConfiguration() // release the configuration lock
}
}
requiredImageWidth = sensorViewForBackground.bounds.width
currentZoomRange = .withinRange
} else if focalScaleFactor > deviceMaximumZoomFactor {
// transform larger than backgroundView
if useVirtualCamera == true && availableVirtualCamera != nil {
if let device = AVCaptureDevice.default(availableVirtualCamera!, for: .video, position: .back), (try? device.lockForConfiguration()) != nil {
device.videoZoomFactor = deviceMaximumZoomFactor
device.unlockForConfiguration() // release the configuration lock
}
}
let remainingZoomFactor = focalScaleFactor / deviceMaximumZoomFactor
requiredImageWidth = sensorViewForBackground.bounds.width * remainingZoomFactor
currentZoomRange = .aboveMaximum
}
let finalImageScaleFactor = requiredImageWidth / currentActiveImageWidth
imageOutputView.transform = imageOutputView.transform.scaledBy(x: finalImageScaleFactor, y: finalImageScaleFactor)
currentActiveImageWidth *= finalImageScaleFactor
let maxMaskWidth = sensorViewForBackground.bounds.width
let requiredMaskWidth = min(maxMaskWidth, requiredImageWidth)
let finalMaskScaleFactor = requiredMaskWidth / currentMaskWidth
aspectMask.transform = aspectMask.transform.scaledBy(x: finalMaskScaleFactor, y: finalMaskScaleFactor)
currentMaskWidth *= finalMaskScaleFactor
// SET MASK CORNER RADIUS
var originalMaskWidthToCurrentImageWidth = initialBaseWidth / currentMaskWidth
if originalMaskWidthToCurrentImageWidth > 1 { originalMaskWidthToCurrentImageWidth = 1 }
var cornerRadius = imageCornerRadius
cornerRadius *= originalMaskWidthToCurrentImageWidth
aspectMask.layer.cornerRadius = cornerRadius
aspectMaskContainer.isHidden = false
imageOutputContainer.layer.mask = aspectMaskContainer.layer
}
// ANIMATE
UIView.animate(withDuration: 0.5, delay: 0, usingSpringWithDamping: 0.8, initialSpringVelocity: 0.5, options: [.allowUserInteraction, .curveEaseInOut], animations: { [self] in
contentView.layoutIfNeeded()
})
}
func takeImage() {
selectionFeedbackGenerator.selectionChanged()
let photoSettings = AVCapturePhotoSettings.init(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
output.capturePhoto(with: photoSettings, delegate: self)
}
private func checkCameraPermissionsAndSetupCamera() {
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .notDetermined:
AVCaptureDevice.requestAccess(for: .video, completionHandler: { granted in
guard granted else {
return
}
DispatchQueue.main.async {
setupCamera()
}
})
case .restricted:
break
case .denied:
break
case .authorized:
setupCamera()
@unknown default:
break
}
func setupCamera() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(
deviceTypes: [ .builtInWideAngleCamera, .builtInUltraWideCamera, .builtInTelephotoCamera, .builtInTripleCamera, .builtInDualCamera, .builtInDualWideCamera ],
mediaType: .video,
position: .unspecified
)
let avCaptureDevices = deviceDiscoverySession.devices.map { $0.deviceType } // the camera device types actually present on this phone
if avCaptureDevices.contains(.builtInTripleCamera) {
availableVirtualCamera = .builtInTripleCamera
}
else if avCaptureDevices.contains(.builtInDualWideCamera) {
availableVirtualCamera = .builtInDualWideCamera
}
else if avCaptureDevices.contains(.builtInDualCamera) {
availableVirtualCamera = .builtInDualCamera
}
// if there is no virtual camera available, the availableVirtualCamera property will remain nil
if availableVirtualCamera != nil {
let device = AVCaptureDevice.default(availableVirtualCamera!, for: .video, position: .back)
deviceSwitchOverFactors = device!.virtualDeviceSwitchOverVideoZoomFactors.map({CGFloat(truncating: $0)})
deviceMaximumZoomFactor = device!.maxAvailableVideoZoomFactor
}
if avCaptureDevices.contains(.builtInUltraWideCamera) { widestDeviceCamera = .builtInUltraWideCamera }
else { widestDeviceCamera = .builtInWideAngleCamera }
let session = AVCaptureSession()
var deviceType: AVCaptureDevice.DeviceType {
if useVirtualCamera == true && availableVirtualCamera != nil { return availableVirtualCamera! }
else { return widestDeviceCamera }
}
currentDeviceCamera = deviceType
if let device = AVCaptureDevice.default(deviceType, for: .video, position: .back) {
do {
let input = try AVCaptureDeviceInput(device: device)
if session.canAddInput(input) {
session.addInput(input)
}
if session.canAddOutput(output) {
session.addOutput(output)
}
if session.canAddOutput(movieOutput) {
session.addOutput(movieOutput)
}
previewLayer.videoGravity = .resizeAspect
try device.lockForConfiguration()
defer { device.unlockForConfiguration() }
previewLayer.session = session
session.sessionPreset = .photo
phoneCameraHAOV = 100 // baseline HFOV manually pinned to 100º for testing; normally this comes from device.activeFormat.videoFieldOfView
print("^^phoneCameraHAOV \(phoneCameraHAOV)")
DispatchQueue.global(qos: .userInitiated).async {
session.startRunning()
}
self.session = session
} catch {
// handle error
}
}
}
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard let data = photo.fileDataRepresentation() else {
return
}
guard let image = UIImage(data: data) else { return }
UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
}