//
//  CameraView.swift
//  im-client-ios
//
//  Created by 北京居家科技有限公司 on 2022/3/10.
//

import SwiftUI
import AppKit
import AVFoundation
import CoreImage

// File-scope shared objects: a single preview view, capture session, video output,
// and sample-buffer delegate are reused by every CameraPreview instance.
let cameraPreview = CameraPreview()
fileprivate var captureSession = AVCaptureSession()
fileprivate var videoDataOutput = AVCaptureVideoDataOutput()
fileprivate let cameraCaptureDelegate = CameraCaptureDelegate()

// Bridges the AppKit-based CameraPreview into SwiftUI.
struct CameraPreviewHolder: NSViewRepresentable {
    typealias NSViewType = CameraPreview

    func makeNSView(context: Context) -> CameraPreview {
        let cameraPreview = CameraPreview()
        return cameraPreview
    }

    func updateNSView(_ nsView: CameraPreview, context: Context) {
    }
}

final class CameraPreview: NSView {
    private var tmp: URL?

    init() {
        super.init(frame: .zero)

        // Block until the user has answered the camera-permission prompt.
        var allowedAccess = false
        let blocker = DispatchGroup()
        blocker.enter()
        AVCaptureDevice.requestAccess(for: .video) { flag in
            allowedAccess = flag
            blocker.leave()
        }
        blocker.wait()

        guard allowedAccess else {
            print("No camera access")
            return
        }
        showCameraViewSetting()
    }

    func showCameraViewSetting() {
        let session = captureSession
        session.beginConfiguration()

        // Resolution
        session.sessionPreset = AVCaptureSession.Preset.high

        guard let videoDevice = AVCaptureDevice.default(for: .video) else {
            print("No video device")
            return
        }
        guard let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice),
              session.canAddInput(videoDeviceInput) else {
            print("Unable to determine video device input")
            return
        }
        session.addInput(videoDeviceInput)

        let videoOutput = videoDataOutput
        videoOutput.alwaysDiscardsLateVideoFrames = true
        if session.canAddOutput(videoOutput) {
            session.addOutput(videoOutput)
        }

        session.commitConfiguration()
        captureSession = session

        // Back the view with a preview layer so camera frames can be rendered.
        self.wantsLayer = true
        self.layer = AVCaptureVideoPreviewLayer()
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    var videoPreviewLayer: AVCaptureVideoPreviewLayer {
        return self.layer as! AVCaptureVideoPreviewLayer
    }

    override func viewDidMoveToSuperview() { // on iOS: didMoveToSuperview
        super.viewDidMoveToSuperview()
        if nil != self.superview {
            self.videoPreviewLayer.session = captureSession
            self.videoPreviewLayer.videoGravity = .resizeAspect
            captureSession.startRunning()
        } else {
            captureSession.stopRunning()
        }
    }

    // Start forwarding frames to the sample-buffer delegate for QR scanning.
    func scan() {
        videoDataOutput.setSampleBufferDelegate(cameraCaptureDelegate, queue: DispatchQueue.main)
    }

    func removeCameraPreview() {
        self.removeFromSuperview()
    }
}

class CameraCaptureDelegate: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        let cgImage: CGImage = imageFromSampleBuffer(sampleBuffer: sampleBuffer)
        print("\(cgImage)")
        // Pass the captured image to EFQRCode (search for this library on GitHub);
        // if the image contains a QR code, its payload can be decoded.
        // let result = EFQRCode.recognize(image: cgImage)
        // print("didOutput : \(String(describing: result))")
    }

    func captureOutput(_ output: AVCaptureOutput,
                       didDrop sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        print("didDrop sampleBuffer")
    }
}

// Converts a CMSampleBuffer from the camera into a CGImage via Core Image.
func imageFromSampleBuffer(sampleBuffer: CMSampleBuffer) -> CGImage {
    let imageBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
    let ciImage: CIImage = CIImage(cvPixelBuffer: imageBuffer)
    let context: CIContext = CIContext(options: nil)
    let cgImage: CGImage = context.createCGImage(ciImage, from: ciImage.extent)!
    return cgImage
}
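
// MARK: - Usage sketch (illustrative, not part of the original file).
// A minimal, hypothetical example of how CameraPreviewHolder might be embedded
// in a SwiftUI hierarchy and how scanning could be started when the view
// appears. The view name `QRScannerView` and the frame size are assumptions
// made for illustration only.
struct QRScannerView: View {
    var body: some View {
        CameraPreviewHolder()
            .frame(minWidth: 320, minHeight: 240)
            .onAppear {
                // scan() attaches the shared CameraCaptureDelegate to the
                // file-private videoDataOutput so frames start flowing.
                cameraPreview.scan()
            }
    }
}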