mirror of https://github.com/oxen-io/session-ios
Implement VideoCallVC & CameraManager
parent
b513eeb898
commit
170da7a276
@ -0,0 +1,69 @@
|
||||
import Foundation
|
||||
import AVFoundation
|
||||
|
||||
/// Receives video sample buffers captured by `CameraManager`.
///
/// NOTE(review): callbacks arrive on `CameraManager`'s private data-output
/// queue, not the main thread — implementers must dispatch to main before
/// touching UI.
@objc
protocol CameraCaptureDelegate : AnyObject {

    /// Called for every captured video frame (audio buffers are not forwarded).
    func captureVideoOutput(sampleBuffer: CMSampleBuffer)
}
|
||||
|
||||
/// Manages the front-facing camera capture session used for video calls and
/// forwards captured video frames to its `delegate`.
final class CameraManager : NSObject {
    private let captureSession = AVCaptureSession()
    private let videoDataOutput = AVCaptureVideoDataOutput()
    // NOTE(review): declared but never added to the session — audio capture is
    // presumably handled elsewhere (e.g. by WebRTC itself); confirm before removing.
    private let audioDataOutput = AVCaptureAudioDataOutput()
    // Dedicated serial queue so sample buffer delivery never blocks the main thread.
    private let dataOutputQueue = DispatchQueue(label: "CameraManager.dataOutputQueue", qos: .userInitiated, attributes: [], autoreleaseFrequency: .workItem)
    private var isCapturing = false
    weak var delegate: CameraCaptureDelegate?

    /// The front wide-angle camera, or `nil` if unavailable (e.g. some simulators).
    private lazy var videoCaptureDevice: AVCaptureDevice? = {
        return AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)
    }()

    static let shared = CameraManager()

    private override init() { }

    /// Configures the capture session: attaches the front-camera input and the
    /// video data output. Call once before `start()`.
    func prepare() {
        // FIX: wrap all session changes in a begin/commit pair so they are applied
        // atomically. The original called commitConfiguration() only in the output
        // failure branch, without any matching beginConfiguration().
        captureSession.beginConfiguration()
        defer { captureSession.commitConfiguration() }
        if let videoCaptureDevice = videoCaptureDevice,
            let videoInput = try? AVCaptureDeviceInput(device: videoCaptureDevice), captureSession.canAddInput(videoInput) {
            captureSession.addInput(videoInput)
        } else {
            // FIX: the original silently ignored camera-input setup failure while
            // logging the analogous output failure below.
            SNLog("Couldn't add video input to capture session.")
        }
        if captureSession.canAddOutput(videoDataOutput) {
            captureSession.addOutput(videoDataOutput)
            videoDataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
            videoDataOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
            videoDataOutput.connection(with: .video)?.videoOrientation = .portrait
            // Mirror the front camera so the local preview matches what users expect.
            videoDataOutput.connection(with: .video)?.automaticallyAdjustsVideoMirroring = false
            videoDataOutput.connection(with: .video)?.isVideoMirrored = true
        } else {
            SNLog("Couldn't add video data output to capture session.")
        }
    }

    /// Starts the capture session. No-op if already capturing.
    func start() {
        guard !isCapturing else { return }
        isCapturing = true
        // Session is only started on device (arm64); the rendering stack used
        // downstream isn't available on the x86 simulator.
        #if arch(arm64)
        // NOTE(review): startRunning() blocks the calling thread until the session
        // starts — consider dispatching off the main thread; confirm call sites.
        captureSession.startRunning()
        #endif
    }

    /// Stops the capture session. No-op if not capturing.
    func stop() {
        guard isCapturing else { return }
        isCapturing = false
        #if arch(arm64)
        captureSession.stopRunning()
        #endif
    }
}
|
||||
|
||||
// MARK: - Sample Buffer Delegates
extension CameraManager : AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {

    /// Forwards captured video frames to the delegate; audio callbacks are filtered out.
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        let videoConnection = videoDataOutput.connection(with: .video)
        // This delegate also receives audio buffers; only forward video ones.
        guard connection == videoConnection else { return }
        delegate?.captureVideoOutput(sampleBuffer: sampleBuffer)
    }

    /// Dropped frames are deliberately ignored.
    func captureOutput(_ output: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) { }
}
|
@ -0,0 +1,58 @@
|
||||
import UIKit
|
||||
import AVFoundation
|
||||
import WebRTC
|
||||
|
||||
/// Displays the local and remote video streams for an ongoing call, stacked
/// vertically and sharing the screen equally.
class VideoCallVC : UIViewController {
    // FIX: these were implicitly-unwrapped optionals (`UIView!`) that were never
    // assigned anywhere, so the first access in setUpViewHierarchy()
    // (`self.localVideoView.frame`) force-unwrapped nil and crashed.
    // Initialize them eagerly instead.
    private let localVideoView = UIView()
    private let remoteVideoView = UIView()

    override func viewDidLoad() {
        super.viewDidLoad()
        setUpViewHierarchy()
        // Start receiving camera frames (see CameraCaptureDelegate conformance).
        CameraManager.shared.delegate = self
    }

    /// Builds the renderer views, stacks the local/remote containers, and
    /// attaches the renderers to the call manager.
    private func setUpViewHierarchy() {
        // Create video renderers
        #if arch(arm64)
        // Use Metal on device
        let localRenderer = RTCMTLVideoView(frame: localVideoView.frame)
        localRenderer.contentMode = .scaleAspectFill
        let remoteRenderer = RTCMTLVideoView(frame: remoteVideoView.frame)
        remoteRenderer.contentMode = .scaleAspectFill
        #else
        // Use OpenGLES on the simulator
        let localRenderer = RTCEAGLVideoView(frame: localVideoView.frame)
        let remoteRenderer = RTCEAGLVideoView(frame: remoteVideoView.frame)
        #endif
        // Set up stack view: local on top, remote below, equal heights
        let stackView = UIStackView(arrangedSubviews: [ localVideoView, remoteVideoView ])
        stackView.axis = .vertical
        stackView.distribution = .fillEqually
        stackView.alignment = .fill
        view.addSubview(stackView)
        stackView.translatesAutoresizingMaskIntoConstraints = false
        stackView.pin(to: view)
        // Attach the renderers to the call and embed them in their container views
        CallManager.shared.attachLocalRenderer(localRenderer)
        CallManager.shared.attachRemoteRenderer(remoteRenderer)
        localVideoView.addSubview(localRenderer)
        localRenderer.translatesAutoresizingMaskIntoConstraints = false
        localRenderer.pin(to: localVideoView)
        remoteVideoView.addSubview(remoteRenderer)
        remoteRenderer.translatesAutoresizingMaskIntoConstraints = false
        remoteRenderer.pin(to: remoteVideoView)
    }
}
|
||||
|
||||
// MARK: Camera
|
||||
extension VideoCallVC : CameraCaptureDelegate {

    /// Wraps a captured camera frame in an `RTCVideoFrame` and hands it to the
    /// call manager for transmission.
    func captureVideoOutput(sampleBuffer: CMSampleBuffer) {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        let rtcPixelBuffer = RTCCVPixelBuffer(pixelBuffer: pixelBuffer)
        let presentationTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
        // WebRTC expects the presentation timestamp in nanoseconds.
        let timestampInNs = Int64(CMTimeGetSeconds(presentationTime) * 1_000_000_000)
        let frame = RTCVideoFrame(buffer: rtcPixelBuffer, rotation: RTCVideoRotation._0, timeStampNs: timestampInNs)
        CallManager.shared.handleLocalFrameCaptured(frame)
    }
}
|
Loading…
Reference in New Issue