//
//  Copyright (c) 2018 Open Whisper Systems. All rights reserved.
//

import Foundation
import WebRTC

@objc
public class AudioActivity: NSObject {
    let audioDescription: String

    override public var description: String {
        return "<\(self.logTag) audioDescription: \"\(audioDescription)\">"
    }

    public init(audioDescription: String) {
        self.audioDescription = audioDescription
    }

    deinit {
        OWSAudioSession.shared.ensureAudioSessionActivationState()
    }
}

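// Illustrative note (not part of the original file): because AudioActivity calls
// ensureAudioSessionActivationState() in deinit, simply letting an activity
// deallocate nudges the session back toward its default state, e.g.:
//
//     var activity: AudioActivity? = AudioActivity(audioDescription: "example")
//     OWSAudioSession.shared.startAudioActivity(activity!)
//     activity = nil  // deinit fires; the session re-checks whether it can deactivate
//
// Explicitly calling endAudioActivity(_:) remains the intended path; the deinit
// hook is only a recovery mechanism (see ensureAudioSessionActivationState below).
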
@objc
public class OWSAudioSession: NSObject {

    // Force singleton access
    public static let shared = OWSAudioSession()
    private override init() {}
    private let avAudioSession = AVAudioSession.sharedInstance()

    private var currentActivities: [Weak<AudioActivity>] = []

    // Ignores the hardware mute switch; plays through the external speaker.
    public func setPlaybackCategory(audioActivity: AudioActivity) {
        Logger.debug("\(logTag) in \(#function)")

        // In general, we should have put the audio session back to its default
        // category when we were done with whatever activity required it to be modified.
        assert(avAudioSession.category == AVAudioSessionCategorySoloAmbient)

        startAudioActivity(audioActivity)

        do {
            try avAudioSession.setCategory(AVAudioSessionCategoryPlayback)
        } catch {
            owsFail("\(logTag) in \(#function) failed with error: \(error)")
        }
    }

    public func setRecordCategory(audioActivity: AudioActivity) -> Bool {
        Logger.debug("\(logTag) in \(#function)")

        // In general, we should have put the audio session back to its default
        // category when we were done with whatever activity required it to be modified.
        assert(avAudioSession.category == AVAudioSessionCategorySoloAmbient)

        assert(avAudioSession.recordPermission() == .granted)

        startAudioActivity(audioActivity)

        do {
            try avAudioSession.setCategory(AVAudioSessionCategoryRecord)
            return true
        } catch {
            owsFail("\(logTag) in \(#function) failed with error: \(error)")
            return false
        }
    }

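    // Illustrative sketch (not part of the original file): a hypothetical caller
    // is expected to check the Bool result before starting its recorder, e.g.:
    //
    //     let activity = AudioActivity(audioDescription: "recording voice note")
    //     guard OWSAudioSession.shared.setRecordCategory(audioActivity: activity) else {
    //         return // category change failed; don't start the AVAudioRecorder
    //     }
    //     // ... record, then: OWSAudioSession.shared.endAudioActivity(activity)
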
    public func startAudioActivity(_ audioActivity: AudioActivity) {
        Logger.debug("\(logTag) in \(#function) with \(audioActivity)")

        self.currentActivities.append(Weak(value: audioActivity))
    }

    public func endAudioActivity(_ audioActivity: AudioActivity) {
        Logger.debug("\(logTag) in \(#function) with audioActivity: \(audioActivity)")

        currentActivities = currentActivities.filter { return $0.value != audioActivity }
        ensureAudioSessionActivationState()
    }

    fileprivate func ensureAudioSessionActivationState() {
        // Cull any stale activities.
        currentActivities = currentActivities.flatMap { oldActivity in
            guard oldActivity.value != nil else {
                // Normally we should be explicitly stopping an audio activity, but this allows
                // for recovery if the owner of the AudioActivity was GC'd without ending its
                // audio activity.
                Logger.warn("\(logTag) an old activity has been GC'd")
                return nil
            }

            // Return any still-active activities.
            return oldActivity
        }

        guard currentActivities.isEmpty else {
            Logger.debug("\(logTag) not deactivating due to currentActivities: \(currentActivities)")
            return
        }

        do {
            try avAudioSession.setCategory(AVAudioSessionCategorySoloAmbient)

            // When playing audio in Signal, other apps' audio (e.g. Music) is paused.
            // By notifying when we deactivate, the other app can resume playback.
            try avAudioSession.setActive(false, with: [.notifyOthersOnDeactivation])
        } catch {
            owsFail("\(logTag) in \(#function) failed with error: \(error)")
        }
    }

    // MARK: - WebRTC Audio

    /**
     * By default, WebRTC starts the audio session (PlayAndRecord) immediately upon creating the peer connection,
     * but we want to create the peer connection and set up all the signaling channels before we prompt the user
     * for an incoming call. Without manually handling the session, this would result in the user seeing a recording
     * permission request (and recording banner) before they even know they have an incoming call.
     *
     * By using the `useManualAudio` and `isAudioEnabled` attributes of the RTCAudioSession, we can delay recording
     * until it makes sense.
     */

    /**
     * The private instance that manages AVAudioSession for WebRTC.
     */
    private let rtcAudioSession = RTCAudioSession.sharedInstance()

    /**
     * This must be called before any audio tracks are added to the peerConnection, else we'll start recording before
     * all our signaling is set up.
     */
    public func configureRTCAudio() {
        Logger.info("\(logTag) in \(#function)")
        rtcAudioSession.useManualAudio = true
    }

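    // Illustrative call sequence (not part of the original file), assuming a
    // hypothetical call-setup flow:
    //
    //     OWSAudioSession.shared.configureRTCAudio()  // once, before creating the peer connection
    //     // ... create RTCPeerConnection, exchange signaling, show the incoming-call UI ...
    //     OWSAudioSession.shared.isRTCAudioEnabled = true   // only when the user actually answers
    //     // ... and on hangup:
    //     OWSAudioSession.shared.isRTCAudioEnabled = false
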
    /**
     * Because we useManualAudio with our RTCAudioSession, we have to start/stop the recording audio session ourselves.
     * See the comment above for details on manual audio.
     */
    public var isRTCAudioEnabled: Bool {
        get {
            return rtcAudioSession.isAudioEnabled
        }
        set {
            rtcAudioSession.isAudioEnabled = newValue
        }
    }
}
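
// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the original file). A minimal example
// of how a caller might scope an AudioActivity around playback; the
// `ExamplePlayer` type and its methods are hypothetical names, not app code.
// ---------------------------------------------------------------------------
class ExamplePlayer {
    // Retain the activity so the session stays configured while we play.
    private var audioActivity: AudioActivity?

    func startPlayback() {
        let activity = AudioActivity(audioDescription: "playing a voice message")
        audioActivity = activity
        // Switches the shared AVAudioSession to the Playback category.
        OWSAudioSession.shared.setPlaybackCategory(audioActivity: activity)
        // ... start an AVAudioPlayer here ...
    }

    func stopPlayback() {
        // ... stop the AVAudioPlayer ...
        if let activity = audioActivity {
            // Removes the activity; the session deactivates if none remain.
            OWSAudioSession.shared.endAudioActivity(activity)
            audioActivity = nil
        }
    }
}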