I'm 香蕉星星, a blogger at 靠谱客. This article is Part 1 of an exploration of the Android 11 Audio framework, shared here for reference.

1. main_audioserver.cpp starts the AudioPolicyService and AudioFlinger services

The main() function calls the instantiate() functions of AudioFlinger and AudioPolicyService.

int main(int argc __unused, char **argv)
{
    // TODO: update with refined parameters
    limitProcessMemory(
        "audio.maxmem", /* "ro.audio.maxmem", property that defines limit */
        (size_t)512 * (1 << 20), /* SIZE_MAX, upper limit in bytes */
        20 /* upper limit as percentage of physical RAM */);

    signal(SIGPIPE, SIG_IGN);

    bool doLog = (bool) property_get_bool("ro.test_harness", 0);

    pid_t childPid;
    // FIXME The advantage of making the process containing media.log service the parent process of
    // the process that contains the other audio services, is that it allows us to collect more
    // detailed information such as signal numbers, stop and continue, resource usage, etc.
    // But it is also more complex. Consider replacing this by independent processes, and using
    // binder on death notification instead.
    if (doLog && (childPid = fork()) != 0) {
        // media.log service
        //prctl(PR_SET_NAME, (unsigned long) "media.log", 0, 0, 0);
        // unfortunately ps ignores PR_SET_NAME for the main thread, so use this ugly hack
        strcpy(argv[0], "media.log");
        sp<ProcessState> proc(ProcessState::self());
        MediaLogService::instantiate();
        ProcessState::self()->startThreadPool();
        IPCThreadState::self()->joinThreadPool();
        ...
    } else {
        // all other audio services run in this process
        ...
        android::hardware::configureRpcThreadpool(4, false /*callerWillJoin*/);
        sp<ProcessState> proc(ProcessState::self());
        sp<IServiceManager> sm = defaultServiceManager();
        ALOGI("ServiceManager: %p", sm.get());
        AudioFlinger::instantiate();
        AudioPolicyService::instantiate();
        ...
        ProcessState::self()->startThreadPool();
        IPCThreadState::self()->joinThreadPool();
    }
}

In BinderService.h, instantiate() creates the service instance and registers it with the ServiceManager.

template<typename SERVICE>
class BinderService
{
public:
    static status_t publish(bool allowIsolated = false,
                            int dumpFlags = IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT) {
        sp<IServiceManager> sm(defaultServiceManager());
        return sm->addService(String16(SERVICE::getServiceName()), new SERVICE(),
                              allowIsolated, dumpFlags);
    }

    static void publishAndJoinThreadPool(
            bool allowIsolated = false,
            int dumpFlags = IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT) {
        publish(allowIsolated, dumpFlags);
        joinThreadPool();
    }

    static void instantiate() { publish(); }

    static status_t shutdown() { return NO_ERROR; }

private:
    static void joinThreadPool() {
        sp<ProcessState> ps(ProcessState::self());
        ps->startThreadPool();
        ps->giveThreadPoolName();
        IPCThreadState::self()->joinThreadPool();
    }
};

} // namespace android

How to start a native service is not covered in detail here; see the following link:

通过C++实现Android Native Service (Implementing an Android Native Service in C++) - 简书
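
As a rough illustration of the BinderService&lt;SERVICE&gt; template shown above, a minimal native service might look like the sketch below. MyNativeService and the service name "my.native.service" are made up for illustration; a real service would expose a proper Binder interface instead of a bare BBinder.

#include <binder/Binder.h>
#include <binder/BinderService.h>
#include <binder/IPCThreadState.h>
#include <binder/ProcessState.h>

using namespace android;

// Hypothetical service: BinderService<T> only needs T to be a Binder object
// and to provide a static getServiceName() used for ServiceManager registration.
class MyNativeService : public BinderService<MyNativeService>, public BBinder {
public:
    static const char *getServiceName() { return "my.native.service"; }
};

int main() {
    // Same pattern as AudioFlinger/AudioPolicyService in main_audioserver.cpp:
    // register with the ServiceManager, then join the binder thread pool.
    MyNativeService::instantiate();
    ProcessState::self()->startThreadPool();
    IPCThreadState::self()->joinThreadPool();
    return 0;
}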

2. AudioPolicyService initialization

AudioPolicyService controls the audio playback policy (for example, which device should render audio when a call comes in while headphones are plugged in) and manages audio devices.

The AudioPolicyService constructor is simple: it only initializes the main members.
Most of the work happens in onFirstRef(), such as creating the command threads and creating the AudioPolicyManager.

AudioPolicyService::AudioPolicyService()
    : BnAudioPolicyService(),
      mAudioPolicyManager(NULL),
      mAudioPolicyClient(NULL),
      mPhoneState(AUDIO_MODE_INVALID),
      mCaptureStateNotifier(false)
{
}

void AudioPolicyService::onFirstRef()
{
    {
        Mutex::Autolock _l(mLock);

        // start audio commands thread
        mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
        // start output activity command thread
        mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);

        // create the AudioPolicyClient
        mAudioPolicyClient = new AudioPolicyClient(this);
        // create the AudioPolicyManager
        mAudioPolicyManager = createAudioPolicyManager(mAudioPolicyClient);
    }
    // load audio processing modules
    sp<AudioPolicyEffects> audioPolicyEffects = new AudioPolicyEffects();
    sp<UidPolicy> uidPolicy = new UidPolicy(this);
    sp<SensorPrivacyPolicy> sensorPrivacyPolicy = new SensorPrivacyPolicy(this);
    {
        Mutex::Autolock _l(mLock);
        mAudioPolicyEffects = audioPolicyEffects;
        mUidPolicy = uidPolicy;
        mSensorPrivacyPolicy = sensorPrivacyPolicy;
    }
    uidPolicy->registerSelf();
    sensorPrivacyPolicy->registerSelf();
}

createAudioPolicyManager() is declared in AudioPolicyInterface.h, and AudioPolicyManager (AudioPolicyManager.h) inherits from AudioPolicyInterface.

class AudioPolicyManager : public AudioPolicyInterface, public AudioPolicyManagerObserver

createAudioPolicyManager() is implemented in AudioPolicyFactory.cpp:

#include <AudioPolicyManager.h>

namespace android {

extern "C" AudioPolicyInterface* createAudioPolicyManager(
        AudioPolicyClientInterface *clientInterface)
{
    AudioPolicyManager *apm = new AudioPolicyManager(clientInterface);
    status_t status = apm->initialize();
    if (status != NO_ERROR) {
        delete apm;
        apm = nullptr;
    }
    return apm;
}

extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface)
{
    delete interface;
}

} // namespace android

This creates the AudioPolicyManager and calls its initialize() method.
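
Because AudioPolicyService only depends on the extern "C" createAudioPolicyManager()/destroyAudioPolicyManager() symbols, a device maker can in principle supply its own policy manager through the same factory functions. The sketch below is a hypothetical illustration of that customization point (CustomAudioPolicyManager is invented here, and the exact library wiring varies by device); it simply mirrors the stock factory above.

#include <AudioPolicyManager.h>

namespace android {

// Hypothetical subclass: reuse the stock AudioPolicyManager and hook into it as needed.
class CustomAudioPolicyManager : public AudioPolicyManager {
public:
    explicit CustomAudioPolicyManager(AudioPolicyClientInterface *clientInterface)
        : AudioPolicyManager(clientInterface) {}
    // selected virtual methods inherited from AudioPolicyInterface could be overridden here
};

extern "C" AudioPolicyInterface* createAudioPolicyManager(
        AudioPolicyClientInterface *clientInterface)
{
    // Mirrors AudioPolicyFactory.cpp: construct, initialize, and bail out on failure.
    CustomAudioPolicyManager *apm = new CustomAudioPolicyManager(clientInterface);
    if (apm->initialize() != NO_ERROR) {
        delete apm;
        apm = nullptr;
    }
    return apm;
}

extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface)
{
    delete interface;
}

} // namespace android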

Without further ado, let's go straight to the code in AudioPolicyManager.cpp:

AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface,
                                       bool /*forTesting*/)
    :
    mUidCached(AID_AUDIOSERVER), // no need to call getuid(), there's only one of us running.
    mpClientInterface(clientInterface),
    mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
    mA2dpSuspended(false),
    mConfig(mHwModulesAll, mOutputDevicesAll, mInputDevicesAll, mDefaultOutputDevice),
    mAudioPortGeneration(1),
    mBeaconMuteRefCount(0),
    mBeaconPlayingRefCount(0),
    mBeaconMuted(false),
    mTtsOutputAvailable(false),
    mMasterMono(false),
    mMusicEffectOutput(AUDIO_IO_HANDLE_NONE)
{
}

AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface)
        : AudioPolicyManager(clientInterface, false /*forTesting*/)
{
    loadConfig();
}

void AudioPolicyManager::loadConfig()
{
    if (deserializeAudioPolicyXmlConfig(getConfig()) != NO_ERROR) {
        ALOGE("could not load audio policy configuration file, setting defaults");
        getConfig().setDefault();
    }
}

status_t AudioPolicyManager::initialize()
{
    {
        // load the policy engine library
        auto engLib = EngineLibrary::load(
                        "libaudiopolicyengine" + getConfig().getEngineLibraryNameSuffix() + ".so");
        if (!engLib) {
            ALOGE("%s: Failed to load the engine library", __FUNCTION__);
            return NO_INIT;
        }
        // create the engine instance
        mEngine = engLib->createEngine();
        if (mEngine == nullptr) {
            ALOGE("%s: Failed to instantiate the APM engine", __FUNCTION__);
            return NO_INIT;
        }
    }
    // register this manager as the engine's observer
    mEngine->setObserver(this);
    status_t status = mEngine->initCheck();  // initial check
    if (status != NO_ERROR) {
        LOG_FATAL("Policy engine not initialized(err=%d)", status);
        return status;
    }

    // after parsing the config, mOutputDevicesAll and mInputDevicesAll contain all known devices;
    // open all output streams needed to access attached devices
    onNewAudioModulesAvailableInt(nullptr /*newDevices*/);

    // make sure default device is reachable
    if (mDefaultOutputDevice == 0 || !mAvailableOutputDevices.contains(mDefaultOutputDevice)) {
        ALOGE_IF(mDefaultOutputDevice != 0, "Default device %s is unreachable",
                 mDefaultOutputDevice->toString().c_str());
        status = NO_INIT;
    }

    // If microphones address is empty, set it according to device type
    for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
        if (mAvailableInputDevices[i]->address().empty()) {
            if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BUILTIN_MIC) {
                mAvailableInputDevices[i]->setAddress(AUDIO_BOTTOM_MICROPHONE_ADDRESS);
            } else if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BACK_MIC) {
                mAvailableInputDevices[i]->setAddress(AUDIO_BACK_MICROPHONE_ADDRESS);
            }
        }
    }

    if (mPrimaryOutput == 0) {
        ALOGE("Failed to open primary output");
        status = NO_INIT;
    }

    // Silence ALOGV statements
    property_set("log.tag." LOG_TAG, "D");

    updateDevicesAndOutputs();
    return status;
}

The AudioPolicyManager constructor itself does very little; the main logic is in initialize(). A particularly important function called there is onNewAudioModulesAvailableInt().
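
Before the full listing, here is a paraphrased outline of what onNewAudioModulesAvailableInt() does (comments only, not the actual code):

// for each HW module in mHwModulesAll that is not yet in mHwModules:
//     load it via mpClientInterface->loadHwModule()       // ends up in AudioFlinger::loadHwModule()
//     for each output profile of the module:
//         pick a supported device that is part of mOutputDevicesAll
//         create a SwAudioOutputDescriptor and open() it   // -> AudioFlinger::openOutput()
//         attach reachable devices and add them to mAvailableOutputDevices
//         remember the primary output; keep non-direct outputs open, close direct ones
//     for each input profile of the module:
//         create an AudioInputDescriptor and open() it to validate the devices
//         attach reachable devices to mAvailableInputDevices, then close the input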

void AudioPolicyManager::onNewAudioModulesAvailableInt(DeviceVector *newDevices)
{
    for (const auto& hwModule : mHwModulesAll) {
        if (std::find(mHwModules.begin(), mHwModules.end(), hwModule) != mHwModules.end()) {
            continue;
        }
        // load the HW module through the client interface,
        // which calls AudioFlinger::loadHwModule()
        hwModule->setHandle(mpClientInterface->loadHwModule(hwModule->getName()));
        if (hwModule->getHandle() == AUDIO_MODULE_HANDLE_NONE) {
            ALOGW("could not open HW module %s", hwModule->getName());
            continue;
        }
        mHwModules.push_back(hwModule);
        // open all output streams needed to access attached devices
        // except for direct output streams that are only opened when they are actually
        // required by an app.
        // This also validates mAvailableOutputDevices list
        for (const auto& outProfile : hwModule->getOutputProfiles()) {
            if (!outProfile->canOpenNewIo()) {
                ALOGE("Invalid Output profile max open count %u for profile %s",
                      outProfile->maxOpenCount, outProfile->getTagName().c_str());
                continue;
            }
            if (!outProfile->hasSupportedDevices()) {
                ALOGW("Output profile contains no device on module %s", hwModule->getName());
                continue;
            }
            if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_TTS) != 0) {
                mTtsOutputAvailable = true;
            }

            const DeviceVector &supportedDevices = outProfile->getSupportedDevices();
            DeviceVector availProfileDevices = supportedDevices.filter(mOutputDevicesAll);
            sp<DeviceDescriptor> supportedDevice = 0;
            if (supportedDevices.contains(mDefaultOutputDevice)) {
                supportedDevice = mDefaultOutputDevice;
            } else {
                // choose first device present in profile's SupportedDevices also part of
                // mAvailableOutputDevices.
                if (availProfileDevices.isEmpty()) {
                    continue;
                }
                supportedDevice = availProfileDevices.itemAt(0);
            }
            if (!mOutputDevicesAll.contains(supportedDevice)) {
                continue;
            }
            // create an output descriptor from outProfile, passing it mpClientInterface
            sp<SwAudioOutputDescriptor> outputDesc =
                    new SwAudioOutputDescriptor(outProfile, mpClientInterface);
            audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
            // SwAudioOutputDescriptor::open() (AudioOutputDescriptor.cpp) ends up calling
            // AudioFlinger::openOutput()
            status_t status = outputDesc->open(nullptr, DeviceVector(supportedDevice),
                                               AUDIO_STREAM_DEFAULT,
                                               AUDIO_OUTPUT_FLAG_NONE, &output);
            if (status != NO_ERROR) {
                ALOGW("Cannot open output stream for devices %s on hw module %s",
                      supportedDevice->toString().c_str(), hwModule->getName());
                continue;
            }
            for (const auto &device : availProfileDevices) {
                // give a valid ID to an attached device once confirmed it is reachable
                if (!device->isAttached()) {
                    device->attach(hwModule);
                    mAvailableOutputDevices.add(device);
                    device->setEncapsulationInfoFromHal(mpClientInterface);
                    if (newDevices) newDevices->add(device);
                    setEngineDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
                }
            }
            if (mPrimaryOutput == 0 &&
                    outProfile->getFlags() & AUDIO_OUTPUT_FLAG_PRIMARY) {
                mPrimaryOutput = outputDesc;
            }
            if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
                outputDesc->close();
            } else {
                // register outputDesc under its output handle
                addOutput(output, outputDesc);
                setOutputDevices(outputDesc,
                                 DeviceVector(supportedDevice),
                                 true,
                                 0,
                                 NULL);
            }
        }
        // open input streams needed to access attached devices to validate
        // mAvailableInputDevices list
        for (const auto& inProfile : hwModule->getInputProfiles()) {
            if (!inProfile->canOpenNewIo()) {
                ALOGE("Invalid Input profile max open count %u for profile %s",
                      inProfile->maxOpenCount, inProfile->getTagName().c_str());
                continue;
            }
            if (!inProfile->hasSupportedDevices()) {
                ALOGW("Input profile contains no device on module %s", hwModule->getName());
                continue;
            }
            // chose first device present in profile's SupportedDevices also part of
            // available input devices
            const DeviceVector &supportedDevices = inProfile->getSupportedDevices();
            DeviceVector availProfileDevices = supportedDevices.filter(mInputDevicesAll);
            if (availProfileDevices.isEmpty()) {
                ALOGE("%s: Input device list is empty!", __FUNCTION__);
                continue;
            }
            sp<AudioInputDescriptor> inputDesc =
                    new AudioInputDescriptor(inProfile, mpClientInterface);

            audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
            status_t status = inputDesc->open(nullptr,
                                              availProfileDevices.itemAt(0),
                                              AUDIO_SOURCE_MIC,
                                              AUDIO_INPUT_FLAG_NONE,
                                              &input);
            if (status != NO_ERROR) {
                ALOGW("Cannot open input stream for device %s on hw module %s",
                      availProfileDevices.toString().c_str(),
                      hwModule->getName());
                continue;
            }
            for (const auto &device : availProfileDevices) {
                // give a valid ID to an attached device once confirmed it is reachable
                if (!device->isAttached()) {
                    device->attach(hwModule);
                    device->importAudioPortAndPickAudioProfile(inProfile, true);
                    mAvailableInputDevices.add(device);
                    if (newDevices) newDevices->add(device);
                    setEngineDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
                }
            }
            inputDesc->close();
        }
    }
}

Here a SwAudioOutputDescriptor is created; this class is declared in AudioOutputDescriptor.h.

// Audio output driven by a software mixer in audio flinger.
class SwAudioOutputDescriptor: public AudioOutputDescriptor
{
public:
    ...
    status_t open(const audio_config_t *config,
                  const DeviceVector &devices,
                  audio_stream_type_t stream,
                  audio_output_flags_t flags,
                  audio_io_handle_t *output);

    // Called when a stream is about to be started
    // Note: called before setClientActive(true);
    status_t start();

    // Called after a stream is stopped.
    // Note: called after setClientActive(false);
    void stop();

    void close();

    status_t openDuplicating(const sp<SwAudioOutputDescriptor>& output1,
                             const sp<SwAudioOutputDescriptor>& output2,
                             audio_io_handle_t *ioHandle);
    ...
};

The open() function is implemented in AudioOutputDescriptor.cpp:

status_t SwAudioOutputDescriptor::open(const audio_config_t *config,
                                       const DeviceVector &devices,
                                       audio_stream_type_t stream,
                                       audio_output_flags_t flags,
                                       audio_io_handle_t *output)
{
    mDevices = devices;
    sp<DeviceDescriptor> device = devices.getDeviceForOpening();
    LOG_ALWAYS_FATAL_IF(device == nullptr,
                        "%s failed to get device descriptor for opening "
                        "with the requested devices, all device types: %s",
                        __func__, dumpDeviceTypes(devices.types()).c_str());

    audio_config_t lConfig;
    if (config == nullptr) {
        lConfig = AUDIO_CONFIG_INITIALIZER;
        lConfig.sample_rate = mSamplingRate;
        lConfig.channel_mask = mChannelMask;
        lConfig.format = mFormat;
    } else {
        lConfig = *config;
    }

    // if the selected profile is offloaded and no offload info was specified,
    // create a default one
    if ((mProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) &&
            lConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) {
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
        lConfig.offload_info = AUDIO_INFO_INITIALIZER;
        lConfig.offload_info.sample_rate = lConfig.sample_rate;
        lConfig.offload_info.channel_mask = lConfig.channel_mask;
        lConfig.offload_info.format = lConfig.format;
        lConfig.offload_info.stream_type = stream;
        lConfig.offload_info.duration_us = -1;
        lConfig.offload_info.has_video = true; // conservative
        lConfig.offload_info.is_streaming = true; // likely
    }

    mFlags = (audio_output_flags_t)(mFlags | flags);

    ALOGV("opening output for device %s profile %p name %s",
          mDevices.toString().c_str(), mProfile.get(), mProfile->getName().c_str());

    // the familiar openOutput()
    status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
                                                   output,
                                                   &lConfig,
                                                   device,
                                                   &mLatency,
                                                   mFlags);

    if (status == NO_ERROR) {
        LOG_ALWAYS_FATAL_IF(*output == AUDIO_IO_HANDLE_NONE,
                            "%s openOutput returned output handle %d for device %s, "
                            "selected device %s for opening",
                            __FUNCTION__, *output, devices.toString().c_str(),
                            device->toString().c_str());
        mSamplingRate = lConfig.sample_rate;
        mChannelMask = lConfig.channel_mask;
        mFormat = lConfig.format;
        mId = PolicyAudioPort::getNextUniqueId();
        mIoHandle = *output;
        mProfile->curOpenCount++;
    }

    return status;
}

The mClientInterface->openOutput() call here ultimately lands in AudioFlinger.cpp. mClientInterface is passed in through the SwAudioOutputDescriptor constructor:

// create an output descriptor from outProfile, passing it mpClientInterface
sp<SwAudioOutputDescriptor> outputDesc =
        new SwAudioOutputDescriptor(outProfile, mpClientInterface);
AudioPolicyClientInterface *mpClientInterface;  // audio policy client interface
sp<SwAudioOutputDescriptor> mPrimaryOutput;     // primary output descriptor
// list of descriptors for outputs currently opened

Now let's look at the AudioPolicyClientInterface class.

AudioPolicyClientInterface.h

// Audio Policy client Interface
class AudioPolicyClientInterface
{
public:
    virtual ~AudioPolicyClientInterface() {}

    //
    // Audio HW module functions
    //

    // loads a HW module.
    virtual audio_module_handle_t loadHwModule(const char *name) = 0;

    //
    // Audio output Control functions
    //

    // opens an audio output with the requested parameters. The parameter values can indicate to use the default values
    // in case the audio policy manager has no specific requirements for the output being opened.
    // When the function returns, the parameter values reflect the actual values used by the audio hardware output stream.
    // The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
    virtual status_t openOutput(audio_module_handle_t module,
                                audio_io_handle_t *output,
                                audio_config_t *config,
                                const sp<DeviceDescriptorBase>& device,
                                uint32_t *latencyMs,
                                audio_output_flags_t flags) = 0;
    // creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by
    // a special mixer thread in the AudioFlinger.
    virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
                                                  audio_io_handle_t output2) = 0;
    // closes the output stream
    virtual status_t closeOutput(audio_io_handle_t output) = 0;
    // suspends the output. When an output is suspended, the corresponding audio hardware output stream is placed in
    // standby and the AudioTracks attached to the mixer thread are still processed but the output mix is discarded.
    virtual status_t suspendOutput(audio_io_handle_t output) = 0;
    // restores a suspended output.
    virtual status_t restoreOutput(audio_io_handle_t output) = 0;

    //
    // Audio input Control functions
    //

    // opens an audio input
    virtual status_t openInput(audio_module_handle_t module,
                               audio_io_handle_t *input,
                               audio_config_t *config,
                               audio_devices_t *device,
                               const String8& address,
                               audio_source_t source,
                               audio_input_flags_t flags) = 0;
    ...
};

openOutput() is implemented in AudioPolicyClientImpl.cpp:

status_t AudioPolicyService::AudioPolicyClient::openOutput(audio_module_handle_t module,
                                                           audio_io_handle_t *output,
                                                           audio_config_t *config,
                                                           const sp<DeviceDescriptorBase>& device,
                                                           uint32_t *latencyMs,
                                                           audio_output_flags_t flags)
{
    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
    if (af == 0) {
        ALOGW("%s: could not get AudioFlinger", __func__);
        return PERMISSION_DENIED;
    }
    return af->openOutput(module, output, config, device, latencyMs, flags);
}

It obtains the AudioFlinger binder proxy and calls its openOutput() function.
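
For context, the sketch below shows roughly what obtaining that proxy amounts to: looking up the "media.audio_flinger" service in the ServiceManager and casting it to IAudioFlinger. This is only an illustrative approximation; the real AudioSystem::get_audio_flinger() additionally caches the proxy and registers a binder death notification.

#include <binder/IServiceManager.h>
#include <media/IAudioFlinger.h>

using namespace android;

// Illustrative sketch, not the actual AudioSystem implementation.
static sp<IAudioFlinger> getAudioFlingerSketch() {
    sp<IServiceManager> sm = defaultServiceManager();
    // AudioFlinger registers itself under the name "media.audio_flinger".
    sp<IBinder> binder = sm->getService(String16("media.audio_flinger"));
    if (binder == nullptr) {
        return nullptr;
    }
    return interface_cast<IAudioFlinger>(binder);
}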

status_t AudioFlinger::openOutput(audio_module_handle_t module,
                                  audio_io_handle_t *output,
                                  audio_config_t *config,
                                  const sp<DeviceDescriptorBase>& device,
                                  uint32_t *latencyMs,
                                  audio_output_flags_t flags)
{
    ALOGI("openOutput() this %p, module %d Device %s, SamplingRate %d, Format %#08x, "
          "Channels %#x, flags %#x",
          this, module,
          device->toString().c_str(),
          config->sample_rate,
          config->format,
          config->channel_mask,
          flags);

    audio_devices_t deviceType = device->type();
    const String8 address = String8(device->address().c_str());

    if (deviceType == AUDIO_DEVICE_NONE) {
        return BAD_VALUE;
    }

    Mutex::Autolock _l(mLock);

    sp<ThreadBase> thread = openOutput_l(module, output, config, deviceType, address, flags);
    if (thread != 0) {
        if ((flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) == 0) {
            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
            *latencyMs = playbackThread->latency();

            // notify client processes of the new output creation
            playbackThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);

            // the first primary output opened designates the primary hw device if no HW module
            // named "primary" was already loaded.
            AutoMutex lock(mHardwareLock);
            if ((mPrimaryHardwareDev == nullptr) && (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
                ALOGI("Using module %d as the primary audio interface", module);
                mPrimaryHardwareDev = playbackThread->getOutput()->audioHwDev;

                mHardwareStatus = AUDIO_HW_SET_MODE;
                mPrimaryHardwareDev->hwDevice()->setMode(mMode);
                mHardwareStatus = AUDIO_HW_IDLE;
            }
        } else {
            MmapThread *mmapThread = (MmapThread *)thread.get();
            mmapThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
        }
        return NO_ERROR;
    }

    return NO_INIT;
}

Next, let's look at the openOutput_l() function:

sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
                                                        audio_io_handle_t *output,
                                                        audio_config_t *config,
                                                        audio_devices_t deviceType,
                                                        const String8& address,
                                                        audio_output_flags_t flags)
{
    // AudioHwDevice represents an opened audio interface device
    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, deviceType);
    if (outHwDev == NULL) {
        return 0;
    }

    if (*output == AUDIO_IO_HANDLE_NONE) {
        *output = nextUniqueId(AUDIO_UNIQUE_ID_USE_OUTPUT);
    } else {
        // Audio Policy does not currently request a specific output handle.
        // If this is ever needed, see openInput_l() for example code.
        ALOGE("openOutput_l requested output handle %d is not AUDIO_IO_HANDLE_NONE", *output);
        return 0;
    }

    mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;

    // FOR TESTING ONLY:
    // This if statement allows overriding the audio policy settings
    // and forcing a specific format or channel mask to the HAL/Sink device for testing.
    if (!(flags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT))) {
        // Check only for Normal Mixing mode
        if (kEnableExtendedPrecision) {
            // Specify format (uncomment one below to choose)
            //config->format = AUDIO_FORMAT_PCM_FLOAT;
            //config->format = AUDIO_FORMAT_PCM_24_BIT_PACKED;
            //config->format = AUDIO_FORMAT_PCM_32_BIT;
            //config->format = AUDIO_FORMAT_PCM_8_24_BIT;
            // ALOGV("openOutput_l() upgrading format to %#08x", config->format);
        }
        if (kEnableExtendedChannels) {
            // Specify channel mask (uncomment one below to choose)
            //config->channel_mask = audio_channel_out_mask_from_count(4);  // for USB 4ch
            //config->channel_mask = audio_channel_mask_from_representation_and_bits(
            //        AUDIO_CHANNEL_REPRESENTATION_INDEX, (1 << 4) - 1);  // another 4ch example
        }
    }

    // Open an output stream on the device. This yields an audio_stream_out_t and an
    // audio_devices_t; internally an AudioStreamOut (new AudioStreamOut(this, flags))
    // is created to wrap them.
    AudioStreamOut *outputStream = NULL;
    status_t status = outHwDev->openOutputStream(
            &outputStream,
            *output,
            deviceType,
            flags,
            config,
            address.string());

    mHardwareStatus = AUDIO_HW_IDLE;

    if (status == NO_ERROR) {
        if (flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) {
            sp<MmapPlaybackThread> thread =
                    new MmapPlaybackThread(this, *output, outHwDev, outputStream, mSystemReady);
            mMmapThreads.add(*output, thread);
            ALOGV("openOutput_l() created mmap playback thread: ID %d thread %p",
                  *output, thread.get());
            return thread;
        } else {
            // create the playback thread
            sp<PlaybackThread> thread;
            if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
                thread = new OffloadThread(this, outputStream, *output, mSystemReady);
                ALOGV("openOutput_l() created offload output: ID %d thread %p",
                      *output, thread.get());
            } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
                    || !isValidPcmSinkFormat(config->format)
                    || !isValidPcmSinkChannelMask(config->channel_mask)) {
                thread = new DirectOutputThread(this, outputStream, *output, mSystemReady);
                ALOGV("openOutput_l() created direct output: ID %d thread %p",
                      *output, thread.get());
            } else {
                thread = new MixerThread(this, outputStream, *output, mSystemReady);
                ALOGV("openOutput_l() created mixer output: ID %d thread %p",
                      *output, thread.get());
            }
            // add it to mPlaybackThreads
            mPlaybackThreads.add(*output, thread);
            mPatchPanel.notifyStreamOpened(outHwDev, *output);
            return thread;
        }
    }

    return 0;
}

Within openOutput_l(), the findSuitableHwDev_l() function is important.

It will not be analyzed here for now.
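
To wrap up, the call chain traced in this article, from policy initialization down to the creation of AudioFlinger playback threads, can be summarized as follows:

// Output-side call chain covered above (Android 11):
//
// AudioPolicyService::onFirstRef()
//   -> createAudioPolicyManager()                         // AudioPolicyFactory.cpp
//     -> AudioPolicyManager::initialize()
//       -> AudioPolicyManager::onNewAudioModulesAvailableInt()
//         -> AudioPolicyClientInterface::loadHwModule()   // -> AudioFlinger::loadHwModule()
//         -> SwAudioOutputDescriptor::open()              // AudioOutputDescriptor.cpp
//           -> AudioPolicyClient::openOutput()            // AudioPolicyClientImpl.cpp
//             -> IAudioFlinger::openOutput()              // binder call
//               -> AudioFlinger::openOutput()
//                 -> AudioFlinger::openOutput_l()
//                   -> findSuitableHwDev_l()              // pick the AudioHwDevice
//                   -> AudioHwDevice::openOutputStream()  // open the HAL stream
//                   -> new MixerThread / DirectOutputThread / OffloadThread / MmapPlaybackThread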

Finally

That is all of the content 香蕉星星 has collected and organized on exploring the Android 11 Audio framework (Part 1). For more related Android content, please search the other articles on 靠谱客.
