Android QQ for TV microphone debugging notes

1. The default sampling rate of the Android system is 8000 Hz.
2. The QQ application uses a sampling rate of 44100 Hz or 16000 Hz.
3. Before actually starting a voice chat, the QQ for TV application probes for the best sampling rate, which means it opens and closes the capture device several times in quick succession. Note: some drivers misbehave under such rapid open/close cycles. In that case, consider keeping a timestamp in the HAL: if the interval between the HAL's close and the next open call is under 1 s, do not actually close the device, record that state, and skip the real open when the device is opened again (see the sketch below).
4. Resampling is done by the AudioFlinger service; the driver and the HAL module are not involved.
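The open/close debouncing suggested in item 3 can be sketched roughly as follows. This is only an illustration; the names (struct hw_input, input_open, input_close) are hypothetical and not taken from any real HAL:

// Sketch only: debounce rapid close/open cycles inside a vendor audio HAL.
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static int64_t now_ms(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

struct hw_input {
    bool    hw_opened;      // real hardware state
    int64_t last_close_ms;  // when close was last requested
};

static void input_close(struct hw_input *in) {
    // Only remember when the close was requested; do not touch the hardware yet.
    // A real HAL would still close the device once it stays idle for ~1 s.
    in->last_close_ms = now_ms();
}

static int input_open(struct hw_input *in) {
    if (in->hw_opened && (now_ms() - in->last_close_ms) < 1000) {
        // Re-opened within 1 s of the last close: keep the hardware as it is,
        // so QQ for TV's fast open/close probing never reaches the driver.
        return 0;
    }
    // otherwise really (re)configure the hardware here
    in->hw_opened = true;
    return 0;
}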
I. Call flow
E/AudioRecord( 2019): Could not get audio input for record source 1
E/AudioRecord-JNI( 2019): Error creating AudioRecord instance: initialization check failed.
E/AudioRecord-Java( 2019): [ android.media.AudioRecord ] Error code -20 when initializing native AudioRecord object.
The errors above mean that the sampling rate, sample format (bit depth) or channel configuration requested by the application is not present in the profile configured for the current mic, so no matching input profile can be found.
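The profile being searched is built from the device's audio_policy.conf (loaded from /system/etc/audio_policy.conf or /vendor/etc/audio_policy.conf). A rough, illustrative input section that would accept both 16000 Hz and 44100 Hz requests looks like this (values are examples, not taken from any particular device):

audio_hw_modules {
  primary {
    inputs {
      primary {
        sampling_rates 8000|16000|44100|48000
        channel_masks AUDIO_CHANNEL_IN_MONO|AUDIO_CHANNEL_IN_STEREO
        formats AUDIO_FORMAT_PCM_16_BIT
        devices AUDIO_DEVICE_IN_BUILTIN_MIC|AUDIO_DEVICE_IN_WIRED_HEADSET
      }
    }
  }
}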
frameworks/base/media/java/android/media/AudioRecord.java
public AudioRecord(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat, 
            int bufferSizeInBytes)
    throws IllegalArgumentException {
  // argument checks elided (audioParamCheck / audioBuffSizeCheck);
  // session is an int[1] used to return the audio session id
  int initResult = native_setup( new WeakReference<AudioRecord>(this), 
                mRecordSource, mSampleRate, mChannels, mAudioFormat, mNativeBufferSizeInBytes,
                session);
  if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing native AudioRecord object.");
            return; // with mState == STATE_UNINITIALIZED
  }
}
frameworks/base/core/jni/android_media_AudioRecord.cpp
{"native_setup",         "(Ljava/lang/Object;IIIII[I)I",
                                       (void *)android_media_AudioRecord_setup},
static int
android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
        jint source, jint sampleRateInHertz, jint channels,
        jint audioFormat, jint buffSizeInBytes, jintArray jSession){
  sp<AudioRecord> lpRecorder = new AudioRecord();
  // format, frameCount, recorderCallback and lpCallbackData are derived from
  // the Java-side arguments (elided here)
  lpRecorder->set((audio_source_t) source,
        sampleRateInHertz,
        format,        // word length, PCM
        channels,
        frameCount,
        recorderCallback,// callback_t
        lpCallbackData,// void* user
        0,             // notificationFrames,
        true,          // threadCanCallJava
        sessionId);
  if (lpRecorder->initCheck() != NO_ERROR) {
    ALOGE("Error creating AudioRecord instance: initialization check failed.");
    goto native_init_failure;
  }  
}
frameworks/av/media/libmedia/AudioRecord.cpp
status_t AudioRecord::set(
        audio_source_t inputSource,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        int frameCount,
        callback_t cbf,
        void* user,
        int notificationFrames,
        bool threadCanCallJava,
        int sessionId){
  audio_io_handle_t input = AudioSystem::getInput(inputSource,
                                                    sampleRate,
                                                    format,
                                                    channelMask,
                                                    mSessionId);
  if (input == 0) {
    ALOGE("Could not get audio input for record source %d", inputSource);
    return BAD_VALUE;
  }
}
audio_io_handle_t AudioSystem::getInput(audio_source_t inputSource,
                                    uint32_t samplingRate,
                                    audio_format_t format,
                                    audio_channel_mask_t channelMask,
                                    int sessionId){
  const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
  if (aps == 0) return 0;
  return aps->getInput(inputSource, samplingRate, format, channelMask, sessionId);
}
The call then crosses into AudioPolicyService via Binder inter-process communication.
frameworks/av/services/audioflinger/AudioPolicyService.cpp
audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource,
                                    uint32_t samplingRate,
                                    audio_format_t format,
                                    audio_channel_mask_t channelMask,
                                    int audioSession){
  audio_io_handle_t input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate,
                                                       format, channelMask, (audio_in_acoustics_t) 0);
  // mpAudioPolicy is the audio policy HAL interface, loaded earlier in the
  // AudioPolicyService constructor roughly as follows:
  //   rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
  //   rc = audio_policy_dev_open(module, &mpAudioPolicyDev);
}
hardware/libhardware_legacy/audio/audio_policy_hal.cpp
static audio_io_handle_t ap_get_input(struct audio_policy *pol, audio_source_t inputSource,
                                      uint32_t sampling_rate,
                                      audio_format_t format,
                                      audio_channel_mask_t channelMask,
                                      audio_in_acoustics_t acoustics){
  struct legacy_audio_policy *lap = to_lap(pol);
  return lap->apm->getInput((int) inputSource, sampling_rate, (int) format, channelMask,
                              (AudioSystem::audio_in_acoustics)acoustics);
}
hardware/libhardware_legacy/audio/AudioPolicyManagerBase.cpp
audio_io_handle_t AudioPolicyManagerBase::getInput(int inputSource,
                                    uint32_t samplingRate,
                                    uint32_t format,
                                    uint32_t channelMask,
                                    AudioSystem::audio_in_acoustics acoustics){
  audio_devices_t device = getDeviceForInputSource(inputSource);
  // pick the input device (built-in mic, headset mic, ...) for this source
  IOProfile *profile = getInputProfile(device,
                                         samplingRate,
                                         format,
                                         channelMask);
  // The profiles come from audio_policy.conf; the requested sampling rate,
  // format and channel mask must all match one of them.
  if (profile == NULL) {
    ALOGW("getInput() could not find profile for device %04x, samplingRate %d, format %d,"
                "channelMask %04x",
                device, samplingRate, format, channelMask);
    return 0;
  }
  // an AudioInputDescriptor (inputDesc) is created from the profile, then the input is opened
  input = mpClientInterface->openInput(profile->mModule->mHandle,
                                    &inputDesc->mDevice,
                                    &inputDesc->mSamplingRate,
                                    &inputDesc->mFormat,
                                    &inputDesc->mChannelMask);
}
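For reference, the lookup that fails in the case above is AudioPolicyManagerBase::getInputProfile(). Paraphrased from AOSP 4.2-era sources, it simply returns the first profile parsed from audio_policy.conf that is compatible with the requested device, sampling rate, format and channel mask:

AudioPolicyManagerBase::IOProfile *AudioPolicyManagerBase::getInputProfile(
        audio_devices_t device, uint32_t samplingRate,
        uint32_t format, uint32_t channelMask)
{
    for (size_t i = 0; i < mHwModules.size(); i++) {
        if (mHwModules[i]->mHandle == 0) {
            continue;   // HW module not opened
        }
        for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++) {
            IOProfile *profile = mHwModules[i]->mInputProfiles[j];
            if (profile->isCompatibleProfile(device, samplingRate, format, channelMask,
                                             (audio_output_flags_t)0)) {
                return profile;
            }
        }
    }
    return NULL;    // -> the "could not find profile" warning above
}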

frameworks/av/services/audioflinger/AudioPolicyService.cpp
// entry in the aps_ops callback table handed to the policy HAL
// (mpClientInterface->openInput() ends up here):
open_input_on_module  : aps_open_input_on_module,
static audio_io_handle_t aps_open_input_on_module(void *service,
                                                  audio_module_handle_t module,
                                                  audio_devices_t *pDevices,
                                                  uint32_t *pSamplingRate,
                                                  audio_format_t *pFormat,
                                                  audio_channel_mask_t *pChannelMask){
  sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
  if (af == 0) {
    ALOGW("%s: could not get AudioFlinger", __func__);
    return 0;
  }
  return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask);
}
frameworks/av/services/audioflinger/AudioFlinger.cpp
audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module,
                                          audio_devices_t *pDevices,
                                          uint32_t *pSamplingRate,
                                          audio_format_t *pFormat,
                                          audio_channel_mask_t *pChannelMask){
  status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream);
  if (status == NO_ERROR && inStream != NULL) {
    thread = new RecordThread(this,
                                  input,
                                  reqSamplingRate,
                                  reqChannels,
                                  id,
                                  device);
  }
}
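One detail worth noting here (it is where item 4 of the introduction comes from): if the HAL rejects the requested configuration, AudioFlinger::openInput() retries with the sampling rate and channel mask the HAL proposed, and leaves the conversion to the RecordThread. Roughly, paraphrased from AOSP 4.2-era sources, the code right after the first open_input_stream() call looks like this:

// Retry with the parameters the HAL wrote back into 'config' if the software
// conversion is feasible; the RecordThread created below will then resample
// (see readInputParameters()).
if (status == BAD_VALUE &&
    reqFormat == config.format && config.format == AUDIO_FORMAT_PCM_16_BIT &&
    (config.sample_rate <= 2 * reqSamplingRate) &&
    (popcount(config.channel_mask) <= FCC_2) && (popcount(reqChannels) <= FCC_2)) {
    ALOGV("openInput() reopening with proposed sampling rate and channel mask");
    inStream = NULL;
    status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream);
}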
AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger,
                                         AudioStreamIn *input,
                                         uint32_t sampleRate,
                                         audio_channel_mask_t channelMask,
                                         audio_io_handle_t id,
                                         audio_devices_t device) :
    ThreadBase(audioFlinger, id, AUDIO_DEVICE_NONE, device, RECORD),
    mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL),
    // mRsmpInIndex and mInputBytes set by readInputParameters()
    mReqChannelCount(popcount(channelMask)),
    mReqSampleRate(sampleRate)
    // mBytesRead is only meaningful while active, and so is cleared in start()
    // (but might be better to also clear here for dump?)
{
  readInputParameters();
}
void AudioFlinger::RecordThread::readInputParameters(){
  // the rate the HAL is actually running at (e.g. 8000 Hz), vs. mReqSampleRate from the app
  mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common);
  if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2){
    mResampler = AudioResampler::create(16, channelCount, mReqSampleRate);
    mResampler->setSampleRate(mSampleRate);
  }
  // Key point: when the HAL's rate differs from the requested rate, the resampler
  // is created here, inside AudioFlinger; the driver/HAL takes no part in resampling.
}
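Inside threadLoop() the resampler then pulls frames from the HAL through the AudioBufferProvider interface that the RecordThread itself implements. Heavily condensed and paraphrased from the same sources:

// Resampling branch of RecordThread::threadLoop(), heavily condensed.
// 'this' is passed as the AudioBufferProvider: RecordThread::getNextBuffer() is
// where mInput->stream->read() actually pulls PCM from the HAL at mSampleRate.
memset(mRsmpOutBuffer, 0, framesOut * 2 * sizeof(int32_t));
mResampler->resample(mRsmpOutBuffer, framesOut, this);
// convert the 32-bit intermediate buffer back to 16-bit PCM for the client buffer
ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut);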
bool AudioFlinger::RecordThread::threadLoop(){
  if (CC_LIKELY(mActiveTrack->getNextBuffer(&buffer) == NO_ERROR)) {
    // got space in the client's shared buffer: read from the HAL
    // (directly, or through the resampler) and fill it
  }
  else {
    // no space left in the client's buffer: the app is not draining it fast enough
    nsecs_t now = systemTime();
    if ((now - lastWarning) > kWarningThrottleNs) {
      ALOGW("RecordThread: buffer overflow");
      lastWarning = now;
    }
  }
}
II. How the application pulls data out of the buffer
W/AudioFlinger(  918): RecordThread: buffer overflow
W/AudioRecord( 2238): obtainBuffer timed out (is the CPU pegged?) user=0002f260, server=0002f260
Both buffer-related warnings have to be considered. The second one in particular (note that user and server are stuck at the same value, so no new frames are arriving in the shared buffer) points to the lower layers: the audio data coming up from the HAL/driver should be suspected.
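When chasing the second warning, one of the first things to verify is the HAL's in_read() implementation: it has to block until data is available and report the number of bytes actually captured, otherwise the RecordThread never advances the shared buffer. A minimal sketch of the expected shape, assuming a tinyalsa-based HAL (struct stream_in and its pcm handle are hypothetical vendor-side names):

// Sketch only: what a healthy capture read in a tinyalsa-based HAL roughly looks like.
#include <errno.h>
#include <unistd.h>
#include <tinyalsa/asoundlib.h>
#include <hardware/audio.h>

struct stream_in {
    struct audio_stream_in stream;   // must be the first member
    struct pcm *pcm;                 // tinyalsa capture handle
};

static ssize_t in_read(struct audio_stream_in *stream, void *buffer, size_t bytes)
{
    struct stream_in *in = (struct stream_in *)stream;

    // Blocking capture: pcm_read() returns 0 on success, negative on error.
    if (pcm_read(in->pcm, buffer, bytes) != 0) {
        // Returning 0 (or garbage) here forever is exactly what starves
        // AudioRecord::obtainBuffer() and produces the "timed out" warning.
        usleep(10 * 1000);
        return -EIO;
    }
    return bytes;   // report the number of bytes actually captured
}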
frameworks/base/media/java/android/media/AudioRecord.java
public int read(byte[] audioData, int offsetInBytes, int sizeInBytes) {
  if (mState != STATE_INITIALIZED) {
    return ERROR_INVALID_OPERATION;
  }      
  if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0) 
                || (offsetInBytes + sizeInBytes > audioData.length)) {
    return ERROR_BAD_VALUE;
  }
  return native_read_in_byte_array(audioData, offsetInBytes, sizeInBytes);
}
frameworks/base/core/jni/android_media_AudioRecord.cpp
static jint android_media_AudioRecord_readInByteArray(JNIEnv *env,  jobject thiz,
                                                        jbyteArray javaAudioData,
                                                        jint offsetInBytes, jint sizeInBytes) {
  sp<AudioRecord> lpRecorder = getAudioRecord(env, thiz);
  // recordBuff points into javaAudioData (obtained via GetByteArrayElements, elided)
  ssize_t readSize = lpRecorder->read(recordBuff + offsetInBytes,
                                        sizeInBytes > (jint)recorderBuffSize ?
                                            (jint)recorderBuffSize : sizeInBytes );
}
frameworks/av/media/libmedia/AudioRecord.cpp
ssize_t AudioRecord::read(void* buffer, size_t userSize){
  status_t err = obtainBuffer(&audioBuffer, ((2 * MAX_RUN_TIMEOUT_MS) / WAIT_PERIOD_MS));
}
status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount){
  cblk->waitTimeMs += waitTimeMs;
  if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) {
    ALOGW(   "obtainBuffer timed out (is the CPU pegged?) "
                            "user=%08x, server=%08x", cblk->user, cblk->server);
  }
  audioBuffer->raw         = (int8_t*)cblk->buffer(u);
  // cblk->buffer(u) points into the shared memory region that AudioFlinger's
  // RecordThread fills via mActiveTrack->getNextBuffer(&buffer) with the data
  // it read from the HAL
}