Analysis of the recording-device flow when linphone places an outgoing audio call

Initiating a call

The entry point for initiating a call request is inviteAddressWithParams(address, params); the JNI interface is in linphonecore_jni.cc, line 5023.

 

Following the flow, we eventually reach linphone_core_start_invite(), in linphonecore.c, line 3161.

int linphone_core_start_invite(LinphoneCore *lc, LinphoneCall *call, const LinphoneAddress* destination /* = NULL if to be taken from the call log */){
   ms_message("linphone_core_start_invite");
   int err;
   char *real_url,*barmsg;
   char *from;
   /*try to be best-effort in giving real local or routable contact address */
   linphone_call_set_contact_op(call);

   linphone_core_stop_dtmf_stream(lc);
   linphone_call_make_local_media_description(call);

   if (lc->ringstream==NULL) {
      if (lc->sound_conf.play_sndcard && lc->sound_conf.capt_sndcard){
         /*give a chance a set card prefered sampling frequency*/
         if (call->localdesc->streams[0].max_rate>0) {
            ms_snd_card_set_preferred_sample_rate(lc->sound_conf.play_sndcard, call->localdesc->streams[0].max_rate);
         }
         if (!lc->use_files){
             ms_message("start invite call audio_stream_prepare_sound");
            audio_stream_prepare_sound(call->audiostream,lc->sound_conf.play_sndcard,lc->sound_conf.capt_sndcard);
         }
      }
   }
   real_url=linphone_address_as_string( destination ? destination : call->log->to);
   from=linphone_address_as_string(call->log->from);

   if (!lc->sip_conf.sdp_200_ack){
      /*we are offering, set local media description before sending the call*/
      sal_call_set_local_media_description(call->op,call->localdesc);
   }
   
   barmsg=ortp_strdup_printf("%s %s", _("Contacting"), real_url);
   linphone_core_notify_display_status(lc,barmsg);
   ms_free(barmsg);
   
   err=sal_call(call->op,from,real_url);
   
   if (err < 0){
      if (call->state != LinphoneCallError &&
         call->state != LinphoneCallReleased){
         /*sal_call() may invoke call_failure() and call_released() SAL callbacks synchronously,
          * in which case there is no need to perform a state change here.*/
         linphone_core_notify_display_status(lc,_("Could not call"));
         linphone_call_stop_media_streams(call);
         linphone_call_set_state(call,LinphoneCallError,"Call failed");
      }
      goto end;
   }
   if (lc->sip_conf.sdp_200_ack){
      /*we are NOT offering, set local media description after sending the call so that we are ready to
       process the remote offer when it will arrive*/
      sal_call_set_local_media_description(call->op,call->localdesc);
   }
   call->log->call_id=ms_strdup(sal_op_get_call_id(call->op)); /*must be known at that time*/
   linphone_call_set_state(call,LinphoneCallOutgoingProgress,"Outgoing call in progress");
   
end:
   ms_free(real_url);
   ms_free(from);
   return err;
}

1. Set the local media description: linphone_call_make_local_media_description(call).

2. Check whether ringstream is NULL, i.e. whether the ringing audio stream has already been built; if not, prepare the audio path with

audio_stream_prepare_sound();

3. Before the remote party has responded to the SIP message, set the SIP media parameters with sal_call_set_local_media_description();

4. Initiate the call with sal_call();

5. Depending on the sdp_200_ack setting, set the local media description again after the call has been sent (sal_call_set_local_media_description), so that the remote offer can be processed when it arrives;

6. Update the call state to LinphoneCallOutgoingProgress.

 

SIP message handling during the call is not covered here, so we only look at steps 1, 2 and 3.

 

Step 1: linphone_call_make_local_media_description(), linphonecall.c, line 731

The code is too long to list in full; the main steps are:

Build a SalMediaDescription *md object;

Set the multicast_role and ttl attributes of the audio and video entries in md->streams[];

Set md-level attributes such as session_id and bandwidth;

Set, for the audio and video entries in md->streams[], the sample rate, port, protocol and the other parameters related to the audio/video transport. A simplified sketch of this step follows below.
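
To make step 1 a little more concrete, here is a heavily simplified, hypothetical sketch of the kind of assignments linphone_call_make_local_media_description() performs for the audio entry; the real function also deals with video, multicast, encryption and ICE, and the exact field names should be checked against this version's SalMediaDescription / SalStreamDescription definitions.

/* Hypothetical sketch only, not the real linphone implementation.
 * It shows the kind of fields filled into the local SalMediaDescription. */
static void make_local_media_description_sketch(LinphoneCall *call, LinphoneCore *lc){
   SalMediaDescription *md = sal_media_description_new();  /* ref-counted description object */

   /* session-level attributes */
   md->session_id  = rand();
   md->session_ver = rand();
   md->bandwidth   = linphone_core_get_download_bandwidth(lc);

   /* audio stream entry: transport, direction, port, preferred rate */
   md->streams[0].type     = SalAudio;
   md->streams[0].proto    = SalProtoRtpAvp;   /* SalProtoRtpSavp when SRTP is enabled */
   md->streams[0].dir      = SalStreamSendRecv;
   md->streams[0].rtp_port = 7078;             /* locally allocated audio RTP port (value illustrative) */
   md->streams[0].max_rate = 16000;            /* preferred sampling rate for the codec */
   /* payload type list, multicast_role, ttl, crypto suites ... are filled in as well */

   call->localdesc = md;
}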

 

Step 2: audio_stream_prepare_sound(); audiostream.c, line 240

void audio_stream_prepare_sound(AudioStream *stream, MSSndCard *playcard, MSSndCard *captcard){
    ms_message("audio_stream_prepare_sound playcard = %s, captcard = %s",playcard->desc->driver_type,captcard->desc->driver_type);
   audio_stream_unprepare_sound(stream);
   stream->dummy=ms_factory_create_filter(stream->ms.factory, MS_RTP_RECV_ID);
   rtp_session_set_payload_type(stream->ms.sessions.rtp_session,0);
   rtp_session_enable_rtcp(stream->ms.sessions.rtp_session, FALSE);
   ms_filter_call_method(stream->dummy,MS_RTP_RECV_SET_SESSION,stream->ms.sessions.rtp_session);

   if (captcard && playcard){
#ifdef __ios
      int muted = 1;
      stream->soundread=ms_snd_card_create_reader(captcard);
      stream->soundwrite=ms_snd_card_create_writer(playcard);
      ms_filter_link(stream->dummy,0,stream->soundwrite,0);
      ms_filter_call_method(stream->soundwrite, MS_AUDIO_PLAYBACK_MUTE, &muted);
#else
      stream->ms.voidsink=ms_factory_create_filter(stream->ms.factory,  MS_VOID_SINK_ID);
      ms_filter_link(stream->dummy,0,stream->ms.voidsink,0);
#endif
   } else {
      stream->ms.voidsink=ms_factory_create_filter(stream->ms.factory,  MS_VOID_SINK_ID);
      ms_filter_link(stream->dummy,0,stream->ms.voidsink,0);
      
   }
   if (stream->ms.sessions.ticker == NULL) media_stream_start_ticker(&stream->ms);
   ms_ticker_attach(stream->ms.sessions.ticker,stream->dummy);
   stream->ms.state=MSStreamPreparing;
}

First undo any previous preparation of the call's audiostream: audio_stream_unprepare_sound(stream);

Then create an MS_RTP_RECV_ID MSFilter from the factory with ms_factory_create_filter() and store it in stream->dummy;

Next (on non-iOS builds) create an MS_VOID_SINK_ID MSFilter and store it in stream->ms.voidsink;

Then, if stream->ms.sessions.ticker == NULL, call media_stream_start_ticker() to create and start an MSTicker;

Finally call ms_ticker_attach() to attach the MSTicker to stream->dummy.

 

The point of this audio preparation is to start an MSTicker and attach it to the current call.

First the MSFilter* is obtained with ms_factory_create_filter(), msfactory.c, line 362:

MSFilter *ms_factory_create_filter(MSFactory* factory, MSFilterId id){
   MSFilterDesc *desc;
   if (id==MS_FILTER_PLUGIN_ID){
      ms_warning("cannot create plugin filters with ms_filter_new_from_id()");
      return NULL;
   }
   desc=ms_factory_lookup_filter_by_id(factory,id);
   if (desc) return ms_factory_create_filter_from_desc(factory,desc);
   ms_error("No such filter with id %i",id);
   return NULL;
}

The id used is MS_RTP_RECV_ID; ms_factory_lookup_filter_by_id() finds the filter description; line 385:

MSFilterDesc* ms_factory_lookup_filter_by_id( MSFactory* factory, MSFilterId id){
   bctbx_list_t *elem;
   for (elem=factory->desc_list;elem!=NULL;elem=bctbx_list_next(elem)){
      MSFilterDesc *desc=(MSFilterDesc*)elem->data;
      if (desc->id==id){
         return desc;
      }
   }
   return NULL;
}

This simply walks the factory's desc_list and returns the first desc whose id matches.

Back in the caller, the actual filter is then created from this desc with

ms_factory_create_filter_from_desc(); line 337

MSFilter *ms_factory_create_filter_from_desc(MSFactory* factory, MSFilterDesc *desc){
   MSFilter *obj;
   obj=(MSFilter *)ms_new0(MSFilter,1);
   ms_mutex_init(&obj->lock,NULL);
   obj->desc=desc;
   if (desc->ninputs>0)   obj->inputs=(MSQueue**)ms_new0(MSQueue*,desc->ninputs);
   if (desc->noutputs>0)  obj->outputs=(MSQueue**)ms_new0(MSQueue*,desc->noutputs);

   if (factory->statistics_enabled){
      obj->stats=find_or_create_stats(factory,desc);
   }
   obj->factory=factory;
   if (obj->desc->init!=NULL)
      obj->desc->init(obj);
   return obj;
}

A new MSFilter is allocated and the given desc is attached to it: obj->desc = desc;

then obj->inputs and obj->outputs are allocated;

the MSFactory is linked: obj->factory = factory;

finally obj->desc->init(obj) is called to initialize the filter.

Back in audio_stream_prepare_sound, let's look at how the MSTicker is initialized:

media_stream_start_ticker(), in mediastream.c, line 144

void media_stream_start_ticker(MediaStream *stream) {
    ms_message("media_stream_start_ticker");
   MSTickerParams params = {0};
   char name[32] = {0};

   if (stream->sessions.ticker) return;
   snprintf(name, sizeof(name) - 1, "%s MSTicker", media_stream_type_str(stream));
   name[0] = toupper(name[0]);
   params.name = name;
   params.prio = __ms_get_default_prio((stream->type == MSVideo) ? TRUE : FALSE);
   stream->sessions.ticker = ms_ticker_new_with_params(&params);
}

An MSTickerParams structure is filled in and the MSTicker is built from it:

ms_ticker_new_with_params(), in msticker.c, line 87

MSTicker *ms_ticker_new_with_params(const MSTickerParams *params){
   MSTicker *obj=(MSTicker *)ms_new0(MSTicker,1);
   ms_ticker_init(obj,params);
   return obj;
}
static void ms_ticker_init(MSTicker *ticker, const MSTickerParams *params)
{
    ms_message("ms_ticker_init");
   ms_mutex_init(&ticker->lock,NULL);
   ticker->execution_list=NULL;
   ticker->task_list=NULL;
   ticker->ticks=1;
   ticker->time=0;
   ticker->interval=TICKER_INTERVAL;
   ticker->run=FALSE;
   ticker->exec_id=0;
   ticker->get_cur_time_ptr=&get_cur_time_ms;
   ticker->get_cur_time_data=NULL;
   ticker->name=ms_strdup(params->name);
   ticker->av_load=0;
   ticker->prio=params->prio;
   ticker->wait_next_tick=wait_next_tick;
   ticker->wait_next_tick_data=ticker;
   ticker->late_event.lateMs = 0;
   ticker->late_event.time = 0;
   ticker->late_event.current_late_ms = 0;
   ms_ticker_start(ticker);
}
static void ms_ticker_start(MSTicker *s){
   s->run=TRUE;
   ms_thread_create(&s->thread,NULL,ms_ticker_run,s);
}

The initialization has three steps:

1. Allocate a new MSTicker object;

2. Configure the ticker's attributes; the default tick interval (TICKER_INTERVAL) is 10 ms;

3. Start a thread running ms_ticker_run for this ticker.

 

Next, ms_ticker_attach(), in msticker.c, line 135:

int ms_ticker_attach(MSTicker *ticker, MSFilter *f){
   return ms_ticker_attach_multiple(ticker,f,NULL);
}
int ms_ticker_attach_multiple(MSTicker *ticker,MSFilter *f,...)
{
    ms_message("ms_ticker_attach_multiple");
   bctbx_list_t *sources=NULL;
   bctbx_list_t *filters=NULL;
   bctbx_list_t *it;
   bctbx_list_t *total_sources=NULL;
   va_list l;

   va_start(l,f);

   do{
      if (f->ticker==NULL) {
          ms_message("f->ticker == null");
         filters=ms_filter_find_neighbours(f);
         sources=get_sources(filters);
         if (sources==NULL){
            ms_fatal("No sources found around filter %s",f->desc->name);
            bctbx_list_free(filters);
            break;
         }
         /*run preprocess on each filter: */
         for(it=filters;it!=NULL;it=it->next)
            ms_filter_preprocess((MSFilter*)it->data,ticker);
         bctbx_list_free(filters);
         total_sources=bctbx_list_concat(total_sources,sources);
      }else ms_message("Filter %s is already being scheduled; nothing to do.",f->desc->name);
   }while ((f=va_arg(l,MSFilter*))!=NULL);
   va_end(l);
   if (total_sources){
      ms_mutex_lock(&ticker->lock);
      ticker->execution_list=bctbx_list_concat(ticker->execution_list,total_sources);
      ms_mutex_unlock(&ticker->lock);
   }
   return 0;
}

When the stream has just been created, f->ticker is NULL, so:

ms_filter_find_neighbours(f) collects the whole filter graph connected to f;

then every filter in that list gets ms_filter_preprocess() called on it;

after the preprocess pass, the source filters (total_sources) are appended to ticker->execution_list.

Each MSTicker's execution_list therefore holds the source filters attached to it; ms_ticker_run walks this list on every tick, as the sketch below illustrates.
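
As a conceptual illustration (this is not the actual msticker.c source), the thread started by ms_ticker_start() loops roughly like this, using the fields initialized in ms_ticker_init() above:

/* Conceptual sketch of the MSTicker thread body started by ms_thread_create()
 * (simplified; the real ms_ticker_run also measures load, handles late ticks, etc.). */
static void *ms_ticker_run_sketch(void *arg){
   MSTicker *s = (MSTicker*)arg;
   while (s->run){
      ms_mutex_lock(&s->lock);
      s->time += s->interval;   /* advance logical time by TICKER_INTERVAL (10 ms) */
      /* walk s->execution_list: for every source MSFilter attached with
       * ms_ticker_attach(), run one round of process() calls through its graph */
      ms_mutex_unlock(&s->lock);
      s->wait_next_tick(s->wait_next_tick_data, s->time);  /* sleep until the next tick */
   }
   return NULL;
}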

ms_filter_preprocess() is in msfilter.c, line 234:

void ms_filter_preprocess(MSFilter *f, struct _MSTicker *t){
    ms_message("ms_filter_preprocess %s" ,f->desc->name);
   f->last_tick=0;
   f->ticker=t;
   if (f->desc->preprocess!=NULL)
      f->desc->preprocess(f);
}

It calls the preprocess() function of the MSFilterDesc defined by each MSFilter.

At this point two kinds of MSFilter are involved: MSRtpRecv and MSVoidSink.

 

MSRtpRecv is defined in msrtp.c; it is presumably the filter used to receive RTP data;

MSVoidSink is defined in void.c and has no preprocess function.

 

Let's look at the preprocess() of MSRtpRecv, in msrtp.c, line 648:

static void receiver_preprocess(MSFilter * f){
    ms_message("receiver_preprocess msrtp");
   ReceiverData *d = (ReceiverData *) f->data;
   d->starting=TRUE;
}

It sets f->data->starting to TRUE; this flag is checked later when process() runs.

f->data is the actual implementation object of the media filter.

 

With that, the ringstream preparation of step 2 is complete.

 

Step 3: sal_call_set_local_media_description();

in sal_op_call.c, line 723

int sal_call_set_local_media_description(SalOp *op, SalMediaDescription *desc){
   if (desc)
      sal_media_description_ref(desc);
   if (op->base.local_media)
      sal_media_description_unref(op->base.local_media);
   op->base.local_media=desc;

   if (op->base.remote_media){
      /*case of an incoming call where we modify the local capabilities between the time
       * the call is ringing and it is accepted (for example if you want to accept without video*/
      /*reset the sdp answer so that it is computed again*/
      if (op->sdp_answer){
         belle_sip_object_unref(op->sdp_answer);
         op->sdp_answer=NULL;
      }
   }
   return 0;
}

It mainly drops the SalOp's old local_media configuration and replaces it with the desc passed in:

op->base.local_media = desc;

 

After that the call request is sent to the SIP server with sal_call().

 

The responses received during the call are handled by a callback in sal_op_call.c;

the callback is call_process_response(), line 237:

static void call_process_response(void *op_base, const belle_sip_response_event_t *event){
    ms_message("call_process_response");
   SalOp* op = (SalOp*)op_base;

   belle_sip_request_t* ack;
   belle_sip_dialog_state_t dialog_state;
   belle_sip_client_transaction_t* client_transaction = belle_sip_response_event_get_client_transaction(event);
   belle_sip_request_t* req;
   belle_sip_response_t* response=belle_sip_response_event_get_response(event);
   int code = belle_sip_response_get_status_code(response);
   belle_sip_header_content_type_t *header_content_type=NULL;
   belle_sip_dialog_t *dialog=belle_sip_response_event_get_dialog(event);
   const char *method;

   if (!client_transaction) {
      ms_warning("Discarding stateless response [%i] on op [%p]",code,op);
      return;
   }
   req=belle_sip_transaction_get_request(BELLE_SIP_TRANSACTION(client_transaction));
   set_or_update_dialog(op,dialog);
   dialog_state=dialog ? belle_sip_dialog_get_state(dialog) : BELLE_SIP_DIALOG_NULL;
   method=belle_sip_request_get_method(req);
   ms_message("Op [%p] receiving call response [%i], dialog is [%p] in state [%s]",op,code,dialog,belle_sip_dialog_state_to_string(dialog_state));
   /*to make sure no cb will destroy op*/
   sal_op_ref(op);
   switch(dialog_state) {
      case BELLE_SIP_DIALOG_NULL:
      case BELLE_SIP_DIALOG_EARLY: {
         if (strcmp("INVITE",method)==0 ) {
            if (op->state == SalOpStateTerminating) {
               /*check if CANCEL was sent before*/
               if (strcmp("CANCEL",belle_sip_request_get_method(belle_sip_transaction_get_request(BELLE_SIP_TRANSACTION(op->pending_client_trans))))!=0) {
                  /*it wasn't sent */
                  if (code<200) {
                     cancelling_invite(op);
                  }else{
                     /* no need to send the INVITE because the UAS rejected the INVITE*/
                     if (op->dialog==NULL) call_set_released(op);
                  }
               } else {
                  /*it was sent already, so just expect the 487 or any error response to send the call_released() notification*/
                  if (code>=300){
                     if (op->dialog==NULL) call_set_released(op);
                  }
               }
            } else if (code >= 180 && code<200) {
               belle_sip_response_t *prev_response=belle_sip_object_data_get(BELLE_SIP_OBJECT(dialog),"early_response");
               if (!prev_response || code>belle_sip_response_get_status_code(prev_response)){
                  handle_sdp_from_response(op,response);
                  op->base.root->callbacks.call_ringing(op);
               }
               belle_sip_object_data_set(BELLE_SIP_OBJECT(dialog),"early_response",belle_sip_object_ref(response),belle_sip_object_unref);
            } else if (code>=300){
               call_set_error(op, response, TRUE);
               if (op->dialog==NULL) call_set_released(op);
            }
         } else if (code >=200 && code<300) {
            if (strcmp("UPDATE",method)==0) {
               handle_sdp_from_response(op,response);
               op->base.root->callbacks.call_accepted(op);
            } else if (strcmp("CANCEL", method) == 0) {
               op->base.root->callbacks.call_cancel_done(op);
            }
         }
      }
      break;
      case BELLE_SIP_DIALOG_CONFIRMED: {
         switch (op->state) {
            case SalOpStateEarly:/*invite case*/
            case SalOpStateActive: /*re-invite, INFO, UPDATE case*/
               if (strcmp("INVITE",method)==0){
                  if (code >=200 && code<300) {
                     handle_sdp_from_response(op,response);
                     ack=belle_sip_dialog_create_ack(op->dialog,belle_sip_dialog_get_local_seq_number(op->dialog));
                     if (ack == NULL) {
                        ms_error("This call has been already terminated.");
                        return ;
                     }
                     if (op->sdp_answer){
                        set_sdp(BELLE_SIP_MESSAGE(ack),op->sdp_answer);
                        belle_sip_object_unref(op->sdp_answer);
                        op->sdp_answer=NULL;
                     }
                     belle_sip_message_add_header(BELLE_SIP_MESSAGE(ack),BELLE_SIP_HEADER(op->base.root->user_agent));
                     belle_sip_dialog_send_ack(op->dialog,ack);
                     op->base.root->callbacks.call_accepted(op); /*INVITE*/
                     op->state=SalOpStateActive;
                  }else if (code >= 300){
                     call_set_error(op,response, FALSE);
                  }
               }else if (strcmp("INFO",method)==0){
                  if (code == 491
                     && (header_content_type = belle_sip_message_get_header_by_type(req,belle_sip_header_content_type_t))
                     && strcmp("application",belle_sip_header_content_type_get_type(header_content_type))==0
                     && strcmp("media_control+xml",belle_sip_header_content_type_get_subtype(header_content_type))==0) {
                     unsigned int retry_in = (unsigned int)(1000*((float)rand()/RAND_MAX));
                     belle_sip_source_t *s=sal_create_timer(op->base.root,vfu_retry,sal_op_ref(op), retry_in, "vfu request retry");
                     ms_message("Rejected vfu request on op [%p], just retry in [%ui] ms",op,retry_in);
                     belle_sip_object_unref(s);
                  }else {
                        /*ignoring*/
                  }
               }else if (strcmp("UPDATE",method)==0){
                  op->base.root->callbacks.call_accepted(op); /*INVITE*/
               }else if (strcmp("CANCEL",method)==0){
                  op->base.root->callbacks.call_cancel_done(op);
               }
            break;
            case SalOpStateTerminating:
               sal_op_send_request(op,belle_sip_dialog_create_request(op->dialog,"BYE"));
            break;
            case SalOpStateTerminated:
            default:
               ms_error("Call op [%p] receives unexpected answer [%i] while in state [%s].",op,code, sal_op_state_to_string(op->state));
         }
      }
      break;
      case BELLE_SIP_DIALOG_TERMINATED: {
         if (strcmp("INVITE",method)==0 && code >= 300){
            call_set_error(op,response, TRUE);
         }
      }
      break;
      default: {
         ms_error("call op [%p] receive answer [%i] not implemented",op,code);
      }
      break;
   }
   sal_op_unref(op);
}

 

The callback processing can be analysed as follows:

1. belle_sip_response_event_get_client_transaction(event) gets the belle_sip_client_transaction_t *client_transaction;

2. belle_sip_response_event_get_response(event) gets the belle_sip_response_t *response;

3. belle_sip_response_get_status_code(response) gets the SIP response code;

4. belle_sip_transaction_get_request(client_transaction) gets the request req that this response answers;

5. belle_sip_response_event_get_dialog(event) gets the belle_sip_dialog_t *dialog;

6. belle_sip_dialog_get_state(dialog) gets the current dialog state dialog_state;

7. belle_sip_request_get_method(req) gets the SIP method this response answers: INVITE / INFO / UPDATE / CANCEL;

8. then dialog_state and code decide how the response is handled:

In the BELLE_SIP_DIALOG_NULL / BELLE_SIP_DIALOG_EARLY states, only responses to INVITE are handled:

if 180 <= code < 200:

handle_sdp_from_response(op, response) processes the SDP,

and the op->base.root->callbacks.call_ringing(op) callback is invoked;

if code >= 300:

it is treated as an error and the call is released.

In the BELLE_SIP_DIALOG_CONFIRMED state, op->state is examined as well:

if op->state is SalOpStateEarly or SalOpStateActive, the INVITE has been accepted by the remote side;

if 200 <= code < 300 the response is normal: handle_sdp_from_response() processes it, an ACK is sent and call_accepted() is invoked;

if code >= 300, it is treated as an error and the call ends;

the remaining states essentially terminate the call.

In the BELLE_SIP_DIALOG_TERMINATED state the dialog is over; an INVITE error response (code >= 300) ends the call via call_set_error().

 

handle_sdp_from_response() is in sal_op_call.c, line 185:

static void handle_sdp_from_response(SalOp* op,belle_sip_response_t* response) {
    belle_sip_message("handle_sdp_from_response");
   belle_sdp_session_description_t* sdp;
   SalReason reason;
   if (op->base.remote_media){
      sal_media_description_unref(op->base.remote_media);
      op->base.remote_media=NULL;
   }
   if (extract_sdp(op,BELLE_SIP_MESSAGE(response),&sdp,&reason)==0) {
      if (sdp){
         op->base.remote_media=sal_media_description_new();
         sdp_to_media_description(sdp,op->base.remote_media);
      }/*if no sdp in response, what can we do ?*/
   }
   /* process sdp in any case to reset result media description*/
   if (op->base.local_media) sdp_process(op);
}

It refreshes op->base.remote_media from the SDP carried in the response (releasing any previous remote description);

then, if op->base.local_media is set, it calls sdp_process(op) to recompute the offer/answer result; line 48:

static void sdp_process(SalOp *h){
   ms_message("Doing SDP offer/answer process of type %s",h->sdp_offering ? "outgoing" : "incoming");
   if (h->result){
      sal_media_description_unref(h->result);
      h->result = NULL;
   }

   /* if SDP was invalid */
   if (h->base.remote_media == NULL) return;

   h->result=sal_media_description_new();
   if (h->sdp_offering){
      offer_answer_initiate_outgoing(h->base.root->factory, h->base.local_media,h->base.remote_media,h->result);
   }else{
      int i;
      if (h->sdp_answer){
         belle_sip_object_unref(h->sdp_answer);
      }
      offer_answer_initiate_incoming(h->base.root->factory, h->base.local_media,h->base.remote_media,h->result,h->base.root->one_matching_codec);
      /*for backward compatibility purpose*/
      if(h->cnx_ip_to_0000_if_sendonly_enabled && sal_media_description_has_dir(h->result,SalStreamSendOnly)) {
         set_addr_to_0000(h->result->addr);
         for(i=0;i<SAL_MEDIA_DESCRIPTION_MAX_STREAMS;++i){
            if (h->result->streams[i].dir == SalStreamSendOnly) {
               set_addr_to_0000(h->result->streams[i].rtp_addr);
               set_addr_to_0000(h->result->streams[i].rtcp_addr);
            }
         }
      }
      h->sdp_answer=(belle_sdp_session_description_t *)belle_sip_object_ref(media_description_to_sdp(h->result));
      /*once we have generated the SDP answer, we modify the result description for processing by the upper layer.
       It should contains media parameters constraint from the remote offer, not our response*/
      strcpy(h->result->addr,h->base.remote_media->addr);
      h->result->bandwidth=h->base.remote_media->bandwidth;

      for(i=0;i<SAL_MEDIA_DESCRIPTION_MAX_STREAMS;++i){
         /*copy back parameters from remote description that we need in our result description*/
         if (h->result->streams[i].rtp_port!=0){ /*if stream was accepted*/
            strcpy(h->result->streams[i].rtp_addr,h->base.remote_media->streams[i].rtp_addr);
            h->result->streams[i].ptime=h->base.remote_media->streams[i].ptime;
            h->result->streams[i].bandwidth=h->base.remote_media->streams[i].bandwidth;
            h->result->streams[i].rtp_port=h->base.remote_media->streams[i].rtp_port;
            strcpy(h->result->streams[i].rtcp_addr,h->base.remote_media->streams[i].rtcp_addr);
            h->result->streams[i].rtcp_port=h->base.remote_media->streams[i].rtcp_port;

            if (sal_stream_description_has_srtp(&h->result->streams[i])) {
               h->result->streams[i].crypto[0] = h->base.remote_media->streams[i].crypto[0];
            }
         }
      }
   }
}

The SalOp's sdp_offering flag indicates whether we made the SDP offer (outgoing) or are answering one (incoming);

if we are offering, offer_answer_initiate_outgoing() is called, offeranswer.c, line 534;

if we are answering, offer_answer_initiate_incoming() is called, offeranswer.c, line 573.

Both functions negotiate the audio/video media parameters and the network settings of this session; they are not analysed further here.

 

Once the call has been placed and the remote side starts ringing,

SIP returns 183 (an early response), i.e. the ringback / waiting tone starts playing.

Following the flow above, after handle_sdp_from_response() has run, the callbacks.call_ringing() callback is invoked.

 

The callback functions are registered in linphone_core_init():

	sal_set_callbacks(lc->sal,&linphone_sal_callbacks);

linphone_sal_callbacks is a structure holding a set of callback function pointers; it is defined in callbacks.c:

SalCallbacks linphone_sal_callbacks={
   call_received,
   call_ringing,
   call_accepted,
   call_ack,
   call_updating,
   call_terminated,
   call_failure,
   call_released,
   call_cancel_done,
   auth_failure,
   register_success,
   register_failure,
   vfu_request,
   dtmf_received,
   refer_received,
   text_received,
   text_delivery_update,
   is_composing_received,
   notify_refer,
   subscribe_received,
   incoming_subscribe_closed,
   subscribe_response,
   notify,
   subscribe_presence_received,
   subscribe_presence_closed,
   parse_presence_requested,
   convert_presence_to_xml_requested,
   notify_presence,
   ping_reply,
   auth_requested,
   info_received,
   on_publish_response,
   on_expire,
   on_notify_response
};

So the real implementations of all the callbacks referenced through op->base.root->callbacks live in callbacks.c.

 

Let's continue with the ringing callback call_ringing(), callbacks.c, line 433:

static void call_ringing(SalOp *h){
   LinphoneCore *lc=(LinphoneCore *)sal_get_user_pointer(sal_op_get_sal(h));
   LinphoneCall *call=(LinphoneCall*)sal_op_get_user_pointer(h);
   SalMediaDescription *md;
   ms_message("call_ringing captcard = %s" ,lc->sound_conf.capt_sndcard->desc->driver_type);
   if (call==NULL) return;

   /*set privacy*/
   call->current_params->privacy=(LinphonePrivacyMask)sal_op_get_privacy(call->op);

   linphone_core_notify_display_status(lc,_("Remote ringing."));

   md=sal_call_get_final_media_description(h);
   if (md==NULL){
     ...
   }else{
      /*initialize the remote call params by invoking linphone_call_get_remote_params(). This is useful as the SDP may not be present in the 200Ok*/
      linphone_call_get_remote_params(call);
      /*accept early media */
      if ((call->audiostream && audio_stream_started(call->audiostream))
#ifdef VIDEO_ENABLED
         || (call->videostream && video_stream_started(call->videostream))
#endif
         ) {
         /*streams already started */
         try_early_media_forking(call,md);
         #ifdef VIDEO_ENABLED
         if (call->videostream){
            /*just request for iframe*/
            video_stream_send_vfu(call->videostream);
         }
         #endif
      return;
      }

      linphone_core_notify_show_interface(lc);
      linphone_core_notify_display_status(lc,_("Early media."));
      linphone_call_set_state(call,LinphoneCallOutgoingEarlyMedia,"Early media");
      linphone_core_stop_ringing(lc);
      ms_message("Doing early media...");
      linphone_core_update_streams(lc,call,md, call->state);
      if ((linphone_call_params_get_audio_direction(linphone_call_get_current_params(call)) == LinphoneMediaDirectionInactive) && call->audiostream) {
         if (lc->ringstream != NULL) return; /* Already ringing! */
         start_remote_ring(lc, call);
      }
   }
}

First sal_call_get_final_media_description() retrieves the negotiated media description SalMediaDescription *md,

i.e. the best media parameters both sides can use.

If there is no usable md, the LinphoneCallOutgoingRinging state is reported to the Java layer and the function returns; presumably the call cannot proceed further in that case.

When a usable md exists:

if call->audiostream has already been started, early media forking is attempted and the function returns;

otherwise linphone_core_update_streams() is called to start the related audio devices;

finally, if the negotiated audio direction is inactive and call->audiostream exists, start_remote_ring() is called to play the local ringback tone.

 

linphone_core_update_streams() is in callbacks.c, line 127:

void linphone_core_update_streams(LinphoneCore *lc, LinphoneCall *call, SalMediaDescription *new_md, LinphoneCallState target_state){
    ms_message("linphone_core_update_streams");
   SalMediaDescription *oldmd=call->resultdesc;
   int md_changed=0;

   ms_message("linphone_core_update_streams captcard = %s" ,lc->sound_conf.capt_sndcard->desc->driver_type);

   if (!((call->state == LinphoneCallIncomingEarlyMedia) && (linphone_core_get_ring_during_incoming_early_media(lc)))) {
      linphone_core_stop_ringing(lc);
   }
   if (!new_md) {
      ms_error("linphone_core_update_streams() called with null media description");
      return;
   }
   linphone_call_update_biggest_desc(call, call->localdesc);
   sal_media_description_ref(new_md);
   call->resultdesc=new_md;
   if ((call->audiostream && call->audiostream->ms.state==MSStreamStarted) || (call->videostream && call->videostream->ms.state==MSStreamStarted)){
     ....
   }

   if (call->audiostream==NULL){
      /*this happens after pausing the call locally. The streams are destroyed and then we wait the 200Ok to recreate them*/
      linphone_call_init_media_streams (call);
   }

   if (call->params->real_early_media && call->state==LinphoneCallOutgoingEarlyMedia){
      prepare_early_media_forking(call);
   }
   linphone_call_start_media_streams(call, target_state);
   if (call->state==LinphoneCallPausing && call->paused_by_app && bctbx_list_size(lc->calls)==1){
      linphone_core_play_named_tone(lc,LinphoneToneCallOnHold);
   }
   linphone_call_update_frozen_payloads(call, new_md);
   end:
   if (oldmd)
      sal_media_description_unref(oldmd);

}

It first checks the state of call->audiostream; since the remote side has only just started ringing, most of the intermediate branches are skipped,

and then linphone_call_start_media_streams() is called to start the media.

 

linphone_call_start_media_streams() is in linphonecall.c, line 3669:

void linphone_call_start_media_streams(LinphoneCall *call, LinphoneCallState next_state){
   LinphoneCore *lc=call->core;
   ms_message("linphone_call_start_media_streams captcard = %s" ,lc->sound_conf.capt_sndcard->desc->driver_type);
   bool_t use_arc = linphone_core_adaptive_rate_control_enabled(lc);
#ifdef VIDEO_ENABLED
   const SalStreamDescription *vstream=sal_media_description_find_best_stream(call->resultdesc,SalVideo);
#endif
 	......		
   if (call->audiostream!=NULL) {
      linphone_call_start_audio_stream(call, next_state, use_arc);
   } else {
      ms_warning("linphone_call_start_media_streams(): no audio stream!");
   }
   call->current_params->has_video=FALSE;
   if (call->videostream!=NULL) {
      if (call->audiostream) audio_stream_link_video(call->audiostream,call->videostream);
      linphone_call_start_video_stream(call, next_state);
   }
 	......		
   set_dtls_fingerprint_on_all_streams(call);

   if ((call->ice_session != NULL) && (ice_session_state(call->ice_session) != IS_Completed)) {
      if (call->params->media_encryption==LinphoneMediaEncryptionDTLS) {
         call->current_params->update_call_when_ice_completed = FALSE;
         ms_message("Disabling update call when ice completed on call [%p]",call);
      }
      ice_session_start_connectivity_checks(call->ice_session);
   } else {
      /*should not start dtls until ice is completed*/
      start_dtls_on_all_streams(call);
   }
}

Again a series of checks and settings is performed first, which we skip;

then linphone_call_start_audio_stream() is called to start the audio media;

then, if this is a video call, linphone_call_start_video_stream() is called to start the video media.

 

For now we only look at the audio media.

linphone_call_start_audio_stream(), in linphonecall.c, line 3234:

static void linphone_call_start_audio_stream(LinphoneCall *call, LinphoneCallState next_state, bool_t use_arc){
   ms_message("linphone_call_start_audio_stream");
   LinphoneCore *lc=call->core;
   int used_pt=-1;
   const SalStreamDescription *stream;
   MSSndCard *playcard;
   MSSndCard *captcard;
 	......
   int crypto_idx;
   MSMediaStreamIO io = MS_MEDIA_STREAM_IO_INITIALIZER;
   bool_t use_rtp_io = lp_config_get_int(lc->config, "sound", "rtp_io", FALSE);

   stream = sal_media_description_find_best_stream(call->resultdesc, SalAudio);
   if (stream && stream->dir!=SalStreamInactive && stream->rtp_port!=0){
      /* get remote stream description to check for zrtp-hash presence */
      SalMediaDescription *remote_desc = sal_call_get_remote_media_description(call->op);
      const SalStreamDescription *remote_stream = sal_media_description_find_best_stream(remote_desc, SalAudio);
      playcard=lc->sound_conf.lsd_card ?
         lc->sound_conf.lsd_card : lc->sound_conf.play_sndcard;
      captcard=lc->sound_conf.capt_sndcard;
      call->audio_profile=make_profile(call,call->resultdesc,stream,&used_pt);

      if (used_pt!=-1){
         bool_t ok = TRUE;
         call->current_params->audio_codec = rtp_profile_get_payload(call->audio_profile, used_pt);
      	 ......
         if (playcard &&  stream->max_rate>0) ms_snd_card_set_preferred_sample_rate(playcard, stream->max_rate);
         if (captcard &&  stream->max_rate>0) ms_snd_card_set_preferred_sample_rate(captcard, stream->max_rate);
         audio_stream_enable_adaptive_bitrate_control(call->audiostream,use_arc);
         media_stream_set_adaptive_bitrate_algorithm(&call->audiostream->ms,
                        ms_qos_analyzer_algorithm_from_string(linphone_core_get_adaptive_rate_algorithm(lc)));
         audio_stream_enable_adaptive_jittcomp(call->audiostream, linphone_core_audio_adaptive_jittcomp_enabled(lc));
         rtp_session_set_jitter_compensation(call->audiostream->ms.sessions.rtp_session,linphone_core_get_audio_jittcomp(lc));
         rtp_session_enable_rtcp_mux(call->audiostream->ms.sessions.rtp_session, stream->rtcp_mux);
        
         if (use_rtp_io) {
            io.input.type = io.output.type = MSResourceRtp;
            io.input.session = io.output.session = create_audio_rtp_io_session(call);
            if (io.input.session == NULL) {
               ok = FALSE;
            }
         }else {
            if (playcard){
               io.output.type = MSResourceSoundcard;
               io.output.soundcard = playcard;
            }else{
               io.output.type = MSResourceFile;
               io.output.file = recfile;
            }
            if (captcard){
               io.input.type = MSResourceSoundcard;
               io.input.soundcard = captcard;
            }else{
               io.input.type = MSResourceFile;
               file_to_play = playfile;
               io.input.file = NULL; /*we prefer to use the remote_play api, that allows to play multimedia files */
            }

         }
         if (ok == TRUE) {
            int err = audio_stream_start_from_io(call->audiostream,
               call->audio_profile,
               rtp_addr,
               stream->rtp_port,
               stream->rtcp_addr[0]!='\0' ? stream->rtcp_addr : call->resultdesc->addr,
               (linphone_core_rtcp_enabled(lc) && !is_multicast) ? (stream->rtcp_port ? stream->rtcp_port : stream->rtp_port+1) : 0,
               used_pt,
               &io);
            if (err == 0){
               post_configure_audio_streams(call, (call->all_muted || call->audio_muted) && !call->playing_ringbacktone);
            }
         }
 		......
      }else ms_warning("No audio stream accepted ?");
   }
   linphone_call_set_on_hold_file(call, file_to_play);
}

The process is fairly involved: first a number of audio media parameters are looked up and validated;

use_rtp_io is read from the rtp_io entry of the "sound" section of lc->config and defaults to FALSE;

then the io descriptor is set up:

io.output.type = MSResourceSoundcard;

io.output.soundcard = playcard;

io.input.type = MSResourceSoundcard;

io.input.soundcard = captcard;

then audio_stream_start_from_io() is called,

and finally linphone_call_set_on_hold_file().

 

audio_stream_start_from_io(); audiostream.c, line 780

The code is fairly long; a simplified flow is:

1. Create an MS_RTP_RECV_ID MSFilter and set it on stream->ms.rtprecv;

2. Create an MS_DTMF_GEN_ID MSFilter and set it on stream->dtmfgen;

3. According to io->input.type (sound-card mode here), call ms_snd_card_create_reader(io->input.soundcard);

4. According to io->output.type, call ms_snd_card_create_writer(io->output.soundcard);

5. Set the stream's codecs: stream->ms.encoder = ms_factory_create_encoder() and stream->ms.decoder = ms_factory_create_decoder();

6. Set the stream's volume controller: stream->volsend = ms_factory_create_filter(..., MS_VOLUME_ID);

7. Set stream->write_resampler = ms_factory_create_filter(..., MS_RESAMPLE_ID);

8. Set stream->outbound_mixer = ms_factory_create_filter(..., MS_AUDIO_MIXER_ID);

9. Set stream->mic_equalizer = ms_factory_create_filter(..., MS_EQUALIZER_ID);

and stream->spk_equalizer = ms_factory_create_filter(..., MS_EQUALIZER_ID);

10. Then a series of ms_filter_link() calls wires together the stream members set above (see the sketch after this list);

11. Finally ms_ticker_attach_multiple() is called.
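
As a rough illustration of steps 10 and 11, assuming the plain sound-card case and leaving out the resamplers, equalizers, mixer and echo canceller, the send and receive chains wired by ms_filter_link() look approximately as follows; the member names come from the AudioStream structure, and the exact graph in audiostream.c contains more elements:

/* Simplified sketch of the audio filter graph built by audio_stream_start_from_io()
 * (sound-card case; AEC, equalizers, resamplers and mixers omitted).
 * ms_filter_link(source_filter, out_pin, dest_filter, in_pin) */
static void link_audio_graph_sketch(AudioStream *stream){
   /* capture / send chain: microphone -> volume -> encoder -> RTP sender */
   ms_filter_link(stream->soundread,  0, stream->volsend,    0);
   ms_filter_link(stream->volsend,    0, stream->ms.encoder, 0);
   ms_filter_link(stream->ms.encoder, 0, stream->ms.rtpsend, 0);

   /* receive / playback chain: RTP receiver -> decoder -> dtmfgen -> volume -> speaker */
   ms_filter_link(stream->ms.rtprecv, 0, stream->ms.decoder, 0);
   ms_filter_link(stream->ms.decoder, 0, stream->dtmfgen,    0);
   ms_filter_link(stream->dtmfgen,    0, stream->volrecv,    0);
   ms_filter_link(stream->volrecv,    0, stream->soundwrite, 0);

   /* attaching the ticker to the two graph sources makes both chains start ticking */
   ms_ticker_attach_multiple(stream->ms.sessions.ticker, stream->soundread, stream->ms.rtprecv, NULL);
}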

 

 

The recorder is started through ms_snd_card_create_reader,

in mssndcard.c, line 251:

struct _MSFilter * ms_snd_card_create_reader(MSSndCard *obj){
    ms_message("ms_snd_card_create_reader [%s] ",obj->desc->driver_type);
   if (obj->desc->create_reader!=NULL)
      return obj->desc->create_reader(obj);
   else ms_warning("ms_snd_card_create_reader: unimplemented by %s wrapper",obj->desc->driver_type);
   return NULL;
}

It checks whether the MSSndCard sound-card model provides a create_reader function pointer; if so it is called directly.

On the Android platform three sound-card models are defined, and all of them provide a create_reader implementation (the general shape of such a descriptor is sketched below).

Let's look at them in turn:
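
For orientation, each of these models is an MSSndCardDesc table whose create_reader / create_writer members point at the card-specific factory functions. A hedged sketch of the shape of such a descriptor (the callback names are made up, only some members are shown, and mssndcard.h should be consulted for the exact struct layout):

/* Hypothetical sketch of a sound-card descriptor. The callback names are invented;
 * designated initializers are used so the member order of MSSndCardDesc does not matter. */
static void      sketch_snd_card_detect(MSSndCardManager *m);
static void      sketch_snd_card_init(MSSndCard *card);
static void      sketch_snd_card_uninit(MSSndCard *card);
static MSFilter *sketch_snd_card_create_reader(MSSndCard *card);  /* capture filter (AudioRecord / OpenSL ES ...) */
static MSFilter *sketch_snd_card_create_writer(MSSndCard *card);  /* playback filter */

MSSndCardDesc sketch_snd_card_desc = {
   .driver_type   = "SKETCH",
   .detect        = sketch_snd_card_detect,
   .init          = sketch_snd_card_init,
   .create_reader = sketch_snd_card_create_reader,
   .create_writer = sketch_snd_card_create_writer,
   .uninit        = sketch_snd_card_uninit,
};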

 

For msandroid_sound_card_desc, defined in androidsound_depr.cpp,

the implementation is msandroid_sound_read_new, line 599:

   MSFilter *msandroid_sound_read_new(MSSndCard *card){
      MSFilter *f=ms_factory_create_filter_from_desc(ms_snd_card_get_factory(card), &msandroid_sound_read_desc);
      msandroid_sound_read_data *data=new msandroid_sound_read_data();
      data->builtin_aec = card->capabilities & MS_SND_CARD_CAP_BUILTIN_ECHO_CANCELLER;
      if (card->data != NULL) {
         SoundDeviceDescription *d = (SoundDeviceDescription *)card->data;
         if (d->recommended_rate > 0) {
            data->rate = d->recommended_rate;
            data->forced_rate = true;
            ms_warning("Using forced sample rate %i", data->rate);
         }
      }
      f->data=data;
      return f;
   }

First an MSFilter *f is built with ms_factory_create_filter_from_desc() and the specified desc;

a msandroid_sound_read_data *data object is created;

then the card's data attribute, a SoundDeviceDescription *d descriptor, is read;

if the descriptor recommends a sample rate, it is forced onto the data object;

finally data is attached to the MSFilter, and the filter is returned.

 

Let's look at the initialization of the data object:

msandroid_sound_read_data(),

class msandroid_sound_read_data : public msandroid_sound_data{
public:
   msandroid_sound_read_data() : audio_record(0),audio_record_class(0),read_buff(0),read_chunk_size(0) {
      ms_bufferizer_init(&rb);
      aec=NULL;
   }
   ~msandroid_sound_read_data() {
      ms_bufferizer_uninit (&rb);
   }
   jobject          audio_record;
   jclass           audio_record_class;
   jbyteArray    read_buff;
   MSBufferizer      rb;
   int          read_chunk_size;
   int framesize;
   int outgran_ms;
   int min_avail;
   int64_t start_time;
   int64_t read_samples;
   MSTickerSynchronizer *ticker_synchronizer;
   jobject aec;
   bool builtin_aec;
};

As can be seen, the constructor mainly just initializes the MSBufferizer member rb (a rough usage sketch follows below).
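
For context on the rb member: MSBufferizer is mediastreamer2's byte FIFO for mblk_t chunks. Below is a hedged sketch of the typical producer/consumer pattern; it is not the actual androidsound_depr.cpp code, and the real implementation additionally synchronizes the two threads and updates the ticker synchronizer.

/* Sketch of how the capture thread and the filter's process() typically share the
 * MSBufferizer rb (simplified, no locking shown; not the actual msandroid code). */
static void push_captured_pcm_sketch(msandroid_sound_read_data *d, const uint8_t *pcm, int len){
   mblk_t *m = allocb(len, 0);      /* allocate an mblk_t of 'len' bytes */
   memcpy(m->b_wptr, pcm, len);
   m->b_wptr += len;
   ms_bufferizer_put(&d->rb, m);    /* enqueue the captured samples */
}

static void pull_in_process_sketch(MSFilter *f){
   msandroid_sound_read_data *d = (msandroid_sound_read_data *)f->data;
   int chunk = d->framesize * 2;    /* framesize samples of 16-bit PCM */
   while ((int)ms_bufferizer_get_avail(&d->rb) >= chunk){
      mblk_t *om = allocb(chunk, 0);
      ms_bufferizer_read(&d->rb, om->b_wptr, chunk);
      om->b_wptr += chunk;
      ms_queue_put(f->outputs[0], om);  /* forward one ~20 ms chunk to the next filter */
   }
}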

For android_native_snd_card_desc, defined in androidsound.cpp, desc->create_reader() is ultimately implemented by

android_snd_card_create_reader(), line 208:

static MSFilter *android_snd_card_create_reader(MSSndCard *card){
   MSFilter *f=ms_android_snd_read_new(ms_snd_card_get_factory(card));
   (static_cast<AndroidSndReadData*>(f->data))->setCard(card);
   return f;
}

It in turn calls ms_android_snd_read_new() to create the MSFilter* object, using ms_snd_card_get_factory() to obtain card->sndcardmanager->factory:

static MSFilter * ms_android_snd_read_new(MSFactory *factory){
   MSFilter *f=ms_factory_create_filter_from_desc(factory, &android_snd_read_desc);
   return f;
}

So for this sound card, create_reader() only creates the corresponding MSFilter* and stores the MSSndCard object into the filter's data.

 

For android_native_snd_opensles_card_desc, defined in androidsound_opensles.cpp, line 660:

static MSFilter *android_snd_card_create_reader(MSSndCard *card) {
   MSFilter *f = ms_android_snd_read_new(ms_snd_card_get_factory(card));
   OpenSLESInputContext *ictx = static_cast<OpenSLESInputContext*>(f->data);
   ictx->setContext((OpenSLESContext*)card->data);
   return f;
}

Similarly, the card's factory is used to create the MSFilter for this desc, and card->data (the OpenSLESContext) is set into f->data.

 

Back to the last step of audio_stream_start_from_io:

 

ms_ticker_attach_multiple(), msticker.c, line 139;

this function was analysed above: it assigns f->ticker and runs the preprocess function of every MSFilter in the stream's graph.

 

For the three different sound cards, each one's own preprocess function pointer is called:

 

For msandroid_sound_card_desc, it is sound_read_preprocess(), line 471:

static void sound_read_preprocess(MSFilter *f){
   msandroid_sound_read_data *d=(msandroid_sound_read_data*)f->data;
   ms_message("sound_read_preprocess");
   if (!d->started)
      sound_read_setup(f);
   ms_ticker_set_time_func(f->ticker,(uint64_t (*)(void*))ms_ticker_synchronizer_get_corrected_time, d->ticker_synchronizer);

   if (d->builtin_aec && d->audio_record) {
      JNIEnv *env=ms_get_jni_env();
      jmethodID getsession_id=0;
      int sessionId=-1;
      getsession_id = env->GetMethodID(d->audio_record_class,"getAudioSessionId", "()I");
      if(getsession_id==0) {
         ms_error("cannot find AudioRecord.getAudioSessionId() method");
         return;
      }
      sessionId = env->CallIntMethod(d->audio_record,getsession_id);
      ms_message("AudioRecord.getAudioSessionId() returned %i", sessionId);
      if (sessionId==-1) {
         return;
      }
      d->aec = enable_hardware_echo_canceller(env, sessionId);
   }
}

Since the msandroid_sound_read_data object has just been created (d->started is still false),

sound_read_setup() is entered here to start the recorder.

 

sound_read_setup(), line 372:

static void sound_read_setup(MSFilter *f){
   ms_message("sound_read_setup");
   msandroid_sound_read_data *d=(msandroid_sound_read_data*)f->data;
   jmethodID constructor_id=0, methodID = 0;
   int audio_record_state=0;
   jmethodID min_buff_size_id;
   //jmethodID set_notification_period;
   int rc;

   JNIEnv *jni_env = ms_get_jni_env();
   d->audio_record_class = (jclass)jni_env->NewGlobalRef(jni_env->FindClass("android/media/AudioRecord"));
// d->audio_record_class = (jclass)jni_env->NewGlobalRef(jni_env->FindClass("org/linphone/tools/LinphoneRecord"));
   if (d->audio_record_class == 0) {
      ms_error("cannot find android/media/AudioRecord");
      return;
   }

   constructor_id = jni_env->GetMethodID(d->audio_record_class,"<init>", "(IIIII)V");
   if (constructor_id == 0) {
      ms_error("cannot find AudioRecord (int audioSource, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes)");
      return;
   }
   min_buff_size_id = jni_env->GetStaticMethodID(d->audio_record_class,"getMinBufferSize", "(III)I");
   if (min_buff_size_id == 0) {
      ms_error("cannot find AudioRecord.getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat)");
      return;
   }
   d->buff_size = jni_env->CallStaticIntMethod(d->audio_record_class,min_buff_size_id,d->rate,2/*CHANNEL_CONFIGURATION_MONO*/,2/*  ENCODING_PCM_16BIT */);
   d->read_chunk_size = d->buff_size/4;
   d->buff_size*=2;/*double the size for configuring the recorder: this does not affect latency but prevents "AudioRecordThread: buffer overflow"*/

   if (d->buff_size > 0) {
      ms_message("Configuring recorder with [%i] bits  rate [%i] nchanels [%i] buff size [%i], chunk size [%i]"
            ,d->bits
            ,d->rate
            ,d->nchannels
            ,d->buff_size
            ,d->read_chunk_size);
   } else {
      ms_message("Cannot configure recorder with [%i] bits  rate [%i] nchanels [%i] buff size [%i] chunk size [%i]"
            ,d->bits
            ,d->rate
            ,d->nchannels
            ,d->buff_size
            ,d->read_chunk_size);
      return;
   }

   d->read_buff = jni_env->NewByteArray(d->buff_size);
   d->read_buff = (jbyteArray)jni_env->NewGlobalRef(d->read_buff);
   if (d->read_buff == 0) {
      ms_error("cannot instanciate read buff");
      return;
   }

   d->audio_record =  jni_env->NewObject(d->audio_record_class
         ,constructor_id
         ,sdk_version<11?1/*MIC*/:7/*VOICE_COMMUNICATION*/
         ,d->rate
         ,2/*CHANNEL_CONFIGURATION_MONO*/
         ,2/*  ENCODING_PCM_16BIT */
         ,d->buff_size);

   //Check the state of the AudioRecord (uninitialized = 1
   methodID = jni_env->GetMethodID(d->audio_record_class,"getState", "()I");
   if (methodID == 0) {
      ms_error("cannot find AudioRecord getState() method");
      return;
   }
   audio_record_state = jni_env->CallIntMethod(d->audio_record, methodID);

   if(audio_record_state == 1) {
      d->audio_record = jni_env->NewGlobalRef(d->audio_record);
      if (d->audio_record == 0) {
         ms_error("cannot instantiate AudioRecord");
         return;
      }
   } else {
      d->audio_record = NULL;
      ms_error("AudioRecord is not initialized properly. It may be caused by RECORD_AUDIO permission not granted");
   }

   d->min_avail=-1;
   d->read_samples=0;
   d->ticker_synchronizer = ms_ticker_synchronizer_new();
   d->outgran_ms=20;
   d->start_time=-1;
   d->framesize=(d->outgran_ms*d->rate)/1000;
   d->started=true;
   // start reader thread
   if(d->audio_record) {
      rc = ms_thread_create(&d->thread_id, 0, (void*(*)(void*))msandroid_read_cb, d);
      if (rc){
         ms_error("cannot create read thread return code  is [%i]", rc);
         d->started=false;
        }
   }
}

First, the data object *d is again retrieved from the MSFilter;

a class reference to the Java recorder android/media/AudioRecord is created and assigned to d->audio_record_class;

the constructor ID of AudioRecord is obtained,

then the minimum buffer size supported by AudioRecord is queried and assigned to d->buff_size;

an AudioRecord object is instantiated through the constructor ID and stored in d->audio_record;

finally a thread is created with its entry routine specified: ms_thread_create(&d->thread_id, 0, msandroid_read_cb, d);

Note that msandroid_read_cb is not an audio-data callback: it is the entry function that the newly created reader thread executes.

 

As the logs show, once the thread is running it executes msandroid_read_cb() ——line327

static void* msandroid_read_cb(msandroid_sound_read_data* d) {
    ms_message("msandroid_read_cb");
   mblk_t *m;
   int nread;
   jmethodID read_id=0;
   jmethodID record_id=0;

   set_high_prio();

   JNIEnv *jni_env = ms_get_jni_env();
   record_id = jni_env->GetMethodID(d->audio_record_class,"startRecording", "()V");
   if(record_id==0) {
      ms_error("cannot find AudioRecord.startRecording() method");
      goto end;
   }
   //start recording
   ms_message("Start recording");
   jni_env->CallVoidMethod(d->audio_record,record_id);

   // int read (byte[] audioData, int offsetInBytes, int sizeInBytes)
   read_id = jni_env->GetMethodID(d->audio_record_class,"read", "([BII)I");
   if(read_id==0) {
      ms_error("cannot find AudioRecord.read() method");
      goto end;
   }

   while (d->started && (nread=jni_env->CallIntMethod(d->audio_record,read_id,d->read_buff,0, d->read_chunk_size))>0) {
      m = allocb(nread,0);
      jni_env->GetByteArrayRegion(d->read_buff, 0,nread, (jbyte*)m->b_wptr);
      //ms_error("%i octets read",nread);
      m->b_wptr += nread;
      d->read_samples+=nread/(2*d->nchannels);
      compute_timespec(d);
      ms_mutex_lock(&d->mutex);
      ms_bufferizer_put (&d->rb,m);
      ms_mutex_unlock(&d->mutex);
   };

   goto end;
   end: {
      ms_thread_exit(NULL);
      return 0;
   }
}

Using the d->audio_record_class set up in the previous step, the startRecording() method is looked up via JNI and invoked to start the AudioRecord;

the read() method of AudioRecord is obtained the same way;

a while loop then keeps fetching audio data through read() into d->read_buff;

inside the loop, ms_bufferizer_put() stores each chunk that was read into d->rb;

so once the msandroid_read_cb routine is running, a while loop continuously read()s audio data from the AudioRecord and writes it into the d->rb buffer;

later, the function registered as process reads the data back out of d->rb and pushes it to f->outputs[0] for sending.

For android_native_snd_card_desc, the preprocess pointer corresponds to android_snd_read_preprocess(); ——line 401;

static void android_snd_read_preprocess(MSFilter *obj){
   AndroidSndReadData *ad=(AndroidSndReadData*)obj->data;
   status_t  ss;
   int notify_frames=(int)(audio_buf_ms*(float)ad->rate);
   
   ad->mCard->enableVoipMode();
   
   ad->mFilter=obj;
   ad->read_samples=0;
   ad->started=FALSE;
   ad->audio_source=ad->mCard->mCaptureSource; /*some device require to capture from MIC instead of from voice communications*/
   for(int i=0;i<2;i++){
      ad->rec=new AudioRecord(ad->audio_source,
                  ad->rate,
                  AUDIO_FORMAT_PCM_16_BIT,
                  audio_channel_in_mask_from_count(ad->nchannels),
                  ad->rec_buf_size,
                  android_snd_read_cb,ad,notify_frames,0,AudioRecord::TRANSFER_DEFAULT, 
                  (ad->mCard->mFlags & DEVICE_HAS_CRAPPY_ANDROID_FASTRECORD) ? AUDIO_INPUT_FLAG_NONE : AUDIO_INPUT_FLAG_FAST);
      ss=ad->rec->initCheck();
      ms_message("Setting up AudioRecord  source=%i,rate=%i,framecount=%i",ad->audio_source,ad->rate,ad->rec_buf_size);

      if (ss!=0){
         ms_error("Problem when setting up AudioRecord:%s ",strerror(-ss));
         ad->rec=0;
         if (i == 0) {
            ms_error("Retrying with AUDIO_SOURCE_MIC");
            ad->audio_source=AUDIO_SOURCE_MIC;
         }
      }else break;
   }

   if (ad->rec != 0) {
      if (ad->builtin_aec) android_snd_read_activate_hardware_aec(obj);
      ad->rec->start();
   }
}

First, the AndroidSndReadData* object ad is obtained from the MSFilter; it can be seen as the manager for the recorder built on top of libmedia.so.

Then an AudioRecord is constructed. This AudioRecord is not the Java-level android.media.AudioRecord, but an interface class inside the mediastreamer2 library that closely mirrors the AudioRecord of libmedia.so;

it can be understood as a wrapper around the latter.

During construction, the callback android_snd_read_cb is passed in ——line348

Once constructed, the object is stored in ad->rec,

and ad->rec->start() is called to start recording.

From then on, the AudioRecord triggers android_snd_read_cb() to deliver the recorded data back.

 

For android_native_snd_opensles_card_desc, located in androidsound_opensles.cpp,

the actual preprocess implementation is android_snd_read_preprocess() ——line504;

static void android_snd_read_preprocess(MSFilter *obj) {
   OpenSLESInputContext *ictx = (OpenSLESInputContext*) obj->data;
   ictx->mFilter = obj;
   ictx->read_samples = 0;

   if (SL_RESULT_SUCCESS != opensles_recorder_init(ictx)) {
       ms_error("Problem when initialization of opensles recorder");
       return;
   }
   if (SL_RESULT_SUCCESS != opensles_recorder_callback_init(ictx)) {
       ms_error("Problem when initialization of opensles recorder callback");
       return;
   }

   if (ictx->opensles_context->builtin_aec) {
      //android_snd_read_activate_hardware_aec(obj);
   }
}

The OpenSL ES recording flow differs considerably from AudioRecord.

First, the recorder object OpenSLESInputContext *ictx is obtained;

opensles_recorder_init(ictx) performs the initialization;

opensles_recorder_callback_init(ictx) initializes the callback, registering opensles_recorder_callback with the OpenSL ES instance.

The implementations of these two functions follow the OpenSL ES API closely; it is worth reading some OpenSL ES material to understand them. A minimal sketch of the standard recorder setup is shown below.
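The following sketch is not the linphone implementation; the buffer count, sample rate, buffer sizes and empty callback body are illustrative assumptions. It only shows the canonical OpenSL ES steps: create the recorder object, realize it, fetch the record and buffer-queue interfaces, register the callback, prime the queue and start recording.

#include <stdint.h>
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>

/* assumed to have been created beforehand via slCreateEngine()/Realize()/GetInterface() */
static SLEngineItf engineEngine;

static SLObjectItf recorderObject;
static SLRecordItf recorderRecord;
static SLAndroidSimpleBufferQueueItf recorderBufferQueue;

/* two rotating 20 ms buffers at 16 kHz mono, purely illustrative sizes */
static int16_t recBuf[2][320];

/* called by OpenSL ES each time a buffer has been filled; a real
 * implementation copies the data out and re-enqueues the buffer,
 * as opensles_recorder_callback() does in linphone */
static void recorder_cb(SLAndroidSimpleBufferQueueItf bq, void *context) {
   (void)bq; (void)context;
}

static SLresult sketch_recorder_init(void) {
   SLDataLocator_IODevice loc_dev = { SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT,
                                      SL_DEFAULTDEVICEID_AUDIOINPUT, NULL };
   SLDataSource audioSrc = { &loc_dev, NULL };

   SLDataLocator_AndroidSimpleBufferQueue loc_bq =
       { SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2 };
   SLDataFormat_PCM format_pcm = { SL_DATAFORMAT_PCM, 1, SL_SAMPLINGRATE_16,
                                   SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
                                   SL_SPEAKER_FRONT_CENTER, SL_BYTEORDER_LITTLEENDIAN };
   SLDataSink audioSnk = { &loc_bq, &format_pcm };

   const SLInterfaceID ids[1] = { SL_IID_ANDROIDSIMPLEBUFFERQUEUE };
   const SLboolean req[1] = { SL_BOOLEAN_TRUE };
   SLresult r;

   /* create and realize the recorder object (requires RECORD_AUDIO permission) */
   r = (*engineEngine)->CreateAudioRecorder(engineEngine, &recorderObject,
                                            &audioSrc, &audioSnk, 1, ids, req);
   if (r != SL_RESULT_SUCCESS) return r;
   r = (*recorderObject)->Realize(recorderObject, SL_BOOLEAN_FALSE);
   if (r != SL_RESULT_SUCCESS) return r;

   /* fetch the record and buffer-queue interfaces */
   (*recorderObject)->GetInterface(recorderObject, SL_IID_RECORD, &recorderRecord);
   (*recorderObject)->GetInterface(recorderObject, SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
                                   &recorderBufferQueue);

   /* register the data callback, prime the queue and start recording */
   (*recorderBufferQueue)->RegisterCallback(recorderBufferQueue, recorder_cb, NULL);
   (*recorderBufferQueue)->Enqueue(recorderBufferQueue, recBuf[0], sizeof(recBuf[0]));
   (*recorderBufferQueue)->Enqueue(recorderBufferQueue, recBuf[1], sizeof(recBuf[1]));
   return (*recorderRecord)->SetRecordState(recorderRecord, SL_RECORDSTATE_RECORDING);
}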

 

This completes the startup of the audio recorder.


Capturing the recorded data

What remains is how the captured buffer data is retrieved, which is tied to the ticker.

When the call was first initiated, ms_ticker_start created a thread that runs ms_ticker_run();

ms_ticker_run contains a while loop that keeps running for the whole session;

inside it, run_graphs() is executed ——line271

run_graphs in turn calls run_graph(); ——line247

static void run_graph(MSFilter *f, MSTicker *s, bctbx_list_t **unschedulable, bool_t force_schedule){
   // ms_message("run_graph %s ",f->desc->name);
   int i;
   MSQueue *l;
   if (f->last_tick!=s->ticks ){
      if (filter_can_process(f,s->ticks) || force_schedule) {
         /* this is a candidate */
         f->last_tick=s->ticks;
         call_process(f);
         /* now recurse to next filters */
         for(i=0;i<f->desc->noutputs;i++){
          //   ms_message("run_graph for %i count = %i",i ,f->desc->noutputs);
            l=f->outputs[i];
            if (l!=NULL){
               run_graph(l->next.filter,s,unschedulable, force_schedule);
            }
         }
      }else{
         /* this filter has not all inputs that have been filled by filters before it. */
         *unschedulable=bctbx_list_prepend(*unschedulable,f);
      }
   }
}

It checks whether the filter can be processed in this tick (filter_can_process); if so, call_process(f) is invoked; ——line230;

static void call_process(MSFilter *f){
    ms_message("call_process %s",f->desc->name);
   bool_t process_done=FALSE;
   if (f->desc->ninputs==0 || f->desc->flags & MS_FILTER_IS_PUMP){
      ms_filter_process(f);
   }else{
      while (ms_filter_inputs_have_data(f)) {
         if (process_done){
            ms_warning("Re-scheduling filter %s: all data should be consumed in one process call, so fix it.",f->desc->name);
         }
         ms_filter_process(f);
         if (f->postponed_task) break;
         process_done=TRUE;
      }
   }
}

ms_filter_process(f) is then called, msfilter.c ——line200;

void ms_filter_process(MSFilter *f){
   MSTimeSpec start,stop;
   ms_debug("Executing process of filter %s:%p",f->desc->name,f);
   if (f->stats)
      ms_get_cur_time(&start);
   f->desc->process(f);
   if (f->stats){
      ms_get_cur_time(&stop);
      f->stats->count++;
      f->stats->elapsed+=(stop.tv_sec-start.tv_sec)*1000000000LL + (stop.tv_nsec-start.tv_nsec);
   }
}

which finally calls the MSFilter->desc->process function pointer.

 

Back to the sound-card models:

For msandroid_sound_card_desc:

the actual process implementation is sound_read_process() ——line544

static void sound_read_process(MSFilter *f){
   ms_message("sound_read_process");
   msandroid_sound_read_data *d=(msandroid_sound_read_data*)f->data;
   int nbytes=d->framesize*d->nchannels*2;
   int avail;
   bool_t flush=FALSE;
   bool_t can_output=(d->start_time==-1 || ((f->ticker->time-d->start_time)%d->outgran_ms==0));

   ms_mutex_lock(&d->mutex);
   if (!d->started) {
      ms_mutex_unlock(&d->mutex);
      return;
   }
   avail=ms_bufferizer_get_avail(&d->rb);
   if (f->ticker->time % 5000==0){
      if (d->min_avail>=(sndread_flush_threshold*(float)d->rate*2.0*(float)d->nchannels)){
         int excess_ms=(d->min_avail*1000)/(d->rate*2*d->nchannels);
         ms_warning("Excess of audio samples in capture side bytes=%i (%i ms)",d->min_avail,excess_ms);
         can_output=TRUE;
         flush=TRUE;
      }
      d->min_avail=-1;
   }
   do{
      if (can_output && (avail>=nbytes*2)){//bytes*2 is to insure smooth output, we leave at least one packet in the buffer for next time*/
         mblk_t *om=allocb(nbytes,0);
         ms_bufferizer_read(&d->rb,om->b_wptr,nbytes);
         om->b_wptr+=nbytes;
         ms_queue_put(f->outputs[0],om);
         //ms_message("Out time=%llu ",f->ticker->time);
         if (d->start_time==-1) d->start_time=f->ticker->time;
         avail-=nbytes;
      }else break;
   }while(flush);
   ms_mutex_unlock(&d->mutex);
   if (d->min_avail==-1 || avail<d->min_avail) d->min_avail=avail;
}

The key part is the do-while loop:

ms_bufferizer_read(&d->rb, om->b_wptr, nbytes) reads audio data out of d->rb into om;

then ms_queue_put(f->outputs[0], om) writes om into f->outputs[0], i.e. hands it to the next filter in the graph, from where it is eventually sent out over RTP.
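To make the hand-off concrete, a downstream filter drains that same queue in its own process callback. The sketch below is a hypothetical consumer, not linphone code; only ms_queue_get() and freemsg() are the real mediastreamer2/oRTP calls:

#include <mediastreamer2/msfilter.h>   /* MSFilter, mblk_t, ms_queue_get(), freemsg() */

/* whatever the reader filter ms_queue_put() on its outputs[0] shows up
 * on this filter's inputs[0] at the next tick of the same MSTicker */
static void downstream_process(MSFilter *f){
   mblk_t *m;
   while ((m = ms_queue_get(f->inputs[0])) != NULL){
      /* a real filter would encode/resample the PCM block and forward it;
       * here we simply release it */
      freemsg(m);
   }
}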

 

For android_native_snd_card_desc, the actual process implementation is android_snd_read_process;

since the AudioRecord has already been started before process runs, the callback fires periodically to deliver the recorded data:

static void android_snd_read_cb(int event, void* user, void *p_info){
   AndroidSndReadData *ad=(AndroidSndReadData*)user;
   
   if (!ad->started) return;
   if (ad->mTickerSynchronizer==NULL){
      MSFilter *obj=ad->mFilter;
      /*
       * ABSOLUTE HORRIBLE HACK. We temporarily disable logs to prevent ms_ticker_set_time_func() to output a debug log.
       * This is horrible because this also suspends logs for all concurrent threads during these two lines of code.
       * Possible way to do better:
       *  1) understand why AudioRecord thread doesn't detach.
       *  2) disable logs just for this thread (using a TLS)
       */
      int loglevel=ortp_get_log_level_mask(ORTP_LOG_DOMAIN);
      ortp_set_log_level_mask(NULL, ORTP_ERROR|ORTP_FATAL);
      ad->mTickerSynchronizer = ms_ticker_synchronizer_new();
      ms_ticker_set_time_func(obj->ticker,(uint64_t (*)(void*))ms_ticker_synchronizer_get_corrected_time, ad->mTickerSynchronizer);
      ortp_set_log_level_mask(ORTP_LOG_DOMAIN, loglevel);
   }
   if (event==AudioRecord::EVENT_MORE_DATA){
      AudioRecord::Buffer info;
      AudioRecord::readBuffer(p_info,&info);
      if (info.size > 0) {
         mblk_t *m=allocb(info.size,0);
         memcpy(m->b_wptr,info.raw,info.size);
         m->b_wptr+=info.size;
         ad->read_samples+=info.frameCount;

         ms_mutex_lock(&ad->mutex);
         compute_timespec(ad);
         putq(&ad->q,m);
         ms_mutex_unlock(&ad->mutex);
      }
   }else if (event==AudioRecord::EVENT_OVERRUN){
#ifdef TRACE_SND_READ_TIMINGS
      ms_warning("AudioRecord overrun");
#endif
   }
}

When recorded data is delivered, the event argument is EVENT_MORE_DATA and p_info carries the audio data;

an AudioRecord::Buffer named info is declared as a staging buffer, and readBuffer() copies the data from p_info into it;

if the recorded data is non-empty, it is copied into an mblk_t* object,

and putq(&ad->q, m) finally appends m to the queue.

linphone then calls android_snd_read_process() periodically through the MSTicker:

static void android_snd_read_process(MSFilter *obj){
   AndroidSndReadData *ad=(AndroidSndReadData*)obj->data;
   mblk_t *om;
   
   ms_mutex_lock(&ad->mutex);
   if (ad->rec == 0 ) {
      ms_mutex_unlock(&ad->mutex);
      return;
   }
   if (!ad->started)
      ad->started=TRUE; //so that the callback can now start to queue buffers.

   while ((om=getq(&ad->q))!=NULL) {
      //ms_message("android_snd_read_process: Outputing %i bytes",msgdsize(om));
      ms_queue_put(obj->outputs[0],om);
      ad->nbufs++;
      if (ad->nbufs % 100 == 0)
         ms_message("sound/wall clock skew is average=%g ms", ad->av_skew);
   }
   ms_mutex_unlock(&ad->mutex);
}

At this point, getq() reads the recorded data out of ad->q;

ms_queue_put() then writes it into the configured output, from where it is eventually handed off for transmission.

 

For android_native_snd_opensles_card_desc, the actual process implementation is android_snd_read_process() ——line523

Here too, recorded data is first collected through a callback before process runs; the callback is opensles_recorder_callback() ——line416;

static void opensles_recorder_callback(SLAndroidSimpleBufferQueueItf bq, void *context) {
   SLresult result;
   OpenSLESInputContext *ictx = (OpenSLESInputContext *)context;

   if (ictx->mTickerSynchronizer == NULL) {
      MSFilter *obj = ictx->mFilter;
      /*
       * ABSOLUTE HORRIBLE HACK. We temporarily disable logs to prevent ms_ticker_set_time_func() to output a debug log.
       * This is horrible because this also suspends logs for all concurrent threads during these two lines of code.
       * Possible way to do better:
       *  1) understand why AudioRecord thread doesn't detach.
       *  2) disable logs just for this thread (using a TLS)
       */
      int loglevel=ortp_get_log_level_mask(ORTP_LOG_DOMAIN);
      ortp_set_log_level_mask(ORTP_LOG_DOMAIN, ORTP_ERROR|ORTP_FATAL);
      ictx->mTickerSynchronizer = ms_ticker_synchronizer_new();
      ms_ticker_set_time_func(obj->ticker,(uint64_t (*)(void*))ms_ticker_synchronizer_get_corrected_time, ictx->mTickerSynchronizer);
      ortp_set_log_level_mask(ORTP_LOG_DOMAIN, loglevel);
   }
   ictx->read_samples += ictx->inBufSize / sizeof(int16_t);

   mblk_t *m = allocb(ictx->inBufSize, 0);
   memcpy(m->b_wptr, ictx->recBuffer[ictx->currentBuffer], ictx->inBufSize);
   m->b_wptr += ictx->inBufSize;

   ms_mutex_lock(&ictx->mutex);
   compute_timespec(ictx);
   putq(&ictx->q, m);
   ms_mutex_unlock(&ictx->mutex);

   result = (*ictx->recorderBufferQueue)->Enqueue(ictx->recorderBufferQueue, ictx->recBuffer[ictx->currentBuffer], ictx->inBufSize);
   if (result != SL_RESULT_SUCCESS) {
      /*ms_error("OpenSLES Error %u while enqueueing record buffer", result);*/
   }
   ictx->currentBuffer = ictx->currentBuffer == 1 ? 0 : 1;
}

The recorded data is carried by the context argument, which is cast to the OpenSLESInputContext object ictx; memcpy copies the current ictx->recBuffer into an mblk_t *m;

putq(&ictx->q, m) then stores the recorded data in the ictx->q queue;

from where process fetches and sends it:

static void android_snd_read_process(MSFilter *obj) {
   OpenSLESInputContext *ictx = (OpenSLESInputContext*) obj->data;
   mblk_t *m;

   if (obj->ticker->time % 1000 == 0) {
       if (ictx->recorderBufferQueue == NULL) {
           ms_message("Trying to init opensles recorder on process");
           if (SL_RESULT_SUCCESS != opensles_recorder_init(ictx)) {
                ms_error("Problem when initialization of opensles recorder");
            } else if (SL_RESULT_SUCCESS != opensles_recorder_callback_init(ictx)) {
               ms_error("Problem when initialization of opensles recorder callback");
            }
       }
   }

   ms_mutex_lock(&ictx->mutex);
   while ((m = getq(&ictx->q)) != NULL) {
      ms_queue_put(obj->outputs[0], m);
   }
   ms_mutex_unlock(&ictx->mutex);
   if (obj->ticker->time % 5000 == 0)
         ms_message("sound/wall clock skew is average=%g ms", ictx->mAvSkew);
}

getq() retrieves the recorded data buffered in ictx->q, and ms_queue_put() writes it to the output.

 

This concludes the analysis of the recording capture flow for the three different sound cards.

 

During the call

When the remote party answers, the response is again received in call_process_response() and then handled by handle_sdp_from_response();

linphone_core_update_streams then updates the streams, while the recording callbacks described above keep running unaffected.

 

Ending the call

Finally, if our side hangs up, the call_terminated callback in callbacks.c is invoked;

call_terminated(), callbacks.c ——line799;

static void call_terminated(SalOp *op, const char *from){
   LinphoneCore *lc=(LinphoneCore *)sal_get_user_pointer(sal_op_get_sal(op));
   LinphoneCall *call=(LinphoneCall*)sal_op_get_user_pointer(op);

   if (call==NULL) return;

   switch(linphone_call_get_state(call)){
      case LinphoneCallEnd:
      case LinphoneCallError:
         ms_warning("call_terminated: already terminated, ignoring.");
         return;
      break;
      case LinphoneCallIncomingReceived:
      case LinphoneCallIncomingEarlyMedia:
         sal_error_info_set(&call->non_op_error,SalReasonRequestTimeout,0,"Incoming call cancelled",NULL);
      break;
      default:
      break;
   }
   ms_message("Current call terminated...");
   if (call->refer_pending){
      linphone_core_start_refered_call(lc,call,NULL);
   }
   //we stop the call only if we have this current call or if we are in call
   if ((bctbx_list_size(lc->calls)  == 1) || linphone_core_in_call(lc)) {
      linphone_core_stop_ringing(lc);
   }
   linphone_call_stop_media_streams(call);
   linphone_core_notify_show_interface(lc);
   linphone_core_notify_display_status(lc,_("Call terminated."));

#ifdef BUILD_UPNP
   linphone_call_delete_upnp_session(call);
#endif //BUILD_UPNP

   linphone_call_set_state(call, LinphoneCallEnd,"Call ended");
}

The flow is fairly simple: if the call is still in the ringing phase, ringing is stopped with linphone_core_stop_ringing();

then the media devices are shut down: linphone_call_stop_media_streams(call);

and the state is finally updated to LinphoneCallEnd.
