- dispatch_queue_create
dispatch_queue_create
調用了_dispatch_lane_create_with_target
,繼續調用_dispatch_object_alloc
、_dispatch_queue_init
、_dispatch_trace_queue_create
,最終返回了dispatch_queue_t
。重要的代碼:
// Allocate the queue object (a dispatch_lane_s) using the class vtable.
dispatch_lane_t dq = _dispatch_object_alloc(vtable, sizeof(struct dispatch_lane_s));
// Initialize it: width is DISPATCH_QUEUE_WIDTH_MAX for concurrent queues,
// 1 for serial queues; the inactive flag is folded into the initial state bits.
_dispatch_queue_init(dq, dqf, dqai.dqai_concurrent ? DISPATCH_QUEUE_WIDTH_MAX : 1,DISPATCH_QUEUE_ROLE_INNER | (dqai.dqai_inactive ? DISPATCH_QUEUE_INACTIVE : 0));
這其中的dqai.dqai_concurrent
看着跟隊列的串行併發有關係。追根溯源,一方面我們搞清楚DISPATCH_QUEUE_CONCURRENT
是什麼:
// Serial queue attribute: DISPATCH_QUEUE_SERIAL is simply NULL.
#define DISPATCH_QUEUE_SERIAL NULL
// Concurrent queue attribute: built with the DISPATCH_GLOBAL_OBJECT macro.
#define DISPATCH_QUEUE_CONCURRENT DISPATCH_GLOBAL_OBJECT(dispatch_queue_attr_t, _dispatch_queue_attr_concurrent)
// DISPATCH_GLOBAL_OBJECT: takes the address of `object` and bridges it to `type`.
#define DISPATCH_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object))
也就是我們使用的DISPATCH_QUEUE_CONCURRENT
宏定義,實際上是struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent;
結構體。
再者,我們探究dqai創建的函數dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa);
:
// Decode a dispatch_queue_attr_t into a dispatch_queue_attr_info_t bitfield.
// Abridged excerpt: the full function also decodes QoS, overcommit, etc.
dispatch_queue_attr_info_t _dispatch_queue_attr_to_info(dispatch_queue_attr_t dqa){
dispatch_queue_attr_info_t dqai = { };
// NULL attribute (DISPATCH_QUEUE_SERIAL) -> zeroed info, i.e. a serial queue.
if (!dqa) return dqai;
// DISPATCH_QUEUE_CONCURRENT -> mark the info as concurrent.
if (dqa == &_dispatch_queue_attr_concurrent) {
dqai.dqai_concurrent = true;
return dqai;
}
... // remaining attribute decoding elided in this excerpt
return dqai;
}
如果是串行隊列,參數dqa
爲NULL,則直接返回默認的dqai
;而併發隊列的dqa== &_dispatch_queue_attr_concurrent
,則標記dqai.dqai_concurrent = true
。返回到上面的位置,調用_dispatch_queue_init
的時候,如果是併發隊列,第三個參數傳入的是DISPATCH_QUEUE_WIDTH_MAX,而
// Width of a concurrent queue: 0x1000 - 2 = 0xffe (4094).
// Two slots below FULL are reserved by libdispatch for internal use.
#define DISPATCH_QUEUE_WIDTH_MAX (DISPATCH_QUEUE_WIDTH_FULL - 2)
#define DISPATCH_QUEUE_WIDTH_FULL 0x1000ull
即DISPATCH_QUEUE_WIDTH_MAX
值爲0x1000 - 2 = 0xffe(十進制4094),串行隊列傳入的是1。繼續往下看:
// Common queue initialization: computes the initial dq_state for the given
// width and stores the width, flags, state, and serial number on the queue.
// width: 1 for serial queues, DISPATCH_QUEUE_WIDTH_MAX for concurrent ones.
static inline dispatch_queue_class_t _dispatch_queue_init(dispatch_queue_class_t dqu, dispatch_queue_flags_t dqf, uint16_t width, uint64_t initial_state_bits) {
uint64_t dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(width);
dispatch_queue_t dq = dqu._dq;
// Callers may only pass role bits and the inactive bit here.
dispatch_assert((initial_state_bits & ~(DISPATCH_QUEUE_ROLE_MASK |
DISPATCH_QUEUE_INACTIVE)) == 0);
if (initial_state_bits & DISPATCH_QUEUE_INACTIVE) {
dq->do_ref_cnt += 2; // rdar://8181908 see _dispatch_lane_resume
if (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE) {
dq->do_ref_cnt++; // released when DSF_DELETED is set
}
}
dq_state |= initial_state_bits;
dq->do_next = DISPATCH_OBJECT_LISTLESS;
// Encode the width into the queue's atomic flags; this is the value the
// article later inspects via dq_width.
dqf |= DQF_WIDTH(width);
os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed);
dq->dq_state = dq_state;
// Monotonically increasing serial number (see the
// DISPATCH_QUEUE_SERIAL_NUMBER_INIT comment quoted below).
dq->dq_serialnum = os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed);
return dqu;
}
最終串行隊列DQF_WIDTH(width)的width=1,而併發隊列width=0xffe(4094),這還需要經過一些運算保存到queue的width屬性上。
帶着好奇心,我們打印下串行隊列和併發隊列、以及常用的全局併發隊列和主隊列的信息看看:
dispatch_queue_t concurrent = dispatch_queue_create("", DISPATCH_QUEUE_CONCURRENT);
dispatch_queue_t global = dispatch_get_global_queue(0, 0);
dispatch_queue_t serial = dispatch_queue_create("", DISPATCH_QUEUE_SERIAL);
dispatch_queue_t main = dispatch_get_main_queue();
打印結果如下:
打印的結果顯示:自己創建的串行隊列和默認的主隊列width==1,而自己創建的併發隊列和全局隊列width則爲一個很大的數。
這裏的dq->dq_serialnum
又是什麼呢?
// skip zero
// 1 - main_q
// 2 - mgr_q
// 3 - mgr_root_q
// 4,5,6,7,8,9,10,11,12,13,14,15 - global queues
// 17 - workloop_fallback_q
// we use 'xadd' on Intel, so the initial value == next assigned
#define DISPATCH_QUEUE_SERIAL_NUMBER_INIT 17
這裏有對隊列的dq_serialnum的註解,1爲主隊列,4-15爲全局隊列,這個可以到_dispatch_root_queues[]
中查詢到。
2.dispatch_sync
dispatch_sync
的基本流程,dispatch_sync內部對任務進行封裝_dispatch_Block_invoke(work)
,然後
調用_dispatch_sync_f
:
// Public entry point for dispatch_sync with a block: wraps the block as a
// plain function-pointer invocation and forwards to _dispatch_sync_f.
void dispatch_sync(dispatch_queue_t dq, dispatch_block_t work){
uintptr_t flags = DC_FLAG_BLOCK;
// Blocks carrying private data (created via dispatch_block_create and
// friends) take a dedicated path.
if (unlikely(_dispatch_block_has_private_data(work))) {
return _dispatch_sync_block_with_privdata(dq, work, flags);
}
// _dispatch_Block_invoke(work) wraps the block as a dispatch_function_t.
_dispatch_sync_f(dq, work, _dispatch_Block_invoke(work), flags);
}
// Thin non-inline shim: forwards straight to _dispatch_sync_f_inline.
static void _dispatch_sync_f(dispatch_queue_t dq, void *ctxt,dispatch_function_t func,
uintptr_t dc_flags){
_dispatch_sync_f_inline(dq, ctxt, func, dc_flags);
}
// Shared body of dispatch_sync / dispatch_sync_f: routes the submission
// according to the queue's width and target hierarchy.
static inline void _dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, uintptr_t dc_flags){
// Width 1 means a serial queue: a sync submission is equivalent to a
// barrier, so take the barrier path.
if (likely(dq->dq_width == 1)) {
return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags);
}
if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
}
dispatch_lane_t dl = upcast(dq)._dl;
// Global concurrent queues and queues bound to non-dispatch threads
// always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
return _dispatch_sync_f_slow(dl, ctxt, func, 0, dl, dc_flags);
}
// A non-trivial target hierarchy must be recursed through.
if (unlikely(dq->do_targetq->do_targetq)) {
return _dispatch_sync_recurse(dl, ctxt, func, dc_flags);
}
_dispatch_introspection_sync_begin(dl);
// Fast path: invoke func(ctxt) and release the reserved sync width.
_dispatch_sync_invoke_and_complete(dl, ctxt, func DISPATCH_TRACE_ARG(
_dispatch_trace_item_sync_push_pop(dq, ctxt, func, dc_flags)));
}
/未完待續/
3.dispatch_async
/未完待續/
4.dispatch_once
dispatch_once
常用於單例對象的初始化,而且是線程安全的,下面來看具體的函數調用:
// Public entry point for dispatch_once.
void dispatch_once(dispatch_once_t *val, dispatch_block_t block){
// Parameters: `val` is the caller's static dispatch_once_t token,
// `block` is the dispatch_block_t to run exactly once.
// Forward to dispatch_once_f; the third argument wraps the block as a
// plain dispatch_function_t.
dispatch_once_f(val, block, _dispatch_Block_invoke(block));
}
// Core implementation of dispatch_once.
void dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func){
// Reinterpret the caller's static token as a once-gate.
dispatch_once_gate_t l = (dispatch_once_gate_t)val;
#if !DISPATCH_ONCE_INLINE_FASTPATH || DISPATCH_ONCE_USE_QUIESCENT_COUNTER
// Atomically load the gate's current state.
uintptr_t v = os_atomic_load(&l->dgo_once, acquire);
// Fast path: the once-block has already run, return without invoking it.
if (likely(v == DLOCK_ONCE_DONE)) {
return;
}
#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER
// Quiescent-counter mode: v holds a generation count rather than a lock
// value; promote the gate to DLOCK_ONCE_DONE once the system has quiesced.
if (likely(DISPATCH_ONCE_IS_GEN(v))) {
// Internally does os_atomic_store(&dgo->dgo_once, DLOCK_ONCE_DONE, ...);
return _dispatch_once_mark_done_if_quiesced(l, v);
}
#endif
#endif
// Try to acquire the gate, i.e. compare-and-swap it from unlocked to this
// thread's lock value:
// os_atomic_cmpxchg(&l->dgo_once, DLOCK_ONCE_UNLOCKED, (uintptr_t)_dispatch_lock_value_for_self(), relaxed)
if (_dispatch_once_gate_tryenter(l)) {
// We won the race: run the block, then mark the gate done.
return _dispatch_once_callout(l, ctxt, func);
}
// Another thread is currently running the block: wait until it broadcasts
// completion (see _dispatch_once_wait below).
return _dispatch_once_wait(l);
}
這裏讀取任務狀態是通過os_atomic_load(&dgo->dgo_once, ...)
進行的,而標記任務狀態則是通過os_atomic_store(&dgo->dgo_once, DLOCK_ONCE_DONE, ...);
進行的,兩者相呼應。這裏標記爲DLOCK_ONCE_DONE
,即將dgo_once修改爲DLOCK_ONCE_DONE(注意其值並非1,而是~0ul)。
// Run the once-block, then mark the gate DLOCK_ONCE_DONE and wake waiters.
static void _dispatch_once_callout(dispatch_once_gate_t l, void *ctxt, dispatch_function_t func){
_dispatch_client_callout(ctxt, func);
_dispatch_once_gate_broadcast(l);
}
// Invoke the client's function — abridged excerpt (return type and the
// surrounding exception/autorelease handling are elided).
_dispatch_client_callout(void *ctxt, dispatch_function_t f){
...
// The actual invocation of the user's work.
f(ctxt);
...
}
// Mark the gate done and broadcast to any waiting threads.
static inline void _dispatch_once_gate_broadcast(dispatch_once_gate_t l) {
dispatch_lock value_self = _dispatch_lock_value_for_self();
uintptr_t v = _dispatch_once_mark_done(l); // stores DLOCK_ONCE_DONE
// If the previous value was just our own lock value, nobody was waiting.
if (likely((dispatch_lock)v == value_self)) return;
// Otherwise the waiters bit was set: wake them up.
_dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)v);
}
執行任務在_dispatch_client_callout
中通過f(ctxt);
觸發。 _dispatch_once_mark_done
中將任務的狀態標記爲DLOCK_ONCE_DONE
了。
那麼如果任務正在執行,這個無限次等待是怎麼實現的呢?
// Blocks callers of dispatch_once while another thread runs the once-block.
// Abridged excerpt: the actual thread-parking call after the loop is elided.
void _dispatch_once_wait(dispatch_once_gate_t dgo){
...
for (;;) {
os_atomic_rmw_loop(&dgo->dgo_once, old_v, new_v, relaxed, {
// The block finished while we looped: nothing left to wait for.
if (likely(old_v == DLOCK_ONCE_DONE)) {
os_atomic_rmw_loop_give_up(return);
}
// Quiescent-counter generation value: promote to done if quiesced.
if (DISPATCH_ONCE_IS_GEN(old_v)) {
os_atomic_rmw_loop_give_up({
os_atomic_thread_fence(acquire);
return _dispatch_once_mark_done_if_quiesced(dgo, old_v);
});
}
// Advertise that there is a waiter; the (elided) code below then
// parks this thread until the owner broadcasts completion.
new_v = old_v | (uintptr_t)DLOCK_WAITERS_BIT;
if (new_v == old_v) os_atomic_rmw_loop_give_up(break);
});
...
}
}
當正在執行的任務完成並發出廣播後,等待中的線程檢測到狀態已變爲DLOCK_ONCE_DONE,便立即返回。