Function code
bool SELECT_LEX_UNIT::ExecuteIteratorQuery(THD *thd) {
  THD_STAGE_INFO(thd, stage_executing);
  DEBUG_SYNC(thd, "before_join_exec");

  Opt_trace_context *const trace = &thd->opt_trace;
  Opt_trace_object trace_wrapper(trace);
  Opt_trace_object trace_exec(trace, "join_execution");
  if (is_simple()) {
    trace_exec.add_select_number(first_select()->select_number);
  }
  Opt_trace_array trace_steps(trace, "steps");

  if (ClearForExecution(thd)) {
    return true;
  }

  mem_root_deque<Item *> *fields = get_field_list();
  Query_result *query_result = this->query_result();
  DBUG_ASSERT(query_result != nullptr);

  if (query_result->start_execution(thd)) return true;

  if (query_result->send_result_set_metadata(
          thd, *fields, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) {
    return true;
  }

  set_executed();

  // Hand over the query to the secondary engine if needed.
  if (first_select()->join->override_executor_func != nullptr) {
    thd->current_found_rows = 0;
    for (SELECT_LEX *select = first_select(); select != nullptr;
         select = select->next_select()) {
      if (select->join->override_executor_func(select->join)) {
        return true;
      }
      thd->current_found_rows += select->join->send_records;
    }
    const bool calc_found_rows =
        (first_select()->active_options() & OPTION_FOUND_ROWS);
    if (!calc_found_rows) {
      // This is for backwards compatibility reasons only;
      // we have documented that without SQL_CALC_FOUND_ROWS,
      // we return the actual number of rows returned.
      thd->current_found_rows =
          std::min(thd->current_found_rows, select_limit_cnt);
    }
    return query_result->send_eof(thd);
  }

  if (item) {
    item->reset_value_registration();

    if (item->assigned()) {
      item->assigned(false);  // Prepare for re-execution of this unit
      item->reset();
    }
  }

  // We need to accumulate in the first join's send_records as long as
  // we support SQL_CALC_FOUND_ROWS, since LimitOffsetIterator will use it
  // for reporting rows skipped by OFFSET or LIMIT. When we get rid of
  // SQL_CALC_FOUND_ROWS, we can use a local variable here instead.
  ha_rows *send_records_ptr;
  if (fake_select_lex != nullptr) {
    // UNION with LIMIT: found_rows() applies to the outermost block.
    // LimitOffsetIterator will write skipped OFFSET rows into the
    // fake_select_lex's send_records, so use that.
    send_records_ptr = &fake_select_lex->join->send_records;
  } else if (is_simple()) {
    // Not an UNION: found_rows() applies to the join.
    // LimitOffsetIterator will write skipped OFFSET rows into the JOIN's
    // send_records, so use that.
    send_records_ptr = &first_select()->join->send_records;
  } else {
    // UNION, but without a fake_select_lex (may or may not have a
    // LIMIT): found_rows() applies to the outermost block. See
    // SELECT_LEX_UNIT::send_records for more information.
    send_records_ptr = &send_records;
  }
  *send_records_ptr = 0;

  thd->get_stmt_da()->reset_current_row_for_condition();

  {
    auto join_cleanup = create_scope_guard([this, thd] {
      for (SELECT_LEX *sl = first_select(); sl; sl = sl->next_select()) {
        JOIN *join = sl->join;
        join->join_free();
        thd->inc_examined_row_count(join->examined_rows);
      }
      if (fake_select_lex != nullptr) {
        thd->inc_examined_row_count(fake_select_lex->join->examined_rows);
      }
    });

    if (m_root_iterator->Init()) {
      return true;
    }

    PFSBatchMode pfs_batch_mode(m_root_iterator.get());

    for (;;) {
      int error = m_root_iterator->Read();
      DBUG_EXECUTE_IF("bug13822652_1", thd->killed = THD::KILL_QUERY;);

      if (error > 0 || thd->is_error())  // Fatal error
        return true;
      else if (error < 0)
        break;
      else if (thd->killed)  // Aborted by user
      {
        thd->send_kill_message();
        return true;
      }

      ++*send_records_ptr;

      if (query_result->send_data(thd, *fields)) {
        return true;
      }
      thd->get_stmt_da()->inc_current_row_for_condition();
    }

    // NOTE: join_cleanup must be done before we send EOF, so that we get the
    // row counts right.
  }

  thd->current_found_rows = *send_records_ptr;

  return query_result->send_eof(thd);
}
A brief walkthrough of the function
1. is_simple() checks whether the query expression contains a UNION or a multi-level ORDER BY/LIMIT. If it does not, the query expression is simple, and add_select_number records the select number of the first query block in the optimizer trace (a paraphrased sketch of the check follows below).
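For reference, the check behind is_simple() is roughly the following. This is a paraphrased sketch of sql_lex.h written from memory, not an exact copy of the MySQL 8.0 source:

// Paraphrased sketch: a query expression is "simple" when it is neither a
// UNION nor carries a fake_select_lex (the hidden query block created for a
// global ORDER BY / LIMIT on top of a UNION).
bool SELECT_LEX_UNIT::is_simple() const {
  return !(is_union() || fake_select_lex != nullptr);
}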
2. ClearForExecution() is called. Before the root iterator is initialized, it clears out state left behind by any previous execution of the iterators.
3. get_field_list() is called to obtain the field list of the query expression, with all fields stored in one deque, i.e. a mem_root_deque<Item *>. For a union of query blocks it returns the field list generated during preparation; for a single query block it returns that block's field list whenever possible.
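As a rough illustration only (it assumes the MySQL 8.0 internal headers and is not code taken from the server), the returned deque can be walked like any standard container:

// Sketch: consume the field list returned by get_field_list().
// mem_root_deque<Item *> supports range-based for loops.
mem_root_deque<Item *> *fields = get_field_list();
for (Item *field : *fields) {
  // Each element is one output column of the query expression.
  (void)field;  // placeholder for per-column processing
}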
4. start_execution() is called to prepare the query expression or DML query for execution.
5. The next few operations are related to the secondary engine; see https://www.h5w3.com/123061.html for background.
In short: a secondary engine means the MySQL server supports two storage engines at the same time. Part of the data held in the primary engine is also kept in the secondary engine, and at query time the optimizer decides which engine should process the data.
We will not go into this part here.
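Still, to make the dispatch mechanism concrete, here is a minimal, self-contained sketch of the override-executor hook pattern used above. ToyJoin, secondary_engine_execute and run_query_blocks are invented names for illustration, not MySQL's actual types:

#include <cstddef>
#include <iostream>
#include <vector>

// Toy stand-in for a per-query-block JOIN: it carries an optional
// function pointer that a "secondary engine" can install.
struct ToyJoin {
  bool (*override_executor_func)(ToyJoin *) = nullptr;  // null => primary engine
  std::size_t send_records = 0;                          // rows produced
};

// A pretend secondary-engine executor: it "executes" the block itself
// and reports how many rows it produced via send_records.
static bool secondary_engine_execute(ToyJoin *join) {
  join->send_records = 42;  // pretend result size
  return false;             // false => success, mirroring the MySQL convention
}

// Mirrors the shape of the loop in ExecuteIteratorQuery(): each block is
// handed to its override executor and the row counts are accumulated.
static bool run_query_blocks(std::vector<ToyJoin> &blocks,
                             std::size_t *found_rows) {
  *found_rows = 0;
  for (ToyJoin &join : blocks) {
    if (join.override_executor_func != nullptr &&
        join.override_executor_func(&join)) {
      return true;  // error
    }
    *found_rows += join.send_records;
  }
  return false;
}

int main() {
  std::vector<ToyJoin> blocks(2);
  for (ToyJoin &join : blocks) join.override_executor_func = secondary_engine_execute;

  std::size_t found_rows = 0;
  if (!run_query_blocks(blocks, &found_rows)) {
    std::cout << "found_rows = " << found_rows << "\n";  // prints 84
  }
  return 0;
}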
6. If this query expression is used as a subquery, item points to that subquery item. It is reset here: its value registration is cleared, and any previously assigned value is discarded so the unit can be re-executed.
7. Next, complex and simple statements are handled differently in order to assign send_records_ptr.
The comment in the function explains the situation as follows:
We need to accumulate in the first join's send_records as long as we support SQL_CALC_FOUND_ROWS, since LimitOffsetIterator will use it for reporting rows skipped by OFFSET or LIMIT. When we get rid of SQL_CALC_FOUND_ROWS, we can use a local variable here instead.
Case 1: the query expression has a UNION or a multi-level ORDER BY/LIMIT (fake_select_lex is present).
For UNION with LIMIT, found_rows() applies to the outermost block, and LimitOffsetIterator writes the rows skipped by OFFSET into fake_select_lex's send_records.
Case 2: a simple statement.
found_rows() applies directly to the join, and LimitOffsetIterator writes the rows skipped by OFFSET into the JOIN's send_records.
Case 3: a UNION without a fake_select_lex (it may or may not have a LIMIT).
found_rows() applies to the outermost block, so the unit's own send_records is used. A small sketch after this list shows why the location of the counter matters.
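To see why it matters where send_records_ptr points, here is a minimal, self-contained toy. ToySource and ToyLimitOffsetIterator are invented names; MySQL's real LimitOffsetIterator is more involved, but the key idea is the same: rows skipped by OFFSET are still added to a shared send_records counter, so found_rows() can report them later.

#include <cstddef>
#include <iostream>
#include <vector>

// Toy row source. Read() follows the convention used in this walkthrough:
// 0 = produced a row, -1 = end of data.
struct ToySource {
  std::vector<int> rows;
  std::size_t pos = 0;
  int current = 0;
  int Read() {
    if (pos == rows.size()) return -1;
    current = rows[pos++];
    return 0;
  }
};

// Toy LIMIT/OFFSET wrapper: rows skipped by OFFSET are still counted into
// *send_records, which is why ExecuteIteratorQuery() must point
// send_records_ptr at the counter that found_rows() will later read.
struct ToyLimitOffsetIterator {
  ToySource *source;
  std::size_t offset;
  std::size_t limit;
  std::size_t *send_records;  // shared counter, owned by the caller
  std::size_t seen = 0;

  int Read() {
    while (true) {
      int err = source->Read();
      if (err != 0) return err;
      ++seen;
      if (seen <= offset) {
        ++*send_records;  // skipped by OFFSET, but still counted
        continue;
      }
      if (seen > offset + limit) return -1;  // LIMIT reached
      return 0;  // row passed through to the client
    }
  }
};

int main() {
  ToySource source{{1, 2, 3, 4, 5}};
  std::size_t send_records = 0;  // plays the role of *send_records_ptr
  ToyLimitOffsetIterator it{&source, /*offset=*/2, /*limit=*/2, &send_records};

  while (it.Read() == 0) ++send_records;  // count the rows actually sent, too
  std::cout << "send_records = " << send_records << "\n";  // 2 skipped + 2 sent = 4
  return 0;
}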
8. Reset the counters: *send_records_ptr is set to 0 and the diagnostics area's current-row counter is reset.
9. Next, a scope guard is set up which, when the block is left, iterates over the query blocks and frees each one's memory (join_free) so as to increase concurrency and reduce memory consumption; it also adds each join's examined_rows to the thread's examined-row count. A generic sketch of the scope-guard idiom follows below.
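create_scope_guard is MySQL's RAII helper for deferred cleanup. A minimal, self-contained sketch of the idiom (the ScopeGuard class below is written from scratch for illustration, not copied from MySQL's scope_guard.h):

#include <iostream>
#include <utility>

// Minimal RAII scope guard: runs the stored callable when the guard
// goes out of scope (on every exit path, including early returns).
template <class F>
class ScopeGuard {
 public:
  explicit ScopeGuard(F f) : m_f(std::move(f)) {}
  ScopeGuard(ScopeGuard &&other) noexcept
      : m_f(std::move(other.m_f)), m_armed(other.m_armed) {
    other.m_armed = false;
  }
  ScopeGuard(const ScopeGuard &) = delete;
  ScopeGuard &operator=(const ScopeGuard &) = delete;
  ~ScopeGuard() {
    if (m_armed) m_f();
  }

 private:
  F m_f;
  bool m_armed = true;
};

template <class F>
ScopeGuard<F> create_scope_guard(F f) {
  return ScopeGuard<F>(std::move(f));
}

int main() {
  auto cleanup = create_scope_guard([] {
    // In ExecuteIteratorQuery() this is where join_free() and
    // inc_examined_row_count() run for every query block.
    std::cout << "cleanup runs when the scope is left\n";
  });
  std::cout << "reading rows...\n";
  return 0;  // the guard's lambda runs here, before main() returns
}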
10. Initialize the root iterator (m_root_iterator->Init()).
11. Then a for loop repeatedly calls Read() on the root iterator, which cascades all the way down to the storage engine handler to fetch rows. On a fatal error it returns immediately, and it also returns if a kill signal is received.
Inside the loop, *send_records_ptr is incremented for each row, the row is sent to the client with send_data, and the diagnostics area's row counter is advanced to point at the next row. A toy version of this loop is sketched below.
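For context on the loop's error convention: in the iterator executor, Read() returns 0 when a row was produced, a negative value at end of data, and a positive value on error. The toy below (ToyTableIterator is an invented name, not a MySQL class) shows the same consumer-loop shape in a self-contained form:

#include <cstddef>
#include <iostream>
#include <vector>

// Toy iterator following the same Read() convention as the executor:
//   return 0  -> a row is available in `current`
//   return -1 -> end of data
//   return 1  -> error
struct ToyTableIterator {
  std::vector<int> rows;
  std::size_t pos = 0;
  int current = 0;

  bool Init() { pos = 0; return false; }  // false => success
  int Read() {
    if (pos == rows.size()) return -1;    // EOF
    current = rows[pos++];
    return 0;                             // one row produced
  }
};

int main() {
  ToyTableIterator it{{10, 20, 30}};
  if (it.Init()) return 1;

  std::size_t send_records = 0;  // plays the role of *send_records_ptr
  for (;;) {
    int error = it.Read();
    if (error > 0) return 1;     // fatal error
    if (error < 0) break;        // end of data
    ++send_records;
    std::cout << "row: " << it.current << "\n";  // stands in for send_data()
  }
  std::cout << "send_records = " << send_records << "\n";
  return 0;
}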
12. Finally, *send_records_ptr is assigned to the thread's current_found_rows, and the result set is finished with send_eof().