Skip to content

Commit 5f373b7

Browse files
author
Marcin Babij
committed
Bug #31095274 I_INNODB.INNODB_BUG16138582 TIMES OUT ON WINDOWS [Part 2/2]
This patch uses findings from the bug resolution, which showed that the AIO threads had uneven amounts of completed OS IO requests ready to be served. It changes the way AIO::reserve_slot() works in order to balance the use of the segments. RB#24646
1 parent 4894770 commit 5f373b7

File tree

1 file changed

+58
-28
lines changed

1 file changed

+58
-28
lines changed

storage/innobase/os/os0file.cc

+58-28
Original file line numberDiff line numberDiff line change
@@ -736,6 +736,13 @@ class AIO {
736736
the ibuf segment */
737737
ulint m_n_reserved;
738738

739+
/** The index of last slot used to reserve. This is used to balance the
740+
incoming requests more evenly throughout the segments.
741+
This field is not guarded by any lock.
742+
This is only used as a heuristic and any value read from or written to it is OK.
743+
It is atomic as it is accessed without any latches from multiple threads. */
744+
std::atomic_size_t m_last_slot_used;
745+
739746
#ifdef _WIN32
740747
typedef std::vector<HANDLE, ut_allocator<HANDLE>> Handles;
741748

@@ -1815,7 +1822,7 @@ static char *os_file_get_parent_dir(const char *path) {
18151822

18161823
if (last_slash - path < 0) {
18171824
/* Sanity check, it prevents gcc from trying to handle this case which
1818-
* results in warnings for some optimized builds */
1825+
results in warnings for some optimized builds */
18191826
return (nullptr);
18201827
}
18211828

@@ -5015,8 +5022,8 @@ static MY_ATTRIBUTE((warn_unused_result)) ssize_t
50155022
block = os_file_compress_page(type, buf, &n);
50165023
} else {
50175024
/* Since e_block is valid, encryption must have already happened. Since we
5018-
* do compression before encryption, we assert here that there is no
5019-
* encryption involved. */
5025+
do compression before encryption, we assert here that there is no
5026+
encryption involved. */
50205027
ut_ad(!type.is_encrypted());
50215028
}
50225029
} else {
@@ -6136,7 +6143,8 @@ dberr_t os_aio_handler(ulint segment, fil_node_t **m1, void **m2,
61366143
AIO::AIO(latch_id_t id, ulint n, ulint segments)
61376144
: m_slots(n),
61386145
m_n_segments(segments),
6139-
m_n_reserved()
6146+
m_n_reserved(),
6147+
m_last_slot_used(0)
61406148
#ifdef LINUX_NATIVE_AIO
61416149
,
61426150
m_aio_ctx(),
@@ -6710,11 +6718,6 @@ Slot *AIO::reserve_slot(IORequest &type, fil_node_t *m1, void *m2,
67106718

67116719
const auto slots_per_seg = slots_per_segment();
67126720

6713-
/* We attempt to keep adjacent blocks in the same local
6714-
segment. This can help in merging IO requests when we are
6715-
doing simulated AIO */
6716-
ulint local_seg = (offset >> (UNIV_PAGE_SIZE_SHIFT + 6)) % m_n_segments;
6717-
67186721
for (;;) {
67196722
acquire();
67206723

@@ -6734,26 +6737,53 @@ Slot *AIO::reserve_slot(IORequest &type, fil_node_t *m1, void *m2,
67346737
os_event_wait(m_not_full);
67356738
}
67366739

6737-
ulint counter = 0;
6738-
Slot *slot = nullptr;
6739-
6740-
/* We start our search for an available slot from our preferred
6741-
local segment and do a full scan of the array. We are
6742-
guaranteed to find a slot in full scan. */
6743-
for (ulint i = local_seg * slots_per_seg; counter < m_slots.size();
6744-
++i, ++counter) {
6745-
i %= m_slots.size();
6746-
6747-
slot = at(i);
6748-
6749-
if (slot->is_reserved == false) {
6750-
break;
6740+
/* We will check first, next(first), next(next(first))... which should be a
6741+
permutation of values 0,..,m_slots.size()-1.*/
6742+
auto find_slot = [this](size_t first, auto next) {
6743+
size_t i = first;
6744+
for (size_t counter = 0; counter < m_slots.size(); ++counter) {
6745+
if (!at(i)->is_reserved) {
6746+
return i;
6747+
}
6748+
i = next(i);
67516749
}
6752-
}
6753-
6754-
/* We MUST always be able to get hold of a reserved slot. */
6755-
ut_a(counter < m_slots.size());
6756-
6750+
/* We know that there is a free slot, because m_n_reserved != m_slots.size()
6751+
was checked under the mutex protection, which we still hold. Additionally
6752+
the permutation generated by next() should visit all slots. If we checked
6753+
m_slots.size() elements of the sequence and did not find a free slot, then it
6754+
was not a permutation, or there was no free slot.*/
6755+
ut_error;
6756+
};
6757+
size_t free_index;
6758+
if (srv_use_native_aio) {
6759+
/* We assume the m_slots.size() cannot be changed during runtime. */
6760+
ut_a(m_last_slot_used < m_slots.size());
6761+
/* We iterate through slots starting with the last used and then trying next
6762+
ones from consecutive segments to balance the incoming requests evenly
6763+
between the AIO threads. */
6764+
free_index = find_slot(m_last_slot_used, [&](size_t i) {
6765+
i += slots_per_seg;
6766+
if (i >= m_slots.size()) {
6767+
/* Start again from the first segment, this time trying next slot in
6768+
each segment. If we checked the last slot in segment, start with
6769+
first slot. */
6770+
i = (i + 1) % slots_per_seg;
6771+
}
6772+
return i;
6773+
});
6774+
m_last_slot_used = free_index;
6775+
} else {
6776+
/* We attempt to keep adjacent blocks in the same local
6777+
segment. This can help in merging IO requests when we are
6778+
doing simulated AIO */
6779+
const size_t local_seg =
6780+
(offset >> (UNIV_PAGE_SIZE_SHIFT + 6)) % m_n_segments;
6781+
/* We start our search for an available slot from our preferred
6782+
local segment and do a full scan of the array. */
6783+
free_index = find_slot(local_seg * slots_per_seg,
6784+
[&](size_t i) { return (i + 1) % m_slots.size(); });
6785+
}
6786+
Slot *const slot = at(free_index);
67576787
ut_a(slot->is_reserved == false);
67586788

67596789
++m_n_reserved;

0 commit comments

Comments
 (0)