Skip to content

Commit 9771dfc

Browse files
author
Marcin Babij
committed
Merge branch 'mysql-8.0' into mysql-trunk
2 parents 0c015c2 + 5f373b7 commit 9771dfc

File tree

1 file changed

+58
-28
lines changed

1 file changed

+58
-28
lines changed

Diff for: storage/innobase/os/os0file.cc

+58-28
Original file line numberDiff line numberDiff line change
@@ -736,6 +736,13 @@ class AIO {
736736
the ibuf segment */
737737
ulint m_n_reserved;
738738

739+
/** The index of the last slot used to reserve. This is used to balance the
740+
incoming requests more evenly throughout the segments.
741+
This field is not guarded by any lock.
742+
This is only used as a heuristic and any value read or written to it is OK.
743+
It is atomic as it is accessed without any latches from multiple threads. */
744+
std::atomic_size_t m_last_slot_used;
745+
739746
#ifdef _WIN32
740747
typedef std::vector<HANDLE, ut_allocator<HANDLE>> Handles;
741748

@@ -1816,7 +1823,7 @@ static char *os_file_get_parent_dir(const char *path) {
18161823

18171824
if (last_slash - path < 0) {
18181825
/* Sanity check, it prevents gcc from trying to handle this case which
1819-
* results in warnings for some optimized builds */
1826+
results in warnings for some optimized builds */
18201827
return (nullptr);
18211828
}
18221829

@@ -5016,8 +5023,8 @@ static MY_ATTRIBUTE((warn_unused_result)) ssize_t
50165023
block = os_file_compress_page(type, buf, &n);
50175024
} else {
50185025
/* Since e_block is valid, encryption must have already happened. Since we
5019-
* do compression before encryption, we assert here that there is no
5020-
* encryption involved. */
5026+
do compression before encryption, we assert here that there is no
5027+
encryption involved. */
50215028
ut_ad(!type.is_encrypted());
50225029
}
50235030
} else {
@@ -6137,7 +6144,8 @@ dberr_t os_aio_handler(ulint segment, fil_node_t **m1, void **m2,
61376144
AIO::AIO(latch_id_t id, ulint n, ulint segments)
61386145
: m_slots(n),
61396146
m_n_segments(segments),
6140-
m_n_reserved()
6147+
m_n_reserved(),
6148+
m_last_slot_used(0)
61416149
#ifdef LINUX_NATIVE_AIO
61426150
,
61436151
m_aio_ctx(),
@@ -6711,11 +6719,6 @@ Slot *AIO::reserve_slot(IORequest &type, fil_node_t *m1, void *m2,
67116719

67126720
const auto slots_per_seg = slots_per_segment();
67136721

6714-
/* We attempt to keep adjacent blocks in the same local
6715-
segment. This can help in merging IO requests when we are
6716-
doing simulated AIO */
6717-
ulint local_seg = (offset >> (UNIV_PAGE_SIZE_SHIFT + 6)) % m_n_segments;
6718-
67196722
for (;;) {
67206723
acquire();
67216724

@@ -6735,26 +6738,53 @@ Slot *AIO::reserve_slot(IORequest &type, fil_node_t *m1, void *m2,
67356738
os_event_wait(m_not_full);
67366739
}
67376740

6738-
ulint counter = 0;
6739-
Slot *slot = nullptr;
6740-
6741-
/* We start our search for an available slot from our preferred
6742-
local segment and do a full scan of the array. We are
6743-
guaranteed to find a slot in full scan. */
6744-
for (ulint i = local_seg * slots_per_seg; counter < m_slots.size();
6745-
++i, ++counter) {
6746-
i %= m_slots.size();
6747-
6748-
slot = at(i);
6749-
6750-
if (slot->is_reserved == false) {
6751-
break;
6741+
/* We will check first, next(first), next(next(first))... which should be a
6742+
permutation of values 0,..,m_slots.size()-1.*/
6743+
auto find_slot = [this](size_t first, auto next) {
6744+
size_t i = first;
6745+
for (size_t counter = 0; counter < m_slots.size(); ++counter) {
6746+
if (!at(i)->is_reserved) {
6747+
return i;
6748+
}
6749+
i = next(i);
67526750
}
6753-
}
6754-
6755-
/* We MUST always be able to get hold of a reserved slot. */
6756-
ut_a(counter < m_slots.size());
6757-
6751+
/* We know that there is a free slot, because m_n_reserved != m_slots.size()
6752+
was checked under the mutex protection, which we still hold. Additionally
6753+
the permutation generated by next() should visit all slots. If we checked
6754+
m_slots.size() elements of the sequence and did not find a free slot, then it
6755+
was not a permutation, or there was no free slot.*/
6756+
ut_error;
6757+
};
6758+
size_t free_index;
6759+
if (srv_use_native_aio) {
6760+
/* We assume the m_slots.size() cannot be changed during runtime. */
6761+
ut_a(m_last_slot_used < m_slots.size());
6762+
/* We iterate through slots starting with the last used and then trying next
6763+
ones from consecutive segments to balance the incoming requests evenly
6764+
between the AIO threads. */
6765+
free_index = find_slot(m_last_slot_used, [&](size_t i) {
6766+
i += slots_per_seg;
6767+
if (i >= m_slots.size()) {
6768+
/* Start again from the first segment, this time trying next slot in
6769+
each segment. If we checked the last slot in a segment, start with
6770+
first slot. */
6771+
i = (i + 1) % slots_per_seg;
6772+
}
6773+
return i;
6774+
});
6775+
m_last_slot_used = free_index;
6776+
} else {
6777+
/* We attempt to keep adjacent blocks in the same local
6778+
segment. This can help in merging IO requests when we are
6779+
doing simulated AIO */
6780+
const size_t local_seg =
6781+
(offset >> (UNIV_PAGE_SIZE_SHIFT + 6)) % m_n_segments;
6782+
/* We start our search for an available slot from our preferred
6783+
local segment and do a full scan of the array. */
6784+
free_index = find_slot(local_seg * slots_per_seg,
6785+
[&](size_t i) { return (i + 1) % m_slots.size(); });
6786+
}
6787+
Slot *const slot = at(free_index);
67586788
ut_a(slot->is_reserved == false);
67596789

67606790
++m_n_reserved;

0 commit comments

Comments
 (0)