@@ -736,6 +736,13 @@ class AIO {
   the ibuf segment */
   ulint m_n_reserved;
 
+  /** The index of last slot used to reserve. This is used to balance the
+  incoming requests more evenly throughout the segments.
+  This field is not guarded by any lock.
+  This is only used as a heuristic and any value read or written to it is OK.
+  It is atomic as it is accessed without any latches from multiple threads. */
+  std::atomic_size_t m_last_slot_used;
+
 #ifdef _WIN32
   typedef std::vector<HANDLE, ut_allocator<HANDLE>> Handles;
 
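The new field relies on the C++ memory model instead of a latch: concurrent loads and stores of a std::atomic_size_t are well defined, and a stale value only moves the starting point of the slot search. A minimal standalone sketch of this "racy but benign" hint pattern (not InnoDB code; all names and sizes below are made up for illustration):

#include <atomic>
#include <cstddef>
#include <thread>
#include <vector>

/* Hypothetical stand-in for the AIO array: many threads read and update the
hint concurrently without holding any mutex. Because the field is atomic, the
races are well defined; a stale value is acceptable since the hint only decides
where the next search starts. */
struct slot_hint_demo {
  std::atomic_size_t last_slot_used{0};
};

int main() {
  slot_hint_demo demo;
  std::vector<std::thread> threads;
  for (int t = 0; t < 4; ++t) {
    threads.emplace_back([&demo, t]() {
      for (int k = 0; k < 1000; ++k) {
        /* Read the current hint (possibly stale) and publish a new one. */
        const size_t start = demo.last_slot_used.load();
        demo.last_slot_used.store((start + 1 + static_cast<size_t>(t)) % 64);
      }
    });
  }
  for (auto &thr : threads) {
    thr.join();
  }
  return 0;
}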
@@ -1816,7 +1823,7 @@ static char *os_file_get_parent_dir(const char *path) {
 
   if (last_slash - path < 0) {
     /* Sanity check, it prevents gcc from trying to handle this case which
-     * results in warnings for some optimized builds */
+    results in warnings for some optimized builds */
     return (nullptr);
   }
 
@@ -5016,8 +5023,8 @@ static MY_ATTRIBUTE((warn_unused_result)) ssize_t
       block = os_file_compress_page(type, buf, &n);
     } else {
       /* Since e_block is valid, encryption must have already happened. Since we
-       * do compression before encryption, we assert here that there is no
-       * encryption involved. */
+      do compression before encryption, we assert here that there is no
+      encryption involved. */
       ut_ad(!type.is_encrypted());
     }
   } else {
@@ -6137,7 +6144,8 @@ dberr_t os_aio_handler(ulint segment, fil_node_t **m1, void **m2,
 AIO::AIO(latch_id_t id, ulint n, ulint segments)
     : m_slots(n),
       m_n_segments(segments),
-      m_n_reserved()
+      m_n_reserved(),
+      m_last_slot_used(0)
 #ifdef LINUX_NATIVE_AIO
       ,
       m_aio_ctx(),
@@ -6711,11 +6719,6 @@ Slot *AIO::reserve_slot(IORequest &type, fil_node_t *m1, void *m2,
 
   const auto slots_per_seg = slots_per_segment();
 
-  /* We attempt to keep adjacent blocks in the same local
-  segment. This can help in merging IO requests when we are
-  doing simulated AIO */
-  ulint local_seg = (offset >> (UNIV_PAGE_SIZE_SHIFT + 6)) % m_n_segments;
-
   for (;;) {
     acquire();
 
@@ -6735,26 +6738,53 @@ Slot *AIO::reserve_slot(IORequest &type, fil_node_t *m1, void *m2,
     os_event_wait(m_not_full);
   }
 
-  ulint counter = 0;
-  Slot *slot = nullptr;
-
-  /* We start our search for an available slot from our preferred
-  local segment and do a full scan of the array. We are
-  guaranteed to find a slot in full scan. */
-  for (ulint i = local_seg * slots_per_seg; counter < m_slots.size();
-       ++i, ++counter) {
-    i %= m_slots.size();
-
-    slot = at(i);
-
-    if (slot->is_reserved == false) {
-      break;
+  /* We will check first, next(first), next(next(first))... which should be a
+  permutation of the values 0,...,m_slots.size()-1. */
+  auto find_slot = [this](size_t first, auto next) {
+    size_t i = first;
+    for (size_t counter = 0; counter < m_slots.size(); ++counter) {
+      if (!at(i)->is_reserved) {
+        return i;
+      }
+      i = next(i);
     }
-  }
-
-  /* We MUST always be able to get hold of a reserved slot. */
-  ut_a(counter < m_slots.size());
-
+    /* We know that there is a free slot, because m_n_reserved != m_slots.size()
+    was checked under the mutex protection, which we still hold. Additionally
+    the permutation generated by next() should visit all slots. If we checked
+    m_slots.size() elements of the sequence and did not find a free slot, then
+    it was not a permutation, or there was no free slot. */
+    ut_error;
+  };
+  size_t free_index;
+  if (srv_use_native_aio) {
+    /* We assume the m_slots.size() cannot be changed during runtime. */
+    ut_a(m_last_slot_used < m_slots.size());
+    /* We iterate through slots starting with the last used one and then trying
+    next ones from consecutive segments to balance the incoming requests evenly
+    between the AIO threads. */
+    free_index = find_slot(m_last_slot_used, [&](size_t i) {
+      i += slots_per_seg;
+      if (i >= m_slots.size()) {
+        /* Start again from the first segment, this time trying the next slot
+        in each segment. If we checked the last slot in a segment, start with
+        the first slot. */
+        i = (i + 1) % slots_per_seg;
+      }
+      return i;
+    });
+    m_last_slot_used = free_index;
+  } else {
+    /* We attempt to keep adjacent blocks in the same local
+    segment. This can help in merging IO requests when we are
+    doing simulated AIO */
+    const size_t local_seg =
+        (offset >> (UNIV_PAGE_SIZE_SHIFT + 6)) % m_n_segments;
+    /* We start our search for an available slot from our preferred
+    local segment and do a full scan of the array. */
+    free_index = find_slot(local_seg * slots_per_seg,
+                           [&](size_t i) { return (i + 1) % m_slots.size(); });
+  }
+  Slot *const slot = at(free_index);
 
   ut_a(slot->is_reserved == false);
 
   ++m_n_reserved;
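The native-AIO walk above advances by next(i) = i + slots_per_seg and, once it runs past the end of the array, wraps to (i + 1) % slots_per_seg, so it probes the same slot position in every consecutive segment before moving on to the next position; that is what spreads new requests across the AIO handler threads. A small standalone sketch that checks the permutation claim from the comment (not InnoDB code; slots_per_seg and n_segments are arbitrary illustrative values):

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  const size_t slots_per_seg = 8; /* illustrative value */
  const size_t n_segments = 4;    /* illustrative value */
  const size_t n_slots = slots_per_seg * n_segments;

  /* Same stepping rule as the native-AIO lambda in the patch. */
  auto next = [&](size_t i) {
    i += slots_per_seg;
    if (i >= n_slots) {
      i = (i + 1) % slots_per_seg;
    }
    return i;
  };

  /* Starting from any slot, following next() n_slots times must visit every
  slot exactly once, so a free slot is always reached when one exists. */
  for (size_t first = 0; first < n_slots; ++first) {
    std::vector<bool> visited(n_slots, false);
    size_t i = first;
    for (size_t counter = 0; counter < n_slots; ++counter) {
      assert(!visited[i]); /* no slot is visited twice ... */
      visited[i] = true;
      i = next(i);
    }
    for (const bool v : visited) {
      assert(v); /* ... and none is skipped */
    }
  }
  return 0;
}

The simulated-AIO branch keeps the old locality heuristic instead: offset >> (UNIV_PAGE_SIZE_SHIFT + 6) is the page number divided by 64, so runs of 64 consecutive pages prefer the same local segment, which helps the simulated handler merge adjacent requests.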