@@ -736,6 +736,13 @@ class AIO {
   the ibuf segment */
   ulint m_n_reserved;
 
+  /** The index of last slot used to reserve. This is used to balance the
+  incoming requests more evenly throughout the segments.
+  This field is not guarded by any lock.
+  This is only used as a heuristic and any value read or written to it is OK.
+  It is atomic as it is accessed without any latches from multiple threads. */
+  std::atomic_size_t m_last_slot_used;
+
 #ifdef _WIN32
   typedef std::vector<HANDLE, ut_allocator<HANDLE>> Handles;
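Side note (not part of the patch): a minimal, self-contained sketch of the pattern the new member's comment describes, a std::atomic_size_t used as an unguarded hint that several threads read and update without any latch. The names here (last_used, n_slots) are illustrative only; the point is that std::atomic makes the concurrent accesses well defined, while correctness never depends on which particular value a reader observes.

#include <atomic>
#include <cstddef>
#include <thread>
#include <vector>

int main() {
  const size_t n_slots = 32;
  /* Shared hint, deliberately not protected by any mutex. std::atomic makes
  the concurrent reads and writes well defined; a stale value only changes
  where a search would start, never its correctness. */
  std::atomic_size_t last_used{0};

  std::vector<std::thread> workers;
  for (int t = 0; t < 4; ++t) {
    workers.emplace_back([&]() {
      for (int k = 0; k < 1000; ++k) {
        const size_t start = last_used % n_slots; /* possibly stale: only a hint */
        last_used = (start + 1) % n_slots;        /* publish the new hint */
      }
    });
  }
  for (auto &w : workers) {
    w.join();
  }
  return 0;
}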
@@ -1815,7 +1822,7 @@ static char *os_file_get_parent_dir(const char *path) {
 
   if (last_slash - path < 0) {
     /* Sanity check, it prevents gcc from trying to handle this case which
-    * results in warnings for some optimized builds */
+    results in warnings for some optimized builds */
     return (nullptr);
   }
@@ -5015,8 +5022,8 @@ static MY_ATTRIBUTE((warn_unused_result)) ssize_t
       block = os_file_compress_page(type, buf, &n);
     } else {
       /* Since e_block is valid, encryption must have already happened. Since we
-      * do compression before encryption, we assert here that there is no
-      * encryption involved. */
+      do compression before encryption, we assert here that there is no
+      encryption involved. */
       ut_ad(!type.is_encrypted());
     }
   } else {
@@ -6136,7 +6143,8 @@ dberr_t os_aio_handler(ulint segment, fil_node_t **m1, void **m2,
 AIO::AIO(latch_id_t id, ulint n, ulint segments)
     : m_slots(n),
       m_n_segments(segments),
-      m_n_reserved()
+      m_n_reserved(),
+      m_last_slot_used(0)
 #ifdef LINUX_NATIVE_AIO
       ,
       m_aio_ctx(),
@@ -6710,11 +6718,6 @@ Slot *AIO::reserve_slot(IORequest &type, fil_node_t *m1, void *m2,
 
   const auto slots_per_seg = slots_per_segment();
 
-  /* We attempt to keep adjacent blocks in the same local
-  segment. This can help in merging IO requests when we are
-  doing simulated AIO */
-  ulint local_seg = (offset >> (UNIV_PAGE_SIZE_SHIFT + 6)) % m_n_segments;
-
   for (;;) {
     acquire();
@@ -6734,26 +6737,53 @@ Slot *AIO::reserve_slot(IORequest &type, fil_node_t *m1, void *m2,
     os_event_wait(m_not_full);
   }
 
-  ulint counter = 0;
-  Slot *slot = nullptr;
-
-  /* We start our search for an available slot from our preferred
-  local segment and do a full scan of the array. We are
-  guaranteed to find a slot in full scan. */
-  for (ulint i = local_seg * slots_per_seg; counter < m_slots.size();
-       ++i, ++counter) {
-    i %= m_slots.size();
-
-    slot = at(i);
-
-    if (slot->is_reserved == false) {
-      break;
+  /* We will check first, next(first), next(next(first))... which should be a
+  permutation of values 0,..,m_slots.size()-1.*/
+  auto find_slot = [this](size_t first, auto next) {
+    size_t i = first;
+    for (size_t counter = 0; counter < m_slots.size(); ++counter) {
+      if (!at(i)->is_reserved) {
+        return i;
+      }
+      i = next(i);
     }
-  }
-
-  /* We MUST always be able to get hold of a reserved slot. */
-  ut_a(counter < m_slots.size());
-
+    /* We know that there is a free slot, because m_n_reserved != m_slots.size()
+    was checked under the mutex protection, which we still hold. Additionally
+    the permutation generated by next() should visit all slots. If we checked
+    m_slots.size() elements of the sequence and not found a free slot, then it
+    was not a permutation, or there was no free slot.*/
+    ut_error;
+  };
+  size_t free_index;
+  if (srv_use_native_aio) {
+    /* We assume the m_slots.size() cannot be changed during runtime. */
+    ut_a(m_last_slot_used < m_slots.size());
+    /* We iterate through slots starting with the last used and then trying next
+    ones from consecutive segments to balance the incoming requests evenly
+    between the AIO threads. */
+    free_index = find_slot(m_last_slot_used, [&](size_t i) {
+      i += slots_per_seg;
+      if (i >= m_slots.size()) {
+        /* Start again from the first segment, this time trying next slot in
+        each segment. If we checked the last slot in segment, start with
+        first slot. */
+        i = (i + 1) % slots_per_seg;
+      }
+      return i;
+    });
+    m_last_slot_used = free_index;
+  } else {
+    /* We attempt to keep adjacent blocks in the same local
+    segment. This can help in merging IO requests when we are
+    doing simulated AIO */
+    const size_t local_seg =
+        (offset >> (UNIV_PAGE_SIZE_SHIFT + 6)) % m_n_segments;
+    /* We start our search for an available slot from our preferred
+    local segment and do a full scan of the array. */
+    free_index = find_slot(local_seg * slots_per_seg,
+                           [&](size_t i) { return (i + 1) % m_slots.size(); });
+  }
+  Slot *const slot = at(free_index);
 
   ut_a(slot->is_reserved == false);
 
   ++m_n_reserved;
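Side note (not part of the patch): the correctness of find_slot() relies on next() generating a permutation of all slot indexes. Below is a minimal, self-contained sketch (illustrative names, arbitrarily chosen sizes) that replays the native-AIO stepping rule, jump one segment forward and on wrap-around advance to the following slot offset, and checks that starting from any index, m_slots.size() steps visit every slot exactly once.

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  /* Illustrative sizes; in the patch these correspond to m_n_segments and
  slots_per_segment(), with m_slots.size() == n_segments * slots_per_seg. */
  const size_t n_segments = 4;
  const size_t slots_per_seg = 8;
  const size_t n_slots = n_segments * slots_per_seg;

  /* The stepping rule used when srv_use_native_aio is set: try the same slot
  offset in the following segment; after the last segment, wrap to the first
  segment and move on to the next slot offset. */
  auto next = [&](size_t i) {
    i += slots_per_seg;
    if (i >= n_slots) {
      i = (i + 1) % slots_per_seg;
    }
    return i;
  };

  for (size_t first = 0; first < n_slots; ++first) {
    std::vector<bool> visited(n_slots, false);
    size_t i = first;
    for (size_t counter = 0; counter < n_slots; ++counter) {
      assert(!visited[i]); /* no index is visited twice ... */
      visited[i] = true;
      i = next(i);
    }
    for (bool v : visited) {
      assert(v); /* ... so all n_slots indexes are visited: a permutation */
    }
  }
  return 0;
}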