
Commit b9587d1

Authored and committed by Arthur Gautier
Implement HeartbeatLogEvent
I ended up debugging a slow startup with pymysqlreplication: the stream appeared "stuck" for a few seconds before the first events came in. To investigate, I implemented HeartbeatLogEvent and documented the observed MySQL behavior. Signed-off-by: Arthur Gautier <[email protected]>
1 parent bd2a640 commit b9587d1
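
For context, a hypothetical usage sketch (not part of this commit) of how a consumer might opt in to heartbeats once this lands. All connection settings, the server_id, and credentials below are placeholders, and the master only emits heartbeats when a heartbeat period is in effect for the replication connection.

# Hypothetical sketch: watch heartbeats so a quiet master no longer looks
# "stuck". Connection values are placeholders.
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.event import HeartbeatLogEvent

mysql_settings = {"host": "127.0.0.1", "port": 3306,
                  "user": "repl", "passwd": "secret"}

stream = BinLogStreamReader(connection_settings=mysql_settings,
                            server_id=100,
                            blocking=True,
                            only_events=[HeartbeatLogEvent])

for binlogevent in stream:
    # Each heartbeat names the binlog the master is currently positioned in.
    print("heartbeat, current binlog: %s" % binlogevent.ident)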

3 files changed: 42 additions, 2 deletions


pymysqlreplication/binlogstream.py

Lines changed: 4 additions & 2 deletions

@@ -14,7 +14,7 @@
     QueryEvent, RotateEvent, FormatDescriptionEvent,
     XidEvent, GtidEvent, StopEvent,
     BeginLoadQueryEvent, ExecuteLoadQueryEvent,
-    NotImplementedEvent)
+    HeartbeatLogEvent, NotImplementedEvent)
 from .row_event import (
     UpdateRowsEvent, WriteRowsEvent, DeleteRowsEvent, TableMapEvent)

@@ -450,7 +450,9 @@ def _allowed_event_list(self, only_events, ignored_events,
                 WriteRowsEvent,
                 DeleteRowsEvent,
                 TableMapEvent,
-                NotImplementedEvent))
+                HeartbeatLogEvent,
+                NotImplementedEvent,
+                ))
         if ignored_events is not None:
             for e in ignored_events:
                 events.remove(e)
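
For reference, a simplified sketch of the selection logic this hunk extends (not the library's exact code; the real method builds a larger default tuple): the defaults now include HeartbeatLogEvent, and only_events / ignored_events narrow the set.

# Simplified sketch of _allowed_event_list's filtering; the real default
# set is longer, but the control flow is the same.
from pymysqlreplication.event import (QueryEvent, RotateEvent,
                                      HeartbeatLogEvent, NotImplementedEvent)
from pymysqlreplication.row_event import (WriteRowsEvent, DeleteRowsEvent,
                                          TableMapEvent)

def allowed_event_list(only_events=None, ignored_events=None):
    if only_events is not None:
        events = set(only_events)
    else:
        events = {QueryEvent, RotateEvent, WriteRowsEvent, DeleteRowsEvent,
                  TableMapEvent, HeartbeatLogEvent, NotImplementedEvent}
    if ignored_events is not None:
        for e in ignored_events:
            events.remove(e)
    return frozenset(events)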

pymysqlreplication/event.py

Lines changed: 37 additions & 0 deletions

@@ -119,6 +119,43 @@ def _dump(self):
         print("Transaction ID: %d" % (self.xid))


+class HeartbeatLogEvent(BinLogEvent):
+    """A Heartbeat event
+    Heartbeats are sent by the master only if there are no unsent events in
+    the binary log file for a period longer than the interval defined by the
+    MASTER_HEARTBEAT_PERIOD connection setting.
+
+    A MySQL server will also send one to the slave for each event skipped in
+    the log. I (baloo) believe the intention is to make the slave bump its
+    position so that, if a disconnection occurs, the slave only reconnects
+    from the last skipped position (see Binlog_sender::send_events in
+    sql/rpl_binlog_sender.cc). That makes 106 bytes of data per skipped
+    event in the binlog. *This is also the case with GTID replication.* To
+    mitigate such behavior, you are expected to keep the binlog small (see
+    max_binlog_size, which defaults to 1G).
+    In such a case, the timestamp will be 0 (as in 1970-01-01T00:00:00)
+    to notify the slave of a skipped event.
+
+
+    Attributes:
+        ident: Name of the current binlog
+
+    Note:
+        Behavior changed after ced9292eb87b061fb7b8ac2190f01e9dec18f3d7
+        and the timestamp is now always 0.
+    """
+
+    def __init__(self, from_packet, event_size, table_map, ctl_connection, **kwargs):
+        super(HeartbeatLogEvent, self).__init__(from_packet, event_size,
+                                                table_map, ctl_connection,
+                                                **kwargs)
+        self.ident = self.packet.read(event_size).decode()
+
+    def _dump(self):
+        super(HeartbeatLogEvent, self)._dump()
+        print("Current binlog: %s" % (self.ident))
+
+
 class QueryEvent(BinLogEvent):
     '''This evenement is trigger when a query is run of the database.
     Only replicated queries are logged.'''
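
A hedged usage sketch of what the new event looks like to a consumer, following the docstring above; `stream` is assumed to be a BinLogStreamReader that allows HeartbeatLogEvent (as in the binlogstream.py change).

# Hedged sketch: reacting to heartbeats in a consumer loop. `stream` is
# assumed to be a BinLogStreamReader configured to allow HeartbeatLogEvent.
from pymysqlreplication.event import HeartbeatLogEvent

for binlogevent in stream:
    if isinstance(binlogevent, HeartbeatLogEvent):
        # `ident` names the binlog the master is positioned in; the timestamp
        # is 0 for skipped events (and, per the Note above, always 0 on newer
        # servers).
        print("heartbeat: binlog=%s timestamp=%d"
              % (binlogevent.ident, binlogevent.timestamp))
        continue
    binlogevent.dump()  # non-heartbeat events handled as before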

pymysqlreplication/packet.py

Lines changed: 1 addition & 0 deletions

@@ -70,6 +70,7 @@ class BinLogPacketWrapper(object):
         constants.STOP_EVENT: event.StopEvent,
         constants.BEGIN_LOAD_QUERY_EVENT: event.BeginLoadQueryEvent,
         constants.EXECUTE_LOAD_QUERY_EVENT: event.ExecuteLoadQueryEvent,
+        constants.HEARTBEAT_LOG_EVENT: event.HeartbeatLogEvent,
         # row_event
         constants.UPDATE_ROWS_EVENT_V1: row_event.UpdateRowsEvent,
         constants.WRITE_ROWS_EVENT_V1: row_event.WriteRowsEvent,
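
For orientation, an illustrative sketch of how a type-to-class map like the one above is typically consulted when a binlog packet arrives; the dict name and helper below are invented for illustration, not the wrapper's real internals.

# Illustrative only: the map name and lookup helper are invented here.
# MySQL defines HEARTBEAT_LOG_EVENT as binlog event type 27 (0x1b).
from pymysqlreplication import constants, event

EVENT_MAP = {
    constants.HEARTBEAT_LOG_EVENT: event.HeartbeatLogEvent,
    # ... the other entries shown in BinLogPacketWrapper above ...
}

def event_class_for(event_type):
    # Unknown or unhandled event types fall back to NotImplementedEvent.
    return EVENT_MAP.get(event_type, event.NotImplementedEvent)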
