
Commit a82ca05

Merge branch 'master' into compression

2 parents: 243b3df + 575e1b2

20 files changed: +286 / -274 lines

.github/workflows/test.yml (+8 / -8)

@@ -15,9 +15,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      - uses: dominikh/[email protected]
-        with:
-          version: "2023.1.6"
+      - uses: dominikh/[email protected]
 
   list:
     runs-on: ubuntu-latest
@@ -31,20 +29,22 @@ jobs:
           import os
           go = [
             # Keep the most recent production release at the top
-            '1.22',
+            '1.23',
             # Older production releases
+            '1.22',
             '1.21',
-            '1.20',
           ]
           mysql = [
+            '9.0',
+            '8.4', # LTS
             '8.0',
-            '8.3',
             '5.7',
-            'mariadb-11.3',
+            'mariadb-11.4', # LTS
+            'mariadb-11.2',
             'mariadb-11.1',
             'mariadb-10.11', # LTS
             'mariadb-10.6', # LTS
-            'mariadb-10.5',
+            'mariadb-10.5', # LTS
           ]
 
           includes = []

AUTHORS (+3)

@@ -34,6 +34,7 @@ Daniel Montoya <dsmontoyam at gmail.com>
 Daniel Nichter <nil at codenode.com>
 Daniël van Eeden <git at myname.nl>
 Dave Protasowski <dprotaso at gmail.com>
+Dirkjan Bussink <d.bussink at gmail.com>
 DisposaBoy <disposaboy at dby.me>
 Egor Smolyakov <egorsmkv at gmail.com>
 Erwan Martin <hello at erwan.io>
@@ -51,6 +52,7 @@ ICHINOSE Shogo <shogo82148 at gmail.com>
 Ilia Cimpoes <ichimpoesh at gmail.com>
 INADA Naoki <songofacandy at gmail.com>
 Jacek Szwec <szwec.jacek at gmail.com>
+Jakub Adamus <kratky at zobak.cz>
 James Harr <james.harr at gmail.com>
 Janek Vedock <janekvedock at comcast.net>
 Jason Ng <oblitorum at gmail.com>
@@ -82,6 +84,7 @@ Lunny Xiao <xiaolunwen at gmail.com>
 Luke Scott <luke at webconnex.com>
 Maciej Zimnoch <maciej.zimnoch at codilime.com>
 Michael Woolnough <michael.woolnough at gmail.com>
+Nao Yokotsuka <yokotukanao at gmail.com>
 Nathanial Murphy <nathanial.murphy at gmail.com>
 Nicola Peduzzi <thenikso at gmail.com>
 Oliver Bone <owbone at github.com>

README.md (+4 / -1)

@@ -42,7 +42,7 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
 
 ## Requirements
 
-* Go 1.20 or higher. We aim to support the 3 latest versions of Go.
+* Go 1.21 or higher. We aim to support the 3 latest versions of Go.
 * MySQL (5.7+) and MariaDB (10.5+) are supported.
 * [TiDB](https://github.com/pingcap/tidb) is supported by PingCAP.
   * Do not ask questions about TiDB in our issue tracker or forum.
@@ -539,6 +539,9 @@ This driver supports the [`ColumnType` interface](https://golang.org/pkg/databas
 Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
 See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
 
+> [!IMPORTANT]
+> The `QueryContext`, `ExecContext`, etc. variants provided by `database/sql` will cause the connection to be closed if the provided context is cancelled or timed out before the result is received by the driver.
+
 
 ### `LOAD DATA LOCAL INFILE` support
 For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
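
The new IMPORTANT note is worth spelling out: once a context is cancelled or expires mid-query, the driver cannot resynchronize the wire protocol, so the connection is closed rather than returned to the pool. A minimal sketch of the behaviour the note describes (not part of this commit; the DSN is a placeholder):

    package main

    import (
    	"context"
    	"database/sql"
    	"log"
    	"time"

    	_ "github.com/go-sql-driver/mysql"
    )

    func main() {
    	// placeholder DSN for illustration only
    	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/test")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer db.Close()

    	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    	defer cancel()

    	// If the timeout fires before the result arrives, the query is
    	// cancelled and the connection backing it is closed by the driver.
    	var now string
    	if err := db.QueryRowContext(ctx, "SELECT NOW()").Scan(&now); err != nil {
    		log.Fatal(err)
    	}
    	log.Println(now)
    }

Because the aborted connection is not reused, overly aggressive timeouts translate directly into extra reconnects.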

buffer.go (+45 / -70)

@@ -22,42 +22,30 @@ const maxCachedBufSize = 256 * 1024
 // In other words, we can't write and read simultaneously on the same connection.
 // The buffer is similar to bufio.Reader / Writer but zero-copy-ish
 // Also highly optimized for this particular use case.
-// This buffer is backed by two byte slices in a double-buffering scheme
 type buffer struct {
-	buf     []byte // buf is a byte buffer who's length and capacity are equal.
-	nc      net.Conn
-	idx     int
-	length  int
-	timeout time.Duration
-	dbuf    [2][]byte // dbuf is an array with the two byte slices that back this buffer
-	flipcnt uint      // flipccnt is the current buffer counter for double-buffering
+	buf       []byte // read buffer.
+	cachedBuf []byte // buffer that will be reused. len(cachedBuf) <= maxCachedBufSize.
+	nc        net.Conn
+	timeout   time.Duration
 }
 
 // newBuffer allocates and returns a new buffer.
 func newBuffer(nc net.Conn) buffer {
-	fg := make([]byte, defaultBufSize)
 	return buffer{
-		buf:  fg,
-		nc:   nc,
-		dbuf: [2][]byte{fg, nil},
+		cachedBuf: make([]byte, defaultBufSize),
+		nc:        nc,
 	}
 }
 
-// flip replaces the active buffer with the background buffer
-// this is a delayed flip that simply increases the buffer counter;
-// the actual flip will be performed the next time we call `buffer.fill`
-func (b *buffer) flip() {
-	b.flipcnt += 1
+// busy returns true if the read buffer is not empty.
+func (b *buffer) busy() bool {
+	return len(b.buf) > 0
 }
 
-// fill reads into the buffer until at least _need_ bytes are in it
+// fill reads into the read buffer until at least _need_ bytes are in it.
 func (b *buffer) fill(need int) error {
-	n := b.length
-	// fill data into its double-buffering target: if we've called
-	// flip on this buffer, we'll be copying to the background buffer,
-	// and then filling it with network data; otherwise we'll just move
-	// the contents of the current buffer to the front before filling it
-	dest := b.dbuf[b.flipcnt&1]
+	// we'll move the contents of the current buffer to dest before filling it.
+	dest := b.cachedBuf
 
 	// grow buffer if necessary to fit the whole packet.
 	if need > len(dest) {
@@ -67,18 +55,13 @@ func (b *buffer) fill(need int) error {
 		// if the allocated buffer is not too large, move it to backing storage
 		// to prevent extra allocations on applications that perform large reads
 		if len(dest) <= maxCachedBufSize {
-			b.dbuf[b.flipcnt&1] = dest
+			b.cachedBuf = dest
 		}
 	}
 
-	// if we're filling the fg buffer, move the existing data to the start of it.
-	// if we're filling the bg buffer, copy over the data
-	if n > 0 {
-		copy(dest[:n], b.buf[b.idx:])
-	}
-
-	b.buf = dest
-	b.idx = 0
+	// move the existing data to the start of the buffer.
+	n := len(b.buf)
+	copy(dest[:n], b.buf)
 
 	for {
 		if b.timeout > 0 {
@@ -87,63 +70,58 @@ func (b *buffer) fill(need int) error {
 			}
 		}
 
-		nn, err := b.nc.Read(b.buf[n:])
+		nn, err := b.nc.Read(dest[n:])
 		n += nn
 
-		switch err {
-		case nil:
-			if n < need {
-				continue
-			}
-			b.length = n
-			return nil
+		if err == nil && n < need {
+			continue
+		}
 
-		case io.EOF:
-			if n >= need {
-				b.length = n
-				return nil
-			}
-			return io.ErrUnexpectedEOF
+		b.buf = dest[:n]
 
-		default:
-			return err
+		if err == io.EOF {
+			if n < need {
+				err = io.ErrUnexpectedEOF
+			} else {
+				err = nil
+			}
 		}
+		return err
 	}
 }
 
 // returns next N bytes from buffer.
 // The returned slice is only guaranteed to be valid until the next read
 func (b *buffer) readNext(need int) ([]byte, error) {
-	if b.length < need {
+	if len(b.buf) < need {
 		// refill
 		if err := b.fill(need); err != nil {
 			return nil, err
 		}
 	}
 
-	offset := b.idx
-	b.idx += need
-	b.length -= need
-	return b.buf[offset:b.idx], nil
+	data := b.buf[:need]
+	b.buf = b.buf[need:]
+	return data, nil
}
 
 // takeBuffer returns a buffer with the requested size.
 // If possible, a slice from the existing buffer is returned.
 // Otherwise a bigger buffer is made.
 // Only one buffer (total) can be used at a time.
 func (b *buffer) takeBuffer(length int) ([]byte, error) {
-	if b.length > 0 {
+	if b.busy() {
 		return nil, ErrBusyBuffer
 	}
 
 	// test (cheap) general case first
-	if length <= cap(b.buf) {
-		return b.buf[:length], nil
+	if length <= len(b.cachedBuf) {
+		return b.cachedBuf[:length], nil
 	}
 
-	if length < maxPacketSize {
-		b.buf = make([]byte, length)
-		return b.buf, nil
+	if length < maxCachedBufSize {
+		b.cachedBuf = make([]byte, length)
+		return b.cachedBuf, nil
 	}
 
 	// buffer is larger than we want to store.
@@ -154,29 +132,26 @@ func (b *buffer) takeBuffer(length int) ([]byte, error) {
 // known to be smaller than defaultBufSize.
 // Only one buffer (total) can be used at a time.
 func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
-	if b.length > 0 {
+	if b.busy() {
 		return nil, ErrBusyBuffer
 	}
-	return b.buf[:length], nil
+	return b.cachedBuf[:length], nil
 }
 
 // takeCompleteBuffer returns the complete existing buffer.
 // This can be used if the necessary buffer size is unknown.
 // cap and len of the returned buffer will be equal.
 // Only one buffer (total) can be used at a time.
 func (b *buffer) takeCompleteBuffer() ([]byte, error) {
-	if b.length > 0 {
+	if b.busy() {
 		return nil, ErrBusyBuffer
 	}
-	return b.buf, nil
+	return b.cachedBuf, nil
 }
 
 // store stores buf, an updated buffer, if its suitable to do so.
-func (b *buffer) store(buf []byte) error {
-	if b.length > 0 {
-		return ErrBusyBuffer
-	} else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
-		b.buf = buf[:cap(buf)]
+func (b *buffer) store(buf []byte) {
+	if cap(buf) <= maxCachedBufSize && cap(buf) > cap(b.cachedBuf) {
+		b.cachedBuf = buf[:cap(buf)]
 	}
-	return nil
 }
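
The buffer.go rewrite drops the double-buffering scheme (`dbuf`/`flipcnt`) and the `idx`/`length` bookkeeping in favour of a single unread window (`buf`) backed by a reusable slice (`cachedBuf`). Below is a condensed, self-contained sketch of that scheme, not the driver's actual code: it uses an `io.Reader` in place of `net.Conn`, a tiny initial buffer, and omits the read timeout and the cache-size cap.

    package main

    import (
    	"bytes"
    	"fmt"
    	"io"
    )

    type readBuffer struct {
    	buf       []byte // unread bytes; a window into cachedBuf
    	cachedBuf []byte // reusable backing storage
    	r         io.Reader
    }

    func newReadBuffer(r io.Reader) *readBuffer {
    	return &readBuffer{cachedBuf: make([]byte, 16), r: r}
    }

    // fill reads until at least need unread bytes are buffered.
    func (b *readBuffer) fill(need int) error {
    	dest := b.cachedBuf
    	if need > len(dest) {
    		// grow to fit the request (the driver additionally caps what it keeps)
    		dest = make([]byte, need)
    		b.cachedBuf = dest
    	}

    	// move the unread tail to the front of the backing storage
    	n := copy(dest, b.buf)

    	for {
    		nn, err := b.r.Read(dest[n:])
    		n += nn
    		if err == nil && n < need {
    			continue
    		}
    		b.buf = dest[:n]
    		if err == io.EOF {
    			if n < need {
    				err = io.ErrUnexpectedEOF
    			} else {
    				err = nil
    			}
    		}
    		return err
    	}
    }

    // readNext returns the next need bytes; the slice is only valid until the next fill.
    func (b *readBuffer) readNext(need int) ([]byte, error) {
    	if len(b.buf) < need {
    		if err := b.fill(need); err != nil {
    			return nil, err
    		}
    	}
    	data := b.buf[:need]
    	b.buf = b.buf[need:] // no idx/length counters: just re-slice the window
    	return data, nil
    }

    func main() {
    	b := newReadBuffer(bytes.NewReader([]byte("hello, packet world")))
    	head, _ := b.readNext(5)
    	rest, _ := b.readNext(8)
    	fmt.Printf("%q %q\n", head, rest) // "hello" ", packet"
    }

On the write side, `takeBuffer` and `store` reuse `cachedBuf`, and the new `busy()` helper simply checks for unread bytes before handing it out.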

collations.go (+1 / -1)

@@ -8,7 +8,7 @@
 
 package mysql
 
-const defaultCollation = "utf8mb4_general_ci"
+const defaultCollationID = 45 // utf8mb4_general_ci
 const binaryCollationID = 63
 
 // A list of available collations mapped to the internal ID.
