Commit 9a48688

Merge branch 'go-sql-driver:master' into fix_bigint_unsigned_scan_type
2 parents 6368cf3 + 7403860 commit 9a48688

19 files changed (688 additions, 316 deletions)

Diff for: .github/workflows/test.yml

1 addition, 1 deletion

@@ -83,7 +83,7 @@ jobs:
   my-cnf: |
     innodb_log_file_size=256MB
     innodb_buffer_pool_size=512MB
-    max_allowed_packet=16MB
+    max_allowed_packet=48MB
     ; TestConcurrent fails if max_connections is too large
     max_connections=50
     local_infile=1

Diff for: AUTHORS

4 additions

@@ -20,7 +20,9 @@ Andrew Reid <andrew.reid at tixtrack.com>
 Animesh Ray <mail.rayanimesh at gmail.com>
 Arne Hormann <arnehormann at gmail.com>
 Ariel Mashraki <ariel at mashraki.co.il>
+Artur Melanchyk <[email protected]>
 Asta Xie <xiemengjun at gmail.com>
+B Lamarche <blam413 at gmail.com>
 Brian Hendriks <brian at dolthub.com>
 Bulat Gaifullin <gaifullinbf at gmail.com>
 Caine Jette <jette at alum.mit.edu>
@@ -62,6 +64,7 @@ Jennifer Purevsuren <jennifer at dolthub.com>
 Jerome Meyer <jxmeyer at gmail.com>
 Jiajia Zhong <zhong2plus at gmail.com>
 Jian Zhen <zhenjl at gmail.com>
+Joe Mann <contact at joemann.co.uk>
 Joshua Prunier <joshua.prunier at gmail.com>
 Julien Lefevre <julien.lefevr at gmail.com>
 Julien Schmidt <go-sql-driver at julienschmidt.com>
@@ -92,6 +95,7 @@ Paul Bonser <misterpib at gmail.com>
 Paulius Lozys <pauliuslozys at gmail.com>
 Peter Schultz <peter.schultz at classmarkets.com>
 Phil Porada <philporada at gmail.com>
+Minh Quang <minhquang4334 at gmail.com>
 Rebecca Chin <rchin at pivotal.io>
 Reed Allman <rdallman10 at gmail.com>
 Richard Wilkes <wilkes at me.com>

Diff for: README.md

11 additions

@@ -38,6 +38,7 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
 * Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support
 * Optional `time.Time` parsing
 * Optional placeholder interpolation
+* Supports zlib compression.
 
 ## Requirements
 
@@ -267,6 +268,16 @@ SELECT u.id FROM users as u
 
 will return `u.id` instead of just `id` if `columnsWithAlias=true`.
 
+##### `compress`
+
+```
+Type:           bool
+Valid Values:   true, false
+Default:        false
+```
+
+Toggles zlib compression. false by default.
+
 ##### `interpolateParams`
 
 ```
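Not part of the diff, but as a usage sketch for the newly documented option: the DSN below uses the `compress` key added above, while the user, password, host, and database name are placeholders invented for illustration.

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// compress=true switches the connection to the zlib-compressed protocol.
	// Credentials, host, and database name are placeholders, not values from the diff.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/testdb?compress=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("connected with compression enabled")
}
```

The benchmark changes below do the same thing by appending `&compress=1` to the test DSN.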

Diff for: benchmark_test.go

20 additions, 8 deletions

@@ -46,9 +46,13 @@ func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt {
     return stmt
 }
 
-func initDB(b *testing.B, queries ...string) *sql.DB {
+func initDB(b *testing.B, useCompression bool, queries ...string) *sql.DB {
     tb := (*TB)(b)
-    db := tb.checkDB(sql.Open(driverNameTest, dsn))
+    comprStr := ""
+    if useCompression {
+        comprStr = "&compress=1"
+    }
+    db := tb.checkDB(sql.Open(driverNameTest, dsn+comprStr))
     for _, query := range queries {
         if _, err := db.Exec(query); err != nil {
             b.Fatalf("error on %q: %v", query, err)
@@ -60,10 +64,18 @@ func initDB(b *testing.B, queries ...string) *sql.DB {
 const concurrencyLevel = 10
 
 func BenchmarkQuery(b *testing.B) {
+    benchmarkQueryHelper(b, false)
+}
+
+func BenchmarkQueryCompression(b *testing.B) {
+    benchmarkQueryHelper(b, true)
+}
+
+func benchmarkQueryHelper(b *testing.B, compr bool) {
     tb := (*TB)(b)
     b.StopTimer()
     b.ReportAllocs()
-    db := initDB(b,
+    db := initDB(b, compr,
         "DROP TABLE IF EXISTS foo",
         "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
         `INSERT INTO foo VALUES (1, "one")`,
@@ -222,7 +234,7 @@ func BenchmarkInterpolation(b *testing.B) {
         },
         maxAllowedPacket: maxPacketSize,
         maxWriteSize:     maxPacketSize - 1,
-        buf:              newBuffer(nil),
+        buf:              newBuffer(),
     }
 
     args := []driver.Value{
@@ -269,7 +281,7 @@ func benchmarkQueryContext(b *testing.B, db *sql.DB, p int) {
 }
 
 func BenchmarkQueryContext(b *testing.B) {
-    db := initDB(b,
+    db := initDB(b, false,
         "DROP TABLE IF EXISTS foo",
         "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
         `INSERT INTO foo VALUES (1, "one")`,
@@ -305,7 +317,7 @@ func benchmarkExecContext(b *testing.B, db *sql.DB, p int) {
 }
 
 func BenchmarkExecContext(b *testing.B) {
-    db := initDB(b,
+    db := initDB(b, false,
         "DROP TABLE IF EXISTS foo",
         "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))",
         `INSERT INTO foo VALUES (1, "one")`,
@@ -323,7 +335,7 @@ func BenchmarkExecContext(b *testing.B) {
 // "size=" means size of each blobs.
 func BenchmarkQueryRawBytes(b *testing.B) {
     var sizes []int = []int{100, 1000, 2000, 4000, 8000, 12000, 16000, 32000, 64000, 256000}
-    db := initDB(b,
+    db := initDB(b, false,
         "DROP TABLE IF EXISTS bench_rawbytes",
         "CREATE TABLE bench_rawbytes (id INT PRIMARY KEY, val LONGBLOB)",
     )
@@ -376,7 +388,7 @@ func BenchmarkQueryRawBytes(b *testing.B) {
 // BenchmarkReceiveMassiveRows measures performance of receiving large number of rows.
 func BenchmarkReceiveMassiveRows(b *testing.B) {
     // Setup -- prepare 10000 rows.
-    db := initDB(b,
+    db := initDB(b, false,
         "DROP TABLE IF EXISTS foo",
         "CREATE TABLE foo (id INT PRIMARY KEY, val TEXT)")
     defer db.Close()

Diff for: buffer.go

50 additions, 81 deletions

@@ -10,54 +10,42 @@ package mysql
 
 import (
     "io"
-    "net"
-    "time"
 )
 
 const defaultBufSize = 4096
 const maxCachedBufSize = 256 * 1024
 
+// readerFunc is a function that compatible with io.Reader.
+// We use this function type instead of io.Reader because we want to
+// just pass mc.readWithTimeout.
+type readerFunc func([]byte) (int, error)
+
 // A buffer which is used for both reading and writing.
 // This is possible since communication on each connection is synchronous.
 // In other words, we can't write and read simultaneously on the same connection.
 // The buffer is similar to bufio.Reader / Writer but zero-copy-ish
 // Also highly optimized for this particular use case.
-// This buffer is backed by two byte slices in a double-buffering scheme
 type buffer struct {
-    buf     []byte // buf is a byte buffer who's length and capacity are equal.
-    nc      net.Conn
-    idx     int
-    length  int
-    timeout time.Duration
-    dbuf    [2][]byte // dbuf is an array with the two byte slices that back this buffer
-    flipcnt uint      // flipccnt is the current buffer counter for double-buffering
+    buf       []byte // read buffer.
+    cachedBuf []byte // buffer that will be reused. len(cachedBuf) <= maxCachedBufSize.
 }
 
 // newBuffer allocates and returns a new buffer.
-func newBuffer(nc net.Conn) buffer {
-    fg := make([]byte, defaultBufSize)
+func newBuffer() buffer {
     return buffer{
-        buf:  fg,
-        nc:   nc,
-        dbuf: [2][]byte{fg, nil},
+        cachedBuf: make([]byte, defaultBufSize),
     }
 }
 
-// flip replaces the active buffer with the background buffer
-// this is a delayed flip that simply increases the buffer counter;
-// the actual flip will be performed the next time we call `buffer.fill`
-func (b *buffer) flip() {
-    b.flipcnt += 1
+// busy returns true if the read buffer is not empty.
+func (b *buffer) busy() bool {
+    return len(b.buf) > 0
 }
 
-// fill reads into the buffer until at least _need_ bytes are in it
-func (b *buffer) fill(need int) error {
-    n := b.length
-    // fill data into its double-buffering target: if we've called
-    // flip on this buffer, we'll be copying to the background buffer,
-    // and then filling it with network data; otherwise we'll just move
-    // the contents of the current buffer to the front before filling it
-    dest := b.dbuf[b.flipcnt&1]
+// fill reads into the read buffer until at least _need_ bytes are in it.
+func (b *buffer) fill(need int, r readerFunc) error {
+    // we'll move the contents of the current buffer to dest before filling it.
+    dest := b.cachedBuf
 
     // grow buffer if necessary to fit the whole packet.
     if need > len(dest) {
@@ -67,83 +55,67 @@ func (b *buffer) fill(need int) error {
         // if the allocated buffer is not too large, move it to backing storage
         // to prevent extra allocations on applications that perform large reads
         if len(dest) <= maxCachedBufSize {
-            b.dbuf[b.flipcnt&1] = dest
+            b.cachedBuf = dest
         }
     }
 
-    // if we're filling the fg buffer, move the existing data to the start of it.
-    // if we're filling the bg buffer, copy over the data
-    if n > 0 {
-        copy(dest[:n], b.buf[b.idx:])
-    }
-
-    b.buf = dest
-    b.idx = 0
+    // move the existing data to the start of the buffer.
+    n := len(b.buf)
+    copy(dest[:n], b.buf)
 
     for {
-        if b.timeout > 0 {
-            if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
-                return err
-            }
+        nn, err := r(dest[n:])
+        n += nn
+
+        if err == nil && n < need {
+            continue
         }
 
-        nn, err := b.nc.Read(b.buf[n:])
-        n += nn
+        b.buf = dest[:n]
 
-        switch err {
-        case nil:
+        if err == io.EOF {
             if n < need {
-                continue
+                err = io.ErrUnexpectedEOF
+            } else {
+                err = nil
             }
-            b.length = n
-            return nil
-
-        case io.EOF:
-            if n >= need {
-                b.length = n
-                return nil
-            }
-            return io.ErrUnexpectedEOF
-
-        default:
-            return err
         }
+        return err
     }
 }
 
 // returns next N bytes from buffer.
 // The returned slice is only guaranteed to be valid until the next read
-func (b *buffer) readNext(need int) ([]byte, error) {
-    if b.length < need {
+func (b *buffer) readNext(need int, r readerFunc) ([]byte, error) {
+    if len(b.buf) < need {
         // refill
-        if err := b.fill(need); err != nil {
+        if err := b.fill(need, r); err != nil {
             return nil, err
         }
     }
 
-    offset := b.idx
-    b.idx += need
-    b.length -= need
-    return b.buf[offset:b.idx], nil
+    data := b.buf[:need]
+    b.buf = b.buf[need:]
+    return data, nil
 }
 
 // takeBuffer returns a buffer with the requested size.
 // If possible, a slice from the existing buffer is returned.
 // Otherwise a bigger buffer is made.
 // Only one buffer (total) can be used at a time.
 func (b *buffer) takeBuffer(length int) ([]byte, error) {
-    if b.length > 0 {
+    if b.busy() {
        return nil, ErrBusyBuffer
     }
 
     // test (cheap) general case first
-    if length <= cap(b.buf) {
-        return b.buf[:length], nil
+    if length <= len(b.cachedBuf) {
+        return b.cachedBuf[:length], nil
     }
 
-    if length < maxPacketSize {
-        b.buf = make([]byte, length)
-        return b.buf, nil
+    if length < maxCachedBufSize {
+        b.cachedBuf = make([]byte, length)
+        return b.cachedBuf, nil
     }
 
     // buffer is larger than we want to store.
@@ -154,29 +126,26 @@ func (b *buffer) takeBuffer(length int) ([]byte, error) {
 // known to be smaller than defaultBufSize.
 // Only one buffer (total) can be used at a time.
 func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
-    if b.length > 0 {
+    if b.busy() {
         return nil, ErrBusyBuffer
     }
-    return b.buf[:length], nil
+    return b.cachedBuf[:length], nil
 }
 
 // takeCompleteBuffer returns the complete existing buffer.
 // This can be used if the necessary buffer size is unknown.
 // cap and len of the returned buffer will be equal.
 // Only one buffer (total) can be used at a time.
 func (b *buffer) takeCompleteBuffer() ([]byte, error) {
-    if b.length > 0 {
+    if b.busy() {
         return nil, ErrBusyBuffer
     }
-    return b.buf, nil
+    return b.cachedBuf, nil
 }
 
 // store stores buf, an updated buffer, if its suitable to do so.
-func (b *buffer) store(buf []byte) error {
-    if b.length > 0 {
-        return ErrBusyBuffer
-    } else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
-        b.buf = buf[:cap(buf)]
+func (b *buffer) store(buf []byte) {
+    if cap(buf) <= maxCachedBufSize && cap(buf) > cap(b.cachedBuf) {
+        b.cachedBuf = buf[:cap(buf)]
     }
-    return nil
 }
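Not part of the commit, but a sketch of how the reworked buffer is meant to be driven: the connection is no longer stored in the buffer; instead a `readerFunc` (in the driver, `mc.readWithTimeout`) is handed to each `fill`/`readNext` call. The snippet below is a hypothetical package-internal test (the name `TestBufferReaderFuncSketch` is invented for illustration) that uses a `bytes.Reader` in place of the network connection, which works because its `Read` method already has the `readerFunc` signature.

```go
package mysql

import (
	"bytes"
	"testing"
)

// TestBufferReaderFuncSketch is a hypothetical test, not part of this commit.
func TestBufferReaderFuncSketch(t *testing.T) {
	// A bytes.Reader stands in for the connection that mc.readWithTimeout wraps.
	src := bytes.NewReader([]byte{0x01, 0x02, 0x03, 0x04, 0x05})

	b := newBuffer()                     // allocates only the reusable cached buffer
	data, err := b.readNext(4, src.Read) // the reader is supplied per call, not stored
	if err != nil {
		t.Fatal(err)
	}
	if len(data) != 4 || !b.busy() {
		t.Fatalf("unexpected state: data=%v busy=%v", data, b.busy())
	}
}
```

Passing the reader at each call site is what lets the `net.Conn`, the timeout field, and the `SetReadDeadline` handling drop out of buffer.go, as the deleted lines above show.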
