Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 27 additions & 8 deletions go/binlog/gomysql_reader.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,16 @@ type GoMySQLReader struct {
// LastTrxCoords are the coordinates of the last transaction completely read.
// If using the file coordinates it is binlog position of the transaction's XID event.
LastTrxCoords mysql.BinlogCoordinates
// currentTrxCoords is set once per GTIDEvent and shared by all RowsEvents within
// the same transaction. It points to currentCoordinates (a *LazyGTIDCoordinates),
// which is replaced at the next GTIDEvent — so old entries retain valid references.
// Only accessed from within the StreamEvents goroutine; no mutex needed.
currentTrxCoords mysql.BinlogCoordinates
// lastCommittedGTIDSet is the MysqlGTIDSet from the most recently seen XIDEvent
// (or the initial coordinates). It is immutable once set and used as the base for
// LazyGTIDCoordinates so we avoid cloning the full set on each GTIDEvent.
// Only written from within StreamEvents; no mutex needed.
lastCommittedGTIDSet *gomysql.MysqlGTIDSet
}

func NewGoMySQLReader(migrationContext *base.MigrationContext) *GoMySQLReader {
Expand Down Expand Up @@ -68,6 +78,7 @@ func (this *GoMySQLReader) ConnectBinlogStreamer(coordinates mysql.BinlogCoordin
// Start sync with specified GTID set or binlog file and position
if this.migrationContext.UseGTIDs {
coords := coordinates.(*mysql.GTIDBinlogCoordinates)
this.lastCommittedGTIDSet = coords.GTIDSet
this.binlogStreamer, err = this.binlogSyncer.StartSyncGTID(coords.GTIDSet)
} else {
coords := this.currentCoordinates.(*mysql.FileBinlogCoordinates)
Expand All @@ -86,7 +97,12 @@ func (this *GoMySQLReader) GetCurrentBinlogCoordinates() mysql.BinlogCoordinates
}

func (this *GoMySQLReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEvent *replication.RowsEvent, entriesChannel chan<- *BinlogEntry) error {
currentCoords := this.GetCurrentBinlogCoordinates()
var currentCoords mysql.BinlogCoordinates
if this.migrationContext.UseGTIDs && this.currentTrxCoords != nil {
currentCoords = this.currentTrxCoords
} else {
currentCoords = this.GetCurrentBinlogCoordinates()
}
dml := ToEventDML(ev.Header.EventType.String())
if dml == NotDML {
return fmt.Errorf("Unknown DML type: %s", ev.Header.EventType.String())
Expand Down Expand Up @@ -167,12 +183,8 @@ func (this *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesCha
return err
}
this.currentCoordinatesMutex.Lock()
if this.LastTrxCoords != nil {
this.currentCoordinates = this.LastTrxCoords.Clone()
}
coords := this.currentCoordinates.(*mysql.GTIDBinlogCoordinates)
trxGset := gomysql.NewUUIDSet(sid, gomysql.Interval{Start: event.GNO, Stop: event.GNO + 1})
coords.GTIDSet.AddSet(trxGset)
this.currentCoordinates = mysql.NewLazyGTIDCoordinates(this.lastCommittedGTIDSet, sid, event.GNO)
this.currentTrxCoords = this.currentCoordinates
this.currentCoordinatesMutex.Unlock()
case *replication.RotateEvent:
if this.migrationContext.UseGTIDs {
Expand All @@ -185,7 +197,14 @@ func (this *GoMySQLReader) StreamEvents(canStopStreaming func() bool, entriesCha
this.currentCoordinatesMutex.Unlock()
case *replication.XIDEvent:
if this.migrationContext.UseGTIDs {
this.LastTrxCoords = &mysql.GTIDBinlogCoordinates{GTIDSet: event.GSet.(*gomysql.MysqlGTIDSet)}
gSet := event.GSet.(*gomysql.MysqlGTIDSet)
if coords, ok := this.LastTrxCoords.(*mysql.GTIDBinlogCoordinates); ok {
coords.GTIDSet = gSet
coords.UUIDSet = nil
} else {
this.LastTrxCoords = &mysql.GTIDBinlogCoordinates{GTIDSet: gSet}
}
this.lastCommittedGTIDSet = gSet
} else {
this.LastTrxCoords = this.currentCoordinates.Clone()
}
Expand Down
201 changes: 201 additions & 0 deletions go/binlog/streaming_bench_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,201 @@
package binlog

import (
"fmt"
"io"
"os"
"strings"
"sync"
"sync/atomic"
"testing"
"time"

"github.com/github/gh-ost/go/base"
"github.com/github/gh-ost/go/mysql"
gomysql "github.com/go-mysql-org/go-mysql/mysql"
"github.com/go-mysql-org/go-mysql/replication"
guuid "github.com/google/uuid"
)

// Workload shape: benchTxCount transactions, each carrying benchRowsPerTx
// row events (plus GTID/XID framing in GTID mode).
const (
	benchTxCount   = 1_000
	benchRowsPerTx = 5
)

// 02b9e2cf-9c8a-11e7-a479-42010ae7009b — one of the real servers in the set.
// Raw 16-byte UUID, in the form carried by a GTIDEvent's SID field.
var benchServerSID = []byte{
	0x02, 0xb9, 0xe2, 0xcf, 0x9c, 0x8a, 0x11, 0xe7,
	0xa4, 0x79, 0x42, 0x01, 0x0a, 0xe7, 0x00, 0x9b,
}

// loadProductionGTIDSet reads the production gtid_executed fixture from disk,
// collapses its whitespace into the comma-separated form that
// ParseMysqlGTIDSet expects, and returns the parsed set. Any failure aborts
// the calling test or benchmark.
func loadProductionGTIDSet(tb testing.TB) *gomysql.MysqlGTIDSet {
	raw, err := os.ReadFile("../../gtid_executed_shard21")
	if err != nil {
		tb.Fatalf("could not load gtid_executed_shard21: %v", err)
	}
	normalized := strings.Join(strings.Fields(string(raw)), ",")
	parsed, err := gomysql.ParseMysqlGTIDSet(normalized)
	if err != nil {
		tb.Fatalf("could not parse GTID set: %v", err)
	}
	return parsed.(*gomysql.MysqlGTIDSet)
}

// buildGTIDEvents synthesizes benchTxCount transactions' worth of binlog
// events in GTID mode: for each transaction, a GTIDEvent, benchRowsPerTx
// row events, and an XIDEvent carrying a snapshot of the executed-GTID set
// accumulated so far on top of initialSet.
func buildGTIDEvents(initialSet *gomysql.MysqlGTIDSet) []*replication.BinlogEvent {
	out := make([]*replication.BinlogEvent, 0, benchTxCount*(benchRowsPerTx+2))
	runningSet := initialSet.Clone().(*gomysql.MysqlGTIDSet)
	// benchServerSID is a fixed, valid 16-byte UUID, so FromBytes cannot fail.
	serverUUID, _ := guuid.FromBytes(benchServerSID)

	for tx := 0; tx < benchTxCount; tx++ {
		gno := int64(73_590_714 + tx)

		out = append(out, &replication.BinlogEvent{
			Header: &replication.EventHeader{EventType: replication.GTID_EVENT},
			Event:  &replication.GTIDEvent{SID: benchServerSID, GNO: gno},
		})

		for row := 0; row < benchRowsPerTx; row++ {
			out = append(out, &replication.BinlogEvent{
				Header: &replication.EventHeader{
					EventType: replication.WRITE_ROWS_EVENTv2,
					LogPos:    uint32(tx*1000 + row + 1),
					EventSize: 100,
				},
				Event: &replication.RowsEvent{
					Table: &replication.TableMapEvent{
						Schema: []byte("mydb"),
						Table:  []byte("orders"),
					},
					Rows: [][]interface{}{{int64(tx), "value"}},
				},
			})
		}

		// Fold this transaction's GTID into the running set, then snapshot
		// the set for the XIDEvent (mirrors what the server reports in GSet).
		runningSet.AddSet(gomysql.NewUUIDSet(serverUUID, gomysql.Interval{Start: gno, Stop: gno + 1}))

		out = append(out, &replication.BinlogEvent{
			Header: &replication.EventHeader{EventType: replication.XID_EVENT},
			Event:  &replication.XIDEvent{GSet: runningSet.Clone()},
		})
	}
	return out
}

// buildFileEvents synthesizes benchTxCount transactions' worth of binlog
// events in classic file/position mode: benchRowsPerTx row events followed
// by an XIDEvent per transaction, with no GTID bookkeeping at all.
func buildFileEvents() []*replication.BinlogEvent {
	out := make([]*replication.BinlogEvent, 0, benchTxCount*(benchRowsPerTx+1))

	for tx := 0; tx < benchTxCount; tx++ {
		for row := 0; row < benchRowsPerTx; row++ {
			out = append(out, &replication.BinlogEvent{
				Header: &replication.EventHeader{
					EventType: replication.WRITE_ROWS_EVENTv2,
					LogPos:    uint32(tx*1000 + row + 1),
					EventSize: 100,
				},
				Event: &replication.RowsEvent{
					Table: &replication.TableMapEvent{
						Schema: []byte("mydb"),
						Table:  []byte("orders"),
					},
					Rows: [][]interface{}{{int64(tx), "value"}},
				},
			})
		}

		out = append(out, &replication.BinlogEvent{
			Header: &replication.EventHeader{
				EventType: replication.XID_EVENT,
				LogPos:    uint32(tx*1000 + benchRowsPerTx + 1),
			},
			Event: &replication.XIDEvent{},
		})
	}
	return out
}

// feedAndRun feeds events into a fresh streamer concurrently with StreamEvents.
// This avoids b.N scaling issues caused by heavy pre-fill setup dominating over
// the (very fast) file-mode processing time.
//
// label is used only for the stderr progress spinner; useGTIDs selects the
// GTID vs file/position code path in the reader under test; initialCoords is
// cloned fresh for each iteration so iterations stay independent.
func feedAndRun(b *testing.B, label string, useGTIDs bool, events []*replication.BinlogEvent, initialCoords mysql.BinlogCoordinates) {
	b.ReportAllocs()

	var iterations atomic.Int64
	done := make(chan struct{})

	// Progress spinner: these benchmarks can run for a while, so give the
	// operator a heartbeat on stderr. Stopped via the done channel below.
	go func() {
		spinner := []string{"|", "/", "-", "\\"}
		tick := time.NewTicker(500 * time.Millisecond)
		defer tick.Stop()
		frame := 0
		for {
			select {
			case <-done:
				fmt.Fprintf(os.Stderr, "\r%-30s done (%d iters) \n", label, iterations.Load())
				return
			case <-tick.C:
				fmt.Fprintf(os.Stderr, "\r%-30s %s iter %d", label, spinner[frame%4], iterations.Load())
				frame++
			}
		}
	}()

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		s := replication.NewBinlogStreamer()

		ctx := &base.MigrationContext{}
		ctx.UseGTIDs = useGTIDs
		reader := &GoMySQLReader{
			migrationContext:        ctx,
			currentCoordinatesMutex: &sync.Mutex{},
			currentCoordinates:      initialCoords.Clone(),
			binlogStreamer:          s,
		}
		// Small channel — events flow through as StreamEvents consumes them.
		entriesCh := make(chan *BinlogEntry, 100)

		// Feed events concurrently so AddEventToStreamer never blocks.
		var feedDone sync.WaitGroup
		feedDone.Add(1)
		go func() {
			defer feedDone.Done()
			for _, ev := range events {
				s.AddEventToStreamer(ev)
			}
			s.AddErrorToStreamer(io.EOF)
		}()

		// Drain entries so StreamEvents never blocks writing to entriesCh.
		var drainDone sync.WaitGroup
		drainDone.Add(1)
		go func() {
			defer drainDone.Done()
			for range entriesCh {
			}
		}()

		// io.EOF is the termination signal deliberately injected by the
		// feeder above; anything else is a genuine failure in the code
		// under test and must not be silently dropped.
		if err := reader.StreamEvents(func() bool { return false }, entriesCh); err != nil && err != io.EOF {
			b.Fatal(err)
		}
		feedDone.Wait()
		close(entriesCh)
		drainDone.Wait()

		iterations.Add(1)
	}

	close(done)
}

// BenchmarkStreamingGTID measures GTID-mode streaming against a large,
// production-shaped executed-GTID set (182 source UUIDs).
func BenchmarkStreamingGTID(b *testing.B) {
	baseSet := loadProductionGTIDSet(b)
	coords := &mysql.GTIDBinlogCoordinates{GTIDSet: baseSet}
	feedAndRun(b, "GTID (182 UUIDs)", true, buildGTIDEvents(baseSet), coords)
}

func BenchmarkStreamingFile(b *testing.B) {
events := buildFileEvents()
initialCoords := &mysql.FileBinlogCoordinates{LogFile: "mysql-bin.000001", LogPos: 0}
feedAndRun(b, "File", false, events, initialCoords)
}
53 changes: 53 additions & 0 deletions go/mysql/binlog_gtid.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,10 @@
package mysql

import (
"fmt"

gomysql "github.com/go-mysql-org/go-mysql/mysql"
uuid "github.com/google/uuid"
)

// GTIDBinlogCoordinates describe binary log coordinates in MySQL GTID format.
Expand Down Expand Up @@ -85,3 +88,53 @@ func (this *GTIDBinlogCoordinates) Clone() BinlogCoordinates {
}
return out
}

// LazyGTIDCoordinates describes the in-flight coordinates of a transaction that
// has been announced via GTIDEvent but not yet committed (XIDEvent not yet seen).
// It holds a stable, immutable reference to the last-committed MysqlGTIDSet and
// the current transaction's GTID. The expensive Clone of the full set is deferred
// until Materialize is actually called, which only happens when external callers
// need a snapshot (via GetCurrentBinlogCoordinates) or when a comparison is made —
// not on every row event in the hot path.
type LazyGTIDCoordinates struct {
	// base is the last-committed GTIDSet. It is borrowed, not owned: the
	// producer must never mutate it after handing it to this struct.
	base *gomysql.MysqlGTIDSet
	// sid is the current transaction's server UUID (from the GTIDEvent).
	sid uuid.UUID
	// gno is the current transaction's GNO (transaction sequence number).
	gno int64
}

// NewLazyGTIDCoordinates creates coordinates for an in-flight transaction.
// base must be the MysqlGTIDSet of the last committed transaction and must
// not be mutated after this call.
func NewLazyGTIDCoordinates(base *gomysql.MysqlGTIDSet, sid uuid.UUID, gno int64) *LazyGTIDCoordinates {
	coords := &LazyGTIDCoordinates{
		base: base,
		sid:  sid,
		gno:  gno,
	}
	return coords
}

// Materialize clones the base set, adds the in-flight GTID, and returns a full
// GTIDBinlogCoordinates. The result is an independent snapshot safe to hold across
// transaction boundaries. This is the only point where a MysqlGTIDSet.Clone occurs.
func (l *LazyGTIDCoordinates) Materialize() *GTIDBinlogCoordinates {
	snapshot := l.base.Clone().(*gomysql.MysqlGTIDSet)
	snapshot.AddGTID(l.sid, l.gno)
	return &GTIDBinlogCoordinates{GTIDSet: snapshot}
}

// String renders the fully materialized coordinates (base set plus the
// in-flight GTID). Note: this clones the base set via Materialize.
func (l *LazyGTIDCoordinates) String() string {
	return l.Materialize().String()
}

// DisplayString renders only the in-flight transaction's own GTID
// ("uuid:gno") without touching the base set — cheap, for logging.
func (l *LazyGTIDCoordinates) DisplayString() string {
	return fmt.Sprintf("%s:%d", l.sid, l.gno)
}

// IsEmpty reports whether no base GTID set has been attached.
func (l *LazyGTIDCoordinates) IsEmpty() bool {
	return l.base == nil
}

// Equals materializes a full snapshot and compares it against other.
// Note: this clones the base set via Materialize.
func (l *LazyGTIDCoordinates) Equals(other BinlogCoordinates) bool {
	snapshot := l.Materialize()
	return snapshot.Equals(other)
}

// SmallerThan materializes a full snapshot and orders it against other.
// Note: this clones the base set via Materialize.
func (l *LazyGTIDCoordinates) SmallerThan(other BinlogCoordinates) bool {
	snapshot := l.Materialize()
	return snapshot.SmallerThan(other)
}

// SmallerThanOrEquals materializes a full snapshot and orders it against
// other. Note: this clones the base set via Materialize.
func (l *LazyGTIDCoordinates) SmallerThanOrEquals(other BinlogCoordinates) bool {
	snapshot := l.Materialize()
	return snapshot.SmallerThanOrEquals(other)
}

// Clone materializes the full coordinates. The returned *GTIDBinlogCoordinates is
// an independent copy; callers receive a concrete type regardless of which
// BinlogCoordinates implementation produced it.
func (l *LazyGTIDCoordinates) Clone() BinlogCoordinates {
	return l.Materialize()
}
Loading