Skip to content

Commit a4ff899

Browse files
committed
Merge remote-tracking branch 'origin/master' into dev/bump-client-go
Signed-off-by: zyguan <zhongyangguan@gmail.com>
2 parents de2d8d6 + 84d8269 commit a4ff899

67 files changed

Lines changed: 1622 additions & 505 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

DEPS.bzl

Lines changed: 6 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -6582,13 +6582,13 @@ def go_deps():
65826582
name = "com_github_pingcap_tipb",
65836583
build_file_proto_mode = "disable_global",
65846584
importpath = "github.com/pingcap/tipb",
6585-
sha256 = "2d6aaef873e175599c39f6fe3cf85cabce88a69c157ff7ec4c22fbcbc97648dd",
6586-
strip_prefix = "github.com/pingcap/tipb@v0.0.0-20260210113932-1447c9d7e9fe",
6585+
sha256 = "68768a27ed6c35716fcb01a0b4a15ff13e5c1a5dc11acc7a3d44ba02a2742077",
6586+
strip_prefix = "github.com/pingcap/tipb@v0.0.0-20260414032333-da912b84de6f",
65876587
urls = [
6588-
"http://bazel-cache.pingcap.net:8080/gomod/github.com/pingcap/tipb/com_github_pingcap_tipb-v0.0.0-20260210113932-1447c9d7e9fe.zip",
6589-
"http://ats.apps.svc/gomod/github.com/pingcap/tipb/com_github_pingcap_tipb-v0.0.0-20260210113932-1447c9d7e9fe.zip",
6590-
"https://cache.hawkingrei.com/gomod/github.com/pingcap/tipb/com_github_pingcap_tipb-v0.0.0-20260210113932-1447c9d7e9fe.zip",
6591-
"https://storage.googleapis.com/pingcapmirror/gomod/github.com/pingcap/tipb/com_github_pingcap_tipb-v0.0.0-20260210113932-1447c9d7e9fe.zip",
6588+
"http://bazel-cache.pingcap.net:8080/gomod/github.com/pingcap/tipb/com_github_pingcap_tipb-v0.0.0-20260414032333-da912b84de6f.zip",
6589+
"http://ats.apps.svc/gomod/github.com/pingcap/tipb/com_github_pingcap_tipb-v0.0.0-20260414032333-da912b84de6f.zip",
6590+
"https://cache.hawkingrei.com/gomod/github.com/pingcap/tipb/com_github_pingcap_tipb-v0.0.0-20260414032333-da912b84de6f.zip",
6591+
"https://storage.googleapis.com/pingcapmirror/gomod/github.com/pingcap/tipb/com_github_pingcap_tipb-v0.0.0-20260414032333-da912b84de6f.zip",
65926592
],
65936593
)
65946594
go_repository(

br/pkg/stream/crr/internal/checkpoint/randomized_integration_test.go

Lines changed: 20 additions & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -19,6 +19,7 @@ import (
1919
"fmt"
2020
"math"
2121
"math/rand"
22+
"os"
2223
"sync"
2324
"testing"
2425
"time"
@@ -34,7 +35,7 @@ func TestCheckpointCalculatorRandomizedCRRSimulation(t *testing.T) {
3435
ctx := context.Background()
3536
tc := testutil.NewTestContext(t)
3637
cfg := randomizedCRRSimulationConfig{
37-
Iterations: 1000,
38+
Iterations: 300,
3839
InitialStores: 3,
3940
MaxStores: 12,
4041
RegionCount: 12,
@@ -268,22 +269,24 @@ func (s *randomizedCRRSimulation) runRound(
268269

269270
s.rememberSyncedTS()
270271

271-
s.log(
272-
"randomized crr round",
273-
zap.Int("round", round),
274-
zap.Uint64s("flushed-stores", roundLog.flushedStores),
275-
zap.Int("replicated-files", roundLog.replicatedFiles),
276-
zap.Bool("restarted-calculator", roundLog.restartedCalculator),
277-
zap.Uint64s("added-stores", roundLog.addedStores),
278-
zap.Uint64s("removed-stores", roundLog.removedStores),
279-
zap.Uint64s("scattered-regions", roundLog.scatteredRegions),
280-
zap.Strings("action-order", roundLog.actionOrder),
281-
zap.Bool("advanced", roundLog.advanced),
282-
zap.Uint64("checkpoint", roundLog.checkpoint),
283-
zap.Uint64("safe-checkpoint", *lastSafeCheckpoint),
284-
zap.Uint64("validated-checkpoint", *lastValidatedCheckpoint),
285-
zap.String("state", s.describeState()),
286-
)
272+
if os.Getenv("TIDB_RANDOMIZED_CRR_SIM_ROUND_LOG") != "" {
273+
s.log(
274+
"randomized crr round",
275+
zap.Int("round", round),
276+
zap.Uint64s("flushed-stores", roundLog.flushedStores),
277+
zap.Int("replicated-files", roundLog.replicatedFiles),
278+
zap.Bool("restarted-calculator", roundLog.restartedCalculator),
279+
zap.Uint64s("added-stores", roundLog.addedStores),
280+
zap.Uint64s("removed-stores", roundLog.removedStores),
281+
zap.Uint64s("scattered-regions", roundLog.scatteredRegions),
282+
zap.Strings("action-order", roundLog.actionOrder),
283+
zap.Bool("advanced", roundLog.advanced),
284+
zap.Uint64("checkpoint", roundLog.checkpoint),
285+
zap.Uint64("safe-checkpoint", *lastSafeCheckpoint),
286+
zap.Uint64("validated-checkpoint", *lastValidatedCheckpoint),
287+
zap.String("state", s.describeState()),
288+
)
289+
}
287290

288291
if roundLog.advanced {
289292
require.GreaterOrEqualf(

go.mod

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -106,7 +106,7 @@ require (
106106
github.com/pingcap/metering_sdk v0.0.0-20260324055927-14fead745f1d
107107
github.com/pingcap/sysutil v1.0.1-0.20240311050922-ae81ee01f3a5
108108
github.com/pingcap/tidb/pkg/parser v0.0.0-20211011031125-9b13dc409c5e
109-
github.com/pingcap/tipb v0.0.0-20260210113932-1447c9d7e9fe
109+
github.com/pingcap/tipb v0.0.0-20260414032333-da912b84de6f
110110
github.com/prometheus/client_golang v1.23.0
111111
github.com/prometheus/client_model v0.6.2
112112
github.com/prometheus/common v0.65.0

go.sum

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -738,8 +738,8 @@ github.com/pingcap/metering_sdk v0.0.0-20260324055927-14fead745f1d h1:5JCgncG9X7
738738
github.com/pingcap/metering_sdk v0.0.0-20260324055927-14fead745f1d/go.mod h1:HMNxmg0/lrn3SPGJ6LTZqP0WwEpcXMu9s/4TWJbzT8w=
739739
github.com/pingcap/sysutil v1.0.1-0.20240311050922-ae81ee01f3a5 h1:T4pXRhBflzDeAhmOQHNPRRogMYxP13V7BkYw3ZsoSfE=
740740
github.com/pingcap/sysutil v1.0.1-0.20240311050922-ae81ee01f3a5/go.mod h1:rlimy0GcTvjiJqvD5mXTRr8O2eNZPBrcUgiWVYp9530=
741-
github.com/pingcap/tipb v0.0.0-20260210113932-1447c9d7e9fe h1:Zmz9mON+2NoKDVjkJbk6NZbFoTzVzk8MPTbRnu+MiVM=
742-
github.com/pingcap/tipb v0.0.0-20260210113932-1447c9d7e9fe/go.mod h1:RM8iRcMalzOthG2XJxnNBniM4xFGb/lDwHUwqkaVzt4=
741+
github.com/pingcap/tipb v0.0.0-20260414032333-da912b84de6f h1:+IEEq1wl/kxfGK/qOCe9Bu0Kk9ERqxrzeGoKazevWrw=
742+
github.com/pingcap/tipb v0.0.0-20260414032333-da912b84de6f/go.mod h1:RM8iRcMalzOthG2XJxnNBniM4xFGb/lDwHUwqkaVzt4=
743743
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
744744
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
745745
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=

pkg/ddl/column_modify_test.go

Lines changed: 19 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -655,3 +655,22 @@ func TestModifyColumnReorgCheckpoint(t *testing.T) {
655655
require.Len(t, rangeCnts, 2) // It should have two rounds for loading table ranges.
656656
require.Less(t, rangeCnts[1], rangeCnts[0]) // Verify if the checkpoint is progressing.
657657
}
658+
659+
func TestIssue37611(t *testing.T) {
660+
store := testkit.CreateMockStoreWithSchemaLease(t, columnModifyLease)
661+
662+
tk := testkit.NewTestKit(t, store)
663+
tk.MustExec("use test")
664+
tk.MustExec("create table t(a char(5), b char(6) as (concat(a, a)), index idx(b));")
665+
tk.MustExec("set @@sql_mode='';")
666+
tk.MustExec("insert into t (a) values ('aaa');")
667+
tk.MustExec("insert into t (a) values ('aaaa');")
668+
tk.MustGetErrCode("alter table t modify b char(10) as (concat(a, a));", errno.ErrUnsupportedOnGeneratedColumn)
669+
tk.MustExec("set @@sql_mode=default;")
670+
tk.MustQuery("select * from t ignore index(idx) where b = 'aaaaaa';").Check(testkit.Rows("aaa aaaaaa", "aaaa aaaaaa"))
671+
tk.MustQuery("select * from t force index(idx) where b = 'aaaaaa';").Check(testkit.Rows("aaa aaaaaa", "aaaa aaaaaa"))
672+
tk.MustGetErrCode("alter table t change column b c char(10) as (concat(a, a));", errno.ErrUnsupportedOnGeneratedColumn)
673+
674+
tk.MustExec("create table t2(a char(5), b char(6) as (concat(a, a)) stored, index idx(b));")
675+
tk.MustGetErrCode("alter table t2 modify b char(10) as (concat(a, a)) stored;", errno.ErrUnsupportedOnGeneratedColumn)
676+
}

pkg/ddl/generated_column.go

Lines changed: 19 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -389,20 +389,32 @@ func checkIllegalFn4Generated(name string, genType int, expr ast.ExprNode) error
389389
}
390390

391391
func checkIndexOrStored(tbl table.Table, oldCol, newCol *table.Column) error {
392+
isIndexed := false
393+
for _, idx := range tbl.Indices() {
394+
for _, col := range idx.Meta().Columns {
395+
if col.Name.L == oldCol.Name.L || col.Name.L == newCol.Name.L {
396+
isIndexed = true
397+
break
398+
}
399+
}
400+
if isIndexed {
401+
break
402+
}
403+
}
404+
392405
if oldCol.GeneratedExprString == newCol.GeneratedExprString {
393-
return nil
406+
if oldCol.FieldType.Equal(&newCol.FieldType) || !isIndexed {
407+
return nil
408+
}
409+
return dbterror.ErrUnsupportedOnGeneratedColumn.GenWithStack("Unsupported modification for generated columns covered by an index")
394410
}
395411

396412
if newCol.GeneratedStored {
397413
return dbterror.ErrUnsupportedOnGeneratedColumn.GenWithStackByArgs("modifying a stored column")
398414
}
399415

400-
for _, idx := range tbl.Indices() {
401-
for _, col := range idx.Meta().Columns {
402-
if col.Name.L == newCol.Name.L {
403-
return dbterror.ErrUnsupportedOnGeneratedColumn.GenWithStackByArgs("modifying an indexed column")
404-
}
405-
}
416+
if isIndexed {
417+
return dbterror.ErrUnsupportedOnGeneratedColumn.GenWithStack("Unsupported modification for generated columns covered by an index")
406418
}
407419
return nil
408420
}

pkg/ddl/tests/tiflash/ddl_tiflash_test.go

Lines changed: 33 additions & 27 deletions
Original file line number | Diff line number | Diff line change
@@ -518,6 +518,25 @@ func CheckTableAvailable(dom *domain.Domain, t *testing.T, count uint64, labels
518518
CheckTableAvailableWithTableName(dom, t, count, labels, "test", "ddltiflash")
519519
}
520520

521+
func tableReplicaWithTableName(dom *domain.Domain, db string, table string) *model.TiFlashReplicaInfo {
522+
tb, err := dom.InfoSchema().TableByName(context.Background(), ast.NewCIStr(db), ast.NewCIStr(table))
523+
if err != nil || tb == nil {
524+
return nil
525+
}
526+
return tb.Meta().TiFlashReplica
527+
}
528+
529+
func waitTableReplicaStateWithTableName(dom *domain.Domain, t *testing.T, db string, table string, available bool, timeout time.Duration) {
530+
t.Helper()
531+
require.Eventually(t, func() bool {
532+
replica := tableReplicaWithTableName(dom, db, table)
533+
return replica != nil && replica.Available == available
534+
}, timeout, ddl.PollTiFlashInterval/2)
535+
replica := tableReplicaWithTableName(dom, db, table)
536+
require.NotNil(t, replica)
537+
require.Equal(t, available, replica.Available)
538+
}
539+
521540
func CheckTableNoReplica(dom *domain.Domain, t *testing.T, db string, table string) {
522541
tb, err := dom.InfoSchema().TableByName(context.Background(), ast.NewCIStr(db), ast.NewCIStr(table))
523542
require.NoError(t, err)
@@ -1316,44 +1335,31 @@ func TestTiFlashAvailableAfterResetReplica(t *testing.T) {
13161335
func TestTiFlashPartitionNotAvailable(t *testing.T) {
13171336
s, teardown := createTiFlashContext(t)
13181337
defer teardown()
1319-
tk := testkit.NewTestKit(t, s.store)
1338+
se := session.CreateSessionAndSetID(t, s.store)
1339+
defer se.Close()
1340+
transitionTimeout := ddl.PollTiFlashInterval * RoundToBeAvailable * 6
13201341

1321-
tk.MustExec("use test")
1322-
tk.MustExec("drop table if exists ddltiflash")
1323-
tk.MustExec("create table ddltiflash(z int) PARTITION BY RANGE(z) (PARTITION p0 VALUES LESS THAN (10))")
1342+
session.MustExec(t, se, "use test")
1343+
session.MustExec(t, se, "drop table if exists ddltiflash")
1344+
session.MustExec(t, se, "create table ddltiflash(z int) PARTITION BY RANGE(z) (PARTITION p0 VALUES LESS THAN (10))")
13241345

13251346
tb, err := s.dom.InfoSchema().TableByName(context.Background(), ast.NewCIStr("test"), ast.NewCIStr("ddltiflash"))
13261347
require.NoError(t, err)
13271348
require.NotNil(t, tb)
13281349

1329-
tk.MustExec("alter table ddltiflash set tiflash replica 1")
1350+
session.MustExec(t, se, "alter table ddltiflash set tiflash replica 1")
13301351
s.tiflash.ResetSyncStatus(int(tb.Meta().Partition.Definitions[0].ID), false)
1331-
time.Sleep(ddl.PollTiFlashInterval * RoundToBeAvailable * 3)
1332-
1333-
tb, err = s.dom.InfoSchema().TableByName(context.Background(), ast.NewCIStr("test"), ast.NewCIStr("ddltiflash"))
1334-
require.NoError(t, err)
1335-
require.NotNil(t, tb)
1336-
replica := tb.Meta().TiFlashReplica
1337-
require.NotNil(t, replica)
1338-
require.False(t, replica.Available)
1352+
waitTableReplicaStateWithTableName(s.dom, t, "test", "ddltiflash", false, transitionTimeout)
13391353

13401354
s.tiflash.ResetSyncStatus(int(tb.Meta().Partition.Definitions[0].ID), true)
1341-
time.Sleep(ddl.PollTiFlashInterval * RoundToBeAvailable * 3)
1342-
1343-
tb, err = s.dom.InfoSchema().TableByName(context.Background(), ast.NewCIStr("test"), ast.NewCIStr("ddltiflash"))
1344-
require.NoError(t, err)
1345-
require.NotNil(t, tb)
1346-
replica = tb.Meta().TiFlashReplica
1347-
require.NotNil(t, replica)
1348-
require.True(t, replica.Available)
1355+
waitTableReplicaStateWithTableName(s.dom, t, "test", "ddltiflash", true, transitionTimeout)
13491356

13501357
s.tiflash.ResetSyncStatus(int(tb.Meta().Partition.Definitions[0].ID), false)
1351-
time.Sleep(ddl.PollTiFlashInterval * RoundToBeAvailable * 3)
1352-
require.NoError(t, err)
1353-
require.NotNil(t, tb)
1354-
replica = tb.Meta().TiFlashReplica
1355-
require.NotNil(t, replica)
1356-
require.True(t, replica.Available)
1358+
require.Never(t, func() bool {
1359+
replica := tableReplicaWithTableName(s.dom, "test", "ddltiflash")
1360+
return replica == nil || !replica.Available
1361+
}, ddl.PollTiFlashInterval*RoundToBeAvailable*3, ddl.PollTiFlashInterval/2)
1362+
CheckTableAvailable(s.dom, t, 1, []string{})
13571363
}
13581364

13591365
func TestTiFlashAvailableAfterAddPartition(t *testing.T) {

pkg/executor/adapter.go

Lines changed: 63 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -419,7 +419,7 @@ func (a *ExecStmt) PointGet(ctx context.Context) (*recordSet, error) {
419419
}
420420

421421
if executor == nil {
422-
b := newExecutorBuilder(a.Ctx, a.InfoSchema, a.Ti)
422+
b := newExecutorBuilder(ctx, a.Ctx, a.InfoSchema, a.Ti)
423423
executor = b.build(a.Plan)
424424
if b.err != nil {
425425
return nil, b.err
@@ -678,7 +678,7 @@ func (a *ExecStmt) Exec(ctx context.Context) (_ sqlexec.RecordSet, err error) {
678678
execStartTime = time.Now()
679679
}
680680

681-
e, err := a.buildExecutor()
681+
e, err := a.buildExecutor(ctx)
682682
if err != nil {
683683
return nil, err
684684
}
@@ -1281,6 +1281,11 @@ func (a *ExecStmt) handlePessimisticDML(ctx context.Context, e exec.Executor) (e
12811281

12821282
// acquire xlocks
12831283
keys = txnCtx.CollectUnchangedKeysForXLock(keys)
1284+
sharedKeys := txnCtx.CollectUnchangedKeysForSLock(nil)
1285+
keys, sharedKeys, err = moveWrittenSharedLockKeysToExclusive(ctx, txn, keys, sharedKeys)
1286+
if err != nil {
1287+
return err
1288+
}
12841289
if ex, err := tryLockKeys(e, keys, false); err != nil {
12851290
return err
12861291
} else if ex != nil {
@@ -1289,9 +1294,8 @@ func (a *ExecStmt) handlePessimisticDML(ctx context.Context, e exec.Executor) (e
12891294
}
12901295

12911296
// acquire slocks
1292-
keys = txnCtx.CollectUnchangedKeysForSLock(keys[:0])
12931297
startLock := time.Now()
1294-
if ex, err := tryLockKeys(e, keys, true); err != nil {
1298+
if ex, err := tryLockKeys(e, sharedKeys, true); err != nil {
12951299
return err
12961300
} else if ex != nil {
12971301
e = ex
@@ -1303,6 +1307,52 @@ func (a *ExecStmt) handlePessimisticDML(ctx context.Context, e exec.Executor) (e
13031307
}
13041308
}
13051309

1310+
func moveWrittenSharedLockKeysToExclusive(
1311+
ctx context.Context,
1312+
txn kv.Transaction,
1313+
exclusiveKeys []kv.Key,
1314+
sharedKeys []kv.Key,
1315+
) (finalExclusiveKeys []kv.Key, finalSharedKeys []kv.Key, err error) {
1316+
if len(sharedKeys) == 0 {
1317+
return exclusiveKeys, sharedKeys, nil
1318+
}
1319+
1320+
exclusiveKeySet := make(map[string]struct{}, len(exclusiveKeys))
1321+
for _, key := range exclusiveKeys {
1322+
exclusiveKeySet[string(key)] = struct{}{}
1323+
}
1324+
1325+
memBuffer := txn.GetMemBuffer()
1326+
memBuffer.RLock()
1327+
defer memBuffer.RUnlock()
1328+
1329+
// sharedKeys is collected locally for this lock phase, so reuse its buffer
1330+
// for the filtered shared-only result.
1331+
sharedOnlyKeys := sharedKeys[:0]
1332+
for _, key := range sharedKeys {
1333+
if _, ok := exclusiveKeySet[string(key)]; ok {
1334+
continue
1335+
}
1336+
1337+
// A key written by this transaction must not be protected only by a
1338+
// shared pessimistic lock, otherwise commit prewrite would put over
1339+
// its own shared pessimistic lock.
1340+
_, err := memBuffer.GetLocal(ctx, key)
1341+
if err == nil {
1342+
exclusiveKeys = append(exclusiveKeys, key)
1343+
exclusiveKeySet[string(key)] = struct{}{}
1344+
continue
1345+
}
1346+
if !kv.ErrNotExist.Equal(err) {
1347+
return nil, nil, err
1348+
}
1349+
1350+
sharedOnlyKeys = append(sharedOnlyKeys, key)
1351+
}
1352+
1353+
return exclusiveKeys, sharedOnlyKeys, nil
1354+
}
1355+
13061356
// updateFKCheckLockStats updates the Lock stats of FK check executors after the deferred
13071357
// pessimistic lock phase completes. In pessimistic mode, FK check keys are not locked
13081358
// inline during doCheck but deferred to handlePessimisticDML. This function attributes
@@ -1371,7 +1421,7 @@ func (a *ExecStmt) handlePessimisticLockError(ctx context.Context, lockErr error
13711421
a.resetPhaseDurations()
13721422

13731423
a.inheritContextFromExecuteStmt()
1374-
e, err := a.buildExecutor()
1424+
e, err := a.buildExecutor(ctx)
13751425
if err != nil {
13761426
return nil, err
13771427
}
@@ -1397,28 +1447,28 @@ type pessimisticTxn interface {
13971447
}
13981448

13991449
// buildExecutor build an executor from plan, prepared statement may need additional procedure.
1400-
func (a *ExecStmt) buildExecutor() (exec.Executor, error) {
1450+
func (a *ExecStmt) buildExecutor(ctx context.Context) (exec.Executor, error) {
14011451
defer func(start time.Time) { a.phaseBuildDurations[0] += time.Since(start) }(time.Now())
1402-
ctx := a.Ctx
1403-
stmtCtx := ctx.GetSessionVars().StmtCtx
1452+
sctx := a.Ctx
1453+
stmtCtx := sctx.GetSessionVars().StmtCtx
14041454
if _, ok := a.Plan.(*plannercore.Execute); !ok {
14051455
if stmtCtx.Priority == mysql.NoPriority && a.LowerPriority {
14061456
stmtCtx.Priority = kv.PriorityLow
14071457
}
14081458
}
1409-
if _, ok := a.Plan.(*plannercore.Analyze); ok && ctx.GetSessionVars().InRestrictedSQL {
1410-
ctx.GetSessionVars().StmtCtx.Priority = kv.PriorityLow
1459+
if _, ok := a.Plan.(*plannercore.Analyze); ok && sctx.GetSessionVars().InRestrictedSQL {
1460+
sctx.GetSessionVars().StmtCtx.Priority = kv.PriorityLow
14111461
}
14121462

1413-
b := newExecutorBuilder(ctx, a.InfoSchema, a.Ti)
1463+
b := newExecutorBuilder(ctx, sctx, a.InfoSchema, a.Ti)
14141464
e := b.build(a.Plan)
14151465
if b.err != nil {
14161466
return nil, errors.Trace(b.err)
14171467
}
14181468

14191469
failpoint.Inject("assertTxnManagerAfterBuildExecutor", func() {
14201470
sessiontxn.RecordAssert(a.Ctx, "assertTxnManagerAfterBuildExecutor", true)
1421-
sessiontxn.AssertTxnManagerInfoSchema(b.ctx, b.is)
1471+
sessiontxn.AssertTxnManagerInfoSchema(b.sctx, b.is)
14221472
})
14231473

14241474
// ExecuteExec is not a real Executor, we only use it to build another Executor from a prepared statement.
@@ -1428,7 +1478,7 @@ func (a *ExecStmt) buildExecutor() (exec.Executor, error) {
14281478
return nil, err
14291479
}
14301480
if executorExec.lowerPriority {
1431-
ctx.GetSessionVars().StmtCtx.Priority = kv.PriorityLow
1481+
sctx.GetSessionVars().StmtCtx.Priority = kv.PriorityLow
14321482
}
14331483
e = executorExec.stmtExec
14341484
}

0 commit comments

Comments (0)