723 lines
22 KiB
Go
723 lines
22 KiB
Go
// Copyright 2015 PingCAP, Inc.
|
|
//
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
// you may not use this file except in compliance with the License.
|
|
// You may obtain a copy of the License at
|
|
//
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
//
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
// See the License for the specific language governing permissions and
|
|
// limitations under the License.
|
|
|
|
package ddl
|
|
|
|
import (
|
|
"fmt"
|
|
"strconv"
|
|
"strings"
|
|
"sync/atomic"
|
|
|
|
"github.com/pingcap/errors"
|
|
"github.com/pingcap/parser/model"
|
|
"github.com/pingcap/tidb/ddl/util"
|
|
"github.com/pingcap/tidb/infoschema"
|
|
"github.com/pingcap/tidb/kv"
|
|
"github.com/pingcap/tidb/meta"
|
|
"github.com/pingcap/tidb/meta/autoid"
|
|
"github.com/pingcap/tidb/table"
|
|
"github.com/pingcap/tidb/tablecodec"
|
|
"github.com/pingcap/tidb/util/gcutil"
|
|
log "github.com/sirupsen/logrus"
|
|
)
|
|
|
|
func onCreateTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
|
|
// gofail: var mockExceedErrorLimit bool
|
|
// if mockExceedErrorLimit {
|
|
// return ver, errors.New("mock do job error")
|
|
// }
|
|
|
|
schemaID := job.SchemaID
|
|
tbInfo := &model.TableInfo{}
|
|
if err := job.DecodeArgs(tbInfo); err != nil {
|
|
// Invalid arguments, cancel this job.
|
|
job.State = model.JobStateCancelled
|
|
return ver, errors.Trace(err)
|
|
}
|
|
|
|
tbInfo.State = model.StateNone
|
|
err := checkTableNotExists(t, job, schemaID, tbInfo.Name.L)
|
|
if err != nil {
|
|
job.State = model.JobStateCancelled
|
|
return ver, errors.Trace(err)
|
|
}
|
|
|
|
ver, err = updateSchemaVersion(t, job)
|
|
if err != nil {
|
|
return ver, errors.Trace(err)
|
|
}
|
|
|
|
switch tbInfo.State {
|
|
case model.StateNone:
|
|
// none -> public
|
|
tbInfo.State = model.StatePublic
|
|
tbInfo.UpdateTS = t.StartTS
|
|
err = t.CreateTableOrView(schemaID, tbInfo)
|
|
if err != nil {
|
|
return ver, errors.Trace(err)
|
|
}
|
|
if atomic.LoadUint32(&EnableSplitTableRegion) != 0 {
|
|
// TODO: Add restrictions to this operation.
|
|
go splitTableRegion(d.store, tbInfo.ID)
|
|
}
|
|
// Finish this job.
|
|
job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tbInfo)
|
|
asyncNotifyEvent(d, &util.Event{Tp: model.ActionCreateTable, TableInfo: tbInfo})
|
|
return ver, nil
|
|
default:
|
|
return ver, ErrInvalidTableState.GenWithStack("invalid table state %v", tbInfo.State)
|
|
}
|
|
}
|
|
|
|
func onCreateView(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
|
|
schemaID := job.SchemaID
|
|
tbInfo := &model.TableInfo{}
|
|
var orReplace bool
|
|
var oldTbInfoID int64
|
|
if err := job.DecodeArgs(tbInfo, &orReplace, &oldTbInfoID); err != nil {
|
|
// Invalid arguments, cancel this job.
|
|
job.State = model.JobStateCancelled
|
|
return ver, errors.Trace(err)
|
|
}
|
|
tbInfo.State = model.StateNone
|
|
err := checkTableNotExists(t, job, schemaID, tbInfo.Name.L)
|
|
if err != nil {
|
|
if infoschema.ErrDatabaseNotExists.Equal(err) || !orReplace {
|
|
job.State = model.JobStateCancelled
|
|
return ver, errors.Trace(err)
|
|
}
|
|
}
|
|
ver, err = updateSchemaVersion(t, job)
|
|
if err != nil {
|
|
return ver, errors.Trace(err)
|
|
}
|
|
switch tbInfo.State {
|
|
case model.StateNone:
|
|
// none -> public
|
|
tbInfo.State = model.StatePublic
|
|
tbInfo.UpdateTS = t.StartTS
|
|
if oldTbInfoID > 0 && orReplace {
|
|
err = t.DropTableOrView(schemaID, oldTbInfoID, true)
|
|
if err != nil {
|
|
return ver, errors.Trace(err)
|
|
}
|
|
}
|
|
err = t.CreateTableOrView(schemaID, tbInfo)
|
|
if err != nil {
|
|
return ver, errors.Trace(err)
|
|
}
|
|
// Finish this job.
|
|
job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tbInfo)
|
|
asyncNotifyEvent(d, &util.Event{Tp: model.ActionCreateView, TableInfo: tbInfo})
|
|
return ver, nil
|
|
default:
|
|
return ver, ErrInvalidTableState.GenWithStack("invalid view state %v", tbInfo.State)
|
|
}
|
|
}
|
|
|
|
// onDropTableOrView steps a DROP TABLE / DROP VIEW job through the state
// machine public -> write only -> delete only -> none, bumping the schema
// version on each real transition so other nodes reload their schema before
// the meta is finally removed.
func onDropTableOrView(t *meta.Meta, job *model.Job) (ver int64, _ error) {
	tblInfo, err := checkTableExistAndCancelNonExistJob(t, job, job.SchemaID)
	if err != nil {
		return ver, errors.Trace(err)
	}

	originalState := job.SchemaState
	switch tblInfo.State {
	case model.StatePublic:
		// public -> write only
		job.SchemaState = model.StateWriteOnly
		tblInfo.State = model.StateWriteOnly
		ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != tblInfo.State)
	case model.StateWriteOnly:
		// write only -> delete only
		job.SchemaState = model.StateDeleteOnly
		tblInfo.State = model.StateDeleteOnly
		ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != tblInfo.State)
	case model.StateDeleteOnly:
		// delete only -> none: remove the table meta for good.
		tblInfo.State = model.StateNone
		ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != tblInfo.State)
		if err != nil {
			return ver, errors.Trace(err)
		}
		if err = t.DropTableOrView(job.SchemaID, job.TableID, true); err != nil {
			// Fall through to the common traced return below.
			break
		}
		// Finish this job.
		job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo)
		// Record the table's key range (and partition IDs, if any) in the job
		// arguments so the data can be cleaned up in the background later.
		startKey := tablecodec.EncodeTablePrefix(job.TableID)
		job.Args = append(job.Args, startKey, getPartitionIDs(tblInfo))
	default:
		err = ErrInvalidTableState.GenWithStack("invalid table state %v", tblInfo.State)
	}

	return ver, errors.Trace(err)
}
|
|
|
|
// Flags recorded in the last argument of a restore-table job, describing the
// GC status observed when the job first ran (see onRestoreTable).
const (
	// restoreTableCheckFlagNone means the GC status has not been checked yet.
	restoreTableCheckFlagNone int64 = iota
	// restoreTableCheckFlagEnableGC records that GC was enabled when the restore job started.
	restoreTableCheckFlagEnableGC
	// restoreTableCheckFlagDisableGC records that GC was already disabled when the restore job started.
	restoreTableCheckFlagDisableGC
)
|
|
|
|
// onRestoreTable restores (un-drops) a table from a historical snapshot. The
// job arguments carry the dropped table's meta, its auto-ID base, the ID of
// the DDL job that dropped it, the snapshot timestamp, and a GC-status flag
// (see the restoreTableCheckFlag constants). The detailed two-step rationale
// is documented inline below.
func (w *worker) onRestoreTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) {
	schemaID := job.SchemaID
	tblInfo := &model.TableInfo{}
	var autoID, dropJobID, restoreTableCheckFlag int64
	var snapshotTS uint64
	if err = job.DecodeArgs(tblInfo, &autoID, &dropJobID, &snapshotTS, &restoreTableCheckFlag); err != nil {
		// Invalid arguments, cancel this job.
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}

	// check GC and safe point
	gcEnable, err := checkGCEnable(w)
	if err != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}

	// NOTE(review): unlike the other error paths here, a name collision does
	// not cancel the job — confirm whether retrying is intentional.
	err = checkTableNotExists(t, job, schemaID, tblInfo.Name.L)
	if err != nil {
		return ver, errors.Trace(err)
	}

	// Restore table divide into 2 steps:
	// 1. Check GC enable status, to decided whether enable GC after restore table.
	//     a. Why not disable GC before put the job to DDL job queue?
	//        Think about concurrency problem. If a restore job-1 is doing and already disabled GC,
	//        then, another restore table job-2 check GC enable will get disable before into the job queue.
	//        then, after restore table job-2 finished, the GC will be disabled.
	//     b. Why split into 2 steps? 1 step also can finish this job: check GC -> disable GC -> restore table -> finish job.
	//        What if the transaction commit failed? then, the job will retry, but the GC already disabled when first running.
	//        So, after this job retry succeed, the GC will be disabled.
	// 2. Do restore table job.
	//     a. Check whether GC enabled, if enabled, disable GC first.
	//     b. Check GC safe point. If drop table time if after safe point time, then can do restore.
	//        otherwise, can't restore table, because the records of the table may already delete by gc.
	//     c. Remove GC task of the table from gc_delete_range table.
	//     d. Create table and rebase table auto ID.
	//     e. Finish.
	switch tblInfo.State {
	case model.StateNone:
		// none -> write only
		// check GC enable and update flag.
		if gcEnable {
			job.Args[len(job.Args)-1] = restoreTableCheckFlagEnableGC
		} else {
			job.Args[len(job.Args)-1] = restoreTableCheckFlagDisableGC
		}

		job.SchemaState = model.StateWriteOnly
		tblInfo.State = model.StateWriteOnly
		ver, err = updateVersionAndTableInfo(t, job, tblInfo, false)
		if err != nil {
			return ver, errors.Trace(err)
		}
	case model.StateWriteOnly:
		// write only -> public
		// do restore table.
		if gcEnable {
			err = disableGC(w)
			if err != nil {
				job.State = model.JobStateCancelled
				return ver, errors.Errorf("disable gc failed, try again later. err: %v", err)
			}
		}
		// check GC safe point
		err = checkSafePoint(w, snapshotTS)
		if err != nil {
			job.State = model.JobStateCancelled
			return ver, errors.Trace(err)
		}
		// Remove dropped table DDL job from gc_delete_range table.
		err = w.delRangeManager.removeFromGCDeleteRange(dropJobID, tblInfo.ID)
		if err != nil {
			return ver, errors.Trace(err)
		}

		tblInfo.State = model.StatePublic
		tblInfo.UpdateTS = t.StartTS
		// Re-create the table meta and seed its auto-ID base in one step.
		err = t.CreateTableAndSetAutoID(schemaID, tblInfo, autoID)
		if err != nil {
			return ver, errors.Trace(err)
		}

		// gofail: var mockRestoreTableCommitErr bool
		// if mockRestoreTableCommitErr && mockRestoreTableCommitErrOnce {
		// mockRestoreTableCommitErrOnce = false
		// kv.MockCommitErrorEnable()
		// }

		ver, err = updateVersionAndTableInfo(t, job, tblInfo, true)
		if err != nil {
			return ver, errors.Trace(err)
		}

		// Finish this job.
		job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
	default:
		return ver, ErrInvalidTableState.GenWithStack("invalid restore table state %v", tblInfo.State)
	}
	return ver, nil
}
|
|
|
|
// mockRestoreTableCommitErrOnce makes sure the `mockRestoreTableCommitErr`
// failpoint (see the gofail comment in onRestoreTable) injects its mock
// commit error only once.
var mockRestoreTableCommitErrOnce = true
|
|
|
|
func enableGC(w *worker) error {
|
|
ctx, err := w.sessPool.get()
|
|
if err != nil {
|
|
return errors.Trace(err)
|
|
}
|
|
defer w.sessPool.put(ctx)
|
|
|
|
return gcutil.EnableGC(ctx)
|
|
}
|
|
|
|
func disableGC(w *worker) error {
|
|
ctx, err := w.sessPool.get()
|
|
if err != nil {
|
|
return errors.Trace(err)
|
|
}
|
|
defer w.sessPool.put(ctx)
|
|
|
|
return gcutil.DisableGC(ctx)
|
|
}
|
|
|
|
func checkGCEnable(w *worker) (enable bool, err error) {
|
|
ctx, err := w.sessPool.get()
|
|
if err != nil {
|
|
return false, errors.Trace(err)
|
|
}
|
|
defer w.sessPool.put(ctx)
|
|
|
|
return gcutil.CheckGCEnable(ctx)
|
|
}
|
|
|
|
func checkSafePoint(w *worker, snapshotTS uint64) error {
|
|
ctx, err := w.sessPool.get()
|
|
if err != nil {
|
|
return errors.Trace(err)
|
|
}
|
|
defer w.sessPool.put(ctx)
|
|
|
|
return gcutil.ValidateSnapshot(ctx, snapshotTS)
|
|
}
|
|
|
|
// splitableStore is implemented by kv storages that can split a region at a
// given key (used by splitTableRegion).
type splitableStore interface {
	SplitRegion(splitKey kv.Key) error
}
|
|
|
|
func splitTableRegion(store kv.Storage, tableID int64) {
|
|
s, ok := store.(splitableStore)
|
|
if !ok {
|
|
return
|
|
}
|
|
tableStartKey := tablecodec.GenTablePrefix(tableID)
|
|
if err := s.SplitRegion(tableStartKey); err != nil {
|
|
// It will be automatically split by TiKV later.
|
|
log.Warnf("[ddl] splitting table region failed %v", errors.ErrorStack(err))
|
|
}
|
|
}
|
|
|
|
func getTable(store kv.Storage, schemaID int64, tblInfo *model.TableInfo) (table.Table, error) {
|
|
alloc := autoid.NewAllocator(store, tblInfo.GetDBID(schemaID), tblInfo.IsAutoIncColUnsigned())
|
|
tbl, err := table.TableFromMeta(alloc, tblInfo)
|
|
return tbl, errors.Trace(err)
|
|
}
|
|
|
|
func getTableInfoAndCancelFaultJob(t *meta.Meta, job *model.Job, schemaID int64) (*model.TableInfo, error) {
|
|
tblInfo, err := checkTableExistAndCancelNonExistJob(t, job, schemaID)
|
|
if err != nil {
|
|
return nil, errors.Trace(err)
|
|
}
|
|
|
|
if tblInfo.State != model.StatePublic {
|
|
job.State = model.JobStateCancelled
|
|
return nil, ErrInvalidTableState.GenWithStack("table %s is not in public, but %s", tblInfo.Name, tblInfo.State)
|
|
}
|
|
|
|
return tblInfo, nil
|
|
}
|
|
|
|
func checkTableExistAndCancelNonExistJob(t *meta.Meta, job *model.Job, schemaID int64) (*model.TableInfo, error) {
|
|
tableID := job.TableID
|
|
// Check this table's database.
|
|
tblInfo, err := t.GetTable(schemaID, tableID)
|
|
if err != nil {
|
|
if meta.ErrDBNotExists.Equal(err) {
|
|
job.State = model.JobStateCancelled
|
|
return nil, errors.Trace(infoschema.ErrDatabaseNotExists.GenWithStackByArgs(
|
|
fmt.Sprintf("(Schema ID %d)", schemaID),
|
|
))
|
|
}
|
|
return nil, errors.Trace(err)
|
|
}
|
|
|
|
// Check the table.
|
|
if tblInfo == nil {
|
|
job.State = model.JobStateCancelled
|
|
return nil, errors.Trace(infoschema.ErrTableNotExists.GenWithStackByArgs(
|
|
fmt.Sprintf("(Schema ID %d)", schemaID),
|
|
fmt.Sprintf("(Table ID %d)", tableID),
|
|
))
|
|
}
|
|
return tblInfo, nil
|
|
}
|
|
|
|
// onTruncateTable delete old table meta, and creates a new table identical to old table except for table ID.
// As all the old data is encoded with old table ID, it can not be accessed any more.
// A background job will be created to delete old data.
func onTruncateTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
	schemaID := job.SchemaID
	tableID := job.TableID
	// The replacement table ID was allocated when the job was queued.
	var newTableID int64
	err := job.DecodeArgs(&newTableID)
	if err != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}
	tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID)
	if err != nil {
		return ver, errors.Trace(err)
	}

	// Drop the old meta first; the same TableInfo is re-created below under
	// the new table ID.
	err = t.DropTableOrView(schemaID, tblInfo.ID, true)
	if err != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}
	// gofail: var truncateTableErr bool
	// if truncateTableErr {
	// job.State = model.JobStateCancelled
	// return ver, errors.New("occur an error after dropping table.")
	// }

	var oldPartitionIDs []int64
	if tblInfo.GetPartitionInfo() != nil {
		oldPartitionIDs = getPartitionIDs(tblInfo)
		// We use the new partition ID because all the old data is encoded with the old partition ID, it can not be accessed anymore.
		err = truncateTableByReassignPartitionIDs(t, tblInfo)
		if err != nil {
			return ver, errors.Trace(err)
		}
	}

	tblInfo.ID = newTableID
	err = t.CreateTableOrView(schemaID, tblInfo)
	if err != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}

	ver, err = updateSchemaVersion(t, job)
	if err != nil {
		return ver, errors.Trace(err)
	}
	job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
	asyncNotifyEvent(d, &util.Event{Tp: model.ActionTruncateTable, TableInfo: tblInfo})
	// Hand the old table's start key and the old partition IDs over via the
	// job arguments so the old data can be deleted in the background.
	startKey := tablecodec.EncodeTablePrefix(tableID)
	job.Args = []interface{}{startKey, oldPartitionIDs}
	return ver, nil
}
|
|
|
|
// onRebaseAutoID handles ALTER TABLE ... AUTO_INCREMENT = newBase: it stores
// the new base in the table meta and rebases the table's auto-ID allocator.
func onRebaseAutoID(store kv.Storage, t *meta.Meta, job *model.Job) (ver int64, _ error) {
	schemaID := job.SchemaID
	var newBase int64
	err := job.DecodeArgs(&newBase)
	if err != nil {
		// Invalid arguments, cancel this job.
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}
	tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID)
	if err != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}
	tblInfo.AutoIncID = newBase
	// Build a table.Table so the allocator itself can be rebased, not just the meta.
	tbl, err := getTable(store, schemaID, tblInfo)
	if err != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}
	// The operation of the minus 1 to make sure that the current value doesn't be used,
	// the next Alloc operation will get this value.
	// Its behavior is consistent with MySQL.
	err = tbl.RebaseAutoID(nil, tblInfo.AutoIncID-1, false)
	if err != nil {
		return ver, errors.Trace(err)
	}
	ver, err = updateVersionAndTableInfo(t, job, tblInfo, true)
	if err != nil {
		return ver, errors.Trace(err)
	}
	job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
	return ver, nil
}
|
|
|
|
func onShardRowID(t *meta.Meta, job *model.Job) (ver int64, _ error) {
|
|
var shardRowIDBits uint64
|
|
err := job.DecodeArgs(&shardRowIDBits)
|
|
if err != nil {
|
|
job.State = model.JobStateCancelled
|
|
return ver, errors.Trace(err)
|
|
}
|
|
tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID)
|
|
if err != nil {
|
|
job.State = model.JobStateCancelled
|
|
return ver, errors.Trace(err)
|
|
}
|
|
tblInfo.ShardRowIDBits = shardRowIDBits
|
|
ver, err = updateVersionAndTableInfo(t, job, tblInfo, true)
|
|
if err != nil {
|
|
job.State = model.JobStateCancelled
|
|
return ver, errors.Trace(err)
|
|
}
|
|
job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
|
|
return ver, nil
|
|
}
|
|
|
|
// onRenameTable moves a table's meta from (oldSchemaID, old name) to
// (job.SchemaID, tableName). When the table crosses databases, the
// auto-increment base is read from the old database and re-seeded under the
// new one so that allocated IDs keep increasing after the move.
func onRenameTable(t *meta.Meta, job *model.Job) (ver int64, _ error) {
	var oldSchemaID int64
	var tableName model.CIStr
	if err := job.DecodeArgs(&oldSchemaID, &tableName); err != nil {
		// Invalid arguments, cancel this job.
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}

	tblInfo, err := getTableInfoAndCancelFaultJob(t, job, oldSchemaID)
	if err != nil {
		return ver, errors.Trace(err)
	}
	newSchemaID := job.SchemaID
	// The new name must be free in the destination database.
	err = checkTableNotExists(t, job, newSchemaID, tableName.L)
	if err != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}

	var baseID int64
	shouldDelAutoID := false
	if newSchemaID != oldSchemaID {
		shouldDelAutoID = true
		// Remember the current auto-ID base so it can be re-created under the
		// new schema after the move.
		baseID, err = t.GetAutoTableID(tblInfo.GetDBID(oldSchemaID), tblInfo.ID)
		if err != nil {
			job.State = model.JobStateCancelled
			return ver, errors.Trace(err)
		}
		// It's compatible with old version.
		// TODO: Remove it.
		tblInfo.OldSchemaID = 0
	}

	// Delete the meta under the old schema (including its auto-ID record when
	// moving between databases) before re-creating it under the new one.
	err = t.DropTableOrView(oldSchemaID, tblInfo.ID, shouldDelAutoID)
	if err != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}
	// gofail: var renameTableErr bool
	// if renameTableErr {
	// job.State = model.JobStateCancelled
	// return ver, errors.New("occur an error after renaming table.")
	// }
	tblInfo.Name = tableName
	err = t.CreateTableOrView(newSchemaID, tblInfo)
	if err != nil {
		job.State = model.JobStateCancelled
		return ver, errors.Trace(err)
	}
	// Update the table's auto-increment ID.
	if newSchemaID != oldSchemaID {
		_, err = t.GenAutoTableID(newSchemaID, tblInfo.ID, baseID)
		if err != nil {
			job.State = model.JobStateCancelled
			return ver, errors.Trace(err)
		}
	}

	ver, err = updateSchemaVersion(t, job)
	if err != nil {
		return ver, errors.Trace(err)
	}
	job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
	return ver, nil
}
|
|
|
|
func onModifyTableComment(t *meta.Meta, job *model.Job) (ver int64, _ error) {
|
|
var comment string
|
|
if err := job.DecodeArgs(&comment); err != nil {
|
|
job.State = model.JobStateCancelled
|
|
return ver, errors.Trace(err)
|
|
}
|
|
|
|
tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID)
|
|
if err != nil {
|
|
return ver, errors.Trace(err)
|
|
}
|
|
|
|
tblInfo.Comment = comment
|
|
ver, err = updateVersionAndTableInfo(t, job, tblInfo, true)
|
|
if err != nil {
|
|
return ver, errors.Trace(err)
|
|
}
|
|
job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
|
|
return ver, nil
|
|
}
|
|
|
|
func onModifyTableCharsetAndCollate(t *meta.Meta, job *model.Job) (ver int64, _ error) {
|
|
var toCharset, toCollate string
|
|
if err := job.DecodeArgs(&toCharset, &toCollate); err != nil {
|
|
job.State = model.JobStateCancelled
|
|
return ver, errors.Trace(err)
|
|
}
|
|
|
|
tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID)
|
|
if err != nil {
|
|
return ver, errors.Trace(err)
|
|
}
|
|
|
|
tblInfo.Charset = toCharset
|
|
tblInfo.Collate = toCollate
|
|
ver, err = updateVersionAndTableInfo(t, job, tblInfo, true)
|
|
if err != nil {
|
|
return ver, errors.Trace(err)
|
|
}
|
|
job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
|
|
return ver, nil
|
|
}
|
|
|
|
func checkTableNotExists(t *meta.Meta, job *model.Job, schemaID int64, tableName string) error {
|
|
// Check this table's database.
|
|
tables, err := t.ListTables(schemaID)
|
|
if err != nil {
|
|
if meta.ErrDBNotExists.Equal(err) {
|
|
return infoschema.ErrDatabaseNotExists.GenWithStackByArgs("")
|
|
}
|
|
return errors.Trace(err)
|
|
}
|
|
|
|
// Check the table.
|
|
for _, tbl := range tables {
|
|
if tbl.Name.L == tableName {
|
|
return infoschema.ErrTableExists.GenWithStackByArgs(tbl.Name)
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// updateVersionAndTableInfo updates the schema version and the table information.
|
|
func updateVersionAndTableInfo(t *meta.Meta, job *model.Job, tblInfo *model.TableInfo, shouldUpdateVer bool) (
|
|
ver int64, err error) {
|
|
if shouldUpdateVer {
|
|
ver, err = updateSchemaVersion(t, job)
|
|
if err != nil {
|
|
return 0, errors.Trace(err)
|
|
}
|
|
}
|
|
|
|
if tblInfo.State == model.StatePublic {
|
|
tblInfo.UpdateTS = t.StartTS
|
|
}
|
|
return ver, t.UpdateTable(job.SchemaID, tblInfo)
|
|
}
|
|
|
|
// TODO: It may have the issue when two clients concurrently add partitions to a table.
|
|
func onAddTablePartition(t *meta.Meta, job *model.Job) (ver int64, _ error) {
|
|
partInfo := &model.PartitionInfo{}
|
|
err := job.DecodeArgs(&partInfo)
|
|
if err != nil {
|
|
job.State = model.JobStateCancelled
|
|
return ver, errors.Trace(err)
|
|
}
|
|
|
|
tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID)
|
|
if err != nil {
|
|
job.State = model.JobStateCancelled
|
|
return ver, errors.Trace(err)
|
|
}
|
|
err = checkAddPartitionTooManyPartitions(uint64(len(tblInfo.Partition.Definitions) + len(partInfo.Definitions)))
|
|
if err != nil {
|
|
job.State = model.JobStateCancelled
|
|
return ver, errors.Trace(err)
|
|
}
|
|
|
|
err = checkAddPartitionValue(tblInfo, partInfo)
|
|
if err != nil {
|
|
job.State = model.JobStateCancelled
|
|
return ver, errors.Trace(err)
|
|
}
|
|
|
|
err = checkPartitionNameUnique(tblInfo, partInfo)
|
|
if err != nil {
|
|
job.State = model.JobStateCancelled
|
|
return ver, errors.Trace(err)
|
|
}
|
|
|
|
updatePartitionInfo(partInfo, tblInfo)
|
|
ver, err = updateVersionAndTableInfo(t, job, tblInfo, true)
|
|
if err != nil {
|
|
return ver, errors.Trace(err)
|
|
}
|
|
// Finish this job.
|
|
job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
|
|
return ver, errors.Trace(err)
|
|
}
|
|
|
|
func updatePartitionInfo(partitionInfo *model.PartitionInfo, tblInfo *model.TableInfo) {
|
|
parInfo := &model.PartitionInfo{}
|
|
oldDefs, newDefs := tblInfo.Partition.Definitions, partitionInfo.Definitions
|
|
parInfo.Definitions = make([]model.PartitionDefinition, 0, len(newDefs)+len(oldDefs))
|
|
parInfo.Definitions = append(parInfo.Definitions, oldDefs...)
|
|
parInfo.Definitions = append(parInfo.Definitions, newDefs...)
|
|
tblInfo.Partition.Definitions = parInfo.Definitions
|
|
}
|
|
|
|
// checkAddPartitionValue values less than value must be strictly increasing for each partition.
|
|
func checkAddPartitionValue(meta *model.TableInfo, part *model.PartitionInfo) error {
|
|
if meta.Partition.Type == model.PartitionTypeRange && len(meta.Partition.Columns) == 0 {
|
|
newDefs, oldDefs := part.Definitions, meta.Partition.Definitions
|
|
rangeValue := oldDefs[len(oldDefs)-1].LessThan[0]
|
|
if strings.EqualFold(rangeValue, "MAXVALUE") {
|
|
return errors.Trace(ErrPartitionMaxvalue)
|
|
}
|
|
|
|
currentRangeValue, err := strconv.Atoi(rangeValue)
|
|
if err != nil {
|
|
return errors.Trace(err)
|
|
}
|
|
|
|
for i := 0; i < len(newDefs); i++ {
|
|
ifMaxvalue := strings.EqualFold(newDefs[i].LessThan[0], "MAXVALUE")
|
|
if ifMaxvalue && i == len(newDefs)-1 {
|
|
return nil
|
|
} else if ifMaxvalue && i != len(newDefs)-1 {
|
|
return errors.Trace(ErrPartitionMaxvalue)
|
|
}
|
|
|
|
nextRangeValue, err := strconv.Atoi(newDefs[i].LessThan[0])
|
|
if err != nil {
|
|
return errors.Trace(err)
|
|
}
|
|
if nextRangeValue <= currentRangeValue {
|
|
return errors.Trace(ErrRangeNotIncreasing)
|
|
}
|
|
currentRangeValue = nextRangeValue
|
|
}
|
|
}
|
|
return nil
|
|
}
|