*: reorder struct fields to reduce memory allocation (#14172)

This commit is contained in:
Qiannan
2019-12-23 17:10:09 +08:00
committed by pingcap-github-bot
parent 558d9e2967
commit ef7adeb50d
4 changed files with 49 additions and 46 deletions

View File

@ -207,8 +207,6 @@ type IndexReaderExecutor struct {
table table.Table
index *model.IndexInfo
physicalTableID int64
keepOrder bool
desc bool
ranges []*ranger.Range
// kvRanges are only used for union scan.
kvRanges []kv.KeyRange
@ -221,8 +219,12 @@ type IndexReaderExecutor struct {
columns []*model.ColumnInfo
// outputColumns are only required by union scan.
outputColumns []*expression.Column
streaming bool
feedback *statistics.QueryFeedback
feedback *statistics.QueryFeedback
streaming bool
keepOrder bool
desc bool
corColInFilter bool
corColInAccess bool
@ -317,30 +319,22 @@ func (e *IndexReaderExecutor) open(ctx context.Context, kvRanges []kv.KeyRange)
type IndexLookUpExecutor struct {
baseExecutor
table table.Table
index *model.IndexInfo
keepOrder bool
desc bool
ranges []*ranger.Range
dagPB *tipb.DAGRequest
startTS uint64
table table.Table
index *model.IndexInfo
ranges []*ranger.Range
dagPB *tipb.DAGRequest
startTS uint64
// handleIdx is the index of handle, which is only used for case of keeping order.
handleIdx int
tableRequest *tipb.DAGRequest
// columns are only required by union scan.
columns []*model.ColumnInfo
indexStreaming bool
tableStreaming bool
columns []*model.ColumnInfo
*dataReaderBuilder
// All fields above are immutable.
idxWorkerWg sync.WaitGroup
tblWorkerWg sync.WaitGroup
finished chan struct{}
kvRanges []kv.KeyRange
workerStarted bool
resultCh chan *lookupTableTask
resultCurr *lookupTableTask
feedback *statistics.QueryFeedback
@ -351,11 +345,20 @@ type IndexLookUpExecutor struct {
// checkIndexValue is used to check the consistency of the index data.
*checkIndexValue
kvRanges []kv.KeyRange
workerStarted bool
keepOrder bool
desc bool
indexStreaming bool
tableStreaming bool
corColInIdxSide bool
idxPlans []plannercore.PhysicalPlan
corColInTblSide bool
tblPlans []plannercore.PhysicalPlan
corColInAccess bool
idxPlans []plannercore.PhysicalPlan
tblPlans []plannercore.PhysicalPlan
idxCols []*expression.Column
colLens []int
// PushedLimit is used to skip the preceding and tailing handles when Limit is sunk into IndexLookUpReader.

View File

@ -144,16 +144,16 @@ type StatementContext struct {
// StmtHints are SessionVars related sql hints.
type StmtHints struct {
// Hint Information
MemQuotaQuery int64
ReplicaRead byte
AllowInSubqToJoinAndAgg bool
NoIndexMergeHint bool
// Hint flags
HasAllowInSubqToJoinAndAggHint bool
HasMemQuotaHint bool
HasReplicaReadHint bool
// Hint Information
AllowInSubqToJoinAndAgg bool
NoIndexMergeHint bool
MemQuotaQuery int64
ReplicaRead byte
}
// GetNowTsCached getter for nowTs, if not set get now time and cache it

View File

@ -31,9 +31,9 @@ type IndexIterator interface {
// CreateIdxOpt contains the options will be used when creating an index.
type CreateIdxOpt struct {
Ctx context.Context
SkipHandleCheck bool // If true, skip the handle constraint check.
SkipCheck bool // If true, skip all the unique indices constraint check.
Ctx context.Context
Untouched bool // If true, the index key/value is no need to commit.
}

View File

@ -160,36 +160,36 @@ func (s *testJSONSuite) TestBinaryJSONModify(c *C) {
base string
setField string
setValue string
mt ModifyType
expected string
success bool
mt ModifyType
}{
{`null`, "$", `{}`, ModifySet, `{}`, true},
{`{}`, "$.a", `3`, ModifySet, `{"a": 3}`, true},
{`{"a": 3}`, "$.a", `[]`, ModifyReplace, `{"a": []}`, true},
{`{"a": 3}`, "$.b", `"3"`, ModifySet, `{"a": 3, "b": "3"}`, true},
{`{"a": []}`, "$.a[0]", `3`, ModifySet, `{"a": [3]}`, true},
{`{"a": [3]}`, "$.a[1]", `4`, ModifyInsert, `{"a": [3, 4]}`, true},
{`{"a": [3]}`, "$[0]", `4`, ModifySet, `4`, true},
{`{"a": [3]}`, "$[1]", `4`, ModifySet, `[{"a": [3]}, 4]`, true},
{`{"b": true}`, "$.b", `false`, ModifySet, `{"b": false}`, true},
{`null`, "$", `{}`, `{}`, true, ModifySet},
{`{}`, "$.a", `3`, `{"a": 3}`, true, ModifySet},
{`{"a": 3}`, "$.a", `[]`, `{"a": []}`, true, ModifyReplace},
{`{"a": 3}`, "$.b", `"3"`, `{"a": 3, "b": "3"}`, true, ModifySet},
{`{"a": []}`, "$.a[0]", `3`, `{"a": [3]}`, true, ModifySet},
{`{"a": [3]}`, "$.a[1]", `4`, `{"a": [3, 4]}`, true, ModifyInsert},
{`{"a": [3]}`, "$[0]", `4`, `4`, true, ModifySet},
{`{"a": [3]}`, "$[1]", `4`, `[{"a": [3]}, 4]`, true, ModifySet},
{`{"b": true}`, "$.b", `false`, `{"b": false}`, true, ModifySet},
// nothing changed because the path is empty and we want to insert.
{`{}`, "$", `1`, ModifyInsert, `{}`, true},
{`{}`, "$", `1`, `{}`, true, ModifyInsert},
// nothing changed because the path without last leg doesn't exist.
{`{"a": [3, 4]}`, "$.b[1]", `3`, ModifySet, `{"a": [3, 4]}`, true},
{`{"a": [3, 4]}`, "$.b[1]", `3`, `{"a": [3, 4]}`, true, ModifySet},
// nothing changed because the path without last leg doesn't exist.
{`{"a": [3, 4]}`, "$.a[2].b", `3`, ModifySet, `{"a": [3, 4]}`, true},
{`{"a": [3, 4]}`, "$.a[2].b", `3`, `{"a": [3, 4]}`, true, ModifySet},
// nothing changed because we want to insert but the full path exists.
{`{"a": [3, 4]}`, "$.a[0]", `30`, ModifyInsert, `{"a": [3, 4]}`, true},
{`{"a": [3, 4]}`, "$.a[0]", `30`, `{"a": [3, 4]}`, true, ModifyInsert},
// nothing changed because we want to replace but the full path doesn't exist.
{`{"a": [3, 4]}`, "$.a[2]", `30`, ModifyReplace, `{"a": [3, 4]}`, true},
{`{"a": [3, 4]}`, "$.a[2]", `30`, `{"a": [3, 4]}`, true, ModifyReplace},
// bad path expression.
{"null", "$.*", "{}", ModifySet, "null", false},
{"null", "$[*]", "{}", ModifySet, "null", false},
{"null", "$**.a", "{}", ModifySet, "null", false},
{"null", "$**[3]", "{}", ModifySet, "null", false},
{"null", "$.*", "{}", "null", false, ModifySet},
{"null", "$[*]", "{}", "null", false, ModifySet},
{"null", "$**.a", "{}", "null", false, ModifySet},
{"null", "$**[3]", "{}", "null", false, ModifySet},
}
for _, tt := range tests {
pathExpr, err := ParseJSONPathExpr(tt.setField)