
Commit f670199

Merge branch 'master' into feature/kurisu-support-unencrypted-pw-in-toml-200423

Kuri-su authored Apr 28, 2020
2 parents 08f8830 + a03f3e3
Showing 30 changed files with 682 additions and 98 deletions.
32 changes: 30 additions & 2 deletions CHANGELOG.md
@@ -2,12 +2,40 @@

All notable changes to this project will be documented in this file.

## [1.0.5] 2020-04-27

### Improvements

- Improve the incremental replication speed when the `UNIQUE KEY` column has the `NULL` value
- Add retry for the `Write conflict` (9007 and 8005) error returned by TiDB

### Bug fixes

- Fix the issue that the `Duplicate entry` error might occur during the full data import
- Fix the issue that the replication task cannot be stopped or paused when the full data import is completed and the upstream has no written data
- Fix the issue that the monitoring metrics still display data after the replication task is stopped

### Action required

- When upgrading from a previous version, note that you must upgrade all DM components (dmctl/DM-master/DM-worker) together

### Detailed Bug Fixes and Changes

- Improve the incremental replication speed when the `UNIQUE KEY` column has the `NULL` value [#588](https://github.com/pingcap/dm/pull/588) [#597](https://github.com/pingcap/dm/pull/597)
- Add retry for the `Write conflict` (9007 and 8005) error returned by TiDB [#632](https://github.com/pingcap/dm/pull/632) (see the retry sketch after this list)
- Fix the issue that the `Duplicate entry` error might occur during the full data import [#554](https://github.com/pingcap/dm/pull/554)
- Fix the issue that the replication task cannot be stopped or paused when the full data import is completed and the upstream has no written data [#622](https://github.com/pingcap/dm/pull/622)
- Fix the issue that the monitoring metrics still display data after the replication task is stopped [#616](https://github.com/pingcap/dm/pull/616)
- Fix the issue that the `Column count doesn't match value count` error might be returned during the sharding DDL replication [#624](https://github.com/pingcap/dm/pull/624)
- Fix the issue that some metrics such as `data file size` are incorrectly displayed when the paused task of full data import is resumed [#570](https://github.com/pingcap/dm/pull/570)
- Add and fix multiple monitoring metrics [#590](https://github.com/pingcap/dm/pull/590) [#594](https://github.com/pingcap/dm/pull/594)
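As context for the `Write conflict` retry item above: TiDB reports optimistic-transaction conflicts with error codes 9007 and 8005, and the failed statement can safely be re-run. Below is a minimal sketch of that retry shape, assuming the `go-sql-driver/mysql` error type; it is an illustration only, the actual DM implementation lives in [#632](https://github.com/pingcap/dm/pull/632).

```go
package retryutil

import (
	"errors"
	"fmt"
	"time"

	"github.com/go-sql-driver/mysql"
)

// The two retryable TiDB error codes named in the changelog entry above.
const (
	errWriteConflict = 9007 // TiDB "Write conflict"
	errTxnRetryable  = 8005 // TiDB retryable transaction error
	maxRetries       = 3
)

// isRetryableTiDBError reports whether err carries one of the two codes,
// assuming the driver surfaces them as *mysql.MySQLError.
func isRetryableTiDBError(err error) bool {
	var myErr *mysql.MySQLError
	return errors.As(err, &myErr) &&
		(myErr.Number == errWriteConflict || myErr.Number == errTxnRetryable)
}

// execWithRetry re-runs fn with a simple linear backoff while TiDB keeps
// reporting a retryable write conflict, then gives up.
func execWithRetry(fn func() error) error {
	var err error
	for i := 0; i < maxRetries; i++ {
		if err = fn(); err == nil || !isRetryableTiDBError(err) {
			return err
		}
		time.Sleep(time.Duration(i+1) * 100 * time.Millisecond)
	}
	return fmt.Errorf("still conflicting after %d retries: %w", maxRetries, err)
}
```

A real implementation would also bound the total retry time and log each attempt; the sketch only shows the error-code check and the backoff loop.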

## [1.0.4] 2020-03-13

### Improvements

- Add English UI for DM-portal
-- Add the ` --more` parameter in the `query-status` command to show complete replication status information
+- Add the `--more` parameter in the `query-status` command to show complete replication status information

### Bug fixes

@@ -23,7 +51,7 @@ All notable changes to this project will be documented in this file.
### Detailed Bug Fixes and Changes

- Add English UI for DM-portal [#480](https://github.com/pingcap/dm/pull/480)
-- Add the ` --more` parameter in the `query-status` command to show complete replication status information [#533](https://github.com/pingcap/dm/pull/533)
+- Add the `--more` parameter in the `query-status` command to show complete replication status information [#533](https://github.com/pingcap/dm/pull/533)
- Fix the issue that `resume-task` might fail to resume the replication task which is interrupted by the abnormal connection to the downstream TiDB server [#436](https://github.com/pingcap/dm/pull/436)
- Fix the issue that the online DDL operation cannot be properly replicated after a failed replication task is restarted because the online DDL meta information is cleared after the DDL operation failure [#465](https://github.com/pingcap/dm/pull/465)
- Fix the issue that `query-error` might cause the DM-worker to panic after `start-task` goes into error [#519](https://github.com/pingcap/dm/pull/519)
1 change: 1 addition & 0 deletions _utils/terror_gen/errors_release.txt
@@ -292,6 +292,7 @@ ErrSyncerUnitExecWithNoBlockingDDL,[code=36059:class=sync-unit:scope=internal:le
ErrSyncerUnitGenBWList,[code=36060:class=sync-unit:scope=internal:level=high],"generate black white list"
ErrSyncerUnitHandleDDLFailed,[code=36061:class=sync-unit:scope=internal:level=high],"fail to handle ddl job for %s"
ErrSyncerShardDDLConflict,[code=36062:class=sync-unit:scope=internal:level=high],"fail to handle shard ddl %v in optimistic mode, because schema conflict detected"
ErrSyncerFailpoint,[code=36063:class=sync-unit:scope=internal:level=low],"failpoint specified error"
ErrMasterSQLOpNilRequest,[code=38001:class=dm-master:scope=internal:level=medium],"nil request not valid"
ErrMasterSQLOpNotSupport,[code=38002:class=dm-master:scope=internal:level=medium],"op %s not supported"
ErrMasterSQLOpWithoutSharding,[code=38003:class=dm-master:scope=internal:level=medium],"operate request without --sharding specified not valid"
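The added `ErrSyncerFailpoint` entry follows the `Name,[code=NNNNN:class=…:scope=…:level=…],"message format"` convention used throughout this generated file. As a rough illustration of the format, here is a hypothetical parser for one such line (not DM's actual `pkg/terror` API):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// terrorEntry mirrors the fields visible in each errors_release.txt line.
type terrorEntry struct {
	Name, Class, Scope, Level, Format string
	Code                              int
}

// One line: Name,[code=NNNNN:class=...:scope=...:level=...],"message format"
var entryRe = regexp.MustCompile(`^(\w+),\[code=(\d+):class=([\w-]+):scope=(\w+):level=(\w+)\],"(.*)"$`)

// parseEntry decodes a single release-file line into its parts.
func parseEntry(line string) (terrorEntry, error) {
	m := entryRe.FindStringSubmatch(line)
	if m == nil {
		return terrorEntry{}, fmt.Errorf("malformed terror line: %q", line)
	}
	code, err := strconv.Atoi(m[2])
	if err != nil {
		return terrorEntry{}, err
	}
	return terrorEntry{Name: m[1], Code: code, Class: m[3], Scope: m[4], Level: m[5], Format: m[6]}, nil
}

func main() {
	e, err := parseEntry(`ErrSyncerFailpoint,[code=36063:class=sync-unit:scope=internal:level=low],"failpoint specified error"`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s: code=%d class=%s level=%s msg=%q\n", e.Name, e.Code, e.Class, e.Level, e.Format)
}
```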
4 changes: 4 additions & 0 deletions cmd/dm-master/main_test.go
@@ -16,12 +16,15 @@ package main
// Reference: https://dzone.com/articles/measuring-integration-test-coverage-rate-in-pouchc

import (
"fmt"
"os"
"strings"
"testing"
"time"
)

func TestRunMain(t *testing.T) {
fmt.Println("dm-master startup", time.Now())
var args []string
for _, arg := range os.Args {
switch {
@@ -34,4 +37,5 @@ func TestRunMain(t *testing.T) {

os.Args = args
main()
fmt.Println("dm-master exit", time.Now())
}
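A note on these `main_test.go` changes: each `TestRunMain` exists so the integration suite can build an instrumented binary with `go test -c -cover` and collect coverage while DM runs as a real process (the technique from the DZone article referenced at the top of each file); the new `fmt.Println` lines simply timestamp process start and exit in the test logs. A distilled version of the pattern follows; the flag filtering shown here is illustrative, since the `switch` body is elided in the hunks above, and the snippet assumes it sits next to the application's `main.go` in package `main`.

```go
package main

import (
	"fmt"
	"os"
	"strings"
	"testing"
	"time"
)

// TestRunMainSketch shows the coverage trick in isolation: strip the
// -test.* flags injected by the test framework, hand everything else to
// main(), and let `go test -c -cover` produce an instrumented binary.
func TestRunMainSketch(t *testing.T) {
	fmt.Println("startup", time.Now())
	args := []string{os.Args[0]}
	for _, arg := range os.Args[1:] {
		if !strings.HasPrefix(arg, "-test.") {
			args = append(args, arg) // keep only application flags
		}
	}
	os.Args = args
	main() // the application entrypoint, defined in main.go
	fmt.Println("exit", time.Now())
}
```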
4 changes: 4 additions & 0 deletions cmd/dm-syncer/main_test.go
@@ -16,12 +16,15 @@ package main
// Reference: https://dzone.com/articles/measuring-integration-test-coverage-rate-in-pouchc

import (
"fmt"
"os"
"strings"
"testing"
"time"
)

func TestRunMain(t *testing.T) {
fmt.Println("dm-syncer startup", time.Now())
var args []string
for _, arg := range os.Args {
switch {
@@ -34,4 +37,5 @@ func TestRunMain(t *testing.T) {

os.Args = args
main()
fmt.Println("dm-syncer exit", time.Now())
}
4 changes: 4 additions & 0 deletions cmd/dm-tracer/main_test.go
@@ -16,12 +16,15 @@ package main
// Reference: https://dzone.com/articles/measuring-integration-test-coverage-rate-in-pouchc

import (
"fmt"
"os"
"strings"
"testing"
"time"
)

func TestRunMain(t *testing.T) {
fmt.Println("dm-tracer startup", time.Now())
var args []string
for _, arg := range os.Args {
switch {
@@ -34,4 +37,5 @@ func TestRunMain(t *testing.T) {

os.Args = args
main()
fmt.Println("dm-tracer exit", time.Now())
}
4 changes: 4 additions & 0 deletions cmd/dm-worker/main_test.go
@@ -16,6 +16,7 @@ package main
// Reference: https://dzone.com/articles/measuring-integration-test-coverage-rate-in-pouchc

import (
"fmt"
"os"
"strings"
"testing"
@@ -28,6 +29,7 @@ import (
)

func TestRunMain(t *testing.T) {
fmt.Println("dm-worker startup", time.Now())
var (
args []string
exit = make(chan int)
@@ -67,8 +69,10 @@ func TestRunMain(t *testing.T) {

select {
case <-waitCh:
fmt.Println("dm-worker exit", time.Now())
return
case <-exit:
fmt.Println("dm-worker exit", time.Now())
return
}
}
4 changes: 2 additions & 2 deletions dm/master/scheduler/scheduler_test.go
@@ -715,7 +715,7 @@ func (t *testScheduler) TestRestartScheduler(c *C) {
return len(bounds) == 1 && bounds[0] == sourceID1
}), IsTrue)
checkSourceBoundCh := func() {
-	time.Sleep(300 * time.Millisecond)
+	time.Sleep(time.Second)
c.Assert(sourceBoundCh, HasLen, 1)
sourceBound := <-sourceBoundCh
sourceBound.Revision = 0
@@ -857,7 +857,7 @@ func (t *testScheduler) TestWatchWorkerEventEtcdCompact(c *C) {
select {
case err := <-workerErrCh:
c.Assert(err, Equals, etcdErrCompacted)
-	case <-time.After(300 * time.Millisecond):
+	case <-time.After(time.Second):
c.Fatal("fail to get etcd error compacted")
}
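Both hunks in this file relax a 300 ms wait to a full second, trading a slightly slower test for less sensitivity to scheduler jitter on loaded CI machines. A common alternative to fixed sleeps (a sketch of the general pattern, not something this commit introduces) is to poll a condition with a deadline:

```go
package testutil

import "time"

// waitUntil polls cond every 50 ms until it returns true or timeout
// elapses; it reacts quickly when the condition is already met and
// still tolerates slow machines, unlike a fixed time.Sleep.
func waitUntil(timeout time.Duration, cond func() bool) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return true
		}
		time.Sleep(50 * time.Millisecond)
	}
	return cond() // one final check at the deadline
}
```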

7 changes: 3 additions & 4 deletions dm/master/shardddl/optimist.go
@@ -421,12 +421,11 @@ func (o *Optimist) handleOperationPut(ctx context.Context, opCh <-chan optimism.
 		if lock == nil {
 			o.logger.Warn("no lock for the shard DDL lock operation exist", zap.Stringer("operation", op))
 			continue
-		} else if synced, _ := lock.IsSynced(); !synced {
-			// this should not happen in normal case.
-			o.logger.Warn("the lock for the shard DDL lock operation has not synced", zap.Stringer("operation", op))
-			continue
 		}

+		// in optimistic mode, we always try to mark a table as done after receiving the `done` status of the DDL operation.
+		// NOTE: even if all tables have done their previous DDL operations, the lock may still not be resolved,
+		// because these tables may have different schemas.
 		done := lock.TryMarkDone(op.Source, op.UpSchema, op.UpTable)
 		o.logger.Info("mark operation for a table as done", zap.Bool("done", done), zap.Stringer("operation", op))
 		if !lock.IsResolved() {
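The replacement comment carries the key invariant of optimistic shard DDL mode: each table is marked done for its own DDL operation, but the lock only resolves once every table has converged on the same schema. A toy model of that two-level condition (hypothetical types, not DM's `optimism.Lock`):

```go
package main

import "fmt"

// toyLock models the two separate conditions in the comment above:
// every table has finished its own DDL (done), and all tables agree
// on one schema (resolved).
type toyLock struct {
	done   map[string]bool   // table -> finished its DDL operation
	schema map[string]string // table -> current schema version
}

// TryMarkDone records that one table finished its DDL.
func (l *toyLock) TryMarkDone(table string) {
	l.done[table] = true
}

// IsResolved is true only when every table is done AND all schemas match.
func (l *toyLock) IsResolved() bool {
	var first string // sentinel: assumes schema versions are non-empty
	for tbl, d := range l.done {
		if !d {
			return false
		}
		if first == "" {
			first = l.schema[tbl]
		} else if l.schema[tbl] != first {
			return false // all done, but schemas still differ
		}
	}
	return true
}

func main() {
	l := &toyLock{
		done:   map[string]bool{"t1": false, "t2": false},
		schema: map[string]string{"t1": "v2", "t2": "v1"},
	}
	l.TryMarkDone("t1")
	l.TryMarkDone("t2")
	fmt.Println(l.IsResolved()) // false: both done, schemas differ
}
```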
