diff --git a/.github/workflows/replica-tests.yml b/.github/workflows/replica-tests.yml index 957d7a176..5a9bfb7f7 100644 --- a/.github/workflows/replica-tests.yml +++ b/.github/workflows/replica-tests.yml @@ -28,6 +28,20 @@ jobs: - name: Run tests run: script/docker-gh-ost-replica-tests run + - name: Set artifact name + if: failure() + run: | + ARTIFACT_NAME=$(echo "${{ matrix.image }}" | tr '/:' '-') + echo "ARTIFACT_NAME=test-logs-${ARTIFACT_NAME}" >> $GITHUB_ENV + + - name: Upload test logs on failure + if: failure() + uses: actions/upload-artifact@v4 + with: + name: ${{ env.ARTIFACT_NAME }} + path: /tmp/gh-ost-test.* + retention-days: 7 + - name: Teardown environment if: always() run: script/docker-gh-ost-replica-tests down diff --git a/go/cmd/gh-ost/main.go b/go/cmd/gh-ost/main.go index fae519680..567137fd5 100644 --- a/go/cmd/gh-ost/main.go +++ b/go/cmd/gh-ost/main.go @@ -316,6 +316,9 @@ func main() { if migrationContext.CheckpointIntervalSeconds < 10 { migrationContext.Log.Fatalf("--checkpoint-seconds should be >=10") } + if migrationContext.CountTableRows && migrationContext.PanicOnWarnings { + migrationContext.Log.Warning("--exact-rowcount with --panic-on-warnings: row counts cannot be exact due to warning detection") + } switch *cutOver { case "atomic", "default", "": diff --git a/go/logic/applier.go b/go/logic/applier.go index 58761d844..bd04641a0 100644 --- a/go/logic/applier.go +++ b/go/logic/applier.go @@ -94,6 +94,21 @@ func NewApplier(migrationContext *base.MigrationContext) *Applier { } } +// compileMigrationKeyWarningRegex compiles a regex pattern that matches duplicate key warnings +// for the migration's unique key. Duplicate warnings are formatted differently across MySQL versions, +// hence the optional table name prefix. Metacharacters in table/index names are escaped to avoid +// regex syntax errors. 
+func (this *Applier) compileMigrationKeyWarningRegex() (*regexp.Regexp, error) { + escapedTable := regexp.QuoteMeta(this.migrationContext.GetGhostTableName()) + escapedKey := regexp.QuoteMeta(this.migrationContext.UniqueKey.NameInGhostTable) + migrationUniqueKeyPattern := fmt.Sprintf(`for key '(%s\.)?%s'`, escapedTable, escapedKey) + migrationKeyRegex, err := regexp.Compile(migrationUniqueKeyPattern) + if err != nil { + return nil, fmt.Errorf("failed to compile migration key pattern: %w", err) + } + return migrationKeyRegex, nil +} + func (this *Applier) InitDBConnections() (err error) { applierUri := this.connectionConfig.GetDBUri(this.migrationContext.DatabaseName) uriWithMulti := fmt.Sprintf("%s&multiStatements=true", applierUri) @@ -917,6 +932,12 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected return nil, err } + // Compile regex once before loop to avoid performance penalty and handle errors properly + migrationKeyRegex, err := this.compileMigrationKeyWarningRegex() + if err != nil { + return nil, err + } + var sqlWarnings []string for rows.Next() { var level, message string @@ -925,10 +946,7 @@ func (this *Applier) ApplyIterationInsertQuery() (chunkSize int64, rowsAffected this.migrationContext.Log.Warningf("Failed to read SHOW WARNINGS row") continue } - // Duplicate warnings are formatted differently across mysql versions, hence the optional table name prefix - migrationUniqueKeyExpression := fmt.Sprintf("for key '(%s\\.)?%s'", this.migrationContext.GetGhostTableName(), this.migrationContext.UniqueKey.NameInGhostTable) - matched, _ := regexp.MatchString(migrationUniqueKeyExpression, message) - if strings.Contains(message, "Duplicate entry") && matched { + if strings.Contains(message, "Duplicate entry") && migrationKeyRegex.MatchString(message) { continue } sqlWarnings = append(sqlWarnings, fmt.Sprintf("%s: %s (%d)", level, message, code)) @@ -1468,6 +1486,107 @@ func (this *Applier) buildDMLEventQuery(dmlEvent 
*binlog.BinlogDMLEvent) []*dmlB return []*dmlBuildResult{newDmlBuildResultError(fmt.Errorf("Unknown dml event type: %+v", dmlEvent.DML))} } +// executeBatchWithWarningChecking executes a batch of DML statements with SHOW WARNINGS +// interleaved after each statement to detect warnings from any statement in the batch. +// This is used when PanicOnWarnings is enabled to ensure warnings from middle statements +// are not lost (SHOW WARNINGS only shows warnings from the last statement in a multi-statement batch). +func (this *Applier) executeBatchWithWarningChecking(ctx context.Context, tx *gosql.Tx, buildResults []*dmlBuildResult) (int64, error) { + // Build query with interleaved SHOW WARNINGS: stmt1; SHOW WARNINGS; stmt2; SHOW WARNINGS; ... + var queryBuilder strings.Builder + args := make([]interface{}, 0) + + for _, buildResult := range buildResults { + queryBuilder.WriteString(buildResult.query) + queryBuilder.WriteString(";\nSHOW WARNINGS;\n") + args = append(args, buildResult.args...) + } + + query := queryBuilder.String() + + // Execute the multi-statement query + rows, err := tx.QueryContext(ctx, query, args...) + if err != nil { + return 0, fmt.Errorf("%w; query=%s; args=%+v", err, query, args) + } + defer rows.Close() + + var totalDelta int64 + + // QueryContext with multi-statement queries returns rows positioned at the first result set + // that produces rows (i.e., the first SHOW WARNINGS), automatically skipping DML results. 
+ // Verify we're at a SHOW WARNINGS result set (should have 3 columns: Level, Code, Message) + cols, err := rows.Columns() + if err != nil { + return 0, fmt.Errorf("failed to get columns: %w", err) + } + + // If somehow we're not at a result set with columns, try to advance + if len(cols) == 0 { + if !rows.NextResultSet() { + return 0, fmt.Errorf("expected SHOW WARNINGS result set after first statement") + } + } + + // Compile regex once before loop to avoid performance penalty and handle errors properly + migrationKeyRegex, err := this.compileMigrationKeyWarningRegex() + if err != nil { + return 0, err + } + + // Iterate through SHOW WARNINGS result sets. + // DML statements don't create navigable result sets, so we move directly between SHOW WARNINGS. + // Pattern: [at SHOW WARNINGS #1] -> read warnings -> NextResultSet() -> [at SHOW WARNINGS #2] -> ... + for i := 0; i < len(buildResults); i++ { + // We can't get exact rows affected with QueryContext (needed for reading SHOW WARNINGS). + // Use the theoretical delta (+1 for INSERT, -1 for DELETE, 0 for UPDATE) as an approximation. + // This may be inaccurate (e.g., INSERT IGNORE with duplicate affects 0 rows but we count +1). + totalDelta += buildResults[i].rowsDelta + + // Read warnings from this statement's SHOW WARNINGS result set + var sqlWarnings []string + for rows.Next() { + var level, message string + var code int + if err := rows.Scan(&level, &code, &message); err != nil { + // Scan failure means we cannot reliably read warnings. + // Since PanicOnWarnings is a safety feature, we must fail hard rather than silently skip. 
+ return 0, fmt.Errorf("failed to scan SHOW WARNINGS for statement %d: %w", i+1, err) + } + + if strings.Contains(message, "Duplicate entry") && migrationKeyRegex.MatchString(message) { + // Duplicate entry on migration unique key is expected during binlog replay + // (row was already copied during bulk copy phase) + continue + } + sqlWarnings = append(sqlWarnings, fmt.Sprintf("%s: %s (%d)", level, message, code)) + } + + // Check for errors that occurred while iterating through warnings + if err := rows.Err(); err != nil { + return 0, fmt.Errorf("error reading SHOW WARNINGS result set for statement %d: %w", i+1, err) + } + + if len(sqlWarnings) > 0 { + return 0, fmt.Errorf("warnings detected in statement %d of %d: %v", i+1, len(buildResults), sqlWarnings) + } + + // Move to the next statement's SHOW WARNINGS result set + // For the last statement, there's no next result set + // DML statements don't create result sets, so we only need one NextResultSet call + // to move from SHOW WARNINGS #N to SHOW WARNINGS #(N+1) + if i < len(buildResults)-1 { + if !rows.NextResultSet() { + if err := rows.Err(); err != nil { + return 0, fmt.Errorf("error moving to SHOW WARNINGS for statement %d: %w", i+2, err) + } + return 0, fmt.Errorf("expected SHOW WARNINGS result set for statement %d", i+2) + } + } + } + + return totalDelta, nil +} + // ApplyDMLEventQueries applies multiple DML queries onto the _ghost_ table func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent)) error { var totalDelta int64 @@ -1507,79 +1626,52 @@ func (this *Applier) ApplyDMLEventQueries(dmlEvents [](*binlog.BinlogDMLEvent)) } } - // We batch together the DML queries into multi-statements to minimize network trips. - // We have to use the raw driver connection to access the rows affected - // for each statement in the multi-statement. 
- execErr := conn.Raw(func(driverConn any) error { - ex := driverConn.(driver.ExecerContext) - nvc := driverConn.(driver.NamedValueChecker) - - multiArgs := make([]driver.NamedValue, 0, nArgs) - multiQueryBuilder := strings.Builder{} - for _, buildResult := range buildResults { - for _, arg := range buildResult.args { - nv := driver.NamedValue{Value: driver.Value(arg)} - nvc.CheckNamedValue(&nv) - multiArgs = append(multiArgs, nv) - } - - multiQueryBuilder.WriteString(buildResult.query) - multiQueryBuilder.WriteString(";\n") - } - - res, err := ex.ExecContext(ctx, multiQueryBuilder.String(), multiArgs) - if err != nil { - err = fmt.Errorf("%w; query=%s; args=%+v", err, multiQueryBuilder.String(), multiArgs) - return err - } - - mysqlRes := res.(drivermysql.Result) - - // each DML is either a single insert (delta +1), update (delta +0) or delete (delta -1). - // multiplying by the rows actually affected (either 0 or 1) will give an accurate row delta for this DML event - for i, rowsAffected := range mysqlRes.AllRowsAffected() { - totalDelta += buildResults[i].rowsDelta * rowsAffected - } - return nil - }) - - if execErr != nil { - return rollback(execErr) - } - - // Check for warnings when PanicOnWarnings is enabled + // When PanicOnWarnings is enabled, we need to check warnings after each statement + // in the batch. SHOW WARNINGS only shows warnings from the last statement in a + // multi-statement query, so we interleave SHOW WARNINGS after each DML statement. if this.migrationContext.PanicOnWarnings { - //nolint:execinquery - rows, err := tx.Query("SHOW WARNINGS") + totalDelta, err = this.executeBatchWithWarningChecking(ctx, tx, buildResults) if err != nil { return rollback(err) } - defer rows.Close() - if err = rows.Err(); err != nil { - return rollback(err) - } + } else { + // Fast path: batch together DML queries into multi-statements to minimize network trips. + // We use the raw driver connection to access the rows affected for each statement. 
+ execErr := conn.Raw(func(driverConn any) error { + ex := driverConn.(driver.ExecerContext) + nvc := driverConn.(driver.NamedValueChecker) + + multiArgs := make([]driver.NamedValue, 0, nArgs) + multiQueryBuilder := strings.Builder{} + for _, buildResult := range buildResults { + for _, arg := range buildResult.args { + nv := driver.NamedValue{Value: driver.Value(arg)} + nvc.CheckNamedValue(&nv) + multiArgs = append(multiArgs, nv) + } + + multiQueryBuilder.WriteString(buildResult.query) + multiQueryBuilder.WriteString(";\n") + } - var sqlWarnings []string - for rows.Next() { - var level, message string - var code int - if err := rows.Scan(&level, &code, &message); err != nil { - this.migrationContext.Log.Warningf("Failed to read SHOW WARNINGS row") - continue + res, err := ex.ExecContext(ctx, multiQueryBuilder.String(), multiArgs) + if err != nil { + err = fmt.Errorf("%w; query=%s; args=%+v", err, multiQueryBuilder.String(), multiArgs) + return err } - // Duplicate warnings are formatted differently across mysql versions, hence the optional table name prefix - migrationUniqueKeyExpression := fmt.Sprintf("for key '(%s\\.)?%s'", this.migrationContext.GetGhostTableName(), this.migrationContext.UniqueKey.NameInGhostTable) - matched, _ := regexp.MatchString(migrationUniqueKeyExpression, message) - if strings.Contains(message, "Duplicate entry") && matched { - // Duplicate entry on migration unique key is expected during binlog replay - // (row was already copied during bulk copy phase) - continue + + mysqlRes := res.(drivermysql.Result) + + // each DML is either a single insert (delta +1), update (delta +0) or delete (delta -1). 
+ // multiplying by the rows actually affected (either 0 or 1) will give an accurate row delta for this DML event + for i, rowsAffected := range mysqlRes.AllRowsAffected() { + totalDelta += buildResults[i].rowsDelta * rowsAffected } - sqlWarnings = append(sqlWarnings, fmt.Sprintf("%s: %s (%d)", level, message, code)) - } - if len(sqlWarnings) > 0 { - warningMsg := fmt.Sprintf("Warnings detected during DML event application: %v", sqlWarnings) - return rollback(errors.New(warningMsg)) + return nil + }) + + if execErr != nil { + return rollback(execErr) } } diff --git a/go/logic/applier_test.go b/go/logic/applier_test.go index 9c761373a..1656dd141 100644 --- a/go/logic/applier_test.go +++ b/go/logic/applier_test.go @@ -23,7 +23,6 @@ import ( "github.com/github/gh-ost/go/binlog" "github.com/github/gh-ost/go/mysql" "github.com/github/gh-ost/go/sql" - "github.com/testcontainers/testcontainers-go/wait" ) func TestApplierGenerateSqlModeQuery(t *testing.T) { @@ -213,7 +212,6 @@ func (suite *ApplierTestSuite) SetupSuite() { testmysql.WithDatabase(testMysqlDatabase), testmysql.WithUsername(testMysqlUser), testmysql.WithPassword(testMysqlPass), - testcontainers.WithWaitStrategy(wait.ForExposedPort()), testmysql.WithConfigFile("my.cnf.test"), ) suite.Require().NoError(err) @@ -782,23 +780,131 @@ func (suite *ApplierTestSuite) TestPanicOnWarningsWithDuplicateKeyOnNonMigration suite.Require().Error(err) suite.Require().Contains(err.Error(), "Duplicate entry") - // Verify that the ghost table still has only 3 rows (no data loss) - rows, err := suite.db.Query("SELECT * FROM " + getTestGhostTableName() + " ORDER BY id") + // Verify that the ghost table still has only the original 3 rows with correct data (no data loss) + rows, err := suite.db.Query("SELECT id, email FROM " + getTestGhostTableName() + " ORDER BY id") suite.Require().NoError(err) defer rows.Close() - var count int + var results []struct { + id int + email string + } for rows.Next() { var id int var email string err = 
rows.Scan(&id, &email) suite.Require().NoError(err) - count += 1 + results = append(results, struct { + id int + email string + }{id, email}) + } + suite.Require().NoError(rows.Err()) + + // All 3 original rows should still be present with correct data + suite.Require().Len(results, 3) + suite.Require().Equal(1, results[0].id) + suite.Require().Equal("user1@example.com", results[0].email) + suite.Require().Equal(2, results[1].id) + suite.Require().Equal("user2@example.com", results[1].email) + suite.Require().Equal(3, results[2].id) + suite.Require().Equal("user3@example.com", results[2].email) +} + +func (suite *ApplierTestSuite) TestPanicOnWarningsWithDuplicateCompositeUniqueKey() { + ctx := context.Background() + + var err error + + // Create table with id, email, and username columns + _, err = suite.db.ExecContext(ctx, fmt.Sprintf("CREATE TABLE %s (id INT PRIMARY KEY, email VARCHAR(100), username VARCHAR(100));", getTestTableName())) + suite.Require().NoError(err) + + // Create ghost table with same schema plus a composite unique index on (email, username) + _, err = suite.db.ExecContext(ctx, fmt.Sprintf("CREATE TABLE %s (id INT PRIMARY KEY, email VARCHAR(100), username VARCHAR(100), UNIQUE KEY email_username_unique (email, username));", getTestGhostTableName())) + suite.Require().NoError(err) + + connectionConfig, err := getTestConnectionConfig(ctx, suite.mysqlContainer) + suite.Require().NoError(err) + + migrationContext := newTestMigrationContext() + migrationContext.ApplierConnectionConfig = connectionConfig + migrationContext.SetConnectionConfig("innodb") + + migrationContext.PanicOnWarnings = true + + migrationContext.OriginalTableColumns = sql.NewColumnList([]string{"id", "email", "username"}) + migrationContext.SharedColumns = sql.NewColumnList([]string{"id", "email", "username"}) + migrationContext.MappedSharedColumns = sql.NewColumnList([]string{"id", "email", "username"}) + migrationContext.UniqueKey = &sql.UniqueKey{ + Name: "PRIMARY", + 
NameInGhostTable: "PRIMARY", + Columns: *sql.NewColumnList([]string{"id"}), + } + + applier := NewApplier(migrationContext) + suite.Require().NoError(applier.prepareQueries()) + defer applier.Teardown() + + err = applier.InitDBConnections() + suite.Require().NoError(err) + + // Insert initial rows into ghost table (simulating bulk copy phase) + // alice@example.com + bob is ok due to composite unique index + _, err = suite.db.ExecContext(ctx, fmt.Sprintf("INSERT INTO %s (id, email, username) VALUES (1, 'alice@example.com', 'alice'), (2, 'alice@example.com', 'bob'), (3, 'charlie@example.com', 'charlie');", getTestGhostTableName())) + suite.Require().NoError(err) + + // Simulate binlog event: try to insert a row with duplicate composite key (email + username) + // This should fail with a warning because the ghost table has a composite unique index + dmlEvents := []*binlog.BinlogDMLEvent{ + { + DatabaseName: testMysqlDatabase, + TableName: testMysqlTableName, + DML: binlog.InsertDML, + NewColumnValues: sql.ToColumnValues([]interface{}{4, "alice@example.com", "alice"}), // duplicate (email, username) + }, + } + + // This should return an error when PanicOnWarnings is enabled + err = applier.ApplyDMLEventQueries(dmlEvents) + suite.Require().Error(err) + suite.Require().Contains(err.Error(), "Duplicate entry") + + // Verify that the ghost table still has only the original 3 rows with correct data (no data loss) + rows, err := suite.db.Query("SELECT id, email, username FROM " + getTestGhostTableName() + " ORDER BY id") + suite.Require().NoError(err) + defer rows.Close() + + var results []struct { + id int + email string + username string + } + for rows.Next() { + var id int + var email string + var username string + err = rows.Scan(&id, &email, &username) + suite.Require().NoError(err) + results = append(results, struct { + id int + email string + username string + }{id, email, username}) } suite.Require().NoError(rows.Err()) - // All 3 original rows should still be 
present - suite.Require().Equal(3, count) + // All 3 original rows should still be present with correct data + suite.Require().Len(results, 3) + suite.Require().Equal(1, results[0].id) + suite.Require().Equal("alice@example.com", results[0].email) + suite.Require().Equal("alice", results[0].username) + suite.Require().Equal(2, results[1].id) + suite.Require().Equal("alice@example.com", results[1].email) + suite.Require().Equal("bob", results[1].username) + suite.Require().Equal(3, results[2].id) + suite.Require().Equal("charlie@example.com", results[2].email) + suite.Require().Equal("charlie", results[2].username) } // TestUpdateModifyingUniqueKeyWithDuplicateOnOtherIndex tests the scenario where: @@ -980,6 +1086,397 @@ func (suite *ApplierTestSuite) TestNormalUpdateWithPanicOnWarnings() { suite.Require().NoError(rows.Err()) } +// TestDuplicateOnMigrationKeyAllowedInBinlogReplay tests the positive case where +// a duplicate on the migration unique key during binlog replay is expected and should be allowed +func (suite *ApplierTestSuite) TestDuplicateOnMigrationKeyAllowedInBinlogReplay() { + ctx := context.Background() + + var err error + + // Create table with id and email columns, where id is the primary key + _, err = suite.db.ExecContext(ctx, fmt.Sprintf("CREATE TABLE %s (id INT PRIMARY KEY, email VARCHAR(100));", getTestTableName())) + suite.Require().NoError(err) + + // Create ghost table with same schema plus a new unique index on email + _, err = suite.db.ExecContext(ctx, fmt.Sprintf("CREATE TABLE %s (id INT PRIMARY KEY, email VARCHAR(100), UNIQUE KEY email_unique (email));", getTestGhostTableName())) + suite.Require().NoError(err) + + connectionConfig, err := getTestConnectionConfig(ctx, suite.mysqlContainer) + suite.Require().NoError(err) + + migrationContext := newTestMigrationContext() + migrationContext.ApplierConnectionConfig = connectionConfig + migrationContext.SetConnectionConfig("innodb") + + migrationContext.PanicOnWarnings = true + + 
migrationContext.OriginalTableColumns = sql.NewColumnList([]string{"id", "email"}) + migrationContext.SharedColumns = sql.NewColumnList([]string{"id", "email"}) + migrationContext.MappedSharedColumns = sql.NewColumnList([]string{"id", "email"}) + migrationContext.UniqueKey = &sql.UniqueKey{ + Name: "PRIMARY", + NameInGhostTable: "PRIMARY", + Columns: *sql.NewColumnList([]string{"id"}), + } + + applier := NewApplier(migrationContext) + suite.Require().NoError(applier.prepareQueries()) + defer applier.Teardown() + + err = applier.InitDBConnections() + suite.Require().NoError(err) + + // Insert initial rows into ghost table (simulating bulk copy phase) + _, err = suite.db.ExecContext(ctx, fmt.Sprintf("INSERT INTO %s (id, email) VALUES (1, 'alice@example.com'), (2, 'bob@example.com');", getTestGhostTableName())) + suite.Require().NoError(err) + + // Simulate binlog event: try to insert the same row again (duplicate on PRIMARY KEY - the migration key) + // This is expected during binlog replay when a row was already copied during bulk copy + dmlEvents := []*binlog.BinlogDMLEvent{ + { + DatabaseName: testMysqlDatabase, + TableName: testMysqlTableName, + DML: binlog.InsertDML, + NewColumnValues: sql.ToColumnValues([]interface{}{1, "alice@example.com"}), // duplicate PRIMARY KEY + }, + } + + // This should succeed - duplicate on migration unique key is expected and should be filtered out + err = applier.ApplyDMLEventQueries(dmlEvents) + suite.Require().NoError(err) + + // Verify that the ghost table still has only the original 2 rows with correct data + rows, err := suite.db.Query("SELECT id, email FROM " + getTestGhostTableName() + " ORDER BY id") + suite.Require().NoError(err) + defer rows.Close() + + var results []struct { + id int + email string + } + for rows.Next() { + var id int + var email string + err = rows.Scan(&id, &email) + suite.Require().NoError(err) + results = append(results, struct { + id int + email string + }{id, email}) + } + 
suite.Require().NoError(rows.Err()) + + // Should still have exactly 2 rows with correct data + suite.Require().Len(results, 2) + suite.Require().Equal(1, results[0].id) + suite.Require().Equal("alice@example.com", results[0].email) + suite.Require().Equal(2, results[1].id) + suite.Require().Equal("bob@example.com", results[1].email) +} + +// TestRegexMetacharactersInIndexName tests that index names with regex metacharacters +// are properly escaped. We test with a plus sign in the index name, which without +// QuoteMeta would be treated as a regex quantifier (one or more of 'x' in this case). +// This test verifies the pattern matches ONLY the exact index name, not a regex pattern. +func (suite *ApplierTestSuite) TestRegexMetacharactersInIndexName() { + ctx := context.Background() + + var err error + + // Create tables with an index name containing a plus sign + // Without QuoteMeta, "idx+email" would be treated as a regex pattern where + is a quantifier + _, err = suite.db.ExecContext(ctx, fmt.Sprintf("CREATE TABLE %s (id INT PRIMARY KEY, email VARCHAR(100), UNIQUE KEY `idx+email` (email));", getTestTableName())) + suite.Require().NoError(err) + + // MySQL allows + in index names when quoted + _, err = suite.db.ExecContext(ctx, fmt.Sprintf("CREATE TABLE %s (id INT PRIMARY KEY, email VARCHAR(100), UNIQUE KEY `idx+email` (email));", getTestGhostTableName())) + suite.Require().NoError(err) + + connectionConfig, err := getTestConnectionConfig(ctx, suite.mysqlContainer) + suite.Require().NoError(err) + + migrationContext := newTestMigrationContext() + migrationContext.ApplierConnectionConfig = connectionConfig + migrationContext.SetConnectionConfig("innodb") + + migrationContext.PanicOnWarnings = true + + migrationContext.OriginalTableColumns = sql.NewColumnList([]string{"id", "email"}) + migrationContext.SharedColumns = sql.NewColumnList([]string{"id", "email"}) + migrationContext.MappedSharedColumns = sql.NewColumnList([]string{"id", "email"}) + 
migrationContext.UniqueKey = &sql.UniqueKey{ + Name: "idx+email", + NameInGhostTable: "idx+email", + Columns: *sql.NewColumnList([]string{"email"}), + } + + applier := NewApplier(migrationContext) + suite.Require().NoError(applier.prepareQueries()) + defer applier.Teardown() + + err = applier.InitDBConnections() + suite.Require().NoError(err) + + // Insert initial rows + _, err = suite.db.ExecContext(ctx, fmt.Sprintf("INSERT INTO %s (id, email) VALUES (1, 'alice@example.com'), (2, 'bob@example.com');", getTestGhostTableName())) + suite.Require().NoError(err) + + // Test: duplicate on idx+email (the migration key) should be allowed + // This verifies our regex correctly identifies "idx+email" as the migration key + // Without regexp.QuoteMeta, the + would be treated as a regex quantifier and might not match correctly + dmlEvents := []*binlog.BinlogDMLEvent{ + { + DatabaseName: testMysqlDatabase, + TableName: testMysqlTableName, + DML: binlog.InsertDML, + NewColumnValues: sql.ToColumnValues([]interface{}{3, "alice@example.com"}), + }, + } + + err = applier.ApplyDMLEventQueries(dmlEvents) + suite.Require().NoError(err, "Duplicate on idx+email (migration key) should be allowed with PanicOnWarnings enabled") + + // Test: duplicate on PRIMARY (not the migration key) should fail + dmlEvents = []*binlog.BinlogDMLEvent{ + { + DatabaseName: testMysqlDatabase, + TableName: testMysqlTableName, + DML: binlog.InsertDML, + NewColumnValues: sql.ToColumnValues([]interface{}{1, "charlie@example.com"}), + }, + } + + err = applier.ApplyDMLEventQueries(dmlEvents) + suite.Require().Error(err, "Duplicate on PRIMARY (not migration key) should fail with PanicOnWarnings enabled") + suite.Require().Contains(err.Error(), "Duplicate entry") + + // Verify final state - should still have only the original 2 rows + rows, err := suite.db.Query("SELECT id, email FROM " + getTestGhostTableName() + " ORDER BY id") + suite.Require().NoError(err) + defer rows.Close() + + var results []struct { + id int 
+ email string + } + for rows.Next() { + var id int + var email string + err = rows.Scan(&id, &email) + suite.Require().NoError(err) + results = append(results, struct { + id int + email string + }{id, email}) + } + suite.Require().NoError(rows.Err()) + + suite.Require().Len(results, 2) + suite.Require().Equal(1, results[0].id) + suite.Require().Equal("alice@example.com", results[0].email) + suite.Require().Equal(2, results[1].id) + suite.Require().Equal("bob@example.com", results[1].email) +} + +// TestPanicOnWarningsDisabled tests that when PanicOnWarnings is false, +// warnings are not checked and duplicates are silently ignored +func (suite *ApplierTestSuite) TestPanicOnWarningsDisabled() { + ctx := context.Background() + + var err error + + // Create table with id and email columns + _, err = suite.db.ExecContext(ctx, fmt.Sprintf("CREATE TABLE %s (id INT PRIMARY KEY, email VARCHAR(100));", getTestTableName())) + suite.Require().NoError(err) + + // Create ghost table with unique index on email + _, err = suite.db.ExecContext(ctx, fmt.Sprintf("CREATE TABLE %s (id INT PRIMARY KEY, email VARCHAR(100), UNIQUE KEY email_unique (email));", getTestGhostTableName())) + suite.Require().NoError(err) + + connectionConfig, err := getTestConnectionConfig(ctx, suite.mysqlContainer) + suite.Require().NoError(err) + + migrationContext := newTestMigrationContext() + migrationContext.ApplierConnectionConfig = connectionConfig + migrationContext.SetConnectionConfig("innodb") + + // PanicOnWarnings is false (default) + migrationContext.PanicOnWarnings = false + + migrationContext.OriginalTableColumns = sql.NewColumnList([]string{"id", "email"}) + migrationContext.SharedColumns = sql.NewColumnList([]string{"id", "email"}) + migrationContext.MappedSharedColumns = sql.NewColumnList([]string{"id", "email"}) + migrationContext.UniqueKey = &sql.UniqueKey{ + Name: "PRIMARY", + NameInGhostTable: "PRIMARY", + Columns: *sql.NewColumnList([]string{"id"}), + } + + applier := 
NewApplier(migrationContext) + suite.Require().NoError(applier.prepareQueries()) + defer applier.Teardown() + + err = applier.InitDBConnections() + suite.Require().NoError(err) + + // Insert initial rows into ghost table + _, err = suite.db.ExecContext(ctx, fmt.Sprintf("INSERT INTO %s (id, email) VALUES (1, 'alice@example.com'), (2, 'bob@example.com');", getTestGhostTableName())) + suite.Require().NoError(err) + + // Simulate binlog event: insert duplicate email on non-migration index + // With PanicOnWarnings disabled, this should succeed (INSERT IGNORE skips it) + dmlEvents := []*binlog.BinlogDMLEvent{ + { + DatabaseName: testMysqlDatabase, + TableName: testMysqlTableName, + DML: binlog.InsertDML, + NewColumnValues: sql.ToColumnValues([]interface{}{3, "alice@example.com"}), // duplicate email + }, + } + + // Should succeed because PanicOnWarnings is disabled + err = applier.ApplyDMLEventQueries(dmlEvents) + suite.Require().NoError(err) + + // Verify that only 2 original rows exist with correct data (the duplicate was silently ignored) + rows, err := suite.db.Query("SELECT id, email FROM " + getTestGhostTableName() + " ORDER BY id") + suite.Require().NoError(err) + defer rows.Close() + + var results []struct { + id int + email string + } + for rows.Next() { + var id int + var email string + err = rows.Scan(&id, &email) + suite.Require().NoError(err) + results = append(results, struct { + id int + email string + }{id, email}) + } + suite.Require().NoError(rows.Err()) + + // Should still have exactly 2 original rows (id=3 was silently ignored) + suite.Require().Len(results, 2) + suite.Require().Equal(1, results[0].id) + suite.Require().Equal("alice@example.com", results[0].email) + suite.Require().Equal(2, results[1].id) + suite.Require().Equal("bob@example.com", results[1].email) +} + +// TestMultipleDMLEventsInBatch tests that multiple DML events are processed in a single transaction +// and that if one fails due to a warning, the entire batch is rolled back - 
including events that +// come AFTER the failure. This proves true transaction atomicity. +func (suite *ApplierTestSuite) TestMultipleDMLEventsInBatch() { + ctx := context.Background() + + var err error + + // Create table with id and email columns + _, err = suite.db.ExecContext(ctx, fmt.Sprintf("CREATE TABLE %s (id INT PRIMARY KEY, email VARCHAR(100));", getTestTableName())) + suite.Require().NoError(err) + + // Create ghost table with unique index on email + _, err = suite.db.ExecContext(ctx, fmt.Sprintf("CREATE TABLE %s (id INT PRIMARY KEY, email VARCHAR(100), UNIQUE KEY email_unique (email));", getTestGhostTableName())) + suite.Require().NoError(err) + + connectionConfig, err := getTestConnectionConfig(ctx, suite.mysqlContainer) + suite.Require().NoError(err) + + migrationContext := newTestMigrationContext() + migrationContext.ApplierConnectionConfig = connectionConfig + migrationContext.SetConnectionConfig("innodb") + + migrationContext.PanicOnWarnings = true + + migrationContext.OriginalTableColumns = sql.NewColumnList([]string{"id", "email"}) + migrationContext.SharedColumns = sql.NewColumnList([]string{"id", "email"}) + migrationContext.MappedSharedColumns = sql.NewColumnList([]string{"id", "email"}) + migrationContext.UniqueKey = &sql.UniqueKey{ + Name: "PRIMARY", + NameInGhostTable: "PRIMARY", + Columns: *sql.NewColumnList([]string{"id"}), + } + + applier := NewApplier(migrationContext) + suite.Require().NoError(applier.prepareQueries()) + defer applier.Teardown() + + err = applier.InitDBConnections() + suite.Require().NoError(err) + + // Insert initial rows into ghost table + _, err = suite.db.ExecContext(ctx, fmt.Sprintf("INSERT INTO %s (id, email) VALUES (1, 'alice@example.com'), (3, 'charlie@example.com');", getTestGhostTableName())) + suite.Require().NoError(err) + + // Simulate multiple binlog events in a batch: + // 1. Duplicate on PRIMARY KEY (allowed - expected during binlog replay) + // 2. 
Duplicate on email index (should fail) ← FAILURE IN MIDDLE + // 3. Valid insert (would succeed) ← SUCCESS AFTER FAILURE + // + // The critical test: Even though event #3 would succeed on its own, it must be rolled back + // because event #2 failed. This proves the entire batch is truly atomic. + dmlEvents := []*binlog.BinlogDMLEvent{ + { + DatabaseName: testMysqlDatabase, + TableName: testMysqlTableName, + DML: binlog.InsertDML, + NewColumnValues: sql.ToColumnValues([]interface{}{1, "alice@example.com"}), // duplicate PRIMARY (normally allowed) + }, + { + DatabaseName: testMysqlDatabase, + TableName: testMysqlTableName, + DML: binlog.InsertDML, + NewColumnValues: sql.ToColumnValues([]interface{}{4, "alice@example.com"}), // duplicate email (FAILS) + }, + { + DatabaseName: testMysqlDatabase, + TableName: testMysqlTableName, + DML: binlog.InsertDML, + NewColumnValues: sql.ToColumnValues([]interface{}{2, "bob@example.com"}), // valid insert (would succeed) + }, + } + + // Should fail due to the second event + err = applier.ApplyDMLEventQueries(dmlEvents) + suite.Require().Error(err) + suite.Require().Contains(err.Error(), "Duplicate entry") + + // Verify that the entire batch was rolled back - still only the original 2 rows + // Critically: id=2 (bob@example.com) from event #3 should NOT be present + rows, err := suite.db.Query("SELECT id, email FROM " + getTestGhostTableName() + " ORDER BY id") + suite.Require().NoError(err) + defer rows.Close() + + var results []struct { + id int + email string + } + for rows.Next() { + var id int + var email string + err = rows.Scan(&id, &email) + suite.Require().NoError(err) + results = append(results, struct { + id int + email string + }{id, email}) + } + suite.Require().NoError(rows.Err()) + + // Should still have exactly 2 original rows (entire batch was rolled back) + // This proves that even event #3 (which would have succeeded) was rolled back + suite.Require().Len(results, 2) + suite.Require().Equal(1, results[0].id) + 
suite.Require().Equal("alice@example.com", results[0].email) + suite.Require().Equal(3, results[1].id) + suite.Require().Equal("charlie@example.com", results[1].email) + // Critically: id=2 (bob@example.com) is NOT present, proving event #3 was rolled back +} + func TestApplier(t *testing.T) { suite.Run(t, new(ApplierTestSuite)) } diff --git a/go/logic/migrator_test.go b/go/logic/migrator_test.go index c4fd49233..fcffa777a 100644 --- a/go/logic/migrator_test.go +++ b/go/logic/migrator_test.go @@ -32,7 +32,6 @@ import ( "github.com/github/gh-ost/go/mysql" "github.com/github/gh-ost/go/sql" "github.com/testcontainers/testcontainers-go" - "github.com/testcontainers/testcontainers-go/wait" ) func TestMigratorOnChangelogEvent(t *testing.T) { @@ -302,7 +301,6 @@ func (suite *MigratorTestSuite) SetupSuite() { testmysql.WithDatabase(testMysqlDatabase), testmysql.WithUsername(testMysqlUser), testmysql.WithPassword(testMysqlPass), - testcontainers.WithWaitStrategy(wait.ForExposedPort()), testmysql.WithConfigFile("my.cnf.test"), ) suite.Require().NoError(err) diff --git a/go/logic/streamer_test.go b/go/logic/streamer_test.go index 2c5d3886b..8e0b57f80 100644 --- a/go/logic/streamer_test.go +++ b/go/logic/streamer_test.go @@ -13,7 +13,6 @@ import ( "github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go/modules/mysql" - "github.com/testcontainers/testcontainers-go/wait" "golang.org/x/sync/errgroup" ) @@ -31,7 +30,6 @@ func (suite *EventsStreamerTestSuite) SetupSuite() { mysql.WithDatabase(testMysqlDatabase), mysql.WithUsername(testMysqlUser), mysql.WithPassword(testMysqlPass), - testcontainers.WithWaitStrategy(wait.ForExposedPort()), ) suite.Require().NoError(err) diff --git a/localtests/panic-on-warnings-batch-middle/create.sql b/localtests/panic-on-warnings-batch-middle/create.sql new file mode 100644 index 000000000..e4883ca66 --- /dev/null +++ b/localtests/panic-on-warnings-batch-middle/create.sql @@ -0,0 +1,11 @@ +drop table if exists 
gh_ost_test; +create table gh_ost_test ( + id int auto_increment, + email varchar(255) not null, + primary key (id) +) auto_increment=1; + +-- Insert initial data - all unique emails +insert into gh_ost_test (email) values ('alice@example.com'); +insert into gh_ost_test (email) values ('bob@example.com'); +insert into gh_ost_test (email) values ('charlie@example.com'); diff --git a/localtests/panic-on-warnings-batch-middle/expect_failure b/localtests/panic-on-warnings-batch-middle/expect_failure new file mode 100644 index 000000000..11800efb0 --- /dev/null +++ b/localtests/panic-on-warnings-batch-middle/expect_failure @@ -0,0 +1 @@ +ERROR warnings detected in statement 1 of 2 diff --git a/localtests/panic-on-warnings-batch-middle/extra_args b/localtests/panic-on-warnings-batch-middle/extra_args new file mode 100644 index 000000000..55cdcd0e8 --- /dev/null +++ b/localtests/panic-on-warnings-batch-middle/extra_args @@ -0,0 +1 @@ +--default-retries=1 --panic-on-warnings --alter "add unique index email_idx(email)" --postpone-cut-over-flag-file=/tmp/gh-ost-test.postpone-cutover diff --git a/localtests/panic-on-warnings-batch-middle/test.sh b/localtests/panic-on-warnings-batch-middle/test.sh new file mode 100755 index 000000000..cbe3829c7 --- /dev/null +++ b/localtests/panic-on-warnings-batch-middle/test.sh @@ -0,0 +1,57 @@ +#!/bin/bash +# Custom test: inject batched DML events AFTER row copy completes +# Tests that warnings in the middle of a DML batch are detected + +# Create postpone flag file (referenced in extra_args) +postpone_flag_file=/tmp/gh-ost-test.postpone-cutover +touch $postpone_flag_file + +# Set table names (required by build_ghost_command) +table_name="gh_ost_test" +ghost_table_name="_gh_ost_test_gho" + +# Build gh-ost command using framework function +build_ghost_command + +# Run in background +echo_dot +echo > $test_logfile +bash -c "$cmd" >>$test_logfile 2>&1 & +ghost_pid=$! 
+
+# Wait for row copy to complete
+echo_dot
+for i in {1..30}; do
+  grep -q "Row copy complete" $test_logfile && break
+  ps -p $ghost_pid > /dev/null || { echo; echo "ERROR gh-ost exited early"; rm -f $postpone_flag_file; return 1; }
+  sleep 1; echo_dot
+done
+grep -q "Row copy complete" $test_logfile || { echo; echo "ERROR row copy did not complete before timeout"; rm -f $postpone_flag_file; return 1; }
+# Inject batched DML events that will create warnings
+# These must be in a single transaction to be batched during binlog replay
+echo_dot
+gh-ost-test-mysql-master test << 'EOF'
+BEGIN;
+-- INSERT with duplicate PRIMARY KEY - warning on migration key (filtered by gh-ost)
+INSERT IGNORE INTO gh_ost_test (id, email) VALUES (1, 'duplicate_pk@example.com');
+-- INSERT with duplicate email - warning on unique index (should trigger failure)
+INSERT IGNORE INTO gh_ost_test (email) VALUES ('alice@example.com');
+-- INSERT with unique data - would succeed if not for previous warning
+INSERT IGNORE INTO gh_ost_test (email) VALUES ('new@example.com');
+COMMIT;
+EOF
+
+# Wait for binlog events to replicate and be applied
+sleep 10; echo_dot
+
+# Complete cutover by removing postpone flag
+rm -f $postpone_flag_file
+
+# Wait for gh-ost to complete
+wait $ghost_pid
+execution_result=$?
+rm -f $postpone_flag_file
+
+# Validate using framework function
+validate_expected_failure
+return $?
diff --git a/localtests/panic-on-warnings-update-pk-with-duplicate-on-new-unique-index/create.sql b/localtests/panic-on-warnings-update-pk-with-duplicate-on-new-unique-index/create.sql new file mode 100644 index 000000000..444092cb2 --- /dev/null +++ b/localtests/panic-on-warnings-update-pk-with-duplicate-on-new-unique-index/create.sql @@ -0,0 +1,10 @@ +drop table if exists gh_ost_test; +create table gh_ost_test ( + id int auto_increment, + email varchar(100) not null, + primary key (id) +) auto_increment=1; + +insert into gh_ost_test (email) values ('alice@example.com'); +insert into gh_ost_test (email) values ('bob@example.com'); +insert into gh_ost_test (email) values ('charlie@example.com'); diff --git a/localtests/panic-on-warnings-update-pk-with-duplicate-on-new-unique-index/expect_failure b/localtests/panic-on-warnings-update-pk-with-duplicate-on-new-unique-index/expect_failure new file mode 100644 index 000000000..fb8dc562a --- /dev/null +++ b/localtests/panic-on-warnings-update-pk-with-duplicate-on-new-unique-index/expect_failure @@ -0,0 +1 @@ +ERROR warnings detected in statement 1 of 1 diff --git a/localtests/panic-on-warnings-update-pk-with-duplicate-on-new-unique-index/extra_args b/localtests/panic-on-warnings-update-pk-with-duplicate-on-new-unique-index/extra_args new file mode 100644 index 000000000..04c41a471 --- /dev/null +++ b/localtests/panic-on-warnings-update-pk-with-duplicate-on-new-unique-index/extra_args @@ -0,0 +1 @@ +--panic-on-warnings --alter "ADD UNIQUE KEY email_unique (email)" --postpone-cut-over-flag-file=/tmp/gh-ost-test.postpone-cutover diff --git a/localtests/panic-on-warnings-update-pk-with-duplicate-on-new-unique-index/test.sh b/localtests/panic-on-warnings-update-pk-with-duplicate-on-new-unique-index/test.sh new file mode 100755 index 000000000..cd34937a0 --- /dev/null +++ b/localtests/panic-on-warnings-update-pk-with-duplicate-on-new-unique-index/test.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# Custom test: inject conflicting data 
AFTER row copy completes +# This tests the DML event application code path, not row copy + +# Create postpone flag file (referenced in extra_args) +postpone_flag_file=/tmp/gh-ost-test.postpone-cutover +touch $postpone_flag_file + +# Build gh-ost command using framework function +build_ghost_command + +# Run in background +echo_dot +# Clear log file before starting gh-ost +echo > $test_logfile +bash -c "$cmd" >>$test_logfile 2>&1 & +ghost_pid=$! + +# Wait for row copy to complete +echo_dot +for i in {1..30}; do + grep -q "Row copy complete" $test_logfile && break + ps -p $ghost_pid > /dev/null || { echo; echo "ERROR gh-ost exited early"; rm -f $postpone_flag_file; return 1; } + sleep 1; echo_dot +done + +# Inject conflicting SQL after row copy (UPDATE with PK change creates DELETE+INSERT in binlog) +echo_dot +gh-ost-test-mysql-master test -e "update gh_ost_test set id = 200, email = 'alice@example.com' where id = 2" + +# Wait for binlog event to replicate and be applied +sleep 10; echo_dot + +# Complete cutover by removing postpone flag +rm -f $postpone_flag_file + +# Wait for gh-ost to complete +wait $ghost_pid +execution_result=$? +rm -f $postpone_flag_file + +# Validate using framework function +validate_expected_failure +return $? diff --git a/localtests/test.sh b/localtests/test.sh index 404eeece3..68dd4150c 100755 --- a/localtests/test.sh +++ b/localtests/test.sh @@ -107,12 +107,6 @@ verify_master_and_replica() { fi } -exec_cmd() { - echo "$@" - command "$@" 1>$test_logfile 2>&1 - return $? -} - echo_dot() { echo -n "." 
} @@ -141,6 +135,77 @@ start_replication() { done } +build_ghost_command() { + # Build gh-ost command with all standard options + # Expects: ghost_binary, replica_host, replica_port, master_host, master_port, + # table_name, storage_engine, throttle_flag_file, extra_args + cmd="GOTRACEBACK=crash $ghost_binary \ + --user=gh-ost \ + --password=gh-ost \ + --host=$replica_host \ + --port=$replica_port \ + --assume-master-host=${master_host}:${master_port} \ + --database=test \ + --table=${table_name} \ + --storage-engine=${storage_engine} \ + --alter='engine=${storage_engine}' \ + --exact-rowcount \ + --assume-rbr \ + --skip-metadata-lock-check \ + --initially-drop-old-table \ + --initially-drop-ghost-table \ + --throttle-query='select timestampdiff(second, min(last_update), now()) < 5 from _${table_name}_ghc' \ + --throttle-flag-file=$throttle_flag_file \ + --serve-socket-file=/tmp/gh-ost.test.sock \ + --initially-drop-socket-file \ + --test-on-replica \ + --default-retries=3 \ + --chunk-size=10 \ + --verbose \ + --debug \ + --stack \ + --checkpoint \ + --execute ${extra_args[@]}" +} + +validate_expected_failure() { + # Check if test expected to fail and validate error message + # Expects: tests_path, test_name, execution_result, test_logfile + if [ -f $tests_path/$test_name/expect_failure ]; then + if [ $execution_result -eq 0 ]; then + echo + echo "ERROR $test_name execution was expected to exit on error but did not." + echo "=== Last 50 lines of $test_logfile ===" + tail -n 50 $test_logfile + echo "=== End log excerpt ===" + return 1 + fi + if [ -s $tests_path/$test_name/expect_failure ]; then + # 'expect_failure' file has content. We expect to find this content in the log. + expected_error_message="$(cat $tests_path/$test_name/expect_failure)" + if grep -q "$expected_error_message" $test_logfile; then + return 0 + fi + echo + echo "ERROR $test_name execution was expected to exit with error message '${expected_error_message}' but did not." 
+ echo "=== Last 50 lines of $test_logfile ===" + tail -n 50 $test_logfile + echo "=== End log excerpt ===" + return 1 + fi + # 'expect_failure' file has no content. We generally agree that the failure is correct + return 0 + fi + + if [ $execution_result -ne 0 ]; then + echo + echo "ERROR $test_name execution failure. cat $test_logfile:" + cat $test_logfile + return 1 + fi + return 0 +} + sysbench_prepare() { local mysql_host="$1" local mysql_port="$2" @@ -225,7 +290,7 @@ test_single() { cat $tests_path/$test_name/create.sql return 1 fi - + if [ -f $tests_path/$test_name/before.sql ]; then gh-ost-test-mysql-master --default-character-set=utf8mb4 test < $tests_path/$test_name/before.sql gh-ost-test-mysql-replica --default-character-set=utf8mb4 test < $tests_path/$test_name/before.sql @@ -259,6 +324,15 @@ test_single() { table_name="gh_ost_test" ghost_table_name="_gh_ost_test_gho" + + # Check for custom test script + if [ -f $tests_path/$test_name/test.sh ]; then + # Source the custom test script which can override default behavior + # It has access to all variables and functions from this script + source $tests_path/$test_name/test.sh + return $? + fi + # test with sysbench oltp write load if [[ "$test_name" == "sysbench" ]]; then if ! 
command -v sysbench &>/dev/null; then @@ -279,38 +353,12 @@ test_single() { fi trap cleanup SIGINT - # - cmd="GOTRACEBACK=crash $ghost_binary \ - --user=gh-ost \ - --password=gh-ost \ - --host=$replica_host \ - --port=$replica_port \ - --assume-master-host=${master_host}:${master_port} - --database=test \ - --table=${table_name} \ - --storage-engine=${storage_engine} \ - --alter='engine=${storage_engine}' \ - --exact-rowcount \ - --assume-rbr \ - --skip-metadata-lock-check \ - --initially-drop-old-table \ - --initially-drop-ghost-table \ - --throttle-query='select timestampdiff(second, min(last_update), now()) < 5 from _${table_name}_ghc' \ - --throttle-flag-file=$throttle_flag_file \ - --serve-socket-file=/tmp/gh-ost.test.sock \ - --initially-drop-socket-file \ - --test-on-replica \ - --default-retries=3 \ - --chunk-size=10 \ - --verbose \ - --debug \ - --stack \ - --checkpoint \ - --execute ${extra_args[@]}" + # Build and execute gh-ost command + build_ghost_command echo_dot echo $cmd >$exec_command_file echo_dot - bash $exec_command_file 1>$test_logfile 2>&1 + bash $exec_command_file >$test_logfile 2>&1 execution_result=$? cleanup @@ -329,32 +377,9 @@ test_single() { gh-ost-test-mysql-master --default-character-set=utf8mb4 test <$tests_path/$test_name/destroy.sql fi - if [ -f $tests_path/$test_name/expect_failure ]; then - if [ $execution_result -eq 0 ]; then - echo - echo "ERROR $test_name execution was expected to exit on error but did not. cat $test_logfile" - return 1 - fi - if [ -s $tests_path/$test_name/expect_failure ]; then - # 'expect_failure' file has content. We expect to find this content in the log. - expected_error_message="$(cat $tests_path/$test_name/expect_failure)" - if grep -q "$expected_error_message" $test_logfile; then - return 0 - fi - echo - echo "ERROR $test_name execution was expected to exit with error message '${expected_error_message}' but did not. cat $test_logfile" - return 1 - fi - # 'expect_failure' file has no content. 
We generally agree that the failure is correct
-    return 0
-  fi
-
-  if [ $execution_result -ne 0 ]; then
-    echo
-    echo "ERROR $test_name execution failure. cat $test_logfile:"
-    cat $test_logfile
-    return 1
-  fi
+  # Validate expected failure or success; only expected-failure tests finish here,
+  validate_expected_failure || return 1
+  [ -f $tests_path/$test_name/expect_failure ] && return 0
 
   gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "show create table ${ghost_table_name}\G" -ss >$ghost_structure_output_file