diff --git a/admin_test.go b/admin_test.go
index 40e2592..c61c693 100644
--- a/admin_test.go
+++ b/admin_test.go
@@ -477,3 +477,353 @@ func TestCreateIndexWithBackgroundOption(t *testing.T) {
 		require.Contains(t, row, "name")
 	})
 }
+
+func TestCreateIndexes(t *testing.T) {
+	testutil.RunOnAllDBs(t, func(t *testing.T, db testutil.TestDB) {
+		dbName := fmt.Sprintf("testdb_create_idxs_%s", db.Name)
+		defer testutil.CleanupDatabase(t, db.Client, dbName)
+
+		ctx := context.Background()
+
+		_, err := db.Client.Database(dbName).Collection("users").InsertOne(ctx, bson.M{"name": "alice", "email": "alice@example.com", "age": 30})
+		require.NoError(t, err)
+
+		gc := gomongo.NewClient(db.Client)
+
+		result, err := gc.Execute(ctx, dbName, `db.users.createIndexes([{ key: { name: 1 }, name: "name_idx" }, { key: { email: 1 }, name: "email_idx" }])`)
+		require.NoError(t, err)
+		require.NotNil(t, result)
+		require.Equal(t, 2, len(result.Value))
+
+		// Values should be index name strings
+		name0, ok := result.Value[0].(string)
+		require.True(t, ok)
+		require.Equal(t, "name_idx", name0)
+		name1, ok := result.Value[1].(string)
+		require.True(t, ok)
+		require.Equal(t, "email_idx", name1)
+
+		// Verify indexes exist
+		idxResult, err := gc.Execute(ctx, dbName, `db.users.getIndexes()`)
+		require.NoError(t, err)
+		require.Equal(t, 3, len(idxResult.Value)) // _id + name_idx + email_idx
+	})
+}
+
+func TestDbStats(t *testing.T) {
+	testutil.RunOnAllDBs(t, func(t *testing.T, db testutil.TestDB) {
+		dbName := fmt.Sprintf("testdb_db_stats_%s", db.Name)
+		defer testutil.CleanupDatabase(t, db.Client, dbName)
+
+		ctx := context.Background()
+
+		// Create some data
+		_, err := db.Client.Database(dbName).Collection("test").InsertOne(ctx, bson.M{"x": 1})
+		require.NoError(t, err)
+
+		gc := gomongo.NewClient(db.Client)
+
+		result, err := gc.Execute(ctx, dbName, `db.stats()`)
+		require.NoError(t, err)
+		require.NotNil(t, result)
+		require.Equal(t, 1, len(result.Value))
+
+		row := valueToJSON(result.Value[0])
+		require.Contains(t, row, `"db"`)
+		require.Contains(t, row, `"collections"`)
+	})
+}
+
+func TestCollectionStats(t *testing.T) {
+	testutil.RunOnAllDBs(t, func(t *testing.T, db testutil.TestDB) {
+		dbName := fmt.Sprintf("testdb_coll_stats_%s", db.Name)
+		defer testutil.CleanupDatabase(t, db.Client, dbName)
+
+		ctx := context.Background()
+
+		_, err := db.Client.Database(dbName).Collection("users").InsertOne(ctx, bson.M{"name": "alice"})
+		require.NoError(t, err)
+
+		gc := gomongo.NewClient(db.Client)
+
+		result, err := gc.Execute(ctx, dbName, `db.users.stats()`)
+		require.NoError(t, err)
+		require.NotNil(t, result)
+		require.Equal(t, 1, len(result.Value))
+
+		row := valueToJSON(result.Value[0])
+		require.Contains(t, row, `"ns"`)
+		require.Contains(t, row, `"count"`)
+	})
+}
+
+func TestServerStatus(t *testing.T) {
+	// serverStatus is not supported on DocumentDB
+	testutil.RunOnMongoDBOnly(t, func(t *testing.T, db testutil.TestDB) {
+		dbName := fmt.Sprintf("testdb_server_status_%s", db.Name)
+		defer testutil.CleanupDatabase(t, db.Client, dbName)
+
+		ctx := context.Background()
+		gc := gomongo.NewClient(db.Client)
+
+		result, err := gc.Execute(ctx, dbName, `db.serverStatus()`)
+		require.NoError(t, err)
+		require.NotNil(t, result)
+		require.Equal(t, 1, len(result.Value))
+
+		row := valueToJSON(result.Value[0])
+		require.Contains(t, row, `"ok"`)
+	})
+}
+
+func TestServerBuildInfo(t *testing.T) {
+	testutil.RunOnAllDBs(t, func(t *testing.T, db testutil.TestDB) {
+		dbName := fmt.Sprintf("testdb_build_info_%s", db.Name)
+		defer testutil.CleanupDatabase(t, db.Client, dbName)
+
+		ctx := context.Background()
+		gc := gomongo.NewClient(db.Client)
+
+		result, err := gc.Execute(ctx, dbName, `db.serverBuildInfo()`)
+		require.NoError(t, err)
+		require.NotNil(t, result)
+		require.Equal(t, 1, len(result.Value))
+
+		row := valueToJSON(result.Value[0])
+		require.Contains(t, row, `"version"`)
+	})
+}
+
+func TestDbVersion(t *testing.T) {
+	testutil.RunOnAllDBs(t, func(t *testing.T, db testutil.TestDB) {
+		dbName := fmt.Sprintf("testdb_db_version_%s", db.Name)
+		defer testutil.CleanupDatabase(t, db.Client, dbName)
+
+		ctx := context.Background()
+		gc := gomongo.NewClient(db.Client)
+
+		result, err := gc.Execute(ctx, dbName, `db.version()`)
+		require.NoError(t, err)
+		require.NotNil(t, result)
+		require.Equal(t, 1, len(result.Value))
+
+		version, ok := result.Value[0].(string)
+		require.True(t, ok)
+		require.NotEmpty(t, version)
+		// Version should look like a semver (e.g., "4.4.0", "8.0.0")
+		require.True(t, strings.Contains(version, "."), "version should contain dots: %s", version)
+	})
+}
+
+func TestHostInfo(t *testing.T) {
+	testutil.RunOnAllDBs(t, func(t *testing.T, db testutil.TestDB) {
+		dbName := fmt.Sprintf("testdb_host_info_%s", db.Name)
+		defer testutil.CleanupDatabase(t, db.Client, dbName)
+
+		ctx := context.Background()
+		gc := gomongo.NewClient(db.Client)
+
+		result, err := gc.Execute(ctx, dbName, `db.hostInfo()`)
+		require.NoError(t, err)
+		require.NotNil(t, result)
+		require.Equal(t, 1, len(result.Value))
+
+		row := valueToJSON(result.Value[0])
+		require.Contains(t, row, `"ok"`)
+	})
+}
+
+func TestListCommands(t *testing.T) {
+	testutil.RunOnAllDBs(t, func(t *testing.T, db testutil.TestDB) {
+		dbName := fmt.Sprintf("testdb_list_cmds_%s", db.Name)
+		defer testutil.CleanupDatabase(t, db.Client, dbName)
+
+		ctx := context.Background()
+		gc := gomongo.NewClient(db.Client)
+
+		result, err := gc.Execute(ctx, dbName, `db.listCommands()`)
+		require.NoError(t, err)
+		require.NotNil(t, result)
+		require.Equal(t, 1, len(result.Value))
+
+		row := valueToJSON(result.Value[0])
+		require.Contains(t, row, `"ok"`)
+		require.Contains(t, row, `"commands"`)
+	})
+}
+
+func TestDataSize(t *testing.T) {
+	testutil.RunOnAllDBs(t, func(t *testing.T, db testutil.TestDB) {
+		dbName := fmt.Sprintf("testdb_data_size_%s", db.Name)
+		defer testutil.CleanupDatabase(t, db.Client, dbName)
+
+		ctx := context.Background()
+
+		_, err := db.Client.Database(dbName).Collection("users").InsertOne(ctx, bson.M{"name": "alice"})
+		require.NoError(t, err)
+
+		gc := gomongo.NewClient(db.Client)
+
+		result, err := gc.Execute(ctx, dbName, `db.users.dataSize()`)
+		require.NoError(t, err)
+		require.NotNil(t, result)
+		require.Equal(t, 1, len(result.Value))
+		// dataSize returns a numeric value (int32 or int64)
+		require.NotNil(t, result.Value[0])
+	})
+}
+
+func TestStorageSize(t *testing.T) {
+	testutil.RunOnAllDBs(t, func(t *testing.T, db testutil.TestDB) {
+		dbName := fmt.Sprintf("testdb_storage_size_%s", db.Name)
+		defer testutil.CleanupDatabase(t, db.Client, dbName)
+
+		ctx := context.Background()
+
+		_, err := db.Client.Database(dbName).Collection("users").InsertOne(ctx, bson.M{"name": "alice"})
+		require.NoError(t, err)
+
+		gc := gomongo.NewClient(db.Client)
+
+		result, err := gc.Execute(ctx, dbName, `db.users.storageSize()`)
+		require.NoError(t, err)
+		require.NotNil(t, result)
+		require.Equal(t, 1, len(result.Value))
+		require.NotNil(t, result.Value[0])
+	})
+}
+
+func TestTotalIndexSize(t *testing.T) {
+	testutil.RunOnAllDBs(t, func(t *testing.T, db testutil.TestDB) {
+		dbName := fmt.Sprintf("testdb_total_idx_size_%s", db.Name)
+		defer testutil.CleanupDatabase(t, db.Client, dbName)
+
+		ctx := context.Background()
+
+		_, err := db.Client.Database(dbName).Collection("users").InsertOne(ctx, bson.M{"name": "alice"})
+		require.NoError(t, err)
+
+		gc := gomongo.NewClient(db.Client)
+
+		result, err := gc.Execute(ctx, dbName, `db.users.totalIndexSize()`)
+		require.NoError(t, err)
+		require.NotNil(t, result)
+		require.Equal(t, 1, len(result.Value))
+		require.NotNil(t, result.Value[0])
+	})
+}
+
+func TestTotalSize(t *testing.T) {
+	testutil.RunOnAllDBs(t, func(t *testing.T, db testutil.TestDB) {
+		dbName := fmt.Sprintf("testdb_total_size_%s", db.Name)
+		defer testutil.CleanupDatabase(t, db.Client, dbName)
+
+		ctx := context.Background()
+
+		_, err := db.Client.Database(dbName).Collection("users").InsertOne(ctx, bson.M{"name": "alice"})
+		require.NoError(t, err)
+
+		gc := gomongo.NewClient(db.Client)
+
+		result, err := gc.Execute(ctx, dbName, `db.users.totalSize()`)
+		require.NoError(t, err)
+		require.NotNil(t, result)
+		require.Equal(t, 1, len(result.Value))
+		// totalSize is int64 (storageSize + totalIndexSize)
+		_, ok := result.Value[0].(int64)
+		require.True(t, ok, "expected int64, got %T", result.Value[0])
+	})
+}
+
+func TestIsCapped(t *testing.T) {
+	testutil.RunOnAllDBs(t, func(t *testing.T, db testutil.TestDB) {
+		dbName := fmt.Sprintf("testdb_is_capped_%s", db.Name)
+		defer testutil.CleanupDatabase(t, db.Client, dbName)
+
+		ctx := context.Background()
+
+		// Regular collection should not be capped
+		_, err := db.Client.Database(dbName).Collection("users").InsertOne(ctx, bson.M{"name": "alice"})
+		require.NoError(t, err)
+
+		gc := gomongo.NewClient(db.Client)
+
+		result, err := gc.Execute(ctx, dbName, `db.users.isCapped()`)
+		require.NoError(t, err)
+		require.NotNil(t, result)
+		require.Equal(t, 1, len(result.Value))
+
+		capped, ok := result.Value[0].(bool)
+		require.True(t, ok)
+		require.False(t, capped)
+	})
+}
+
+func TestIsCappedTrue(t *testing.T) {
+	testutil.RunOnMongoDBOnly(t, func(t *testing.T, db testutil.TestDB) {
+		dbName := fmt.Sprintf("testdb_is_capped_true_%s", db.Name)
+		defer testutil.CleanupDatabase(t, db.Client, dbName)
+
+		ctx := context.Background()
+		gc := gomongo.NewClient(db.Client)
+
+		// Create a capped collection
+		_, err := gc.Execute(ctx, dbName, `db.createCollection("capped_coll", { capped: true, size: 1048576 })`)
+		require.NoError(t, err)
+
+		result, err := gc.Execute(ctx, dbName, `db.capped_coll.isCapped()`)
+		require.NoError(t, err)
+		require.NotNil(t, result)
+		require.Equal(t, 1, len(result.Value))
+
+		capped, ok := result.Value[0].(bool)
+		require.True(t, ok)
+		require.True(t, capped)
+	})
+}
+
+func TestValidate(t *testing.T) {
+	testutil.RunOnAllDBs(t, func(t *testing.T, db testutil.TestDB) {
+		dbName := fmt.Sprintf("testdb_validate_%s", db.Name)
+		defer testutil.CleanupDatabase(t, db.Client, dbName)
+
+		ctx := context.Background()
+
+		_, err := db.Client.Database(dbName).Collection("users").InsertOne(ctx, bson.M{"name": "alice"})
+		require.NoError(t, err)
+
+		gc := gomongo.NewClient(db.Client)
+
+		result, err := gc.Execute(ctx, dbName, `db.users.validate()`)
+		require.NoError(t, err)
+		require.NotNil(t, result)
+		require.Equal(t, 1, len(result.Value))
+
+		row := valueToJSON(result.Value[0])
+		require.Contains(t, row, `"ok"`)
+		require.Contains(t, row, `"ns"`)
+	})
+}
+
+func TestLatencyStats(t *testing.T) {
+	// latencyStats uses $collStats aggregation which may not be available on all platforms
+	testutil.RunOnMongoDBOnly(t, func(t *testing.T, db testutil.TestDB) {
+		dbName := fmt.Sprintf("testdb_latency_%s", db.Name)
+		defer testutil.CleanupDatabase(t, db.Client, dbName)
+
+		ctx := context.Background()
+
+		_, err := db.Client.Database(dbName).Collection("users").InsertOne(ctx, bson.M{"name": "alice"})
+		require.NoError(t, err)
+
+		gc := gomongo.NewClient(db.Client)
+
+		result, err := gc.Execute(ctx, dbName, `db.users.latencyStats()`)
+		require.NoError(t, err)
+		require.NotNil(t, result)
+		require.GreaterOrEqual(t, len(result.Value), 1)
+
+		row := valueToJSON(result.Value[0])
+		require.Contains(t, row, `"latencyStats"`)
+	})
+}
diff --git a/client.go b/client.go
index 53b8f88..0476779 100644
--- a/client.go
+++ b/client.go
@@ -28,8 +28,15 @@ func NewClient(client *mongo.Client) *Client {
 // - OpShowDatabases, OpShowCollections, OpGetCollectionNames: each element is string
 // - OpInsertOne, OpInsertMany, OpUpdateOne, OpUpdateMany, OpReplaceOne, OpDeleteOne, OpDeleteMany: single bson.D with operation result
 // - OpCreateIndex: single element of string (index name)
+// - OpCreateIndexes: each element is string (index name)
 // - OpDropIndex, OpDropIndexes, OpCreateCollection, OpDropDatabase, OpRenameCollection: single bson.D with {ok: 1}
 // - OpDrop: single element of bool (true)
+// - OpDbStats, OpCollectionStats, OpServerStatus, OpServerBuildInfo, OpHostInfo, OpListCommands, OpValidate: single bson.D (command result)
+// - OpDbVersion: single element of string (version)
+// - OpDataSize, OpStorageSize, OpTotalIndexSize: single numeric value from collStats
+// - OpTotalSize: single int64 (storageSize + totalIndexSize)
+// - OpIsCapped: single element of bool
+// - OpLatencyStats: each element is bson.D (aggregation result)
 type Result struct {
 	Operation types.OperationType
 	Value     []any
diff --git a/error_test.go b/error_test.go
index 7ee64bc..2aa8834 100644
--- a/error_test.go
+++ b/error_test.go
@@ -26,25 +26,6 @@ func TestParseError(t *testing.T) {
 	})
 }
 
-func TestPlannedOperation(t *testing.T) {
-	testutil.RunOnAllDBs(t, func(t *testing.T, db testutil.TestDB) {
-		dbName := fmt.Sprintf("testdb_planned_op_%s", db.Name)
-		defer testutil.CleanupDatabase(t, db.Client, dbName)
-
-		gc := gomongo.NewClient(db.Client)
-		ctx := context.Background()
-
-		// createIndexes is a planned M3 operation - should return PlannedOperationError
-		// (createIndex is now implemented, so we use createIndexes instead)
-		_, err := gc.Execute(ctx, dbName, "db.users.createIndexes([{ key: { name: 1 } }])")
-		require.Error(t, err)
-
-		var plannedErr *gomongo.PlannedOperationError
-		require.ErrorAs(t, err, &plannedErr)
-		require.Equal(t, "createIndexes()", plannedErr.Operation)
-	})
-}
-
 func TestUnsupportedOperation(t *testing.T) {
 	testutil.RunOnAllDBs(t, func(t *testing.T, db testutil.TestDB) {
 		dbName := fmt.Sprintf("testdb_unsup_op_%s", db.Name)
@@ -81,20 +62,6 @@ func TestUnsupportedOptionError(t *testing.T) {
 	})
 }
 
-func TestMethodRegistryStats(t *testing.T) {
-	total := gomongo.MethodRegistryStats()
-
-	// Registry should contain 15 planned methods after M3 high-ROI implementations
-	// M3 high-ROI methods implemented (removed from registry):
-	// - createIndex, dropIndex, dropIndexes (index management: 3)
-	// - drop, createCollection, dropDatabase, renameCollection (collection management: 4)
-	// M3 remaining planned methods: 15 (originally 22)
-	require.Equal(t, 15, total, "expected 15 planned methods in registry (M3 remaining)")
-
-	// Log stats for visibility
-	t.Logf("Method Registry Stats: total=%d planned methods", total)
-}
-
 func TestFindOneUnsupportedOption(t *testing.T) {
 	testutil.RunOnAllDBs(t, func(t *testing.T, db testutil.TestDB) {
 		dbName := fmt.Sprintf("testdb_findone_unsup_opt_%s", db.Name)
diff --git a/errors.go b/errors.go
index 37f7f7a..29d4117 100644
--- a/errors.go
+++ b/errors.go
@@ -1,10 +1,6 @@
 package gomongo
 
-import (
-	"fmt"
-
-	"github.com/bytebase/gomongo/internal/translator"
-)
+import "fmt"
 
 // ParseError represents a syntax error during parsing.
 type ParseError struct {
@@ -51,13 +47,3 @@ type UnsupportedOptionError struct {
 func (e *UnsupportedOptionError) Error() string {
 	return fmt.Sprintf("unsupported option '%s' in %s", e.Option, e.Method)
 }
-
-// MethodRegistryStats returns statistics about the method registry.
-func MethodRegistryStats() int {
-	return translator.MethodRegistryStats()
-}
-
-// IsPlannedMethod checks if a method is planned for future implementation.
-func IsPlannedMethod(context, methodName string) bool {
-	return translator.IsPlannedMethod(context, methodName)
-}
diff --git a/internal/executor/admin.go b/internal/executor/admin.go
index 5d35853..d7661e3 100644
--- a/internal/executor/admin.go
+++ b/internal/executor/admin.go
@@ -277,3 +277,270 @@ func executeRenameCollection(ctx context.Context, client *mongo.Client, database
 		Value:     []any{response},
 	}, nil
 }
+
+// executeCreateIndexes executes a db.collection.createIndexes() command.
+func executeCreateIndexes(ctx context.Context, client *mongo.Client, database string, op *translator.Operation) (*Result, error) {
+	collection := client.Database(database).Collection(op.Collection)
+
+	var models []mongo.IndexModel
+	for _, spec := range op.IndexSpecs {
+		model := mongo.IndexModel{}
+		opts := options.Index()
+		hasOptions := false
+
+		for _, field := range spec {
+			switch field.Key {
+			case "key":
+				keys, ok := field.Value.(bson.D)
+				if !ok {
+					return nil, fmt.Errorf("createIndexes failed: 'key' must be a document")
+				}
+				model.Keys = keys
+			case "name":
+				name, ok := field.Value.(string)
+				if !ok {
+					return nil, fmt.Errorf("createIndexes failed: 'name' must be a string")
+				}
+				opts.SetName(name)
+				hasOptions = true
+			case "unique":
+				unique, ok := field.Value.(bool)
+				if !ok {
+					return nil, fmt.Errorf("createIndexes failed: 'unique' must be a bool")
+				}
+				if unique {
+					opts.SetUnique(true)
+					hasOptions = true
+				}
+			case "sparse":
+				sparse, ok := field.Value.(bool)
+				if !ok {
+					return nil, fmt.Errorf("createIndexes failed: 'sparse' must be a bool")
+				}
+				if sparse {
+					opts.SetSparse(true)
+					hasOptions = true
+				}
+			case "expireAfterSeconds":
+				val, ok := translator.ToInt32(field.Value)
+				if !ok {
+					return nil, fmt.Errorf("createIndexes failed: 'expireAfterSeconds' must be a number")
+				}
+				opts.SetExpireAfterSeconds(val)
+				hasOptions = true
+			}
+		}
+
+		if hasOptions {
+			model.Options = opts
+		}
+		models = append(models, model)
+	}
+
+	names, err := collection.Indexes().CreateMany(ctx, models)
+	if err != nil {
+		return nil, fmt.Errorf("createIndexes failed: %w", err)
+	}
+
+	values := make([]any, len(names))
+	for i, name := range names {
+		values[i] = name
+	}
+
+	return &Result{
+		Operation: types.OpCreateIndexes,
+		Value:     values,
+	}, nil
+}
+
+// runCommand executes a RunCommand and returns the result as bson.D.
+func runCommand(ctx context.Context, db *mongo.Database, command bson.D) (bson.D, error) {
+	var result bson.D
+	if err := db.RunCommand(ctx, command).Decode(&result); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// runCollStats executes the collStats command and returns the full result.
+func runCollStats(ctx context.Context, client *mongo.Client, database, collection string) (bson.D, error) {
+	return runCommand(ctx, client.Database(database), bson.D{{Key: "collStats", Value: collection}})
+}
+
+// findField finds a field value in a bson.D by key.
+func findField(doc bson.D, key string) any {
+	for _, elem := range doc {
+		if elem.Key == key {
+			return elem.Value
+		}
+	}
+	return nil
+}
+
+// executeDbStats executes a db.stats() command.
+func executeDbStats(ctx context.Context, client *mongo.Client, database string) (*Result, error) {
+	result, err := runCommand(ctx, client.Database(database), bson.D{{Key: "dbStats", Value: int32(1)}})
+	if err != nil {
+		return nil, fmt.Errorf("dbStats failed: %w", err)
+	}
+	return &Result{Operation: types.OpDbStats, Value: []any{result}}, nil
+}
+
+// executeCollectionStats executes a db.collection.stats() command.
+func executeCollectionStats(ctx context.Context, client *mongo.Client, database string, op *translator.Operation) (*Result, error) {
+	result, err := runCollStats(ctx, client, database, op.Collection)
+	if err != nil {
+		return nil, fmt.Errorf("collStats failed: %w", err)
+	}
+	return &Result{Operation: types.OpCollectionStats, Value: []any{result}}, nil
+}
+
+// executeServerStatus executes a db.serverStatus() command.
+func executeServerStatus(ctx context.Context, client *mongo.Client, database string) (*Result, error) {
+	result, err := runCommand(ctx, client.Database(database), bson.D{{Key: "serverStatus", Value: int32(1)}})
+	if err != nil {
+		return nil, fmt.Errorf("serverStatus failed: %w", err)
+	}
+	return &Result{Operation: types.OpServerStatus, Value: []any{result}}, nil
+}
+
+// executeServerBuildInfo executes a db.serverBuildInfo() command.
+func executeServerBuildInfo(ctx context.Context, client *mongo.Client, database string) (*Result, error) {
+	result, err := runCommand(ctx, client.Database(database), bson.D{{Key: "buildInfo", Value: int32(1)}})
+	if err != nil {
+		return nil, fmt.Errorf("serverBuildInfo failed: %w", err)
+	}
+	return &Result{Operation: types.OpServerBuildInfo, Value: []any{result}}, nil
+}
+
+// executeDbVersion executes a db.version() command.
+func executeDbVersion(ctx context.Context, client *mongo.Client, database string) (*Result, error) {
+	result, err := runCommand(ctx, client.Database(database), bson.D{{Key: "buildInfo", Value: int32(1)}})
+	if err != nil {
+		return nil, fmt.Errorf("version failed: %w", err)
+	}
+	version, ok := findField(result, "version").(string)
+	if !ok {
+		return nil, fmt.Errorf("version failed: version field missing or not a string in buildInfo result")
+	}
+	return &Result{Operation: types.OpDbVersion, Value: []any{version}}, nil
+}
+
+// executeHostInfo executes a db.hostInfo() command.
+func executeHostInfo(ctx context.Context, client *mongo.Client, database string) (*Result, error) {
+	result, err := runCommand(ctx, client.Database(database), bson.D{{Key: "hostInfo", Value: int32(1)}})
+	if err != nil {
+		return nil, fmt.Errorf("hostInfo failed: %w", err)
+	}
+	return &Result{Operation: types.OpHostInfo, Value: []any{result}}, nil
+}
+
+// executeListCommands executes a db.listCommands() command.
+func executeListCommands(ctx context.Context, client *mongo.Client, database string) (*Result, error) {
+	result, err := runCommand(ctx, client.Database(database), bson.D{{Key: "listCommands", Value: int32(1)}})
+	if err != nil {
+		return nil, fmt.Errorf("listCommands failed: %w", err)
+	}
+	return &Result{Operation: types.OpListCommands, Value: []any{result}}, nil
+}
+
+// executeDataSize executes a db.collection.dataSize() command.
+func executeDataSize(ctx context.Context, client *mongo.Client, database string, op *translator.Operation) (*Result, error) {
+	stats, err := runCollStats(ctx, client, database, op.Collection)
+	if err != nil {
+		return nil, fmt.Errorf("dataSize failed: %w", err)
+	}
+	size := findField(stats, "size")
+	if size == nil {
+		size = int32(0)
+	}
+	return &Result{Operation: types.OpDataSize, Value: []any{size}}, nil
+}
+
+// executeStorageSize executes a db.collection.storageSize() command.
+func executeStorageSize(ctx context.Context, client *mongo.Client, database string, op *translator.Operation) (*Result, error) {
+	stats, err := runCollStats(ctx, client, database, op.Collection)
+	if err != nil {
+		return nil, fmt.Errorf("storageSize failed: %w", err)
+	}
+	storageSize := findField(stats, "storageSize")
+	if storageSize == nil {
+		storageSize = int32(0)
+	}
+	return &Result{Operation: types.OpStorageSize, Value: []any{storageSize}}, nil
+}
+
+// executeTotalIndexSize executes a db.collection.totalIndexSize() command.
+func executeTotalIndexSize(ctx context.Context, client *mongo.Client, database string, op *translator.Operation) (*Result, error) {
+	stats, err := runCollStats(ctx, client, database, op.Collection)
+	if err != nil {
+		return nil, fmt.Errorf("totalIndexSize failed: %w", err)
+	}
+	totalIndexSize := findField(stats, "totalIndexSize")
+	if totalIndexSize == nil {
+		totalIndexSize = int32(0)
+	}
+	return &Result{Operation: types.OpTotalIndexSize, Value: []any{totalIndexSize}}, nil
+}
+
+// executeTotalSize executes a db.collection.totalSize() command.
+func executeTotalSize(ctx context.Context, client *mongo.Client, database string, op *translator.Operation) (*Result, error) {
+	stats, err := runCollStats(ctx, client, database, op.Collection)
+	if err != nil {
+		return nil, fmt.Errorf("totalSize failed: %w", err)
+	}
+	storage, _ := translator.ToInt64(findField(stats, "storageSize"))
+	indexSize, _ := translator.ToInt64(findField(stats, "totalIndexSize"))
+	return &Result{Operation: types.OpTotalSize, Value: []any{storage + indexSize}}, nil
+}
+
+// executeIsCapped executes a db.collection.isCapped() command.
+func executeIsCapped(ctx context.Context, client *mongo.Client, database string, op *translator.Operation) (*Result, error) {
+	stats, err := runCollStats(ctx, client, database, op.Collection)
+	if err != nil {
+		return nil, fmt.Errorf("isCapped failed: %w", err)
+	}
+	capped, _ := findField(stats, "capped").(bool)
+	return &Result{Operation: types.OpIsCapped, Value: []any{capped}}, nil
+}
+
+// executeValidate executes a db.collection.validate() command.
+func executeValidate(ctx context.Context, client *mongo.Client, database string, op *translator.Operation) (*Result, error) {
+	result, err := runCommand(ctx, client.Database(database), bson.D{{Key: "validate", Value: op.Collection}})
+	if err != nil {
+		return nil, fmt.Errorf("validate failed: %w", err)
+	}
+	return &Result{Operation: types.OpValidate, Value: []any{result}}, nil
+}
+
+// executeLatencyStats executes a db.collection.latencyStats() command via $collStats aggregation.
+func executeLatencyStats(ctx context.Context, client *mongo.Client, database string, op *translator.Operation) (*Result, error) {
+	collection := client.Database(database).Collection(op.Collection)
+
+	pipeline := bson.A{
+		bson.D{{Key: "$collStats", Value: bson.D{
+			{Key: "latencyStats", Value: bson.D{{Key: "histograms", Value: true}}},
+		}}},
+	}
+
+	cursor, err := collection.Aggregate(ctx, pipeline)
+	if err != nil {
+		return nil, fmt.Errorf("latencyStats failed: %w", err)
+	}
+	defer func() { _ = cursor.Close(ctx) }()
+
+	var values []any
+	for cursor.Next(ctx) {
+		var doc bson.D
+		if err := cursor.Decode(&doc); err != nil {
+			return nil, fmt.Errorf("decode failed: %w", err)
+		}
+		values = append(values, doc)
+	}
+
+	if err := cursor.Err(); err != nil {
+		return nil, fmt.Errorf("cursor error: %w", err)
+	}
+
+	return &Result{Operation: types.OpLatencyStats, Value: values}, nil
+}
diff --git a/internal/executor/executor.go b/internal/executor/executor.go
index b24cfbd..f203972 100644
--- a/internal/executor/executor.go
+++ b/internal/executor/executor.go
@@ -75,6 +75,38 @@ func Execute(ctx context.Context, client *mongo.Client, database string, op *tra
 		return executeDropDatabase(ctx, client, database)
 	case types.OpRenameCollection:
 		return executeRenameCollection(ctx, client, database, op)
+	case types.OpCreateIndexes:
+		return executeCreateIndexes(ctx, client, database, op)
+	// Database Information
+	case types.OpDbStats:
+		return executeDbStats(ctx, client, database)
+	case types.OpCollectionStats:
+		return executeCollectionStats(ctx, client, database, op)
+	case types.OpServerStatus:
+		return executeServerStatus(ctx, client, database)
+	case types.OpServerBuildInfo:
+		return executeServerBuildInfo(ctx, client, database)
+	case types.OpDbVersion:
+		return executeDbVersion(ctx, client, database)
+	case types.OpHostInfo:
+		return executeHostInfo(ctx, client, database)
+	case types.OpListCommands:
+		return executeListCommands(ctx, client, database)
+	// Collection Information
+	case types.OpDataSize:
+		return executeDataSize(ctx, client, database, op)
+	case types.OpStorageSize:
+		return executeStorageSize(ctx, client, database, op)
+	case types.OpTotalIndexSize:
+		return executeTotalIndexSize(ctx, client, database, op)
+	case types.OpTotalSize:
+		return executeTotalSize(ctx, client, database, op)
+	case types.OpIsCapped:
+		return executeIsCapped(ctx, client, database, op)
+	case types.OpValidate:
+		return executeValidate(ctx, client, database, op)
+	case types.OpLatencyStats:
+		return executeLatencyStats(ctx, client, database, op)
 	default:
 		return nil, fmt.Errorf("unsupported operation: %s", statement)
 	}
diff --git a/internal/translator/collection.go b/internal/translator/collection.go
index a69738d..0857ee1 100644
--- a/internal/translator/collection.go
+++ b/internal/translator/collection.go
@@ -2146,3 +2146,82 @@ func (v *visitor) extractRenameCollectionArgs(ctx mongodb.IRenameCollectionMetho
 		return
 	}
 }
+
+// extractCreateIndexesArgs extracts arguments from CreateIndexesMethodContext.
+func (v *visitor) extractCreateIndexesArgs(ctx mongodb.ICreateIndexesMethodContext) {
+	method, ok := ctx.(*mongodb.CreateIndexesMethodContext)
+	if !ok {
+		return
+	}
+
+	args := method.Arguments()
+	if args == nil {
+		v.err = fmt.Errorf("createIndexes() requires an array of index specifications")
+		return
+	}
+
+	argsCtx, ok := args.(*mongodb.ArgumentsContext)
+	if !ok {
+		v.err = fmt.Errorf("createIndexes() requires an array of index specifications")
+		return
+	}
+
+	allArgs := argsCtx.AllArgument()
+	if len(allArgs) == 0 {
+		v.err = fmt.Errorf("createIndexes() requires an array of index specifications")
+		return
+	}
+
+	// First argument: array of index spec documents (required)
+	firstArg, ok := allArgs[0].(*mongodb.ArgumentContext)
+	if !ok {
+		v.err = fmt.Errorf("createIndexes() requires an array of index specifications")
+		return
+	}
+
+	valueCtx := firstArg.Value()
+	if valueCtx == nil {
+		v.err = fmt.Errorf("createIndexes() requires an array of index specifications")
+		return
+	}
+
+	arrayValue, ok := valueCtx.(*mongodb.ArrayValueContext)
+	if !ok {
+		v.err = fmt.Errorf("createIndexes() requires an array argument")
+		return
+	}
+
+	arr, err := convertArray(arrayValue.Array())
+	if err != nil {
+		v.err = fmt.Errorf("invalid index specifications: %w", err)
+		return
+	}
+
+	var specs []bson.D
+	for i, elem := range arr {
+		doc, ok := elem.(bson.D)
+		if !ok {
+			v.err = fmt.Errorf("createIndexes() element %d must be a document", i)
+			return
+		}
+		// Validate that each spec has a "key" field that is a non-empty document
+		var keyDoc bson.D
+		for _, field := range doc {
+			if field.Key == "key" {
+				keyDoc, _ = field.Value.(bson.D)
+				break
+			}
+		}
+		if len(keyDoc) == 0 {
+			v.err = fmt.Errorf("createIndexes() element %d must have a non-empty 'key' document", i)
+			return
+		}
+		specs = append(specs, doc)
+	}
+	v.operation.IndexSpecs = specs
+
+	if len(allArgs) > 1 {
+		v.err = fmt.Errorf("createIndexes() takes exactly 1 argument")
+		return
+	}
+}
diff --git a/internal/translator/method_registry.go b/internal/translator/method_registry.go
deleted file mode 100644
index 5461801..0000000
--- a/internal/translator/method_registry.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package translator
-
-// methodStatus represents the support status of a MongoDB method.
-type methodStatus int
-
-const (
-	// statusPlanned means the method is planned for implementation (M2/M3).
-	// When encountered, the caller should fallback to mongosh.
-	statusPlanned methodStatus = iota
-)
-
-// methodInfo contains metadata about a MongoDB method.
-type methodInfo struct {
-	status methodStatus
-}
-
-// methodRegistry contains only methods we plan to implement (M2, M3).
-// If a method is NOT in this registry, it's unsupported (throw error, no fallback).
-// If a method IS in this registry, it's planned (fallback to mongosh).
-// Note: Methods that are now implemented have been removed from this registry.
-var methodRegistry = map[string]methodInfo{
-	// ============================================================
-	// MILESTONE 3: Administrative Operations (remaining planned)
-	// ============================================================
-
-	// Index Management (1 remaining - createIndexes has lower ROI)
-	"collection:createIndexes": {status: statusPlanned},
-
-	// Database Information (7) - lower ROI, keep as planned
-	"database:stats":           {status: statusPlanned},
-	"collection:stats":         {status: statusPlanned},
-	"database:serverStatus":    {status: statusPlanned},
-	"database:serverBuildInfo": {status: statusPlanned},
-	"database:version":         {status: statusPlanned},
-	"database:hostInfo":        {status: statusPlanned},
-	"database:listCommands":    {status: statusPlanned},
-
-	// Collection Information (7) - lower ROI, keep as planned
-	"collection:dataSize":       {status: statusPlanned},
-	"collection:storageSize":    {status: statusPlanned},
-	"collection:totalIndexSize": {status: statusPlanned},
-	"collection:totalSize":      {status: statusPlanned},
-	"collection:isCapped":       {status: statusPlanned},
-	"collection:validate":       {status: statusPlanned},
-	"collection:latencyStats":   {status: statusPlanned},
-}
-
-// IsPlannedMethod checks if a method is in the registry (planned for implementation).
-// Returns true if the method should fallback to mongosh.
-// Returns false if the method is unsupported (throw error).
-func IsPlannedMethod(context, methodName string) bool {
-	key := context + ":" + methodName
-	_, ok := methodRegistry[key]
-	return ok
-}
-
-// MethodRegistryStats returns statistics about the method registry.
-func MethodRegistryStats() int {
-	return len(methodRegistry)
-}
diff --git a/internal/translator/types.go b/internal/translator/types.go
index a0206b4..c837a24 100644
--- a/internal/translator/types.go
+++ b/internal/translator/types.go
@@ -54,6 +54,7 @@ type Operation struct {
 	IndexUnique *bool    // createIndex unique option
 	IndexSparse *bool    // createIndex sparse option
 	IndexTTL    *int32   // createIndex expireAfterSeconds option
+	IndexSpecs  []bson.D // createIndexes array of index specifications
 
 	// createCollection options
 	Capped *bool // createCollection capped option
diff --git a/internal/translator/visitor.go b/internal/translator/visitor.go
index 4795e28..06bcca6 100644
--- a/internal/translator/visitor.go
+++ b/internal/translator/visitor.go
@@ -66,6 +66,19 @@ func (v *visitor) visitDbStatement(ctx mongodb.IDbStatementContext) {
 		v.extractCreateCollectionArgs(c)
 	case *mongodb.DropDatabaseContext:
 		v.operation.OpType = types.OpDropDatabase
+	// Database information commands
+	case *mongodb.DbStatsContext:
+		v.operation.OpType = types.OpDbStats
+	case *mongodb.ServerStatusContext:
+		v.operation.OpType = types.OpServerStatus
+	case *mongodb.ServerBuildInfoContext:
+		v.operation.OpType = types.OpServerBuildInfo
+	case *mongodb.DbVersionContext:
+		v.operation.OpType = types.OpDbVersion
+	case *mongodb.HostInfoContext:
+		v.operation.OpType = types.OpHostInfo
+	case *mongodb.ListCommandsContext:
+		v.operation.OpType = types.OpListCommands
 	}
 }
 
@@ -205,7 +218,8 @@ func (v *visitor) visitCollectionMethodCall(ctx mongodb.ICollectionMethodCallCon
 		v.operation.OpType = types.OpCreateIndex
 		v.extractCreateIndexArgs(mc.CreateIndexMethod())
 	case mc.CreateIndexesMethod() != nil:
-		v.handleUnsupportedMethod("collection", "createIndexes")
+		v.operation.OpType = types.OpCreateIndexes
+		v.extractCreateIndexesArgs(mc.CreateIndexesMethod())
 	case mc.DropIndexMethod() != nil:
 		v.operation.OpType = types.OpDropIndex
 		v.extractDropIndexArgs(mc.DropIndexMethod())
@@ -220,23 +234,23 @@ func (v *visitor) visitCollectionMethodCall(ctx mongodb.ICollectionMethodCallCon
 		v.operation.OpType = types.OpRenameCollection
 		v.extractRenameCollectionArgs(mc.RenameCollectionMethod())
 
-	// Planned stats operations
+	// Collection information commands
 	case mc.StatsMethod() != nil:
-		v.handleUnsupportedMethod("collection", "stats")
+		v.operation.OpType = types.OpCollectionStats
 	case mc.StorageSizeMethod() != nil:
-		v.handleUnsupportedMethod("collection", "storageSize")
+		v.operation.OpType = types.OpStorageSize
 	case mc.TotalIndexSizeMethod() != nil:
-		v.handleUnsupportedMethod("collection", "totalIndexSize")
+		v.operation.OpType = types.OpTotalIndexSize
 	case mc.TotalSizeMethod() != nil:
-		v.handleUnsupportedMethod("collection", "totalSize")
+		v.operation.OpType = types.OpTotalSize
 	case mc.DataSizeMethod() != nil:
-		v.handleUnsupportedMethod("collection", "dataSize")
+		v.operation.OpType = types.OpDataSize
 	case mc.IsCappedMethod() != nil:
-		v.handleUnsupportedMethod("collection", "isCapped")
+		v.operation.OpType = types.OpIsCapped
 	case mc.ValidateMethod() != nil:
-		v.handleUnsupportedMethod("collection", "validate")
+		v.operation.OpType = types.OpValidate
 	case mc.LatencyStatsMethod() != nil:
-		v.handleUnsupportedMethod("collection", "latencyStats")
+		v.operation.OpType = types.OpLatencyStats
 
 	default:
 		methodName := extractMethodNameFromText(mc.GetText())
@@ -283,16 +297,8 @@ func extractMethodNameFromText(text string) string {
 	return text
 }
 
-// handleUnsupportedMethod checks the method registry and returns appropriate errors.
-// If method is in registry (planned for M2/M3) -> PlannedOperationError (fallback to mongosh)
-// If method is NOT in registry -> UnsupportedOperationError (no fallback)
-func (v *visitor) handleUnsupportedMethod(context, methodName string) {
-	if IsPlannedMethod(context, methodName) {
-		v.err = &PlannedOperationError{
-			Operation: methodName + "()",
-		}
-		return
-	}
+// handleUnsupportedMethod returns an UnsupportedOperationError for unknown methods.
+func (v *visitor) handleUnsupportedMethod(_, methodName string) {
 	v.err = &UnsupportedOperationError{
 		Operation: methodName + "()",
 	}
diff --git a/method_registry.go b/method_registry.go
deleted file mode 100644
index 2382da1..0000000
--- a/method_registry.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package gomongo
-
-// This file is kept for backward compatibility.
-// The method registry is now maintained in internal/translator/method_registry.go.
-// Public access to registry functions is provided via errors.go.
diff --git a/types/operation_type.go b/types/operation_type.go
index 39000d5..fd6fe8a 100644
--- a/types/operation_type.go
+++ b/types/operation_type.go
@@ -30,10 +30,27 @@ const (
 	OpFindOneAndDelete
 	// Administrative Operations
 	OpCreateIndex
+	OpCreateIndexes
 	OpDropIndex
 	OpDropIndexes
 	OpDrop
 	OpCreateCollection
 	OpDropDatabase
 	OpRenameCollection
+	// Database Information
+	OpDbStats
+	OpCollectionStats
+	OpServerStatus
+	OpServerBuildInfo
+	OpDbVersion
+	OpHostInfo
+	OpListCommands
+	// Collection Information
+	OpDataSize
+	OpStorageSize
+	OpTotalIndexSize
+	OpTotalSize
+	OpIsCapped
+	OpValidate
+	OpLatencyStats
 )