diff --git a/README.md b/README.md index 13f971f2..3a3ec810 100644 --- a/README.md +++ b/README.md @@ -121,6 +121,75 @@ If the build fails with a `vendorHash` mismatch, update `nix/pgschema.nix` with - [Docs](https://www.pgschema.com) - [GitHub issues](https://github.com/pgplex/pgschema/issues) +## Configuration file + +Instead of passing flags every time, you can create a `pgschema.toml` config file: + +```toml +host = "localhost" +port = 5432 +db = "myapp" +user = "postgres" +schema = "public" +file = "schema.sql" +``` + +Then simply run: + +```bash +pgschema plan +pgschema apply +``` + +### Named environments + +Use `[env.*]` blocks to define per-environment overrides. Values inherit from the base level: + +```toml +schema = "public" +file = "schema.sql" + +[env.dev] +host = "localhost" +db = "myapp_dev" +user = "postgres" + +[env.prod] +host = "prod-db.internal" +db = "myapp_prod" +user = "app_user" +lock-timeout = "30s" +auto-approve = false +``` + +```bash +pgschema plan --env dev +pgschema apply --env prod +``` + +### Multi-tenant schema loop + +For multi-tenant setups where each tenant has its own schema, define a `[schemas]` block with a SQL query that returns schema names. `plan` and `apply` will iterate over all discovered schemas automatically: + +```toml +host = "localhost" +db = "myapp" +user = "postgres" +file = "tenant.sql" + +[schemas] +query = "SELECT schema_name FROM information_schema.schemata WHERE schema_name LIKE 'tenant_%'" +``` + +```bash +pgschema plan # plans migration for each tenant schema +pgschema apply # applies migration to each tenant schema +``` + +### Priority + +CLI flags always take precedence: **CLI flags > env vars > config env > config base > defaults**. 
+ ## Quick example ### Step 1: Dump schema diff --git a/cmd/apply/apply.go b/cmd/apply/apply.go index a699cb9d..65d01a6c 100644 --- a/cmd/apply/apply.go +++ b/cmd/apply/apply.go @@ -8,6 +8,7 @@ import ( "os" "strings" + "github.com/pgplex/pgschema/cmd/config" planCmd "github.com/pgplex/pgschema/cmd/plan" "github.com/pgplex/pgschema/cmd/util" "github.com/pgplex/pgschema/internal/fingerprint" @@ -49,7 +50,10 @@ var ApplyCmd = &cobra.Command{ Long: "Apply a migration plan to update a database schema. Either provide a desired state file (--file) to generate and apply a plan, or provide a pre-generated plan file (--plan) to execute directly.", RunE: RunApply, SilenceUsage: true, - PreRunE: util.PreRunEWithEnvVarsAndConnectionAndApp(&applyDB, &applyUser, &applyHost, &applyPort, &applyApplicationName), + PreRunE: func(cmd *cobra.Command, args []string) error { + applyConfigToApply(cmd) + return util.PreRunEWithEnvVarsAndConnectionAndApp(&applyDB, &applyUser, &applyHost, &applyPort, &applyApplicationName)(cmd, args) + }, } func init() { @@ -96,8 +100,8 @@ type ApplyConfig struct { User string Password string Schema string - File string // Desired state file (optional, used with embeddedPG) - Plan *plan.Plan // Pre-generated plan (optional, alternative to File) + File string // Desired state file (optional, used with embeddedPG) + Plan *plan.SchemaPlan // Pre-generated plan (optional, alternative to File) AutoApprove bool NoColor bool Quiet bool // Suppress plan display and progress messages (useful for tests) @@ -117,7 +121,7 @@ type ApplyConfig struct { // If config.File is provided, provider is used to generate the plan. // The caller is responsible for managing the provider lifecycle (creation and cleanup). 
func ApplyMigration(config *ApplyConfig, provider postgres.DesiredStateProvider) error { - var migrationPlan *plan.Plan + var migrationPlan *plan.SchemaPlan var err error // Either use provided plan or generate from file @@ -144,7 +148,7 @@ func ApplyMigration(config *ApplyConfig, provider postgres.DesiredStateProvider) } // Generate plan using shared logic - migrationPlan, err = planCmd.GeneratePlan(planConfig, provider) + migrationPlan, err = planCmd.GenerateSchemaPlan(planConfig, provider) if err != nil { return err } @@ -266,6 +270,12 @@ func ApplyMigration(config *ApplyConfig, provider postgres.DesiredStateProvider) // RunApply executes the apply command logic. Exported for testing. func RunApply(cmd *cobra.Command, args []string) error { + cfg := config.Get() + if cfg != nil && cfg.Schemas != nil && cfg.Schemas.Query != "" && + !cmd.Flags().Changed("schema") && !cmd.Flags().Changed("plan") { + return runApplyMultiSchema(cmd, cfg) + } + // Validate that either --file or --plan is provided if applyFile == "" && applyPlan == "" { return fmt.Errorf("either --file or --plan must be specified") @@ -292,14 +302,30 @@ func RunApply(cmd *cobra.Command, args []string) error { return err } - // Build configuration - config := &ApplyConfig{ + // If using --plan flag, load plan from JSON file + if applyPlan != "" { + planData, err := os.ReadFile(applyPlan) + if err != nil { + return fmt.Errorf("failed to read plan file: %w", err) + } + + loaded, err := plan.FromJSON(planData) + if err != nil { + return fmt.Errorf("failed to load plan: %w", err) + } + + return applyPlanFile(loaded, finalPassword, finalSSLMode) + } + + // Using --file flag, will need desired state provider + applyCfg := &ApplyConfig{ Host: applyHost, Port: applyPort, DB: applyDB, User: applyUser, Password: finalPassword, Schema: applySchema, + File: applyFile, AutoApprove: applyAutoApprove, NoColor: applyNoColor, LockTimeout: applyLockTimeout, @@ -307,97 +333,121 @@ func RunApply(cmd *cobra.Command, args 
[]string) error { SSLMode: finalSSLMode, } - var provider postgres.DesiredStateProvider - var err error + // Apply environment variables to plan database flags (only needed for File Mode) + util.ApplyPlanDBEnvVars(cmd, &applyPlanDBHost, &applyPlanDBDatabase, &applyPlanDBUser, &applyPlanDBPassword, &applyPlanDBPort, &applyPlanDBSSLMode) - // If using --plan flag, load plan from JSON file - if applyPlan != "" { - planData, err := os.ReadFile(applyPlan) - if err != nil { - return fmt.Errorf("failed to read plan file: %w", err) - } + // Validate plan database flags if plan-host is provided + if err := util.ValidatePlanDBFlags(applyPlanDBHost, applyPlanDBDatabase, applyPlanDBUser); err != nil { + return err + } - migrationPlan, err := plan.FromJSON(planData) - if err != nil { - return fmt.Errorf("failed to load plan: %w", err) + // Validate plan database sslmode if plan-host is provided + if applyPlanDBHost != "" { + if err := util.ValidateSSLMode(applyPlanDBSSLMode); err != nil { + return fmt.Errorf("plan database: %w", err) } + } - // Validate that the plan was generated by the same pgschema version - currentVersion := version.App() - if migrationPlan.PgschemaVersion != currentVersion { - return fmt.Errorf("plan version mismatch: plan was generated by pgschema version %s, but current version is %s. Please regenerate the plan with the current version", migrationPlan.PgschemaVersion, currentVersion) + // Derive final plan database password + finalPlanPassword := applyPlanDBPassword + if finalPlanPassword == "" { + if envPassword := os.Getenv("PGSCHEMA_PLAN_PASSWORD"); envPassword != "" { + finalPlanPassword = envPassword } + } - // Validate that the plan format version is supported (forward compatibility) - supportedPlanVersion := version.PlanFormat() - if migrationPlan.Version != supportedPlanVersion { - return fmt.Errorf("unsupported plan format version: plan uses format version %s, but this pgschema version only supports format version %s. 
Please upgrade pgschema to apply this plan", migrationPlan.Version, supportedPlanVersion) - } + // Create desired state provider (embedded postgres or external database) + planConfig := &planCmd.PlanConfig{ + Host: applyHost, + Port: applyPort, + DB: applyDB, + User: applyUser, + Password: finalPassword, + Schema: applySchema, + File: applyFile, + ApplicationName: applyApplicationName, + SSLMode: finalSSLMode, + // Plan database configuration + PlanDBHost: applyPlanDBHost, + PlanDBPort: applyPlanDBPort, + PlanDBDatabase: applyPlanDBDatabase, + PlanDBUser: applyPlanDBUser, + PlanDBPassword: finalPlanPassword, + PlanDBSSLMode: applyPlanDBSSLMode, + } + provider, err := planCmd.CreateDesiredStateProvider(planConfig) + if err != nil { + return err + } + defer provider.Stop() - config.Plan = migrationPlan - } else { - // Using --file flag, will need desired state provider - config.File = applyFile + // Propagate plan DB fields so ApplyMigration -> GeneratePlan knows the provider type + applyCfg.PlanDBHost = applyPlanDBHost + applyCfg.PlanDBSSLMode = applyPlanDBSSLMode - // Apply environment variables to plan database flags (only needed for File Mode) - util.ApplyPlanDBEnvVars(cmd, &applyPlanDBHost, &applyPlanDBDatabase, &applyPlanDBUser, &applyPlanDBPassword, &applyPlanDBPort, &applyPlanDBSSLMode) + // Apply the migration + return ApplyMigration(applyCfg, provider) +} - // Validate plan database flags if plan-host is provided - if err := util.ValidatePlanDBFlags(applyPlanDBHost, applyPlanDBDatabase, applyPlanDBUser); err != nil { - return err - } +// applyPlanFile validates and applies a plan loaded from a JSON file. +// It iterates over schemas in sorted order and applies each schema's plan individually. 
+func applyPlanFile(p *plan.Plan, finalPassword, finalSSLMode string) error { + // Validate that the plan was generated by the same pgschema version + currentVersion := version.App() + if p.PgschemaVersion != currentVersion { + return fmt.Errorf("plan version mismatch: plan was generated by pgschema version %s, but current version is %s. Please regenerate the plan with the current version", p.PgschemaVersion, currentVersion) + } - // Validate plan database sslmode if plan-host is provided - if applyPlanDBHost != "" { - if err := util.ValidateSSLMode(applyPlanDBSSLMode); err != nil { - return fmt.Errorf("plan database: %w", err) - } - } + // Validate that the plan format version is supported (forward compatibility) + supportedPlanVersion := version.PlanFormat() + if p.Version != supportedPlanVersion { + return fmt.Errorf("unsupported plan format version: plan uses format version %s, but this pgschema version only supports format version %s. Please upgrade pgschema to apply this plan", p.Version, supportedPlanVersion) + } - // Derive final plan database password - finalPlanPassword := applyPlanDBPassword - if finalPlanPassword == "" { - if envPassword := os.Getenv("PGSCHEMA_PLAN_PASSWORD"); envPassword != "" { - finalPlanPassword = envPassword - } + var hasErrors bool + for _, schemaName := range p.SortedSchemaNames() { + schemaPlan := p.Schemas[schemaName] + + if len(p.Schemas) > 1 { + fmt.Fprintf(os.Stderr, "\n── Schema: %s ──────────────────────\n", schemaName) } - // Create desired state provider (embedded postgres or external database) - planConfig := &planCmd.PlanConfig{ + applyCfg := &ApplyConfig{ Host: applyHost, Port: applyPort, DB: applyDB, User: applyUser, Password: finalPassword, - Schema: applySchema, - File: applyFile, + Schema: schemaName, + Plan: schemaPlan, + AutoApprove: applyAutoApprove, + NoColor: applyNoColor, + LockTimeout: applyLockTimeout, ApplicationName: applyApplicationName, SSLMode: finalSSLMode, - // Plan database configuration - 
PlanDBHost: applyPlanDBHost, - PlanDBPort: applyPlanDBPort, - PlanDBDatabase: applyPlanDBDatabase, - PlanDBUser: applyPlanDBUser, - PlanDBPassword: finalPlanPassword, - PlanDBSSLMode: applyPlanDBSSLMode, } - provider, err = planCmd.CreateDesiredStateProvider(planConfig) - if err != nil { - return err - } - defer provider.Stop() - // Propagate plan DB fields so ApplyMigration -> GeneratePlan knows the provider type - config.PlanDBHost = applyPlanDBHost - config.PlanDBSSLMode = applyPlanDBSSLMode + if err := ApplyMigration(applyCfg, nil); err != nil { + if len(p.Schemas) > 1 { + fmt.Fprintf(os.Stderr, "Error for schema %s: %v\n", schemaName, err) + hasErrors = true + } else { + return err + } + } } - // Apply the migration - return ApplyMigration(config, provider) + if len(p.Schemas) > 1 { + fmt.Fprintf(os.Stderr, "\nSummary: %d schemas processed\n", len(p.Schemas)) + } + if hasErrors { + return fmt.Errorf("one or more schemas had errors") + } + return nil } // validateSchemaFingerprint validates that the current database schema matches the expected fingerprint -func validateSchemaFingerprint(migrationPlan *plan.Plan, host string, port int, db, user, password, sslmode, schema, applicationName string, ignoreConfig *ir.IgnoreConfig) error { +func validateSchemaFingerprint(migrationPlan *plan.SchemaPlan, host string, port int, db, user, password, sslmode, schema, applicationName string, ignoreConfig *ir.IgnoreConfig) error { // Get current state from target database with ignore config // This ensures ignored objects are excluded from fingerprint calculation currentStateIR, err := util.GetIRFromDatabase(host, port, db, user, password, sslmode, schema, applicationName, ignoreConfig) @@ -489,6 +539,173 @@ func executeGroupIndividually(ctx context.Context, conn *sql.DB, group plan.Exec return nil } +func runApplyMultiSchema(cmd *cobra.Command, cfg *config.ResolvedConfig) error { + // Apply plan DB environment variables (same as single-schema path) + 
util.ApplyPlanDBEnvVars(cmd, &applyPlanDBHost, &applyPlanDBDatabase, &applyPlanDBUser, &applyPlanDBPassword, &applyPlanDBPort, &applyPlanDBSSLMode) + + // Validate plan database flags if plan-host is provided + if err := util.ValidatePlanDBFlags(applyPlanDBHost, applyPlanDBDatabase, applyPlanDBUser); err != nil { + return err + } + + finalPassword := applyPassword + if finalPassword == "" { + if envPassword := os.Getenv("PGPASSWORD"); envPassword != "" { + finalPassword = envPassword + } + } + finalSSLMode := applySSLMode + if cmd == nil || !cmd.Flags().Changed("sslmode") { + if envSSLMode := os.Getenv("PGSSLMODE"); envSSLMode != "" { + finalSSLMode = envSSLMode + } + } + + // Derive final plan database password + finalPlanPassword := applyPlanDBPassword + if finalPlanPassword == "" { + if envPassword := os.Getenv("PGSCHEMA_PLAN_PASSWORD"); envPassword != "" { + finalPlanPassword = envPassword + } + } + + schemas, err := config.DiscoverSchemas(applyHost, applyPort, applyDB, applyUser, finalPassword, finalSSLMode, cfg.Schemas.Query) + if err != nil { + return err + } + + if len(schemas) == 0 { + fmt.Fprintln(os.Stderr, "Warning: schema discovery query returned no schemas.") + return nil + } + + if applyFile == "" { + return fmt.Errorf("--file is required for multi-schema apply") + } + + var hasErrors bool + for _, schemaName := range schemas { + fmt.Fprintf(os.Stderr, "\n── Schema: %s ──────────────────────\n", schemaName) + + perSchemaConfig := &ApplyConfig{ + Host: applyHost, + Port: applyPort, + DB: applyDB, + User: applyUser, + Password: finalPassword, + Schema: schemaName, + File: applyFile, + AutoApprove: applyAutoApprove, + NoColor: applyNoColor, + LockTimeout: applyLockTimeout, + ApplicationName: applyApplicationName, + SSLMode: finalSSLMode, + PlanDBHost: applyPlanDBHost, + PlanDBSSLMode: applyPlanDBSSLMode, + } + + planConfig := &planCmd.PlanConfig{ + Host: applyHost, + Port: applyPort, + DB: applyDB, + User: applyUser, + Password: finalPassword, + Schema: 
schemaName, + File: applyFile, + ApplicationName: applyApplicationName, + SSLMode: finalSSLMode, + PlanDBHost: applyPlanDBHost, + PlanDBPort: applyPlanDBPort, + PlanDBDatabase: applyPlanDBDatabase, + PlanDBUser: applyPlanDBUser, + PlanDBPassword: finalPlanPassword, + PlanDBSSLMode: applyPlanDBSSLMode, + } + + provider, err := planCmd.CreateDesiredStateProvider(planConfig) + if err != nil { + fmt.Fprintf(os.Stderr, "Error for schema %s: %v\n", schemaName, err) + hasErrors = true + continue + } + + err = ApplyMigration(perSchemaConfig, provider) + provider.Stop() + if err != nil { + fmt.Fprintf(os.Stderr, "Error for schema %s: %v\n", schemaName, err) + hasErrors = true + } + } + + fmt.Fprintf(os.Stderr, "\nSummary: %d schemas processed\n", len(schemas)) + if hasErrors { + return fmt.Errorf("one or more schemas had errors") + } + return nil +} + +func applyConfigToApply(cmd *cobra.Command) { + cfg := config.Get() + if cfg == nil { + return + } + + if !cmd.Flags().Changed("host") && cfg.Host != "" { + applyHost = cfg.Host + } + if !cmd.Flags().Changed("port") && cfg.Port != 0 { + applyPort = cfg.Port + } + if !cmd.Flags().Changed("db") && cfg.DB != "" { + applyDB = cfg.DB + } + if !cmd.Flags().Changed("user") && cfg.User != "" { + applyUser = cfg.User + } + if !cmd.Flags().Changed("password") && cfg.Password != "" { + applyPassword = cfg.Password + } + if !cmd.Flags().Changed("schema") && cfg.Schema != "" { + applySchema = cfg.Schema + } + if !cmd.Flags().Changed("file") && cfg.File != "" { + applyFile = cfg.File + } + if !cmd.Flags().Changed("sslmode") && cfg.SSLMode != "" { + applySSLMode = cfg.SSLMode + } + if !cmd.Flags().Changed("lock-timeout") && cfg.LockTimeout != "" { + applyLockTimeout = cfg.LockTimeout + } + if !cmd.Flags().Changed("auto-approve") && cfg.AutoApprove { + applyAutoApprove = cfg.AutoApprove + } + if !cmd.Flags().Changed("application-name") && cfg.ApplicationName != "" { + applyApplicationName = cfg.ApplicationName + } + if 
!cmd.Flags().Changed("no-color") && cfg.NoColor { + applyNoColor = cfg.NoColor + } + if !cmd.Flags().Changed("plan-host") && cfg.PlanHost != "" { + applyPlanDBHost = cfg.PlanHost + } + if !cmd.Flags().Changed("plan-port") && cfg.PlanPort != 0 { + applyPlanDBPort = cfg.PlanPort + } + if !cmd.Flags().Changed("plan-db") && cfg.PlanDB != "" { + applyPlanDBDatabase = cfg.PlanDB + } + if !cmd.Flags().Changed("plan-user") && cfg.PlanUser != "" { + applyPlanDBUser = cfg.PlanUser + } + if !cmd.Flags().Changed("plan-password") && cfg.PlanPassword != "" { + applyPlanDBPassword = cfg.PlanPassword + } + if !cmd.Flags().Changed("plan-sslmode") && cfg.PlanSSLMode != "" { + applyPlanDBSSLMode = cfg.PlanSSLMode + } +} + // truncateSQL truncates a SQL statement for display purposes func truncateSQL(sql string, maxLen int) string { // Remove extra whitespace and newlines diff --git a/cmd/apply/apply_integration_test.go b/cmd/apply/apply_integration_test.go index 1dc08a54..9ff5b5a2 100644 --- a/cmd/apply/apply_integration_test.go +++ b/cmd/apply/apply_integration_test.go @@ -163,7 +163,7 @@ func TestApplyCommand_TransactionRollback(t *testing.T) { ApplicationName: "pgschema", } - migrationPlan, err := planCmd.GeneratePlan(planConfig, sharedEmbeddedPG) + migrationPlan, err := planCmd.GenerateSchemaPlan(planConfig, sharedEmbeddedPG) if err != nil { t.Fatalf("Failed to generate migration plan: %v", err) } @@ -419,7 +419,7 @@ func TestApplyCommand_CreateIndexConcurrently(t *testing.T) { ApplicationName: "pgschema", } - migrationPlan, err := planCmd.GeneratePlan(planConfig, sharedEmbeddedPG) + migrationPlan, err := planCmd.GenerateSchemaPlan(planConfig, sharedEmbeddedPG) if err != nil { t.Fatalf("Failed to generate migration plan: %v", err) } @@ -632,7 +632,7 @@ func TestApplyCommand_WithPlanFile(t *testing.T) { ApplicationName: "pgschema", } - migrationPlan, err := planCmd.GeneratePlan(planConfig, sharedEmbeddedPG) + migrationPlan, err := planCmd.GenerateSchemaPlan(planConfig, 
sharedEmbeddedPG) if err != nil { t.Fatalf("Failed to generate migration plan: %v", err) } @@ -826,7 +826,7 @@ func TestApplyCommand_FingerprintMismatch(t *testing.T) { ApplicationName: "pgschema", } - migrationPlan, err := planCmd.GeneratePlan(planConfig, sharedEmbeddedPG) + migrationPlan, err := planCmd.GenerateSchemaPlan(planConfig, sharedEmbeddedPG) if err != nil { t.Fatalf("Failed to generate migration plan: %v", err) } @@ -1032,7 +1032,7 @@ func TestApplyCommand_WaitDirective(t *testing.T) { ApplicationName: "pgschema", } - migrationPlan, err := planCmd.GeneratePlan(planConfig, sharedEmbeddedPG) + migrationPlan, err := planCmd.GenerateSchemaPlan(planConfig, sharedEmbeddedPG) if err != nil { t.Fatalf("Failed to generate plan: %v", err) } @@ -1276,9 +1276,13 @@ CREATE TABLE users ( require.NoError(t, err, "should create provider") defer provider.Stop() - generatedPlan, err := planCmd.GeneratePlan(planConfig, provider) + generatedSchemaPlan, err := planCmd.GenerateSchemaPlan(planConfig, provider) require.NoError(t, err, "should generate plan") + // Wrap in unified Plan for JSON serialization + generatedPlan := plan.NewPlan() + generatedPlan.AddSchema("public", generatedSchemaPlan) + // Save plan to JSON file planJSON, err := generatedPlan.ToJSON() require.NoError(t, err, "should serialize plan to JSON") @@ -1306,10 +1310,10 @@ CREATE TABLE users ( planData, err := os.ReadFile(planFile) require.NoError(t, err, "should read plan file") - migrationPlan, err := plan.FromJSON(planData) + loadedPlan, err := plan.FromJSON(planData) require.NoError(t, err, "should load plan from JSON") - config.Plan = migrationPlan + config.Plan = loadedPlan.Schemas["public"] // Set environment variables with INVALID plan database configuration // This would normally fail validation, but should be ignored in Plan Mode diff --git a/cmd/apply/apply_test.go b/cmd/apply/apply_test.go index 7b8dd6b2..1724a070 100644 --- a/cmd/apply/apply_test.go +++ b/cmd/apply/apply_test.go @@ -7,6 +7,9 @@
import ( + "os" + "path/filepath" "strings" "testing" + "github.com/pgplex/pgschema/cmd/config" "github.com/pgplex/pgschema/internal/version" "github.com/spf13/cobra" ) @@ -541,3 +544,47 @@ func TestApplyCommand_PlanDatabaseFlags(t *testing.T) { t.Errorf("Expected default plan-password to be empty, got '%s'", planPasswordFlag.DefValue) } } + +func TestRunApply_PlanFlagSkipsMultiSchema(t *testing.T) { + // When --plan is provided alongside a [schemas] config, RunApply must NOT enter + // the multi-schema path. If it did, it would return "--file is required for + // multi-schema apply" instead of the expected plan-loading error. + + // Set up a config that has a schemas query + cfg := &config.ResolvedConfig{ + Schemas: &config.SchemasConfig{Query: "SELECT schema_name FROM information_schema.schemata"}, + } + config.SetResolved(cfg) + defer config.SetResolved(nil) + + // Create a minimal (but invalid version) plan file to trigger a version error, + // not a "file required" error. + tmpDir := t.TempDir() + planPath := filepath.Join(tmpDir, "plan.json") + planJSON := `{"version":"0.0.0","pgschema_version":"test","created_at":"2024-01-01T00:00:00Z","transaction":true,"summary":{"total":0,"add":0,"change":0,"destroy":0,"by_type":{}},"diffs":[]}` + if err := os.WriteFile(planPath, []byte(planJSON), 0644); err != nil { + t.Fatalf("Failed to write plan file: %v", err) + } + + applyDB = "testdb" + applyUser = "testuser" + applyFile = "" + applyPlan = planPath + defer func() { + applyDB = "" + applyUser = "" + applyFile = "" + applyPlan = "" + }() + + // Mark --plan as explicitly set so the guard can detect it + ApplyCmd.Flags().Set("plan", planPath) + defer ApplyCmd.Flags().Set("plan", "") + + err := RunApply(ApplyCmd, []string{}) + + // Must NOT be the multi-schema error + if err != nil && strings.Contains(err.Error(), "--file is required for multi-schema apply") { + t.Errorf("--plan flag should prevent entering multi-schema path, got: %v", err) + } +} diff --git a/cmd/config/config.go
b/cmd/config/config.go new file mode 100644 index 00000000..5fe32920 --- /dev/null +++ b/cmd/config/config.go @@ -0,0 +1,298 @@ +package config + +import ( + "context" + "database/sql" + "fmt" + "net/url" + "strings" + "time" + + "github.com/BurntSushi/toml" + _ "github.com/jackc/pgx/v5/stdlib" +) + +// ResolvedConfig is the final, flattened configuration consumed by plan/apply/dump commands. +// It is produced by merging base config with an optional named env override via LoadConfig. +type ResolvedConfig struct { + Host string + Port int + DB string + User string + Password string + SSLMode string + + Schema string + File string + + PlanHost string + PlanPort int + PlanDB string + PlanUser string + PlanPassword string + PlanSSLMode string + + LockTimeout string + AutoApprove bool + ApplicationName string + + OutputHuman string + OutputJSON string + OutputSQL string + + MultiFile bool + NoComments bool + + NoColor bool + + Schemas *SchemasConfig +} + +type SchemasConfig struct { + Query string `toml:"query"` +} + +// envConfig is the TOML deserialization target. It mirrors ResolvedConfig but carries +// toml struct tags. Both the base level and each [env.*] block parse into this type. +// It is unexported — callers only see the merged ResolvedConfig. 
+type envConfig struct { + Host string `toml:"host"` + Port int `toml:"port"` + DB string `toml:"db"` + User string `toml:"user"` + Password string `toml:"password"` + SSLMode string `toml:"sslmode"` + Schema string `toml:"schema"` + File string `toml:"file"` + PlanHost string `toml:"plan-host"` + PlanPort int `toml:"plan-port"` + PlanDB string `toml:"plan-db"` + PlanUser string `toml:"plan-user"` + PlanPassword string `toml:"plan-password"` + PlanSSLMode string `toml:"plan-sslmode"` + LockTimeout string `toml:"lock-timeout"` + AutoApprove bool `toml:"auto-approve"` + ApplicationName string `toml:"application-name"` + OutputHuman string `toml:"output-human"` + OutputJSON string `toml:"output-json"` + OutputSQL string `toml:"output-sql"` + MultiFile bool `toml:"multi-file"` + NoComments bool `toml:"no-comments"` + NoColor bool `toml:"no-color"` + Schemas *SchemasConfig `toml:"schemas"` +} + +// fileConfig is the top-level TOML structure: base-level fields (embedded envConfig) +// plus a map of named environment overrides ([env.dev], [env.prod], etc.). 
+type fileConfig struct { + envConfig + Env map[string]envConfig `toml:"env"` +} + +func LoadConfig(path string, envName string) (*ResolvedConfig, error) { + var fc fileConfig + meta, err := toml.DecodeFile(path, &fc) + if err != nil { + return nil, fmt.Errorf("failed to parse config file %s: %w", path, err) + } + + resolved := toResolved(&fc.envConfig) + + if envName != "" { + env, ok := fc.Env[envName] + if !ok { + return nil, fmt.Errorf("environment %q not found in %s", envName, path) + } + mergeEnvConfig(resolved, &env, meta, "env", envName) + } + + return resolved, nil +} + +func toResolved(ec *envConfig) *ResolvedConfig { + return &ResolvedConfig{ + Host: ec.Host, + Port: ec.Port, + DB: ec.DB, + User: ec.User, + Password: ec.Password, + SSLMode: ec.SSLMode, + Schema: ec.Schema, + File: ec.File, + PlanHost: ec.PlanHost, + PlanPort: ec.PlanPort, + PlanDB: ec.PlanDB, + PlanUser: ec.PlanUser, + PlanPassword: ec.PlanPassword, + PlanSSLMode: ec.PlanSSLMode, + LockTimeout: ec.LockTimeout, + AutoApprove: ec.AutoApprove, + ApplicationName: ec.ApplicationName, + OutputHuman: ec.OutputHuman, + OutputJSON: ec.OutputJSON, + OutputSQL: ec.OutputSQL, + MultiFile: ec.MultiFile, + NoComments: ec.NoComments, + NoColor: ec.NoColor, + Schemas: ec.Schemas, + } +} + +var resolvedCfg *ResolvedConfig + +func SetResolved(cfg *ResolvedConfig) { + resolvedCfg = cfg +} + +func Get() *ResolvedConfig { + return resolvedCfg +} + +func DiscoverSchemas(host string, port int, db, user, password, sslmode, query string) ([]string, error) { + u := &url.URL{ + Scheme: "postgres", + Host: fmt.Sprintf("%s:%d", host, port), + Path: "/" + db, + } + if user != "" || password != "" { + u.User = url.UserPassword(user, password) + } + q := url.Values{} + q.Set("sslmode", sslmode) + q.Set("connect_timeout", "30") + u.RawQuery = q.Encode() + dsn := u.String() + + conn, err := sql.Open("pgx", dsn) + if err != nil { + return nil, fmt.Errorf("failed to connect for schema discovery: %w", err) + } + defer 
conn.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Run in a read-only transaction so the discovery query cannot modify data, + // even if the config file contains a non-SELECT statement. + tx, err := conn.BeginTx(ctx, &sql.TxOptions{ReadOnly: true}) + if err != nil { + return nil, fmt.Errorf("failed to begin read-only transaction: %w", err) + } + defer tx.Rollback() + + rows, err := tx.QueryContext(ctx, query) + if err != nil { + return nil, fmt.Errorf("schema discovery query failed: %w", err) + } + defer rows.Close() + + cols, err := rows.Columns() + if err != nil { + return nil, fmt.Errorf("failed to get query columns: %w", err) + } + if len(cols) != 1 { + return nil, fmt.Errorf("schema discovery query must return exactly 1 column, got %d", len(cols)) + } + + var schemas []string + for rows.Next() { + var name string + if err := rows.Scan(&name); err != nil { + return nil, fmt.Errorf("failed to scan schema name: %w", err) + } + schemas = append(schemas, name) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("error reading schema names: %w", err) + } + + return schemas, nil +} + +// isDefined checks if a TOML key is explicitly present. +// prefix is a dot-separated path like "env.dev", key is the field name. +func isDefined(meta toml.MetaData, prefix string, key string) bool { + var keys []string + if prefix != "" { + keys = strings.Split(prefix, ".") + } + keys = append(keys, key) + return meta.IsDefined(keys...) 
+} + +func mergeEnvConfig(resolved *ResolvedConfig, env *envConfig, meta toml.MetaData, prefixParts ...string) { + prefix := strings.Join(prefixParts, ".") + + if isDefined(meta, prefix, "host") { + resolved.Host = env.Host + } + if isDefined(meta, prefix, "port") { + resolved.Port = env.Port + } + if isDefined(meta, prefix, "db") { + resolved.DB = env.DB + } + if isDefined(meta, prefix, "user") { + resolved.User = env.User + } + if isDefined(meta, prefix, "password") { + resolved.Password = env.Password + } + if isDefined(meta, prefix, "sslmode") { + resolved.SSLMode = env.SSLMode + } + if isDefined(meta, prefix, "schema") { + resolved.Schema = env.Schema + } + if isDefined(meta, prefix, "file") { + resolved.File = env.File + } + if isDefined(meta, prefix, "plan-host") { + resolved.PlanHost = env.PlanHost + } + if isDefined(meta, prefix, "plan-port") { + resolved.PlanPort = env.PlanPort + } + if isDefined(meta, prefix, "plan-db") { + resolved.PlanDB = env.PlanDB + } + if isDefined(meta, prefix, "plan-user") { + resolved.PlanUser = env.PlanUser + } + if isDefined(meta, prefix, "plan-password") { + resolved.PlanPassword = env.PlanPassword + } + if isDefined(meta, prefix, "plan-sslmode") { + resolved.PlanSSLMode = env.PlanSSLMode + } + if isDefined(meta, prefix, "lock-timeout") { + resolved.LockTimeout = env.LockTimeout + } + if isDefined(meta, prefix, "auto-approve") { + resolved.AutoApprove = env.AutoApprove + } + if isDefined(meta, prefix, "application-name") { + resolved.ApplicationName = env.ApplicationName + } + if isDefined(meta, prefix, "output-human") { + resolved.OutputHuman = env.OutputHuman + } + if isDefined(meta, prefix, "output-json") { + resolved.OutputJSON = env.OutputJSON + } + if isDefined(meta, prefix, "output-sql") { + resolved.OutputSQL = env.OutputSQL + } + if isDefined(meta, prefix, "multi-file") { + resolved.MultiFile = env.MultiFile + } + if isDefined(meta, prefix, "no-comments") { + resolved.NoComments = env.NoComments + } + if 
isDefined(meta, prefix, "no-color") { + resolved.NoColor = env.NoColor + } + if isDefined(meta, prefix, "schemas") { + resolved.Schemas = env.Schemas + } +} diff --git a/cmd/config/config_test.go b/cmd/config/config_test.go new file mode 100644 index 00000000..b551a09f --- /dev/null +++ b/cmd/config/config_test.go @@ -0,0 +1,193 @@ +package config + +import ( + "os" + "path/filepath" + "strings" + "testing" +) + +func TestLoadConfig_MinimalFlat(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "pgschema.toml") + os.WriteFile(path, []byte(` +host = "localhost" +port = 5432 +db = "myapp_dev" +user = "postgres" +schema = "public" +file = "schema.sql" +`), 0644) + + resolved, err := LoadConfig(path, "") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if resolved.Host != "localhost" { + t.Errorf("Host = %q, want %q", resolved.Host, "localhost") + } + if resolved.Port != 5432 { + t.Errorf("Port = %d, want %d", resolved.Port, 5432) + } + if resolved.DB != "myapp_dev" { + t.Errorf("DB = %q, want %q", resolved.DB, "myapp_dev") + } + if resolved.User != "postgres" { + t.Errorf("User = %q, want %q", resolved.User, "postgres") + } + if resolved.Schema != "public" { + t.Errorf("Schema = %q, want %q", resolved.Schema, "public") + } + if resolved.File != "schema.sql" { + t.Errorf("File = %q, want %q", resolved.File, "schema.sql") + } + if resolved.Schemas != nil { + t.Errorf("Schemas = %v, want nil", resolved.Schemas) + } +} + +func TestLoadConfig_EnvOverride(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "pgschema.toml") + os.WriteFile(path, []byte(` +schema = "public" +file = "schema.sql" +lock-timeout = "10s" + +[env.dev] +host = "localhost" +port = 5432 +db = "myapp_dev" +user = "postgres" + +[env.prod] +host = "prod-db.internal" +db = "myapp_prod" +user = "app_user" +lock-timeout = "60s" +`), 0644) + + t.Run("base only", func(t *testing.T) { + resolved, err := LoadConfig(path, "") + if err != nil { + t.Fatalf("unexpected 
error: %v", err) + } + if resolved.Schema != "public" { + t.Errorf("Schema = %q, want %q", resolved.Schema, "public") + } + if resolved.Host != "" { + t.Errorf("Host = %q, want empty", resolved.Host) + } + }) + + t.Run("dev env inherits base", func(t *testing.T) { + resolved, err := LoadConfig(path, "dev") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if resolved.Schema != "public" { + t.Errorf("Schema = %q, want %q (inherited from base)", resolved.Schema, "public") + } + if resolved.File != "schema.sql" { + t.Errorf("File = %q, want %q (inherited from base)", resolved.File, "schema.sql") + } + if resolved.Host != "localhost" { + t.Errorf("Host = %q, want %q", resolved.Host, "localhost") + } + if resolved.LockTimeout != "10s" { + t.Errorf("LockTimeout = %q, want %q (inherited from base)", resolved.LockTimeout, "10s") + } + }) + + t.Run("prod env overrides base", func(t *testing.T) { + resolved, err := LoadConfig(path, "prod") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if resolved.LockTimeout != "60s" { + t.Errorf("LockTimeout = %q, want %q (overridden by prod)", resolved.LockTimeout, "60s") + } + if resolved.File != "schema.sql" { + t.Errorf("File = %q, want %q (inherited from base)", resolved.File, "schema.sql") + } + }) +} + +func TestLoadConfig_BooleanOverride(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "pgschema.toml") + os.WriteFile(path, []byte(` +auto-approve = true + +[env.safe] +auto-approve = false +`), 0644) + + resolved, err := LoadConfig(path, "safe") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if resolved.AutoApprove != false { + t.Errorf("AutoApprove = %v, want false (explicit override of base true)", resolved.AutoApprove) + } +} + +func TestLoadConfig_UnknownEnv(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "pgschema.toml") + os.WriteFile(path, []byte(` +db = "test" +`), 0644) + + _, err := LoadConfig(path, "nonexistent") + if err == nil { + 
t.Fatal("expected error for unknown env, got nil") + } + expected := `environment "nonexistent" not found` + if !strings.Contains(err.Error(), expected) { + t.Errorf("error = %q, want it to contain %q", err.Error(), expected) + } +} + +func TestLoadConfig_InvalidTOML(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "pgschema.toml") + os.WriteFile(path, []byte(` +this is not valid toml [[[ +`), 0644) + + _, err := LoadConfig(path, "") + if err == nil { + t.Fatal("expected error for invalid TOML, got nil") + } +} + +func TestLoadConfig_SchemasQuery(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "pgschema.toml") + os.WriteFile(path, []byte(` +file = "tenant.sql" + +[env.tenants] +host = "localhost" +db = "myapp" +user = "postgres" + +[env.tenants.schemas] +query = "SELECT schema_name FROM information_schema.schemata WHERE schema_name LIKE 'tenant_%'" +`), 0644) + + resolved, err := LoadConfig(path, "tenants") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if resolved.Schemas == nil { + t.Fatal("Schemas is nil, want non-nil") + } + if resolved.Schemas.Query != "SELECT schema_name FROM information_schema.schemata WHERE schema_name LIKE 'tenant_%'" { + t.Errorf("Schemas.Query = %q, want tenant query", resolved.Schemas.Query) + } + if resolved.File != "tenant.sql" { + t.Errorf("File = %q, want %q (inherited from base)", resolved.File, "tenant.sql") + } +} diff --git a/cmd/config_integration_test.go b/cmd/config_integration_test.go new file mode 100644 index 00000000..a162e490 --- /dev/null +++ b/cmd/config_integration_test.go @@ -0,0 +1,357 @@ +package cmd + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/pgplex/pgschema/cmd/config" + "github.com/pgplex/pgschema/testutil" +) + +func resetRootCmd() { + config.SetResolved(nil) + configPath = "pgschema.toml" + envName = "" +} + +func TestConfigLoading_NoFile(t *testing.T) { + resetRootCmd() + + dir := t.TempDir() + origDir, _ := os.Getwd() + 
os.Chdir(dir) + defer os.Chdir(origDir) + + loadConfig(RootCmd) + + if config.Get() != nil { + t.Error("expected no config when pgschema.toml is absent") + } +} + +func TestConfigLoading_WithFile(t *testing.T) { + resetRootCmd() + + dir := t.TempDir() + tomlPath := filepath.Join(dir, "pgschema.toml") + os.WriteFile(tomlPath, []byte(` +host = "testhost" +port = 9999 +db = "testdb" +user = "testuser" +schema = "myschema" +file = "schema.sql" +`), 0644) + + configPath = tomlPath + loadConfig(RootCmd) + + cfg := config.Get() + if cfg == nil { + t.Fatal("expected config to be loaded") + } + if cfg.Host != "testhost" { + t.Errorf("Host = %q, want %q", cfg.Host, "testhost") + } + if cfg.Port != 9999 { + t.Errorf("Port = %d, want %d", cfg.Port, 9999) + } + if cfg.DB != "testdb" { + t.Errorf("DB = %q, want %q", cfg.DB, "testdb") + } + if cfg.Schema != "myschema" { + t.Errorf("Schema = %q, want %q", cfg.Schema, "myschema") + } +} + +func TestConfigLoading_WithEnv(t *testing.T) { + resetRootCmd() + + dir := t.TempDir() + tomlPath := filepath.Join(dir, "pgschema.toml") + os.WriteFile(tomlPath, []byte(` +host = "base-host" +schema = "public" +file = "schema.sql" + +[env.dev] +host = "dev-host" +db = "dev_db" +user = "dev_user" + +[env.prod] +host = "prod-host" +db = "prod_db" +user = "prod_user" +lock-timeout = "30s" +`), 0644) + + configPath = tomlPath + envName = "dev" + loadConfig(RootCmd) + + cfg := config.Get() + if cfg == nil { + t.Fatal("expected config to be loaded") + } + if cfg.Host != "dev-host" { + t.Errorf("Host = %q, want %q (dev override)", cfg.Host, "dev-host") + } + if cfg.DB != "dev_db" { + t.Errorf("DB = %q, want %q (dev override)", cfg.DB, "dev_db") + } + if cfg.Schema != "public" { + t.Errorf("Schema = %q, want %q (inherited from base)", cfg.Schema, "public") + } + if cfg.File != "schema.sql" { + t.Errorf("File = %q, want %q (inherited from base)", cfg.File, "schema.sql") + } +} + +func TestConfigLoading_SchemasSection(t *testing.T) { + resetRootCmd() + + 
dir := t.TempDir() + tomlPath := filepath.Join(dir, "pgschema.toml") + os.WriteFile(tomlPath, []byte(` +host = "localhost" +db = "myapp" +user = "postgres" +file = "tenant.sql" + +[schemas] +query = "SELECT schema_name FROM information_schema.schemata WHERE schema_name LIKE 'tenant_%'" +`), 0644) + + configPath = tomlPath + loadConfig(RootCmd) + + cfg := config.Get() + if cfg == nil { + t.Fatal("expected config to be loaded") + } + if cfg.Schemas == nil { + t.Fatal("expected Schemas to be non-nil") + } + if cfg.Schemas.Query == "" { + t.Error("expected Schemas.Query to be set") + } +} + +func TestConfigLoading_PlanFieldsFallback(t *testing.T) { + resetRootCmd() + + dir := t.TempDir() + tomlPath := filepath.Join(dir, "pgschema.toml") + os.WriteFile(tomlPath, []byte(` +host = "myhost" +port = 5433 +db = "mydb" +user = "myuser" +plan-host = "plan-server" +plan-port = 15432 +plan-db = "plandb" +plan-user = "planner" +plan-sslmode = "require" +application-name = "myapp" +lock-timeout = "15s" +auto-approve = true +no-color = true +`), 0644) + + configPath = tomlPath + loadConfig(RootCmd) + + cfg := config.Get() + if cfg == nil { + t.Fatal("expected config to be loaded") + } + if cfg.PlanHost != "plan-server" { + t.Errorf("PlanHost = %q, want %q", cfg.PlanHost, "plan-server") + } + if cfg.PlanPort != 15432 { + t.Errorf("PlanPort = %d, want %d", cfg.PlanPort, 15432) + } + if cfg.PlanDB != "plandb" { + t.Errorf("PlanDB = %q, want %q", cfg.PlanDB, "plandb") + } + if cfg.LockTimeout != "15s" { + t.Errorf("LockTimeout = %q, want %q", cfg.LockTimeout, "15s") + } + if !cfg.AutoApprove { + t.Error("AutoApprove should be true") + } + if !cfg.NoColor { + t.Error("NoColor should be true") + } +} + +func TestConfigLoading_EnvOverridesBooleans(t *testing.T) { + resetRootCmd() + + dir := t.TempDir() + tomlPath := filepath.Join(dir, "pgschema.toml") + os.WriteFile(tomlPath, []byte(` +auto-approve = true +no-color = true +multi-file = true + +[env.safe] +auto-approve = false +no-color = 
false +multi-file = false +`), 0644) + + configPath = tomlPath + envName = "safe" + loadConfig(RootCmd) + + cfg := config.Get() + if cfg == nil { + t.Fatal("expected config to be loaded") + } + if cfg.AutoApprove { + t.Error("AutoApprove should be false (overridden by env)") + } + if cfg.NoColor { + t.Error("NoColor should be false (overridden by env)") + } + if cfg.MultiFile { + t.Error("MultiFile should be false (overridden by env)") + } +} + +func TestDumpCommand_ConfigFallback(t *testing.T) { + resetRootCmd() + + dir := t.TempDir() + tomlPath := filepath.Join(dir, "pgschema.toml") + os.WriteFile(tomlPath, []byte(` +host = "config-host" +port = 9876 +db = "config-db" +user = "config-user" +schema = "config_schema" +sslmode = "require" +no-comments = true +`), 0644) + + configPath = tomlPath + loadConfig(RootCmd) + + cfg := config.Get() + if cfg == nil { + t.Fatal("config should be loaded") + } + + // Verify config values are accessible for dump command fallback + if cfg.Host != "config-host" { + t.Errorf("Host = %q, want %q", cfg.Host, "config-host") + } + if cfg.Port != 9876 { + t.Errorf("Port = %d, want %d", cfg.Port, 9876) + } + if cfg.Schema != "config_schema" { + t.Errorf("Schema = %q, want %q", cfg.Schema, "config_schema") + } + if cfg.SSLMode != "require" { + t.Errorf("SSLMode = %q, want %q", cfg.SSLMode, "require") + } + if !cfg.NoComments { + t.Error("NoComments should be true") + } +} + +func TestApplyConfigToPlan_UsesConfigValues(t *testing.T) { + resetRootCmd() + + dir := t.TempDir() + tomlPath := filepath.Join(dir, "pgschema.toml") + os.WriteFile(tomlPath, []byte(` +host = "plan-test-host" +port = 1234 +db = "plan-test-db" +user = "plan-test-user" +schema = "plan-test-schema" +file = "plan-test.sql" + +[env.staging] +host = "staging-host" +db = "staging-db" +lock-timeout = "45s" +`), 0644) + + configPath = tomlPath + envName = "staging" + loadConfig(RootCmd) + + cfg := config.Get() + if cfg == nil { + t.Fatal("config should be loaded") + } + if 
cfg.Host != "staging-host" { + t.Errorf("Host = %q, want %q", cfg.Host, "staging-host") + } + if cfg.DB != "staging-db" { + t.Errorf("DB = %q, want %q", cfg.DB, "staging-db") + } + if cfg.File != "plan-test.sql" { + t.Errorf("File = %q, want %q (inherited from base)", cfg.File, "plan-test.sql") + } + if cfg.LockTimeout != "45s" { + t.Errorf("LockTimeout = %q, want %q", cfg.LockTimeout, "45s") + } +} + +func TestDiscoverSchemas_ReadOnlyEnforcement(t *testing.T) { + _, host, port, dbname, user, password := testutil.ConnectToPostgres(t, sharedEmbeddedPG) + + // Valid SELECT query should succeed + t.Run("SELECT is allowed", func(t *testing.T) { + schemas, err := config.DiscoverSchemas(host, port, dbname, user, password, "disable", + "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'public'") + if err != nil { + t.Fatalf("SELECT query should succeed: %v", err) + } + if len(schemas) == 0 { + t.Error("expected at least one schema") + } + }) + + // CREATE TABLE should be rejected by read-only transaction + t.Run("CREATE is rejected", func(t *testing.T) { + _, err := config.DiscoverSchemas(host, port, dbname, user, password, "disable", + "CREATE TABLE pgschema_injection_test (id int)") + if err == nil { + t.Fatal("CREATE should be rejected in read-only transaction") + } + if !strings.Contains(err.Error(), "read-only") { + t.Errorf("error should mention read-only, got: %v", err) + } + }) + + // DROP should be rejected + t.Run("DROP is rejected", func(t *testing.T) { + _, err := config.DiscoverSchemas(host, port, dbname, user, password, "disable", + "DROP TABLE IF EXISTS pgschema_injection_test") + if err == nil { + t.Fatal("DROP should be rejected in read-only transaction") + } + if !strings.Contains(err.Error(), "read-only") { + t.Errorf("error should mention read-only, got: %v", err) + } + }) + + // INSERT should be rejected + t.Run("INSERT is rejected", func(t *testing.T) { + // Create a temp table first via direct connection, then try INSERT via 
DiscoverSchemas + _, err := config.DiscoverSchemas(host, port, dbname, user, password, "disable", + "INSERT INTO information_schema.schemata VALUES ('hacked')") + if err == nil { + t.Fatal("INSERT should be rejected in read-only transaction") + } + }) +} + diff --git a/cmd/dump/dump.go b/cmd/dump/dump.go index 59fc167e..d931d7cc 100644 --- a/cmd/dump/dump.go +++ b/cmd/dump/dump.go @@ -4,6 +4,7 @@ import ( "fmt" "os" + "github.com/pgplex/pgschema/cmd/config" "github.com/pgplex/pgschema/cmd/util" "github.com/pgplex/pgschema/internal/diff" "github.com/pgplex/pgschema/internal/dump" @@ -44,7 +45,10 @@ var DumpCmd = &cobra.Command{ Long: "Dump and output database schema information for a specific schema. Uses the --schema flag to target a particular schema (defaults to 'public').", RunE: runDump, SilenceUsage: true, - PreRunE: util.PreRunEWithEnvVarsAndConnection(&db, &user, &host, &port), + PreRunE: func(cmd *cobra.Command, args []string) error { + applyConfigToDump(cmd) + return util.PreRunEWithEnvVarsAndConnection(&db, &user, &host, &port)(cmd, args) + }, } func init() { @@ -153,3 +157,41 @@ func runDump(cmd *cobra.Command, args []string) error { return nil } + +func applyConfigToDump(cmd *cobra.Command) { + cfg := config.Get() + if cfg == nil { + return + } + + if !cmd.Flags().Changed("host") && cfg.Host != "" { + host = cfg.Host + } + if !cmd.Flags().Changed("port") && cfg.Port != 0 { + port = cfg.Port + } + if !cmd.Flags().Changed("db") && cfg.DB != "" { + db = cfg.DB + } + if !cmd.Flags().Changed("user") && cfg.User != "" { + user = cfg.User + } + if !cmd.Flags().Changed("password") && cfg.Password != "" { + password = cfg.Password + } + if !cmd.Flags().Changed("schema") && cfg.Schema != "" { + schema = cfg.Schema + } + if !cmd.Flags().Changed("sslmode") && cfg.SSLMode != "" { + sslmode = cfg.SSLMode + } + if !cmd.Flags().Changed("multi-file") && cfg.MultiFile { + multiFile = cfg.MultiFile + } + if !cmd.Flags().Changed("file") && cfg.File != "" { + file = 
cfg.File + } + if !cmd.Flags().Changed("no-comments") && cfg.NoComments { + noComments = cfg.NoComments + } +} diff --git a/cmd/ignore_integration_test.go b/cmd/ignore_integration_test.go index e85b5438..da5e7b26 100644 --- a/cmd/ignore_integration_test.go +++ b/cmd/ignore_integration_test.go @@ -803,7 +803,7 @@ func executeIgnorePlanCommand(t *testing.T, containerInfo *struct { } // Generate the plan (reuse shared embedded postgres from migrate_integration_test.go) - migrationPlan, err := planCmd.GeneratePlan(config, sharedEmbeddedPG) + migrationPlan, err := planCmd.GenerateSchemaPlan(config, sharedEmbeddedPG) if err != nil { t.Fatalf("Failed to execute plan command: %v", err) } diff --git a/cmd/migrate_integration_test.go b/cmd/migrate_integration_test.go index e0dcfd10..565d9c11 100644 --- a/cmd/migrate_integration_test.go +++ b/cmd/migrate_integration_test.go @@ -457,12 +457,16 @@ func generatePlanOutput(host string, port int, database, user, password, schema, ApplicationName: "pgschema", } - // Generate the plan (reuse shared embedded postgres for performance) - migrationPlan, err := planCmd.GeneratePlan(config, sharedEmbeddedPG) + // Generate the per-schema plan (reuse shared embedded postgres for performance) + schemaPlan, err := planCmd.GenerateSchemaPlan(config, sharedEmbeddedPG) if err != nil { return "", err } + // Wrap in unified Plan for output + migrationPlan := plan.NewPlan() + migrationPlan.AddSchema(schema, schemaPlan) + // Format output based on the requested format var output string switch outputFlag { diff --git a/cmd/plan/external_db_integration_test.go b/cmd/plan/external_db_integration_test.go index 4aa19972..fbb33805 100644 --- a/cmd/plan/external_db_integration_test.go +++ b/cmd/plan/external_db_integration_test.go @@ -80,7 +80,7 @@ CREATE INDEX idx_users_email ON users(email); assert.Contains(t, tempSchema, "pgschema_tmp_", "temporary schema should have timestamp prefix") // Generate plan - migrationPlan, err := GeneratePlan(config, 
provider) + migrationPlan, err := GenerateSchemaPlan(config, provider) require.NoError(t, err, "should generate plan") // Verify plan has changes (target is empty, desired has tables) diff --git a/cmd/plan/output_test.go b/cmd/plan/output_test.go index 4e4a6fe6..b3e1b26e 100644 --- a/cmd/plan/output_test.go +++ b/cmd/plan/output_test.go @@ -9,13 +9,13 @@ import ( func TestDetermineOutputs(t *testing.T) { tests := []struct { - name string - outputHuman string - outputJSON string - outputSQL string - expectError bool - errorMsg string - expectCount int + name string + outputHuman string + outputJSON string + outputSQL string + expectError bool + errorMsg string + expectCount int }{ { name: "no flags - default to human stdout", @@ -116,4 +116,4 @@ func TestProcessOutput_FileCreation(t *testing.T) { if string(content) != "test content" { t.Errorf("expected 'test content', got '%s'", string(content)) } -} \ No newline at end of file +} diff --git a/cmd/plan/plan.go b/cmd/plan/plan.go index afd5e3bd..9d08d4c7 100644 --- a/cmd/plan/plan.go +++ b/cmd/plan/plan.go @@ -8,6 +8,7 @@ import ( "regexp" "strings" + "github.com/pgplex/pgschema/cmd/config" "github.com/pgplex/pgschema/cmd/util" "github.com/pgplex/pgschema/internal/diff" "github.com/pgplex/pgschema/internal/fingerprint" @@ -48,7 +49,10 @@ var PlanCmd = &cobra.Command{ Long: "Generate a migration plan to apply a desired schema state to a target database schema. 
Compares the desired state (from --file) with the current state of a specific schema (specified by --schema, defaults to 'public').", RunE: runPlan, SilenceUsage: true, - PreRunE: util.PreRunEWithEnvVarsAndConnection(&planDB, &planUser, &planHost, &planPort), + PreRunE: func(cmd *cobra.Command, args []string) error { + applyConfigToPlan(cmd) + return util.PreRunEWithEnvVarsAndConnection(&planDB, &planUser, &planHost, &planPort)(cmd, args) + }, } func init() { @@ -80,10 +84,18 @@ func init() { PlanCmd.Flags().StringVar(&outputSQL, "output-sql", "", "Output SQL format to stdout or file path") PlanCmd.Flags().BoolVar(&planNoColor, "no-color", false, "Disable colored output") - PlanCmd.MarkFlagRequired("file") } func runPlan(cmd *cobra.Command, args []string) error { + if planFile == "" { + return fmt.Errorf("--file is required (provide via flag, config file, or environment)") + } + + cfg := config.Get() + if cfg != nil && cfg.Schemas != nil && cfg.Schemas.Query != "" && !cmd.Flags().Changed("schema") { + return runPlanMultiSchema(cmd, cfg) + } + // Apply environment variables to plan database flags util.ApplyPlanDBEnvVars(cmd, &planDBHost, &planDBDatabase, &planDBUser, &planDBPassword, &planDBPort, &planDBSSLMode) @@ -153,21 +165,28 @@ func runPlan(cmd *cobra.Command, args []string) error { } defer provider.Stop() - // Generate plan - migrationPlan, err := GeneratePlan(config, provider) + // Generate per-schema plan + schemaPlan, err := GenerateSchemaPlan(config, provider) if err != nil { return err } + // Wrap in unified Plan + migrationPlan := plan.NewPlan() + migrationPlan.AddSchema(config.Schema, schemaPlan) + // Determine which outputs to generate outputs, err := determineOutputs() if err != nil { return err } + // Check if debug flag is set + debug, _ := cmd.Root().PersistentFlags().GetBool("debug") + // Process each output for _, output := range outputs { - if err := processOutput(migrationPlan, output, cmd); err != nil { + if err := 
processOutput(migrationPlan, output, debug); err != nil { return err } } @@ -263,10 +282,10 @@ func CreateEmbeddedPostgresForPlan(config *PlanConfig, pgVersion postgres.Postgr return embeddedPG, nil } -// GeneratePlan generates a migration plan from configuration. +// GenerateSchemaPlan generates a migration plan from configuration. // The caller must provide a non-nil provider instance for validating the desired state schema. // The caller is responsible for managing the provider lifecycle (creation and cleanup). -func GeneratePlan(config *PlanConfig, provider postgres.DesiredStateProvider) (*plan.Plan, error) { +func GenerateSchemaPlan(config *PlanConfig, provider postgres.DesiredStateProvider) (*plan.SchemaPlan, error) { // Load ignore configuration ignoreConfig, err := util.LoadIgnoreFileWithStructure() if err != nil { @@ -336,10 +355,10 @@ func GeneratePlan(config *PlanConfig, provider postgres.DesiredStateProvider) (* // Generate diff (current -> desired) using IR directly diffs := diff.GenerateMigration(currentStateIR, desiredStateIR, config.Schema) - // Create plan from diffs with fingerprint - migrationPlan := plan.NewPlanWithFingerprint(diffs, sourceFingerprint) + // Create schema plan from diffs with fingerprint + schemaPlan := plan.NewSchemaPlanWithFingerprint(diffs, sourceFingerprint) - return migrationPlan, nil + return schemaPlan, nil } // outputSpec represents a single output specification @@ -388,36 +407,31 @@ func determineOutputs() ([]outputSpec, error) { return outputs, nil } -// processOutput writes the plan in the specified format to the target destination -func processOutput(migrationPlan *plan.Plan, output outputSpec, cmd *cobra.Command) error { +// processOutput writes a plan.Plan in the specified +// format to the target destination. 
+func processOutput(p *plan.Plan, output outputSpec, debug bool) error { var content string var err error - // Generate content based on format switch output.format { case "human": - // For human format, use colored output when writing to stdout, unless explicitly disabled useColor := output.target == "stdout" && !planNoColor - content = migrationPlan.HumanColored(useColor) + content = p.HumanColored(useColor) case "json": - // Check if debug flag is set on the root command - debug, _ := cmd.Root().PersistentFlags().GetBool("debug") - content, err = migrationPlan.ToJSONWithDebug(debug) + content, err = p.ToJSONWithDebug(debug) if err != nil { return fmt.Errorf("failed to generate JSON output: %w", err) } content += "\n" case "sql": - content = migrationPlan.ToSQL(plan.SQLFormatRaw) + content = p.ToSQL(plan.SQLFormatRaw) default: return fmt.Errorf("unknown output format: %s", output.format) } - // Write to target if output.target == "stdout" { fmt.Print(content) } else { - // Write to file if err := os.WriteFile(output.target, []byte(content), 0644); err != nil { return fmt.Errorf("failed to write %s output to %s: %w", output.format, output.target, err) } @@ -714,6 +728,177 @@ func newSameSchemaQualifierStripper(schema string) func(string) string { } } +func runPlanMultiSchema(cmd *cobra.Command, cfg *config.ResolvedConfig) error { + // Apply plan DB environment variables (same as single-schema path) + util.ApplyPlanDBEnvVars(cmd, &planDBHost, &planDBDatabase, &planDBUser, &planDBPassword, &planDBPort, &planDBSSLMode) + + // Validate plan database flags if plan-host is provided + if err := util.ValidatePlanDBFlags(planDBHost, planDBDatabase, planDBUser); err != nil { + return err + } + + finalPassword := planPassword + if finalPassword == "" { + if envPassword := os.Getenv("PGPASSWORD"); envPassword != "" { + finalPassword = envPassword + } + } + finalSSLMode := planSSLMode + if cmd == nil || !cmd.Flags().Changed("sslmode") { + if envSSLMode := 
os.Getenv("PGSSLMODE"); envSSLMode != "" { + finalSSLMode = envSSLMode + } + } + + // Derive final plan database password + finalPlanPassword := planDBPassword + if finalPlanPassword == "" { + if envPassword := os.Getenv("PGSCHEMA_PLAN_PASSWORD"); envPassword != "" { + finalPlanPassword = envPassword + } + } + + schemas, err := config.DiscoverSchemas(planHost, planPort, planDB, planUser, finalPassword, finalSSLMode, cfg.Schemas.Query) + if err != nil { + return err + } + + if len(schemas) == 0 { + fmt.Fprintln(os.Stderr, "Warning: schema discovery query returned no schemas.") + return nil + } + + outputs, err := determineOutputs() + if err != nil { + return err + } + + multiPlan := plan.NewPlan() + var hasErrors bool + + for _, schemaName := range schemas { + fmt.Fprintf(os.Stderr, "\n── Schema: %s ──────────────────────\n", schemaName) + + perSchemaConfig := &PlanConfig{ + Host: planHost, + Port: planPort, + DB: planDB, + User: planUser, + Password: finalPassword, + Schema: schemaName, + File: planFile, + ApplicationName: "pgschema", + SSLMode: finalSSLMode, + PlanDBHost: planDBHost, + PlanDBPort: planDBPort, + PlanDBDatabase: planDBDatabase, + PlanDBUser: planDBUser, + PlanDBPassword: finalPlanPassword, + PlanDBSSLMode: planDBSSLMode, + } + + provider, err := CreateDesiredStateProvider(perSchemaConfig) + if err != nil { + fmt.Fprintf(os.Stderr, "Error for schema %s: %v\n", schemaName, err) + hasErrors = true + continue + } + + migrationPlan, err := GenerateSchemaPlan(perSchemaConfig, provider) + provider.Stop() + if err != nil { + fmt.Fprintf(os.Stderr, "Error for schema %s: %v\n", schemaName, err) + hasErrors = true + continue + } + + // Print per-schema human-readable preview to stderr so users get + // visibility even when only file outputs are configured. 
+ fmt.Fprintln(os.Stderr, migrationPlan.HumanColored(!planNoColor)) + + multiPlan.AddSchema(schemaName, migrationPlan) + } + + // Check if debug flag is set + debug, _ := cmd.Root().PersistentFlags().GetBool("debug") + + // Write combined output for all schemas + for _, output := range outputs { + if err := processOutput(multiPlan, output, debug); err != nil { + return err + } + } + + fmt.Fprintln(os.Stderr, "\n"+multiPlan.SummaryString()) + + if hasErrors { + return fmt.Errorf("one or more schemas had errors") + } + return nil +} + +func applyConfigToPlan(cmd *cobra.Command) { + cfg := config.Get() + if cfg == nil { + return + } + + if !cmd.Flags().Changed("host") && cfg.Host != "" { + planHost = cfg.Host + } + if !cmd.Flags().Changed("port") && cfg.Port != 0 { + planPort = cfg.Port + } + if !cmd.Flags().Changed("db") && cfg.DB != "" { + planDB = cfg.DB + } + if !cmd.Flags().Changed("user") && cfg.User != "" { + planUser = cfg.User + } + if !cmd.Flags().Changed("password") && cfg.Password != "" { + planPassword = cfg.Password + } + if !cmd.Flags().Changed("schema") && cfg.Schema != "" { + planSchema = cfg.Schema + } + if !cmd.Flags().Changed("file") && cfg.File != "" { + planFile = cfg.File + } + if !cmd.Flags().Changed("sslmode") && cfg.SSLMode != "" { + planSSLMode = cfg.SSLMode + } + if !cmd.Flags().Changed("plan-host") && cfg.PlanHost != "" { + planDBHost = cfg.PlanHost + } + if !cmd.Flags().Changed("plan-port") && cfg.PlanPort != 0 { + planDBPort = cfg.PlanPort + } + if !cmd.Flags().Changed("plan-db") && cfg.PlanDB != "" { + planDBDatabase = cfg.PlanDB + } + if !cmd.Flags().Changed("plan-user") && cfg.PlanUser != "" { + planDBUser = cfg.PlanUser + } + if !cmd.Flags().Changed("plan-password") && cfg.PlanPassword != "" { + planDBPassword = cfg.PlanPassword + } + if !cmd.Flags().Changed("plan-sslmode") && cfg.PlanSSLMode != "" { + planDBSSLMode = cfg.PlanSSLMode + } + if !cmd.Flags().Changed("no-color") && cfg.NoColor { + planNoColor = cfg.NoColor + } + if 
!cmd.Flags().Changed("output-human") && cfg.OutputHuman != "" { + outputHuman = cfg.OutputHuman + } + if !cmd.Flags().Changed("output-json") && cfg.OutputJSON != "" { + outputJSON = cfg.OutputJSON + } + if !cmd.Flags().Changed("output-sql") && cfg.OutputSQL != "" { + outputSQL = cfg.OutputSQL + } +} + // ResetFlags resets all global flag variables to their default values for testing func ResetFlags() { planHost = "localhost" diff --git a/cmd/root.go b/cmd/root.go index af24ac11..dbb56763 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -7,6 +7,7 @@ import ( "runtime" "github.com/pgplex/pgschema/cmd/apply" + "github.com/pgplex/pgschema/cmd/config" "github.com/pgplex/pgschema/cmd/dump" "github.com/pgplex/pgschema/cmd/plan" globallogger "github.com/pgplex/pgschema/internal/logger" @@ -15,6 +16,8 @@ import ( ) var Debug bool +var configPath string +var envName string var logger *slog.Logger // Build-time variables set via ldflags @@ -40,11 +43,14 @@ Use "pgschema [command] --help" for more information about a command.`, PersistentPreRun: func(cmd *cobra.Command, args []string) { setupLogger() globallogger.SetGlobal(logger, Debug) + loadConfig(cmd) }, } func init() { RootCmd.PersistentFlags().BoolVar(&Debug, "debug", false, "Enable debug logging") + RootCmd.PersistentFlags().StringVar(&configPath, "config", "pgschema.toml", "Path to config file") + RootCmd.PersistentFlags().StringVar(&envName, "env", "", "Named environment to use from config file") RootCmd.CompletionOptions.DisableDefaultCmd = true RootCmd.AddCommand(dump.DumpCmd) RootCmd.AddCommand(plan.PlanCmd) @@ -83,6 +89,30 @@ func platform() string { return runtime.GOOS + "/" + runtime.GOARCH } +func loadConfig(cmd *cobra.Command) { + configExplicit := cmd.Flags().Changed("config") + + if _, err := os.Stat(configPath); os.IsNotExist(err) { + if configExplicit { + fmt.Fprintf(os.Stderr, "Error: config file not found: %s\n", configPath) + os.Exit(1) + } + if envName != "" { + fmt.Fprintf(os.Stderr, "Error: --env 
requires a config file, but %s not found\n", configPath) + os.Exit(1) + } + config.SetResolved(nil) + return + } + + resolved, err := config.LoadConfig(configPath, envName) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } + config.SetResolved(resolved) +} + func Execute() { if err := RootCmd.Execute(); err != nil { os.Exit(1) diff --git a/internal/plan/plan.go b/internal/plan/plan.go index e5f81df2..ff146fa4 100644 --- a/internal/plan/plan.go +++ b/internal/plan/plan.go @@ -8,1176 +8,162 @@ import ( "strings" "time" - "github.com/pgplex/pgschema/internal/color" - "github.com/pgplex/pgschema/internal/diff" - "github.com/pgplex/pgschema/internal/fingerprint" "github.com/pgplex/pgschema/internal/version" ) -// DirectiveType represents the different types of directives -type DirectiveType string - -const ( - DirectiveTypeWait DirectiveType = "wait" -) - -// String returns the string representation of DirectiveType -func (dt DirectiveType) String() string { - return string(dt) -} - -// Directive represents a special directive for execution (wait, assert, etc.) -type Directive struct { - Type DirectiveType `json:"type"` // DirectiveTypeWait, etc. - Message string `json:"message"` // Auto-generated descriptive message -} - -// Step represents a single execution step with SQL and optional directive -type Step struct { - SQL string `json:"sql"` - Directive *Directive `json:"directive,omitempty"` - // Metadata for summary generation - Type string `json:"type,omitempty"` // e.g., "table", "index" - Operation string `json:"operation,omitempty"` // e.g., "create", "alter", "drop" - Path string `json:"path,omitempty"` // e.g., "public.users" -} - -// ExecutionGroup represents a group of steps that should be executed together -type ExecutionGroup struct { - Steps []Step `json:"steps"` -} - -// Plan represents the migration plan between two DDL states +// Plan is the top-level migration plan file. 
It always uses the unified +// schemas map format, even for single-schema operations (one entry). type Plan struct { - // Version information - Version string `json:"version"` - PgschemaVersion string `json:"pgschema_version"` - - // When the plan was created - CreatedAt time.Time `json:"created_at"` - - // Source database fingerprint when plan was created - SourceFingerprint *fingerprint.SchemaFingerprint `json:"source_fingerprint,omitempty"` - - // Groups is the ordered list of execution groups - Groups []ExecutionGroup `json:"groups"` - - // SourceDiffs stores original diff information for summary calculation - // This field is only serialized in debug mode - SourceDiffs []diff.Diff `json:"source_diffs,omitempty"` -} - -// PlanSummary provides counts of changes by type -type PlanSummary struct { - Total int `json:"total"` - Add int `json:"add"` - Change int `json:"change"` - Destroy int `json:"destroy"` - ByType map[string]TypeSummary `json:"by_type"` -} - -// TypeSummary provides counts for a specific object type -type TypeSummary struct { - Add int `json:"add"` - Change int `json:"change"` - Destroy int `json:"destroy"` -} - -// Type represents the database object types in dependency order -type Type string - -const ( - TypeSchema Type = "schemas" - TypeType Type = "types" - TypeFunction Type = "functions" - TypeProcedure Type = "procedures" - TypeSequence Type = "sequences" - TypeTable Type = "tables" - TypeView Type = "views" - TypeMaterializedView Type = "materialized views" - TypeIndex Type = "indexes" - TypeTrigger Type = "triggers" - TypePolicy Type = "policies" - TypeColumn Type = "columns" - TypeRLS Type = "rls" - TypeDefaultPrivilege Type = "default privileges" - TypePrivilege Type = "privileges" - TypeColumnPrivilege Type = "column privileges" - TypeRevokedDefaultPrivilege Type = "revoked default privileges" -) - -// SQLFormat represents the different output formats for SQL generation -type SQLFormat string - -const ( - // SQLFormatRaw outputs just 
the raw SQL statements without additional formatting - SQLFormatRaw SQLFormat = "raw" - // Human-readable format with comments - SQLFormatHuman SQLFormat = "human" -) - -// getObjectOrder returns the dependency order for database objects -func getObjectOrder() []Type { - return []Type{ - TypeSchema, - TypeDefaultPrivilege, - TypeType, - TypeFunction, - TypeProcedure, - TypeSequence, - TypeTable, - TypeView, - TypeMaterializedView, - TypeIndex, - TypeTrigger, - TypePolicy, - TypeColumn, - TypeRLS, - TypePrivilege, - TypeColumnPrivilege, - TypeRevokedDefaultPrivilege, - } + Version string `json:"version"` + PgschemaVersion string `json:"pgschema_version"` + CreatedAt time.Time `json:"created_at"` + Schemas map[string]*SchemaPlan `json:"schemas"` } -// ========== PUBLIC METHODS ========== - -// groupDiffs groups diffs into execution groups with configurable online operations -func groupDiffs(diffs []diff.Diff) []ExecutionGroup { - if len(diffs) == 0 { - return nil - } - - var groups []ExecutionGroup - var transactionalSteps []Step - - // Track newly created tables/materialized views to avoid concurrent rewrites for their indexes. - // Single-pass: diffs are topologically sorted, so creates come before dependent index operations. - // We build these maps incrementally as we process each diff. 
- newlyCreatedTables := make(map[string]bool) - newlyCreatedMaterializedViews := make(map[string]bool) - - // Convert diffs to steps - for _, d := range diffs { - // Track creates as we encounter them (before processing dependent operations) - if d.Type == diff.DiffTypeTable && d.Operation == diff.DiffOperationCreate { - newlyCreatedTables[d.Path] = true - } - if d.Type == diff.DiffTypeMaterializedView && d.Operation == diff.DiffOperationCreate { - newlyCreatedMaterializedViews[d.Path] = true - } - // Try to generate rewrites if online operations are enabled - rewriteSteps := generateRewrite(d, newlyCreatedTables, newlyCreatedMaterializedViews) - - if len(rewriteSteps) > 0 { - // For operations with rewrites, create one step per rewrite statement - for _, rewriteStep := range rewriteSteps { - step := Step{ - SQL: rewriteStep.SQL, - Type: d.Type.String(), - Operation: d.Operation.String(), - Path: d.Path, - Directive: rewriteStep.Directive, - } - - // Check if this step needs isolation (has directive or cannot run in transaction) - needsIsolation := step.Directive != nil || !rewriteStep.CanRunInTransaction - - if needsIsolation { - // Flush any pending transactional steps - if len(transactionalSteps) > 0 { - groups = append(groups, ExecutionGroup{Steps: transactionalSteps}) - transactionalSteps = nil - } - - // Add this step in its own group - groups = append(groups, ExecutionGroup{Steps: []Step{step}}) - } else { - // Accumulate transactional steps - transactionalSteps = append(transactionalSteps, step) - } - } - } else { - // For operations without rewrites, create one step per canonical statement - for _, stmt := range d.Statements { - step := Step{ - SQL: stmt.SQL, - Type: d.Type.String(), - Operation: d.Operation.String(), - Path: d.Path, - } - // Canonical statements don't have directives - transactionalSteps = append(transactionalSteps, step) - } - } - } - - // Flush remaining transactional steps - if len(transactionalSteps) > 0 { - groups = append(groups, 
ExecutionGroup{Steps: transactionalSteps}) - } - - return groups -} - -// NewPlan creates a new plan from a list of diffs with online operations enabled -func NewPlan(diffs []diff.Diff) *Plan { - // Use environment variable for timestamp if provided, otherwise use current time +// NewPlan creates an empty Plan with version metadata and current timestamp. +func NewPlan() *Plan { createdAt := time.Now().Truncate(time.Second) if testTime := os.Getenv("PGSCHEMA_TEST_TIME"); testTime != "" { if parsedTime, err := time.Parse(time.RFC3339, testTime); err == nil { createdAt = parsedTime } } - - plan := &Plan{ + return &Plan{ Version: version.PlanFormat(), PgschemaVersion: version.App(), CreatedAt: createdAt, - Groups: groupDiffs(diffs), - SourceDiffs: diffs, + Schemas: make(map[string]*SchemaPlan), } - - return plan } -// NewPlanWithFingerprint creates a new plan from diffs and includes source fingerprint -func NewPlanWithFingerprint(diffs []diff.Diff, sourceFingerprint *fingerprint.SchemaFingerprint) *Plan { - plan := NewPlan(diffs) - plan.SourceFingerprint = sourceFingerprint - return plan +// AddSchema adds a per-schema plan to the Plan. +func (p *Plan) AddSchema(schemaName string, sp *SchemaPlan) { + p.Schemas[schemaName] = sp } -// HasAnyChanges checks if the plan contains any changes by examining the groups +// HasAnyChanges returns true if any schema plan has changes. 
func (p *Plan) HasAnyChanges() bool { - for _, g := range p.Groups { - if len(g.Steps) > 0 { + for _, sp := range p.Schemas { + if sp.HasAnyChanges() { return true } } return false } -// HumanColored returns a human-readable summary of the plan with color support -func (p *Plan) HumanColored(enableColor bool) string { - c := color.New(enableColor) - var summary strings.Builder - - // Calculate summary from diffs - summaryData := p.calculateSummaryFromSteps() - - if summaryData.Total == 0 { - summary.WriteString("No changes detected.\n") - return summary.String() +// SortedSchemaNames returns schema names in sorted order for deterministic iteration. +func (p *Plan) SortedSchemaNames() []string { + names := make([]string, 0, len(p.Schemas)) + for name := range p.Schemas { + names = append(names, name) } - - // Write header with overall summary (colored like Terraform) - summary.WriteString(c.FormatPlanHeader(summaryData.Add, summaryData.Change, summaryData.Destroy) + "\n\n") - - // Write summary by type with colors - summary.WriteString(c.Bold("Summary by type:") + "\n") - for _, objType := range getObjectOrder() { - objTypeStr := string(objType) - if typeSummary, exists := summaryData.ByType[objTypeStr]; exists && (typeSummary.Add > 0 || typeSummary.Change > 0 || typeSummary.Destroy > 0) { - line := c.FormatSummaryLine(objTypeStr, typeSummary.Add, typeSummary.Change, typeSummary.Destroy) - summary.WriteString(line + "\n") - } - } - summary.WriteString("\n") - - // Detailed changes by type with symbols - for _, objType := range getObjectOrder() { - objTypeStr := string(objType) - if typeSummary, exists := summaryData.ByType[objTypeStr]; exists && (typeSummary.Add > 0 || typeSummary.Change > 0 || typeSummary.Destroy > 0) { - // Capitalize first letter for display - displayName := strings.ToUpper(objTypeStr[:1]) + objTypeStr[1:] - p.writeDetailedChangesFromSteps(&summary, displayName, objTypeStr, c) - } - } - - // Add DDL section if there are changes - if 
summaryData.Total > 0 { - summary.WriteString(c.Bold("DDL to be executed:") + "\n") - summary.WriteString(strings.Repeat("-", 50) + "\n\n") - migrationSQL := p.ToSQL(SQLFormatHuman) - if migrationSQL != "" { - summary.WriteString(migrationSQL) - if !strings.HasSuffix(migrationSQL, "\n") { - summary.WriteString("\n") - } - } else { - summary.WriteString("-- No DDL statements generated\n") - } - } - - return summary.String() -} - -// ToSQL returns the SQL statements with formatting based on the specified format -func (p *Plan) ToSQL(format SQLFormat) string { - // Build SQL output from groups - var sqlOutput strings.Builder - - for groupIdx, group := range p.Groups { - // Add transaction group comment for human-readable format - if format == SQLFormatHuman && len(p.Groups) > 1 { - sqlOutput.WriteString(fmt.Sprintf("-- Transaction Group #%d\n", groupIdx+1)) - } - - for stepIdx, step := range group.Steps { - if step.Directive != nil { - // Handle directive statements - sqlOutput.WriteString(fmt.Sprintf("-- pgschema:%s\n", step.Directive.Type.String())) - sqlOutput.WriteString(step.SQL) - sqlOutput.WriteString("\n") - } else { - // Handle regular SQL statements - sqlOutput.WriteString(step.SQL) - sqlOutput.WriteString("\n") - } - - // Add blank line between steps except for the last one in the last group - if stepIdx < len(group.Steps)-1 || groupIdx < len(p.Groups)-1 { - sqlOutput.WriteString("\n") - } - } - } - - return sqlOutput.String() + sort.Strings(names) + return names } -// ToJSON returns the plan as structured JSON with only changed statements +// ToJSON returns the Plan as structured JSON. func (p *Plan) ToJSON() (string, error) { return p.ToJSONWithDebug(false) } -// ToJSONWithDebug returns the plan as structured JSON with optional source field inclusion +// ToJSONWithDebug returns the Plan as structured JSON with optional source_diffs. 
func (p *Plan) ToJSONWithDebug(includeSource bool) (string, error) { + if !includeSource { + for _, sp := range p.Schemas { + sp.SourceDiffs = nil + } + } + var buf strings.Builder encoder := json.NewEncoder(&buf) encoder.SetIndent("", " ") encoder.SetEscapeHTML(false) - // Create a copy of the plan to control SourceDiffs serialization - planCopy := *p - if !includeSource { - // Clear SourceDiffs in normal mode to keep JSON clean - planCopy.SourceDiffs = nil - } - - if err := encoder.Encode(&planCopy); err != nil { + if err := encoder.Encode(p); err != nil { return "", fmt.Errorf("failed to marshal plan to JSON: %w", err) } - // Remove the trailing newline that encoder.Encode adds result := buf.String() if len(result) > 0 && result[len(result)-1] == '\n' { result = result[:len(result)-1] } - return result, nil } -// FromJSON creates a Plan from JSON data -func FromJSON(jsonData []byte) (*Plan, error) { - var plan Plan - if err := json.Unmarshal(jsonData, &plan); err != nil { - return nil, fmt.Errorf("failed to unmarshal plan JSON: %w", err) - } - return &plan, nil -} - -// ========== PRIVATE METHODS ========== - -// calculateSummaryFromSteps calculates summary statistics from the plan diffs -func (p *Plan) calculateSummaryFromSteps() PlanSummary { - summary := PlanSummary{ - ByType: make(map[string]TypeSummary), - } - - // For tables, we need to group by table path to avoid counting duplicates - // For other object types, count each operation individually - - // Track table operations by table path - tableOperations := make(map[string]string) // table_path -> operation - - // Track tables that have sub-resource changes (these should be counted as modified) - tablesWithSubResources := make(map[string]bool) // table_path -> true - - // Track view operations by view path (regular views only) - viewOperations := make(map[string]string) // view_path -> operation - - // Track views that have sub-resource changes (these should be counted as modified) - 
viewsWithSubResources := make(map[string]bool) // view_path -> true - - // Track materialized view operations by path - materializedViewOperations := make(map[string]string) // materialized_view_path -> operation - - // Track materialized views that have sub-resource changes - materializedViewsWithSubResources := make(map[string]bool) // materialized_view_path -> true - - // Track materialized views that have "recreate" operations (DROP that will be followed by CREATE) - // These should be counted as modifications, not adds - materializedViewsRecreating := make(map[string]bool) // materialized_view_path -> true - - // Track non-table/non-view/non-materialized-view operations - nonTableOperations := make(map[string][]string) // objType -> []operations - - // Use source diffs for summary calculation if available, - // otherwise use steps metadata (for plans loaded from JSON) - var dataToProcess []struct { - Type string - Operation string - Path string - } - - if len(p.SourceDiffs) > 0 { - // Use SourceDiffs (for freshly generated plans) - for _, diff := range p.SourceDiffs { - dataToProcess = append(dataToProcess, struct { - Type string - Operation string - Path string - }{ - Type: diff.Type.String(), - Operation: diff.Operation.String(), - Path: diff.Path, - }) - } - } else { - // Use Steps metadata (for plans loaded from JSON) - for _, group := range p.Groups { - for _, step := range group.Steps { - if step.Type != "" && step.Operation != "" && step.Path != "" { - dataToProcess = append(dataToProcess, struct { - Type string - Operation string - Path string - }{ - Type: step.Type, - Operation: step.Operation, - Path: step.Path, - }) - } - } - } - } - - // Single-pass: process all steps, determining parent type from step.Type prefix - // Sub-resource types encode their parent: "table.index", "view.index", "materialized_view.index" - for _, step := range dataToProcess { - // Normalize object type to match the expected format (add 's' for plural) - stepObjTypeStr := 
step.Type - if !strings.HasSuffix(stepObjTypeStr, "s") { - stepObjTypeStr += "s" - } - - if stepObjTypeStr == "tables" { - // For tables, track unique table paths and their primary operation - tableOperations[step.Path] = step.Operation - } else if stepObjTypeStr == "views" { - // For views, track unique view paths and their primary operation - viewOperations[step.Path] = step.Operation - } else if stepObjTypeStr == "materialized_views" { - // For materialized views, track unique paths and their primary operation - // If this is a "recreate" operation, mark it so subsequent "create" is treated as modify - if step.Operation == "recreate" { - materializedViewsRecreating[step.Path] = true - } - materializedViewOperations[step.Path] = step.Operation - } else if isSubResource(step.Type) { - // For sub-resources, determine parent type from step.Type prefix - // Types are: "table.index", "table.column", "view.comment", "materialized_view.index", etc. - parentPath := extractTablePathFromSubResource(step.Path, step.Type) - if parentPath != "" { - if strings.HasPrefix(step.Type, "materialized_view.") { - // Parent is a materialized view - materializedViewsWithSubResources[parentPath] = true - } else if strings.HasPrefix(step.Type, "view.") { - // Parent is a view - viewsWithSubResources[parentPath] = true - } else { - // Parent is a table (table.index, table.column, table.constraint, etc.) 
- tablesWithSubResources[parentPath] = true - } - } - } else { - // For non-table/non-view objects, track each operation - nonTableOperations[stepObjTypeStr] = append(nonTableOperations[stepObjTypeStr], step.Operation) - } - } - - // Count table operations (one per unique table) - // Include both direct table operations and tables with sub-resource changes - allAffectedTables := make(map[string]string) - - // First, add direct table operations - for tablePath, operation := range tableOperations { - allAffectedTables[tablePath] = operation - } - - // Then, add tables that only have sub-resource changes (count as "alter") - for tablePath := range tablesWithSubResources { - if _, alreadyCounted := allAffectedTables[tablePath]; !alreadyCounted { - allAffectedTables[tablePath] = "alter" // Sub-resource changes = table modification - } - } - - if len(allAffectedTables) > 0 { - stats := summary.ByType["tables"] - for _, operation := range allAffectedTables { - switch operation { - case "create": - stats.Add++ - summary.Add++ - case "alter": - stats.Change++ - summary.Change++ - case "drop": - stats.Destroy++ - summary.Destroy++ - } - } - summary.ByType["tables"] = stats - } - - // Count view operations (one per unique view) - // Include both direct view operations and views with sub-resource changes - allAffectedViews := make(map[string]string) - - // First, add direct view operations - for viewPath, operation := range viewOperations { - allAffectedViews[viewPath] = operation - } - - // Then, add views that only have sub-resource changes (count as "alter") - for viewPath := range viewsWithSubResources { - if _, alreadyCounted := allAffectedViews[viewPath]; !alreadyCounted { - allAffectedViews[viewPath] = "alter" // Sub-resource changes = view modification - } - } - - if len(allAffectedViews) > 0 { - stats := summary.ByType["views"] - for _, operation := range allAffectedViews { - switch operation { - case "create": - stats.Add++ - summary.Add++ - case "alter": - 
stats.Change++ - summary.Change++ - case "drop": - stats.Destroy++ - summary.Destroy++ - } - } - summary.ByType["views"] = stats - } - - // Count materialized view operations (one per unique materialized view) - // Include both direct materialized view operations and materialized views with sub-resource changes - allAffectedMaterializedViews := make(map[string]string) - - // First, add direct materialized view operations - for mvPath, operation := range materializedViewOperations { - allAffectedMaterializedViews[mvPath] = operation - } - - // Then, add materialized views that only have sub-resource changes (count as "alter") - for mvPath := range materializedViewsWithSubResources { - if _, alreadyCounted := allAffectedMaterializedViews[mvPath]; !alreadyCounted { - allAffectedMaterializedViews[mvPath] = "alter" // Sub-resource changes = materialized view modification - } - } - - if len(allAffectedMaterializedViews) > 0 { - stats := summary.ByType["materialized views"] - for mvPath, operation := range allAffectedMaterializedViews { - // If this path had a "recreate" operation, treat any subsequent "create" as a modify - // because the object existed before and is being recreated due to dependencies - if materializedViewsRecreating[mvPath] && operation == "create" { - operation = "alter" - } - switch operation { - case "create": - stats.Add++ - summary.Add++ - case "alter", "recreate": - // Both "alter" and "recreate" count as modifications - stats.Change++ - summary.Change++ - case "drop": - stats.Destroy++ - summary.Destroy++ - } - } - summary.ByType["materialized views"] = stats - } - - // Count non-table/non-view/non-materialized-view operations (each operation counted individually) - for objType, operations := range nonTableOperations { - // Normalize object type to match the Type constants (replace underscores with spaces) - normalizedObjType := strings.ReplaceAll(objType, "_", " ") - stats := summary.ByType[normalizedObjType] - for _, operation := range 
operations { - switch operation { - case "create": - stats.Add++ - summary.Add++ - case "alter": - stats.Change++ - summary.Change++ - case "drop": - stats.Destroy++ - summary.Destroy++ - } - } - summary.ByType[normalizedObjType] = stats - } - - summary.Total = summary.Add + summary.Change + summary.Destroy - return summary -} - -// writeDetailedChangesFromSteps writes detailed changes from plan diffs -func (p *Plan) writeDetailedChangesFromSteps(summary *strings.Builder, displayName, objType string, c *color.Color) { - fmt.Fprintf(summary, "%s:\n", c.Bold(displayName)) - - if objType == "tables" { - // For tables, group all changes by table path to avoid duplicates - p.writeTableChanges(summary, c) - } else if objType == "views" { - // For views, group all changes by view path to avoid duplicates - p.writeViewChanges(summary, c) - } else if objType == "materialized views" { - // For materialized views, group all changes by path to avoid duplicates - p.writeMaterializedViewChanges(summary, c) - } else { - // For non-table/non-view objects, use the original logic - p.writeNonTableChanges(summary, objType, c) - } - - summary.WriteString("\n") -} - -// writeTableChanges handles table-specific output with proper grouping -func (p *Plan) writeTableChanges(summary *strings.Builder, c *color.Color) { - // Group all changes by table path and track operations - tableOperations := make(map[string]string) // table_path -> operation - subResources := make(map[string][]struct { - operation string - path string - subType string - source diff.DiffSource - }) - - // Track all seen operations globally to avoid duplicates across groups - seenOperations := make(map[string]bool) // "path.operation.subType" -> true - - // Use source diffs for summary calculation - for _, step := range p.SourceDiffs { - // Normalize object type - stepObjTypeStr := step.Type.String() - if !strings.HasSuffix(stepObjTypeStr, "s") { - stepObjTypeStr += "s" - } - - if stepObjTypeStr == "tables" { - // This 
is a table-level change, record the operation - tableOperations[step.Path] = step.Operation.String() - } else if isSubResource(step.Type.String()) && strings.HasPrefix(step.Type.String(), "table.") { - // This is a table sub-resource change (skip view sub-resources) - tablePath := extractTablePathFromSubResource(step.Path, step.Type.String()) - if tablePath != "" { - // Deduplicate all operations based on (type, operation, path) triplet - operationKey := step.Path + "." + step.Operation.String() + "." + step.Type.String() - if !seenOperations[operationKey] { - seenOperations[operationKey] = true - subResources[tablePath] = append(subResources[tablePath], struct { - operation string - path string - subType string - source diff.DiffSource - }{ - operation: step.Operation.String(), - path: step.Path, - subType: step.Type.String(), - source: step.Source, - }) - } - } - } - } - - // Get all unique table paths (from both direct table changes and sub-resources) - allTables := make(map[string]bool) - for tablePath := range tableOperations { - allTables[tablePath] = true - } - for tablePath := range subResources { - allTables[tablePath] = true +// HumanColored returns a combined human-readable summary for all schemas. +// For single-schema plans it omits the schema header. 
+func (p *Plan) HumanColored(enableColor bool) string { + names := p.SortedSchemaNames() + if len(names) == 1 { + return p.Schemas[names[0]].HumanColored(enableColor) } - - // Sort table paths for consistent output - var sortedTables []string - for tablePath := range allTables { - sortedTables = append(sortedTables, tablePath) - } - sort.Strings(sortedTables) - - // Display each table once with all its changes - for _, tablePath := range sortedTables { - var symbol string - if operation, hasDirectChange := tableOperations[tablePath]; hasDirectChange { - // Table has direct changes, use the operation to determine symbol - switch operation { - case "create": - symbol = c.PlanSymbol("add") - case "alter": - symbol = c.PlanSymbol("change") - case "drop": - symbol = c.PlanSymbol("destroy") - default: - symbol = c.PlanSymbol("change") - } - } else { - // Table has no direct changes, only sub-resource changes - // Sub-resource changes to existing tables should always be considered modifications - symbol = c.PlanSymbol("change") - } - - fmt.Fprintf(summary, " %s %s\n", symbol, getLastPathComponent(tablePath)) - - // Show sub-resources for this table - if subResourceList, exists := subResources[tablePath]; exists { - // Sort sub-resources by type then path - sort.Slice(subResourceList, func(i, j int) bool { - if subResourceList[i].subType != subResourceList[j].subType { - return subResourceList[i].subType < subResourceList[j].subType - } - return subResourceList[i].path < subResourceList[j].path - }) - - for _, subRes := range subResourceList { - // Extract object name from source - objectName := getObjectNameFromSource(subRes.source) - - // Handle online index replacement display - if subRes.subType == diff.DiffTypeTableIndex.String() && subRes.operation == diff.DiffOperationAlter.String() { - subSymbol := c.PlanSymbol("change") - displaySubType := strings.TrimPrefix(subRes.subType, "table.") - fmt.Fprintf(summary, " %s %s (%s - concurrent rebuild)\n", subSymbol, 
objectName, displaySubType) - continue - } - - var subSymbol string - switch subRes.operation { - case "create": - subSymbol = c.PlanSymbol("add") - case "alter": - subSymbol = c.PlanSymbol("change") - case "drop": - subSymbol = c.PlanSymbol("destroy") - default: - subSymbol = c.PlanSymbol("change") - } - // Clean up sub-resource type for display (remove "table." prefix) - displaySubType := strings.TrimPrefix(subRes.subType, "table.") - fmt.Fprintf(summary, " %s %s (%s)\n", subSymbol, objectName, displaySubType) - } - } + var out strings.Builder + for _, name := range names { + fmt.Fprintf(&out, "\n── Schema: %s ──────────────────────\n", name) + out.WriteString(p.Schemas[name].HumanColored(enableColor)) } + return out.String() } -// writeViewChanges handles view-specific output with proper grouping -func (p *Plan) writeViewChanges(summary *strings.Builder, c *color.Color) { - // Group all changes by view path and track operations - viewOperations := make(map[string]string) // view_path -> operation - subResources := make(map[string][]struct { - operation string - path string - subType string - }) - - // Track all seen operations globally to avoid duplicates across groups - seenOperations := make(map[string]bool) // "path.operation.subType" -> true - - // Use source diffs for summary calculation - for _, step := range p.SourceDiffs { - // Normalize object type - stepObjTypeStr := step.Type.String() - if !strings.HasSuffix(stepObjTypeStr, "s") { - stepObjTypeStr += "s" - } - - if stepObjTypeStr == "views" { - // This is a view-level change, record the operation - viewOperations[step.Path] = step.Operation.String() - } else if isSubResource(step.Type.String()) && strings.HasPrefix(step.Type.String(), "view.") { - // This is a view sub-resource change - viewPath := extractTablePathFromSubResource(step.Path, step.Type.String()) - if viewPath != "" { - // Deduplicate all operations based on (type, operation, path) triplet - operationKey := step.Path + "." 
+ step.Operation.String() + "." + step.Type.String() - if !seenOperations[operationKey] { - seenOperations[operationKey] = true - subResources[viewPath] = append(subResources[viewPath], struct { - operation string - path string - subType string - }{ - operation: step.Operation.String(), - path: step.Path, - subType: step.Type.String(), - }) - } - } - } - } - - // Get all unique view paths (from both direct view changes and sub-resources) - allViews := make(map[string]bool) - for viewPath := range viewOperations { - allViews[viewPath] = true - } - for viewPath := range subResources { - allViews[viewPath] = true - } - - // Sort view paths for consistent output - var sortedViews []string - for viewPath := range allViews { - sortedViews = append(sortedViews, viewPath) +// ToSQL returns combined SQL for all schemas. +// For single-schema plans it returns the SQL directly without a schema header. +func (p *Plan) ToSQL(format SQLFormat) string { + names := p.SortedSchemaNames() + if len(names) == 1 { + return p.Schemas[names[0]].ToSQL(format) } - sort.Strings(sortedViews) - - // Display each view once with all its changes - for _, viewPath := range sortedViews { - var symbol string - if operation, hasDirectChange := viewOperations[viewPath]; hasDirectChange { - // View has direct changes, use the operation to determine symbol - switch operation { - case "create": - symbol = c.PlanSymbol("add") - case "alter": - symbol = c.PlanSymbol("change") - case "drop": - symbol = c.PlanSymbol("destroy") - default: - symbol = c.PlanSymbol("change") - } - } else { - // View has no direct changes, only sub-resource changes - // Sub-resource changes to existing views should always be considered modifications - symbol = c.PlanSymbol("change") - } - - fmt.Fprintf(summary, " %s %s\n", symbol, getLastPathComponent(viewPath)) - - // Show sub-resources for this view - if subResourceList, exists := subResources[viewPath]; exists { - // Sort sub-resources by type then path - 
sort.Slice(subResourceList, func(i, j int) bool { - if subResourceList[i].subType != subResourceList[j].subType { - return subResourceList[i].subType < subResourceList[j].subType - } - return subResourceList[i].path < subResourceList[j].path - }) - - for _, subRes := range subResourceList { - var subSymbol string - switch subRes.operation { - case "create": - subSymbol = c.PlanSymbol("add") - case "alter": - subSymbol = c.PlanSymbol("change") - case "drop": - subSymbol = c.PlanSymbol("destroy") - default: - subSymbol = c.PlanSymbol("change") - } - // Clean up sub-resource type for display (remove "view." prefix) - displaySubType := strings.TrimPrefix(subRes.subType, "view.") - fmt.Fprintf(summary, " %s %s (%s)\n", subSymbol, getLastPathComponent(subRes.path), displaySubType) + var out strings.Builder + for _, name := range names { + sql := p.Schemas[name].ToSQL(format) + if sql != "" { + fmt.Fprintf(&out, "-- Schema: %s\n", name) + out.WriteString(sql) + if !strings.HasSuffix(sql, "\n") { + out.WriteString("\n") } + out.WriteString("\n") } } + return out.String() } -// writeMaterializedViewChanges handles materialized view-specific output with proper grouping -func (p *Plan) writeMaterializedViewChanges(summary *strings.Builder, c *color.Color) { - // Group all changes by materialized view path and track operations - mvOperations := make(map[string]string) // mv_path -> operation - subResources := make(map[string][]struct { - operation string - path string - subType string - }) - - // Track all seen operations globally to avoid duplicates across groups - seenOperations := make(map[string]bool) // "path.operation.subType" -> true - - // Track materialized views that have "recreate" operations - mvsRecreating := make(map[string]bool) - - // Use source diffs for summary calculation - for _, step := range p.SourceDiffs { - // Normalize object type - stepObjTypeStr := step.Type.String() - if !strings.HasSuffix(stepObjTypeStr, "s") { - stepObjTypeStr += "s" - } - - if 
stepObjTypeStr == "materialized_views" { - // Track recreate operations so subsequent create is treated as modify - if step.Operation.String() == "recreate" { - mvsRecreating[step.Path] = true - } - // This is a materialized view-level change, record the operation - mvOperations[step.Path] = step.Operation.String() - } else if isSubResource(step.Type.String()) && strings.HasPrefix(step.Type.String(), "materialized_view.") { - // This is a materialized view sub-resource change - mvPath := extractTablePathFromSubResource(step.Path, step.Type.String()) - if mvPath != "" { - // Deduplicate all operations based on (type, operation, path) triplet - operationKey := step.Path + "." + step.Operation.String() + "." + step.Type.String() - if !seenOperations[operationKey] { - seenOperations[operationKey] = true - subResources[mvPath] = append(subResources[mvPath], struct { - operation string - path string - subType string - }{ - operation: step.Operation.String(), - path: step.Path, - subType: step.Type.String(), - }) - } - } - } - } - - // Get all unique materialized view paths (from both direct changes and sub-resources) - allMVs := make(map[string]bool) - for mvPath := range mvOperations { - allMVs[mvPath] = true - } - for mvPath := range subResources { - allMVs[mvPath] = true - } - - // Sort materialized view paths for consistent output - var sortedMVs []string - for mvPath := range allMVs { - sortedMVs = append(sortedMVs, mvPath) - } - sort.Strings(sortedMVs) - - // Display each materialized view once with all its changes - for _, mvPath := range sortedMVs { - var symbol string - if operation, hasDirectChange := mvOperations[mvPath]; hasDirectChange { - // If this path had a "recreate" and now shows "create", treat as modify - if mvsRecreating[mvPath] && operation == "create" { - operation = "alter" - } - // Materialized view has direct changes, use the operation to determine symbol - switch operation { - case "create": - symbol = c.PlanSymbol("add") - case "alter", 
"recreate": - // Both "alter" and "recreate" are modifications - symbol = c.PlanSymbol("change") - case "drop": - symbol = c.PlanSymbol("destroy") - default: - symbol = c.PlanSymbol("change") - } - } else { - // Materialized view has no direct changes, only sub-resource changes - // Sub-resource changes to existing materialized views should always be considered modifications - symbol = c.PlanSymbol("change") - } - - fmt.Fprintf(summary, " %s %s\n", symbol, getLastPathComponent(mvPath)) - - // Show sub-resources for this materialized view - if subResourceList, exists := subResources[mvPath]; exists { - // Sort sub-resources by type then path - sort.Slice(subResourceList, func(i, j int) bool { - if subResourceList[i].subType != subResourceList[j].subType { - return subResourceList[i].subType < subResourceList[j].subType - } - return subResourceList[i].path < subResourceList[j].path - }) - - for _, subRes := range subResourceList { - // Handle online index replacement display - if subRes.subType == diff.DiffTypeMaterializedViewIndex.String() && subRes.operation == diff.DiffOperationAlter.String() { - subSymbol := c.PlanSymbol("change") - displaySubType := strings.TrimPrefix(subRes.subType, "materialized_view.") - fmt.Fprintf(summary, " %s %s (%s - concurrent rebuild)\n", subSymbol, getLastPathComponent(subRes.path), displaySubType) - continue - } - - var subSymbol string - switch subRes.operation { - case "create": - subSymbol = c.PlanSymbol("add") - case "alter": - subSymbol = c.PlanSymbol("change") - case "drop": - subSymbol = c.PlanSymbol("destroy") - default: - subSymbol = c.PlanSymbol("change") - } - // Clean up sub-resource type for display (remove "materialized_view." prefix) - displaySubType := strings.TrimPrefix(subRes.subType, "materialized_view.") - fmt.Fprintf(summary, " %s %s (%s)\n", subSymbol, getLastPathComponent(subRes.path), displaySubType) - } - } +// FromJSON deserializes a Plan from JSON data. 
+func FromJSON(data []byte) (*Plan, error) { + var raw struct { + Version string `json:"version"` + PgschemaVersion string `json:"pgschema_version"` + CreatedAt time.Time `json:"created_at"` + Schemas map[string]json.RawMessage `json:"schemas"` } -} - -// writeNonTableChanges handles non-table objects with the original logic -func (p *Plan) writeNonTableChanges(summary *strings.Builder, objType string, c *color.Color) { - // Collect changes for this object type - var changes []struct { - operation string - path string + if err := json.Unmarshal(data, &raw); err != nil { + return nil, fmt.Errorf("failed to unmarshal plan JSON: %w", err) } - // Use source diffs for summary calculation - for _, step := range p.SourceDiffs { - // Normalize object type - stepObjTypeStr := step.Type.String() - if !strings.HasSuffix(stepObjTypeStr, "s") { - stepObjTypeStr += "s" - } - // Normalize underscores to spaces to match Type constants - stepObjTypeStr = strings.ReplaceAll(stepObjTypeStr, "_", " ") - - if stepObjTypeStr == objType { - changes = append(changes, struct { - operation string - path string - }{ - operation: step.Operation.String(), - path: step.Path, - }) - } + p := &Plan{ + Version: raw.Version, + PgschemaVersion: raw.PgschemaVersion, + CreatedAt: raw.CreatedAt, + Schemas: make(map[string]*SchemaPlan, len(raw.Schemas)), } - // Sort changes by path for consistent output - sort.Slice(changes, func(i, j int) bool { - return changes[i].path < changes[j].path - }) - - // Write changes with appropriate symbols - for _, change := range changes { - var symbol string - switch change.operation { - case "create": - symbol = c.PlanSymbol("add") - case "alter": - symbol = c.PlanSymbol("change") - case "drop": - symbol = c.PlanSymbol("destroy") - default: - symbol = c.PlanSymbol("change") + for schemaName, schemaData := range raw.Schemas { + var sp SchemaPlan + if err := json.Unmarshal(schemaData, &sp); err != nil { + return nil, fmt.Errorf("failed to unmarshal plan for schema %s: 
%w", schemaName, err) } - - fmt.Fprintf(summary, " %s %s\n", symbol, getLastPathComponent(change.path)) + p.Schemas[schemaName] = &sp } -} - -// isSubResource checks if the given type is a sub-resource of tables, views, or materialized views -func isSubResource(objType string) bool { - return (strings.HasPrefix(objType, "table.") && objType != "table") || - (strings.HasPrefix(objType, "view.") && objType != "view") || - (strings.HasPrefix(objType, "materialized_view.") && objType != "materialized_view") -} -// getLastPathComponent extracts the last component from a dot-separated path -func getLastPathComponent(path string) string { - parts := strings.Split(path, ".") - if len(parts) > 0 { - return parts[len(parts)-1] - } - return path + return p, nil } -// getObjectNameFromSource extracts the object name from the source object. -// This preserves object names that contain dots (e.g., "public.idx_users") -func getObjectNameFromSource(source diff.DiffSource) string { - if source == nil { - return "" - } - return source.GetObjectName() -} - -// extractTablePathFromSubResource extracts the parent table, view, or materialized view path from a sub-resource path -func extractTablePathFromSubResource(subResourcePath, subResourceType string) string { - if strings.HasPrefix(subResourceType, "table.") { - // For sub-resources, the path format depends on the sub-resource type: - // - "schema.table.resource_name" -> "schema.table" (indexes, policies, columns) - // - "schema.table" -> "schema.table" (RLS, table comments) - parts := strings.Split(subResourcePath, ".") - - // Special handling for RLS and table-level changes - if subResourceType == "table.rls" || subResourceType == "table.comment" { - // For RLS and table comments, the path is already the table path - return subResourcePath - } - - if len(parts) >= 2 { - // For other sub-resources, return the first two parts as table path - if len(parts) >= 3 { - return parts[0] + "." 
+ parts[1] - } - // If only 2 parts, it's likely "schema.table" already - return subResourcePath - } - } else if strings.HasPrefix(subResourceType, "view.") { - // For view sub-resources, the path format is similar: - // - "schema.view.resource_name" -> "schema.view" (indexes, comments) - // - "schema.view" -> "schema.view" (view-level comments) - parts := strings.Split(subResourcePath, ".") - - // Special handling for view-level changes - if subResourceType == "view.comment" { - // For view comments, the path is already the view path - return subResourcePath - } - - if len(parts) >= 2 { - // For other sub-resources, return the first two parts as view path - if len(parts) >= 3 { - return parts[0] + "." + parts[1] - } - // If only 2 parts, it's likely "schema.view" already - return subResourcePath - } - } else if strings.HasPrefix(subResourceType, "materialized_view.") { - // For materialized view sub-resources, the path format is similar: - // - "schema.mv.resource_name" -> "schema.mv" (indexes, comments) - // - "schema.mv" -> "schema.mv" (materialized view-level comments) - parts := strings.Split(subResourcePath, ".") - - // Special handling for materialized view-level changes - if subResourceType == "materialized_view.comment" { - // For materialized view comments, the path is already the materialized view path - return subResourcePath - } - - if len(parts) >= 2 { - // For other sub-resources, return the first two parts as materialized view path - if len(parts) >= 3 { - return parts[0] + "." + parts[1] - } - // If only 2 parts, it's likely "schema.materialized_view" already - return subResourcePath +// SummaryString returns a one-line summary of schemas and changes. 
+func (p *Plan) SummaryString() string { + withChanges := 0 + for _, sp := range p.Schemas { + if sp.HasAnyChanges() { + withChanges++ } } - return "" + return fmt.Sprintf("Summary: %d schemas inspected, %d with changes", len(p.Schemas), withChanges) } diff --git a/internal/plan/plan_test.go b/internal/plan/plan_test.go index 76803ea5..b303af9c 100644 --- a/internal/plan/plan_test.go +++ b/internal/plan/plan_test.go @@ -2,322 +2,308 @@ package plan import ( "encoding/json" - "fmt" - "os" - "path/filepath" - "sort" "strings" "testing" "time" - "github.com/google/go-cmp/cmp" "github.com/pgplex/pgschema/internal/diff" - "github.com/pgplex/pgschema/internal/postgres" - "github.com/pgplex/pgschema/ir" - "github.com/pgplex/pgschema/testutil" + "github.com/pgplex/pgschema/internal/fingerprint" ) -// sharedTestPostgres is the shared embedded postgres instance for all tests in this package -var sharedTestPostgres *postgres.EmbeddedPostgres +func TestPlan_AddSchemaAndHasAnyChanges(t *testing.T) { + p := NewPlan() -// TestMain sets up shared resources for all tests in this package -func TestMain(m *testing.M) { - // Create shared embedded postgres for all tests to dramatically improve performance - sharedTestPostgres = testutil.SetupPostgres(nil) - defer sharedTestPostgres.Stop() + // Empty plan has no changes + if p.HasAnyChanges() { + t.Error("empty plan should have no changes") + } - m.Run() -} + // Add a schema with no changes + emptySP := NewSchemaPlan(nil) + p.AddSchema("tenant_1", emptySP) + if p.HasAnyChanges() { + t.Error("plan with empty schema should have no changes") + } -// discoverTestDataVersions discovers available test data versions in the testdata directory -func discoverTestDataVersions(testdataDir string) ([]string, error) { - entries, err := os.ReadDir(testdataDir) - if err != nil { - return nil, fmt.Errorf("failed to read testdata directory: %w", err) - } - var versions []string - for _, entry := range entries { - if entry.IsDir() { - // Check if the 
directory contains a plan.json file - planFile := filepath.Join(testdataDir, entry.Name(), "plan.json") - if _, err := os.Stat(planFile); err == nil { - versions = append(versions, entry.Name()) - } - } + // Add a schema with changes + diffs := []diff.Diff{ + { + Type: diff.DiffTypeTable, + Operation: diff.DiffOperationCreate, + Path: "public.users", + Statements: []diff.SQLStatement{ + {SQL: "CREATE TABLE users (id integer);"}, + }, + }, + } + spWithChanges := NewSchemaPlan(diffs) + p.AddSchema("tenant_2", spWithChanges) + if !p.HasAnyChanges() { + t.Error("plan with changes should report has changes") } - // Sort versions to ensure deterministic test execution order - sort.Strings(versions) - return versions, nil } -// parseSQL is a helper function to convert SQL string to IR for tests -// Uses embedded PostgreSQL to ensure tests use the same code path as production -func parseSQL(t *testing.T, sql string) *ir.IR { - t.Helper() - return testutil.ParseSQLToIR(t, sharedTestPostgres, sql, "public") -} +func TestPlan_SortedSchemaNames(t *testing.T) { + p := NewPlan() + p.AddSchema("tenant_c", NewSchemaPlan(nil)) + p.AddSchema("tenant_a", NewSchemaPlan(nil)) + p.AddSchema("tenant_b", NewSchemaPlan(nil)) -func TestPlanSummary(t *testing.T) { - oldSQL := `CREATE TABLE users ( - id integer NOT NULL - );` + names := p.SortedSchemaNames() + expected := []string{"tenant_a", "tenant_b", "tenant_c"} + if len(names) != len(expected) { + t.Fatalf("expected %d names, got %d", len(expected), len(names)) + } + for i, name := range names { + if name != expected[i] { + t.Errorf("names[%d] = %q, want %q", i, name, expected[i]) + } + } +} - newSQL := `CREATE TABLE users ( - id integer NOT NULL, - name text NOT NULL - ); - CREATE TABLE posts ( - id integer NOT NULL, - title text NOT NULL - );` +func TestPlan_ToJSON_RoundTrip(t *testing.T) { + t.Setenv("PGSCHEMA_TEST_TIME", "2025-01-01T00:00:00Z") - oldIR := parseSQL(t, oldSQL) - newIR := parseSQL(t, newSQL) - diffs := 
diff.GenerateMigration(oldIR, newIR, "public") + p := NewPlan() - plan := NewPlan(diffs) - summary := plan.HumanColored(false) + // Add schema with fingerprint and changes + fp := &fingerprint.SchemaFingerprint{Hash: "abc123"} + diffs := []diff.Diff{ + { + Type: diff.DiffTypeTable, + Operation: diff.DiffOperationCreate, + Path: "public.users", + Statements: []diff.SQLStatement{ + {SQL: "CREATE TABLE users (id integer);"}, + }, + }, + } + sp := NewSchemaPlanWithFingerprint(diffs, fp) + p.AddSchema("tenant_1", sp) - // Debug: print the summary to see what it looks like - t.Logf("Summary output:\n%s", summary) + // Add empty schema + p.AddSchema("tenant_2", NewSchemaPlan(nil)) - if !strings.Contains(summary, "1 to add") { - t.Error("Summary should mention 1 resource to add") + // Serialize + jsonStr, err := p.ToJSON() + if err != nil { + t.Fatalf("ToJSON failed: %v", err) } - if !strings.Contains(summary, "1 to modify") { - t.Error("Summary should mention 1 resource to modify") + // Verify JSON structure + var raw map[string]json.RawMessage + if err := json.Unmarshal([]byte(jsonStr), &raw); err != nil { + t.Fatalf("failed to parse JSON: %v", err) } - // The colored output doesn't show "0 to drop" when there are no drops - if strings.Contains(summary, "to drop") && !strings.Contains(summary, "1 to add, 1 to modify") { - t.Error("Summary should not mention drops when there are none") + // Must have "schemas" key + if _, ok := raw["schemas"]; !ok { + t.Error("JSON should have 'schemas' key") + } + // Must have version fields at top level + if _, ok := raw["version"]; !ok { + t.Error("JSON should have 'version' key") + } + if _, ok := raw["pgschema_version"]; !ok { + t.Error("JSON should have 'pgschema_version' key") + } + // Must NOT have "groups" at top level + if _, ok := raw["groups"]; ok { + t.Error("JSON should NOT have 'groups' key at top level") } -} - -func TestPlanJSONRoundTrip(t *testing.T) { - testDataDir := "../../testdata/diff/migrate" - // Discover 
available test data versions dynamically - versions, err := discoverTestDataVersions(testDataDir) + // Deserialize + loaded, err := FromJSON([]byte(jsonStr)) if err != nil { - t.Fatalf("Failed to discover test data versions: %v", err) + t.Fatalf("FromJSON failed: %v", err) } - if len(versions) == 0 { - t.Skip("No test data versions found") + if len(loaded.Schemas) != 2 { + t.Fatalf("expected 2 schemas, got %d", len(loaded.Schemas)) } - for _, version := range versions { - t.Run(fmt.Sprintf("version_%s", version), func(t *testing.T) { - planFilePath := filepath.Join(testDataDir, version, "plan.json") + // Verify tenant_1 has changes + sp1, ok := loaded.Schemas["tenant_1"] + if !ok { + t.Fatal("missing tenant_1 schema") + } + if !sp1.HasAnyChanges() { + t.Error("tenant_1 should have changes") + } + if sp1.SourceFingerprint == nil || sp1.SourceFingerprint.Hash != "abc123" { + t.Error("tenant_1 fingerprint not preserved") + } - // Read the original plan.json file - originalJSON, err := os.ReadFile(planFilePath) - if err != nil { - t.Fatalf("Failed to read %s: %v", planFilePath, err) - } + // Verify tenant_2 has no changes + sp2, ok := loaded.Schemas["tenant_2"] + if !ok { + t.Fatal("missing tenant_2 schema") + } + if sp2.HasAnyChanges() { + t.Error("tenant_2 should have no changes") + } - // First FromJSON: Load plan from JSON - plan1, err := FromJSON(originalJSON) - if err != nil { - t.Fatalf("Failed to parse JSON from %s: %v", planFilePath, err) - } + // Verify version fields are populated from parent + if loaded.Version != p.Version { + t.Errorf("version = %q, want %q", loaded.Version, p.Version) + } + if loaded.PgschemaVersion != p.PgschemaVersion { + t.Errorf("pgschema_version = %q, want %q", loaded.PgschemaVersion, p.PgschemaVersion) + } +} - // Check if original JSON has source fields to determine debug mode - hasSourceFields := strings.Contains(string(originalJSON), `"source":`) +func TestPlan_SchemaEntry_ExcludesTopLevelFields(t *testing.T) { + 
t.Setenv("PGSCHEMA_TEST_TIME", "2025-01-01T00:00:00Z") - // First ToJSON: Convert plan back to JSON with same debug mode as original - json1, err := plan1.ToJSONWithDebug(hasSourceFields) - if err != nil { - t.Fatalf("Failed to convert plan to JSON (first): %v", err) - } + p := NewPlan() + p.AddSchema("test_schema", NewSchemaPlan(nil)) - // Compare original JSON with first round-trip JSON - // Parse both JSON strings into maps to compare structure - var originalMap, roundTripMap map[string]interface{} - if err := json.Unmarshal(originalJSON, &originalMap); err != nil { - t.Fatalf("Failed to unmarshal original JSON: %v", err) - } - if err := json.Unmarshal([]byte(json1), &roundTripMap); err != nil { - t.Fatalf("Failed to unmarshal round-trip JSON: %v", err) - } + jsonStr, err := p.ToJSON() + if err != nil { + t.Fatalf("ToJSON failed: %v", err) + } - // Use go-cmp to show detailed differences - if diff := cmp.Diff(originalMap, roundTripMap); diff != "" { - t.Errorf("JSON round-trip failed for %s: mismatch (-original +roundtrip):\n%s", version, diff) - } + // Parse the schemas section + var parsed struct { + Schemas map[string]json.RawMessage `json:"schemas"` + } + if err := json.Unmarshal([]byte(jsonStr), &parsed); err != nil { + t.Fatalf("failed to parse JSON: %v", err) + } - // Second round-trip: FromJSON -> ToJSON again - // This should produce identical string output - plan2, err := FromJSON([]byte(json1)) - if err != nil { - t.Fatalf("Failed to parse JSON from round-trip: %v", err) - } + schemaJSON := string(parsed.Schemas["test_schema"]) - json2, err := plan2.ToJSONWithDebug(hasSourceFields) - if err != nil { - t.Fatalf("Failed to convert plan to JSON (second): %v", err) - } + // Schema entry should NOT contain version, pgschema_version, or created_at + if strings.Contains(schemaJSON, `"version"`) { + t.Error("schema entry should not contain 'version'") + } + if strings.Contains(schemaJSON, `"pgschema_version"`) { + t.Error("schema entry should not contain 
'pgschema_version'") + } + if strings.Contains(schemaJSON, `"created_at"`) { + t.Error("schema entry should not contain 'created_at'") + } - // After first round-trip, subsequent round-trips should produce identical strings - if json1 != json2 { - t.Errorf("JSON not stable after first round-trip for %s", version) - t.Logf("First round-trip length: %d", len(json1)) - t.Logf("Second round-trip length: %d", len(json2)) - - // Show structural differences if any - var map1, map2 map[string]interface{} - json.Unmarshal([]byte(json1), &map1) - json.Unmarshal([]byte(json2), &map2) - if diff := cmp.Diff(map1, map2); diff != "" { - t.Errorf("Structural difference in second round-trip (-first +second):\n%s", diff) - } - } - }) + // Should contain "groups" + if !strings.Contains(schemaJSON, `"groups"`) { + t.Error("schema entry should contain 'groups'") } } -func TestPlanNoChanges(t *testing.T) { - sql := `CREATE TABLE users ( - id integer NOT NULL - );` - - oldIR := parseSQL(t, sql) - newIR := parseSQL(t, sql) - diffs := diff.GenerateMigration(oldIR, newIR, "public") - - plan := NewPlan(diffs) - summary := strings.TrimSpace(plan.HumanColored(false)) +func TestFromJSON_ValidPlan(t *testing.T) { + planJSON := `{ + "version": "1.0.0", + "pgschema_version": "1.9.0", + "created_at": "2025-01-01T00:00:00Z", + "schemas": { + "tenant_1": { + "groups": [] + } + } + }` - if summary != "No changes detected." 
{ - t.Errorf("expected %q, got %q", "No changes detected.", summary) + loaded, err := FromJSON([]byte(planJSON)) + if err != nil { + t.Fatalf("FromJSON failed: %v", err) + } + if len(loaded.Schemas) != 1 { + t.Errorf("expected 1 schema, got %d", len(loaded.Schemas)) } } -func TestPlanJSONLoadedSummary(t *testing.T) { - // Test that plans loaded from JSON can generate summaries using Steps metadata - - // Create a plan with steps that have metadata - originalPlan := &Plan{ - Version: "1.0.0", - PgschemaVersion: "1.0.0", - CreatedAt: time.Unix(0, 0).UTC(), - Groups: []ExecutionGroup{ - { - Steps: []Step{ - { - SQL: "CREATE TABLE users (id serial primary key);", - Type: "table", - Operation: "create", - Path: "public.users", - }, - { - SQL: "ALTER TABLE posts ADD COLUMN title text;", - Type: "table.column", - Operation: "create", - Path: "public.posts.title", - }, - }, - }, - }, +func TestFromJSON_InvalidJSON(t *testing.T) { + _, err := FromJSON([]byte(`{invalid`)) + if err == nil { + t.Error("expected error for invalid JSON") } +} - // Serialize to JSON (without SourceDiffs) - jsonData, err := originalPlan.ToJSON() - if err != nil { - t.Fatalf("Failed to serialize plan to JSON: %v", err) +func TestPlan_SummaryString(t *testing.T) { + p := NewPlan() + + // Empty + s := p.SummaryString() + if s != "Summary: 0 schemas inspected, 0 with changes" { + t.Errorf("unexpected summary: %s", s) } - // Load plan from JSON - loadedPlan, err := FromJSON([]byte(jsonData)) - if err != nil { - t.Fatalf("Failed to load plan from JSON: %v", err) + // With schemas + p.AddSchema("s1", NewSchemaPlan(nil)) + diffs := []diff.Diff{ + { + Type: diff.DiffTypeTable, + Operation: diff.DiffOperationCreate, + Path: "public.t", + Statements: []diff.SQLStatement{ + {SQL: "CREATE TABLE t();"}, + }, + }, } + p.AddSchema("s2", NewSchemaPlan(diffs)) - // Verify SourceDiffs is empty (as expected for JSON-loaded plans) - if len(loadedPlan.SourceDiffs) != 0 { - t.Errorf("Expected empty SourceDiffs, got %d", 
len(loadedPlan.SourceDiffs)) + s = p.SummaryString() + if s != "Summary: 2 schemas inspected, 1 with changes" { + t.Errorf("unexpected summary: %s", s) } +} - // Generate summary - this should work using Steps metadata - summary := loadedPlan.HumanColored(false) +func TestPlan_HumanColored_MultiSchema(t *testing.T) { + p := NewPlan() + p.AddSchema("schema_a", NewSchemaPlan(nil)) + p.AddSchema("schema_b", NewSchemaPlan(nil)) - // Verify summary contains expected information - if !strings.Contains(summary, "1 to add") { - t.Error("Summary should mention 1 resource to add") - } + output := p.HumanColored(false) - if !strings.Contains(summary, "Tables:") { - t.Error("Summary should contain Tables section") + // Should contain schema headers in sorted order + idxA := strings.Index(output, "Schema: schema_a") + idxB := strings.Index(output, "Schema: schema_b") + if idxA == -1 { + t.Error("output should contain 'Schema: schema_a'") } - - if !strings.Contains(summary, "users") { - t.Error("Summary should mention users table") + if idxB == -1 { + t.Error("output should contain 'Schema: schema_b'") } - - if strings.Contains(summary, "No changes detected") { - t.Error("Summary should not say \"No changes detected\" when there are changes") + if idxA >= idxB { + t.Error("schema_a should appear before schema_b") } } -func TestPlanDebugJSONRoundTrip(t *testing.T) { - // Issue #305: Plans generated with --debug produce JSON that cannot be - // deserialized by FromJSON() because the Diff.Source field is a Go interface - // (DiffSource) that json.Unmarshal cannot reconstruct. 
- oldSQL := `CREATE TABLE users ( - id integer NOT NULL - );` - - newSQL := `CREATE TABLE users ( - id integer NOT NULL, - name text NOT NULL - ); - CREATE TABLE posts ( - id integer NOT NULL, - title text NOT NULL - );` - - oldIR := parseSQL(t, oldSQL) - newIR := parseSQL(t, newSQL) - diffs := diff.GenerateMigration(oldIR, newIR, "public") - - p := NewPlan(diffs) - - // Serialize with debug mode (includes SourceDiffs; Diff.Source is excluded via json:"-") - debugJSON, err := p.ToJSONWithDebug(true) - if err != nil { - t.Fatalf("Failed to serialize plan with debug: %v", err) +func TestPlan_ToSQL_MultiSchema(t *testing.T) { + p := NewPlan() + diffs := []diff.Diff{ + { + Type: diff.DiffTypeTable, + Operation: diff.DiffOperationCreate, + Path: "public.t", + Statements: []diff.SQLStatement{ + {SQL: "CREATE TABLE t (id int)"}, + }, + }, } + p.AddSchema("s1", NewSchemaPlan(diffs)) + p.AddSchema("s2", NewSchemaPlan(nil)) - // Deserialize - this should succeed - loaded, err := FromJSON([]byte(debugJSON)) - if err != nil { - t.Fatalf("Failed to deserialize debug plan JSON: %v", err) - } + sql := p.ToSQL(SQLFormatRaw) - // Verify debug mode actually included SourceDiffs - if len(loaded.SourceDiffs) == 0 { - t.Error("Debug plan should include SourceDiffs") + // Should contain header for s1 (has SQL) + if !strings.Contains(sql, "-- Schema: s1") { + t.Error("should contain schema header for s1") } - - // Verify the loaded plan has valid groups and steps - if len(loaded.Groups) == 0 { - t.Error("Loaded plan should have at least one execution group") + if !strings.Contains(sql, "CREATE TABLE t (id int)") { + t.Error("should contain SQL for s1") } - - // Re-serialize without debug and verify round-trip stability - normalJSON, err := loaded.ToJSON() - if err != nil { - t.Fatalf("Failed to re-serialize loaded plan: %v", err) + // s2 has no SQL, should not have header + if strings.Contains(sql, "-- Schema: s2") { + t.Error("should not contain schema header for s2 (no SQL)") } +} - 
loaded2, err := FromJSON([]byte(normalJSON)) - if err != nil { - t.Fatalf("Failed to deserialize re-serialized plan: %v", err) - } +func TestPlan_CreatedAt_UsesTestTime(t *testing.T) { + t.Setenv("PGSCHEMA_TEST_TIME", "2024-06-15T12:00:00Z") + p := NewPlan() - if len(loaded2.Groups) != len(loaded.Groups) { - t.Errorf("Group count mismatch: got %d, want %d", len(loaded2.Groups), len(loaded.Groups)) + expected, _ := time.Parse(time.RFC3339, "2024-06-15T12:00:00Z") + if !p.CreatedAt.Equal(expected) { + t.Errorf("created_at = %v, want %v", p.CreatedAt, expected) } } diff --git a/internal/plan/schema_plan.go b/internal/plan/schema_plan.go new file mode 100644 index 00000000..bbe832db --- /dev/null +++ b/internal/plan/schema_plan.go @@ -0,0 +1,1120 @@ +package plan + +import ( + "fmt" + "sort" + "strings" + + "github.com/pgplex/pgschema/internal/color" + "github.com/pgplex/pgschema/internal/diff" + "github.com/pgplex/pgschema/internal/fingerprint" +) + +// DirectiveType represents the different types of directives +type DirectiveType string + +const ( + DirectiveTypeWait DirectiveType = "wait" +) + +// String returns the string representation of DirectiveType +func (dt DirectiveType) String() string { + return string(dt) +} + +// Directive represents a special directive for execution (wait, assert, etc.) +type Directive struct { + Type DirectiveType `json:"type"` // DirectiveTypeWait, etc. 
+ Message string `json:"message"` // Auto-generated descriptive message +} + +// Step represents a single execution step with SQL and optional directive +type Step struct { + SQL string `json:"sql"` + Directive *Directive `json:"directive,omitempty"` + // Metadata for summary generation + Type string `json:"type,omitempty"` // e.g., "table", "index" + Operation string `json:"operation,omitempty"` // e.g., "create", "alter", "drop" + Path string `json:"path,omitempty"` // e.g., "public.users" +} + +// ExecutionGroup represents a group of steps that should be executed together +type ExecutionGroup struct { + Steps []Step `json:"steps"` +} + +// SchemaPlan holds the migration plan for a single schema. +// It contains execution groups, fingerprint, and diff metadata. +// The top-level Plan wraps one or more SchemaPlan entries. +type SchemaPlan struct { + // Source database fingerprint when plan was created + SourceFingerprint *fingerprint.SchemaFingerprint `json:"source_fingerprint,omitempty"` + + // Groups is the ordered list of execution groups + Groups []ExecutionGroup `json:"groups"` + + // SourceDiffs stores original diff information for summary calculation + // This field is only serialized in debug mode + SourceDiffs []diff.Diff `json:"source_diffs,omitempty"` +} + +// PlanSummary provides counts of changes by type +type PlanSummary struct { + Total int `json:"total"` + Add int `json:"add"` + Change int `json:"change"` + Destroy int `json:"destroy"` + ByType map[string]TypeSummary `json:"by_type"` +} + +// TypeSummary provides counts for a specific object type +type TypeSummary struct { + Add int `json:"add"` + Change int `json:"change"` + Destroy int `json:"destroy"` +} + +// Type represents the database object types in dependency order +type Type string + +const ( + TypeSchema Type = "schemas" + TypeType Type = "types" + TypeFunction Type = "functions" + TypeProcedure Type = "procedures" + TypeSequence Type = "sequences" + TypeTable Type = "tables" + TypeView 
Type = "views" + TypeMaterializedView Type = "materialized views" + TypeIndex Type = "indexes" + TypeTrigger Type = "triggers" + TypePolicy Type = "policies" + TypeColumn Type = "columns" + TypeRLS Type = "rls" + TypeDefaultPrivilege Type = "default privileges" + TypePrivilege Type = "privileges" + TypeColumnPrivilege Type = "column privileges" + TypeRevokedDefaultPrivilege Type = "revoked default privileges" +) + +// SQLFormat represents the different output formats for SQL generation +type SQLFormat string + +const ( + // SQLFormatRaw outputs just the raw SQL statements without additional formatting + SQLFormatRaw SQLFormat = "raw" + // Human-readable format with comments + SQLFormatHuman SQLFormat = "human" +) + +// getObjectOrder returns the dependency order for database objects +func getObjectOrder() []Type { + return []Type{ + TypeSchema, + TypeDefaultPrivilege, + TypeType, + TypeFunction, + TypeProcedure, + TypeSequence, + TypeTable, + TypeView, + TypeMaterializedView, + TypeIndex, + TypeTrigger, + TypePolicy, + TypeColumn, + TypeRLS, + TypePrivilege, + TypeColumnPrivilege, + TypeRevokedDefaultPrivilege, + } +} + +// ========== PUBLIC METHODS ========== + +// groupDiffs groups diffs into execution groups with configurable online operations +func groupDiffs(diffs []diff.Diff) []ExecutionGroup { + if len(diffs) == 0 { + return nil + } + + var groups []ExecutionGroup + var transactionalSteps []Step + + // Track newly created tables/materialized views to avoid concurrent rewrites for their indexes. + // Single-pass: diffs are topologically sorted, so creates come before dependent index operations. + // We build these maps incrementally as we process each diff. 
+ newlyCreatedTables := make(map[string]bool) + newlyCreatedMaterializedViews := make(map[string]bool) + + // Convert diffs to steps + for _, d := range diffs { + // Track creates as we encounter them (before processing dependent operations) + if d.Type == diff.DiffTypeTable && d.Operation == diff.DiffOperationCreate { + newlyCreatedTables[d.Path] = true + } + if d.Type == diff.DiffTypeMaterializedView && d.Operation == diff.DiffOperationCreate { + newlyCreatedMaterializedViews[d.Path] = true + } + // Try to generate rewrites if online operations are enabled + rewriteSteps := generateRewrite(d, newlyCreatedTables, newlyCreatedMaterializedViews) + + if len(rewriteSteps) > 0 { + // For operations with rewrites, create one step per rewrite statement + for _, rewriteStep := range rewriteSteps { + step := Step{ + SQL: rewriteStep.SQL, + Type: d.Type.String(), + Operation: d.Operation.String(), + Path: d.Path, + Directive: rewriteStep.Directive, + } + + // Check if this step needs isolation (has directive or cannot run in transaction) + needsIsolation := step.Directive != nil || !rewriteStep.CanRunInTransaction + + if needsIsolation { + // Flush any pending transactional steps + if len(transactionalSteps) > 0 { + groups = append(groups, ExecutionGroup{Steps: transactionalSteps}) + transactionalSteps = nil + } + + // Add this step in its own group + groups = append(groups, ExecutionGroup{Steps: []Step{step}}) + } else { + // Accumulate transactional steps + transactionalSteps = append(transactionalSteps, step) + } + } + } else { + // For operations without rewrites, create one step per canonical statement + for _, stmt := range d.Statements { + step := Step{ + SQL: stmt.SQL, + Type: d.Type.String(), + Operation: d.Operation.String(), + Path: d.Path, + } + // Canonical statements don't have directives + transactionalSteps = append(transactionalSteps, step) + } + } + } + + // Flush remaining transactional steps + if len(transactionalSteps) > 0 { + groups = append(groups, 
ExecutionGroup{Steps: transactionalSteps}) + } + + return groups +} + +// NewSchemaPlan creates a new schema plan from a list of diffs with online operations enabled. +func NewSchemaPlan(diffs []diff.Diff) *SchemaPlan { + return &SchemaPlan{ + Groups: groupDiffs(diffs), + SourceDiffs: diffs, + } +} + +// NewSchemaPlanWithFingerprint creates a new schema plan from diffs and includes source fingerprint. +func NewSchemaPlanWithFingerprint(diffs []diff.Diff, sourceFingerprint *fingerprint.SchemaFingerprint) *SchemaPlan { + sp := NewSchemaPlan(diffs) + sp.SourceFingerprint = sourceFingerprint + return sp +} + +// HasAnyChanges checks if the plan contains any changes by examining the groups +func (sp *SchemaPlan) HasAnyChanges() bool { + for _, g := range sp.Groups { + if len(g.Steps) > 0 { + return true + } + } + return false +} + +// HumanColored returns a human-readable summary of the plan with color support +func (sp *SchemaPlan) HumanColored(enableColor bool) string { + c := color.New(enableColor) + var summary strings.Builder + + // Calculate summary from diffs + summaryData := sp.calculateSummaryFromSteps() + + if summaryData.Total == 0 { + summary.WriteString("No changes detected.\n") + return summary.String() + } + + // Write header with overall summary (colored like Terraform) + summary.WriteString(c.FormatPlanHeader(summaryData.Add, summaryData.Change, summaryData.Destroy) + "\n\n") + + // Write summary by type with colors + summary.WriteString(c.Bold("Summary by type:") + "\n") + for _, objType := range getObjectOrder() { + objTypeStr := string(objType) + if typeSummary, exists := summaryData.ByType[objTypeStr]; exists && (typeSummary.Add > 0 || typeSummary.Change > 0 || typeSummary.Destroy > 0) { + line := c.FormatSummaryLine(objTypeStr, typeSummary.Add, typeSummary.Change, typeSummary.Destroy) + summary.WriteString(line + "\n") + } + } + summary.WriteString("\n") + + // Detailed changes by type with symbols + for _, objType := range getObjectOrder() { + 
objTypeStr := string(objType) + if typeSummary, exists := summaryData.ByType[objTypeStr]; exists && (typeSummary.Add > 0 || typeSummary.Change > 0 || typeSummary.Destroy > 0) { + // Capitalize first letter for display + displayName := strings.ToUpper(objTypeStr[:1]) + objTypeStr[1:] + sp.writeDetailedChangesFromSteps(&summary, displayName, objTypeStr, c) + } + } + + // Add DDL section if there are changes + if summaryData.Total > 0 { + summary.WriteString(c.Bold("DDL to be executed:") + "\n") + summary.WriteString(strings.Repeat("-", 50) + "\n\n") + migrationSQL := sp.ToSQL(SQLFormatHuman) + if migrationSQL != "" { + summary.WriteString(migrationSQL) + if !strings.HasSuffix(migrationSQL, "\n") { + summary.WriteString("\n") + } + } else { + summary.WriteString("-- No DDL statements generated\n") + } + } + + return summary.String() +} + +// ToSQL returns the SQL statements with formatting based on the specified format +func (sp *SchemaPlan) ToSQL(format SQLFormat) string { + // Build SQL output from groups + var sqlOutput strings.Builder + + for groupIdx, group := range sp.Groups { + // Add transaction group comment for human-readable format + if format == SQLFormatHuman && len(sp.Groups) > 1 { + sqlOutput.WriteString(fmt.Sprintf("-- Transaction Group #%d\n", groupIdx+1)) + } + + for stepIdx, step := range group.Steps { + if step.Directive != nil { + // Handle directive statements + sqlOutput.WriteString(fmt.Sprintf("-- pgschema:%s\n", step.Directive.Type.String())) + sqlOutput.WriteString(step.SQL) + sqlOutput.WriteString("\n") + } else { + // Handle regular SQL statements + sqlOutput.WriteString(step.SQL) + sqlOutput.WriteString("\n") + } + + // Add blank line between steps except for the last one in the last group + if stepIdx < len(group.Steps)-1 || groupIdx < len(sp.Groups)-1 { + sqlOutput.WriteString("\n") + } + } + } + + return sqlOutput.String() +} + +// ========== PRIVATE METHODS ========== + +// calculateSummaryFromSteps calculates summary statistics from 
the plan diffs +func (sp *SchemaPlan) calculateSummaryFromSteps() PlanSummary { + summary := PlanSummary{ + ByType: make(map[string]TypeSummary), + } + + // For tables, we need to group by table path to avoid counting duplicates + // For other object types, count each operation individually + + // Track table operations by table path + tableOperations := make(map[string]string) // table_path -> operation + + // Track tables that have sub-resource changes (these should be counted as modified) + tablesWithSubResources := make(map[string]bool) // table_path -> true + + // Track view operations by view path (regular views only) + viewOperations := make(map[string]string) // view_path -> operation + + // Track views that have sub-resource changes (these should be counted as modified) + viewsWithSubResources := make(map[string]bool) // view_path -> true + + // Track materialized view operations by path + materializedViewOperations := make(map[string]string) // materialized_view_path -> operation + + // Track materialized views that have sub-resource changes + materializedViewsWithSubResources := make(map[string]bool) // materialized_view_path -> true + + // Track materialized views that have "recreate" operations (DROP that will be followed by CREATE) + // These should be counted as modifications, not adds + materializedViewsRecreating := make(map[string]bool) // materialized_view_path -> true + + // Track non-table/non-view/non-materialized-view operations + nonTableOperations := make(map[string][]string) // objType -> []operations + + // Use source diffs for summary calculation if available, + // otherwise use steps metadata (for plans loaded from JSON) + var dataToProcess []struct { + Type string + Operation string + Path string + } + + if len(sp.SourceDiffs) > 0 { + // Use SourceDiffs (for freshly generated plans) + for _, diff := range sp.SourceDiffs { + dataToProcess = append(dataToProcess, struct { + Type string + Operation string + Path string + }{ + Type: 
diff.Type.String(), + Operation: diff.Operation.String(), + Path: diff.Path, + }) + } + } else { + // Use Steps metadata (for plans loaded from JSON) + for _, group := range sp.Groups { + for _, step := range group.Steps { + if step.Type != "" && step.Operation != "" && step.Path != "" { + dataToProcess = append(dataToProcess, struct { + Type string + Operation string + Path string + }{ + Type: step.Type, + Operation: step.Operation, + Path: step.Path, + }) + } + } + } + } + + // Single-pass: process all steps, determining parent type from step.Type prefix + // Sub-resource types encode their parent: "table.index", "view.index", "materialized_view.index" + for _, step := range dataToProcess { + // Normalize object type to match the expected format (add 's' for plural) + stepObjTypeStr := step.Type + if !strings.HasSuffix(stepObjTypeStr, "s") { + stepObjTypeStr += "s" + } + + if stepObjTypeStr == "tables" { + // For tables, track unique table paths and their primary operation + tableOperations[step.Path] = step.Operation + } else if stepObjTypeStr == "views" { + // For views, track unique view paths and their primary operation + viewOperations[step.Path] = step.Operation + } else if stepObjTypeStr == "materialized_views" { + // For materialized views, track unique paths and their primary operation + // If this is a "recreate" operation, mark it so subsequent "create" is treated as modify + if step.Operation == "recreate" { + materializedViewsRecreating[step.Path] = true + } + materializedViewOperations[step.Path] = step.Operation + } else if isSubResource(step.Type) { + // For sub-resources, determine parent type from step.Type prefix + // Types are: "table.index", "table.column", "view.comment", "materialized_view.index", etc. 
+ parentPath := extractTablePathFromSubResource(step.Path, step.Type) + if parentPath != "" { + if strings.HasPrefix(step.Type, "materialized_view.") { + // Parent is a materialized view + materializedViewsWithSubResources[parentPath] = true + } else if strings.HasPrefix(step.Type, "view.") { + // Parent is a view + viewsWithSubResources[parentPath] = true + } else { + // Parent is a table (table.index, table.column, table.constraint, etc.) + tablesWithSubResources[parentPath] = true + } + } + } else { + // For non-table/non-view objects, track each operation + nonTableOperations[stepObjTypeStr] = append(nonTableOperations[stepObjTypeStr], step.Operation) + } + } + + // Count table operations (one per unique table) + // Include both direct table operations and tables with sub-resource changes + allAffectedTables := make(map[string]string) + + // First, add direct table operations + for tablePath, operation := range tableOperations { + allAffectedTables[tablePath] = operation + } + + // Then, add tables that only have sub-resource changes (count as "alter") + for tablePath := range tablesWithSubResources { + if _, alreadyCounted := allAffectedTables[tablePath]; !alreadyCounted { + allAffectedTables[tablePath] = "alter" // Sub-resource changes = table modification + } + } + + if len(allAffectedTables) > 0 { + stats := summary.ByType["tables"] + for _, operation := range allAffectedTables { + switch operation { + case "create": + stats.Add++ + summary.Add++ + case "alter": + stats.Change++ + summary.Change++ + case "drop": + stats.Destroy++ + summary.Destroy++ + } + } + summary.ByType["tables"] = stats + } + + // Count view operations (one per unique view) + // Include both direct view operations and views with sub-resource changes + allAffectedViews := make(map[string]string) + + // First, add direct view operations + for viewPath, operation := range viewOperations { + allAffectedViews[viewPath] = operation + } + + // Then, add views that only have sub-resource 
changes (count as "alter") + for viewPath := range viewsWithSubResources { + if _, alreadyCounted := allAffectedViews[viewPath]; !alreadyCounted { + allAffectedViews[viewPath] = "alter" // Sub-resource changes = view modification + } + } + + if len(allAffectedViews) > 0 { + stats := summary.ByType["views"] + for _, operation := range allAffectedViews { + switch operation { + case "create": + stats.Add++ + summary.Add++ + case "alter": + stats.Change++ + summary.Change++ + case "drop": + stats.Destroy++ + summary.Destroy++ + } + } + summary.ByType["views"] = stats + } + + // Count materialized view operations (one per unique materialized view) + // Include both direct materialized view operations and materialized views with sub-resource changes + allAffectedMaterializedViews := make(map[string]string) + + // First, add direct materialized view operations + for mvPath, operation := range materializedViewOperations { + allAffectedMaterializedViews[mvPath] = operation + } + + // Then, add materialized views that only have sub-resource changes (count as "alter") + for mvPath := range materializedViewsWithSubResources { + if _, alreadyCounted := allAffectedMaterializedViews[mvPath]; !alreadyCounted { + allAffectedMaterializedViews[mvPath] = "alter" // Sub-resource changes = materialized view modification + } + } + + if len(allAffectedMaterializedViews) > 0 { + stats := summary.ByType["materialized views"] + for mvPath, operation := range allAffectedMaterializedViews { + // If this path had a "recreate" operation, treat any subsequent "create" as a modify + // because the object existed before and is being recreated due to dependencies + if materializedViewsRecreating[mvPath] && operation == "create" { + operation = "alter" + } + switch operation { + case "create": + stats.Add++ + summary.Add++ + case "alter", "recreate": + // Both "alter" and "recreate" count as modifications + stats.Change++ + summary.Change++ + case "drop": + stats.Destroy++ + summary.Destroy++ + } + } 
+ summary.ByType["materialized views"] = stats + } + + // Count non-table/non-view/non-materialized-view operations (each operation counted individually) + for objType, operations := range nonTableOperations { + // Normalize object type to match the Type constants (replace underscores with spaces) + normalizedObjType := strings.ReplaceAll(objType, "_", " ") + stats := summary.ByType[normalizedObjType] + for _, operation := range operations { + switch operation { + case "create": + stats.Add++ + summary.Add++ + case "alter": + stats.Change++ + summary.Change++ + case "drop": + stats.Destroy++ + summary.Destroy++ + } + } + summary.ByType[normalizedObjType] = stats + } + + summary.Total = summary.Add + summary.Change + summary.Destroy + return summary +} + +// writeDetailedChangesFromSteps writes detailed changes from plan diffs +func (sp *SchemaPlan) writeDetailedChangesFromSteps(summary *strings.Builder, displayName, objType string, c *color.Color) { + fmt.Fprintf(summary, "%s:\n", c.Bold(displayName)) + + if objType == "tables" { + // For tables, group all changes by table path to avoid duplicates + sp.writeTableChanges(summary, c) + } else if objType == "views" { + // For views, group all changes by view path to avoid duplicates + sp.writeViewChanges(summary, c) + } else if objType == "materialized views" { + // For materialized views, group all changes by path to avoid duplicates + sp.writeMaterializedViewChanges(summary, c) + } else { + // For non-table/non-view objects, use the original logic + sp.writeNonTableChanges(summary, objType, c) + } + + summary.WriteString("\n") +} + +// writeTableChanges handles table-specific output with proper grouping +func (sp *SchemaPlan) writeTableChanges(summary *strings.Builder, c *color.Color) { + // Group all changes by table path and track operations + tableOperations := make(map[string]string) // table_path -> operation + subResources := make(map[string][]struct { + operation string + path string + subType string + source 
diff.DiffSource + }) + + // Track all seen operations globally to avoid duplicates across groups + seenOperations := make(map[string]bool) // "path.operation.subType" -> true + + // Use source diffs for summary calculation + for _, step := range sp.SourceDiffs { + // Normalize object type + stepObjTypeStr := step.Type.String() + if !strings.HasSuffix(stepObjTypeStr, "s") { + stepObjTypeStr += "s" + } + + if stepObjTypeStr == "tables" { + // This is a table-level change, record the operation + tableOperations[step.Path] = step.Operation.String() + } else if isSubResource(step.Type.String()) && strings.HasPrefix(step.Type.String(), "table.") { + // This is a table sub-resource change (skip view sub-resources) + tablePath := extractTablePathFromSubResource(step.Path, step.Type.String()) + if tablePath != "" { + // Deduplicate all operations based on (type, operation, path) triplet + operationKey := step.Path + "." + step.Operation.String() + "." + step.Type.String() + if !seenOperations[operationKey] { + seenOperations[operationKey] = true + subResources[tablePath] = append(subResources[tablePath], struct { + operation string + path string + subType string + source diff.DiffSource + }{ + operation: step.Operation.String(), + path: step.Path, + subType: step.Type.String(), + source: step.Source, + }) + } + } + } + } + + // Get all unique table paths (from both direct table changes and sub-resources) + allTables := make(map[string]bool) + for tablePath := range tableOperations { + allTables[tablePath] = true + } + for tablePath := range subResources { + allTables[tablePath] = true + } + + // Sort table paths for consistent output + var sortedTables []string + for tablePath := range allTables { + sortedTables = append(sortedTables, tablePath) + } + sort.Strings(sortedTables) + + // Display each table once with all its changes + for _, tablePath := range sortedTables { + var symbol string + if operation, hasDirectChange := tableOperations[tablePath]; hasDirectChange { + 
// Table has direct changes, use the operation to determine symbol + switch operation { + case "create": + symbol = c.PlanSymbol("add") + case "alter": + symbol = c.PlanSymbol("change") + case "drop": + symbol = c.PlanSymbol("destroy") + default: + symbol = c.PlanSymbol("change") + } + } else { + // Table has no direct changes, only sub-resource changes + // Sub-resource changes to existing tables should always be considered modifications + symbol = c.PlanSymbol("change") + } + + fmt.Fprintf(summary, " %s %s\n", symbol, getLastPathComponent(tablePath)) + + // Show sub-resources for this table + if subResourceList, exists := subResources[tablePath]; exists { + // Sort sub-resources by type then path + sort.Slice(subResourceList, func(i, j int) bool { + if subResourceList[i].subType != subResourceList[j].subType { + return subResourceList[i].subType < subResourceList[j].subType + } + return subResourceList[i].path < subResourceList[j].path + }) + + for _, subRes := range subResourceList { + // Extract object name from source + objectName := getObjectNameFromSource(subRes.source) + + // Handle online index replacement display + if subRes.subType == diff.DiffTypeTableIndex.String() && subRes.operation == diff.DiffOperationAlter.String() { + subSymbol := c.PlanSymbol("change") + displaySubType := strings.TrimPrefix(subRes.subType, "table.") + fmt.Fprintf(summary, " %s %s (%s - concurrent rebuild)\n", subSymbol, objectName, displaySubType) + continue + } + + var subSymbol string + switch subRes.operation { + case "create": + subSymbol = c.PlanSymbol("add") + case "alter": + subSymbol = c.PlanSymbol("change") + case "drop": + subSymbol = c.PlanSymbol("destroy") + default: + subSymbol = c.PlanSymbol("change") + } + // Clean up sub-resource type for display (remove "table." 
prefix) + displaySubType := strings.TrimPrefix(subRes.subType, "table.") + fmt.Fprintf(summary, " %s %s (%s)\n", subSymbol, objectName, displaySubType) + } + } + } +} + +// writeViewChanges handles view-specific output with proper grouping +func (sp *SchemaPlan) writeViewChanges(summary *strings.Builder, c *color.Color) { + // Group all changes by view path and track operations + viewOperations := make(map[string]string) // view_path -> operation + subResources := make(map[string][]struct { + operation string + path string + subType string + }) + + // Track all seen operations globally to avoid duplicates across groups + seenOperations := make(map[string]bool) // "path.operation.subType" -> true + + // Use source diffs for summary calculation + for _, step := range sp.SourceDiffs { + // Normalize object type + stepObjTypeStr := step.Type.String() + if !strings.HasSuffix(stepObjTypeStr, "s") { + stepObjTypeStr += "s" + } + + if stepObjTypeStr == "views" { + // This is a view-level change, record the operation + viewOperations[step.Path] = step.Operation.String() + } else if isSubResource(step.Type.String()) && strings.HasPrefix(step.Type.String(), "view.") { + // This is a view sub-resource change + viewPath := extractTablePathFromSubResource(step.Path, step.Type.String()) + if viewPath != "" { + // Deduplicate all operations based on (type, operation, path) triplet + operationKey := step.Path + "." + step.Operation.String() + "." 
+ step.Type.String() + if !seenOperations[operationKey] { + seenOperations[operationKey] = true + subResources[viewPath] = append(subResources[viewPath], struct { + operation string + path string + subType string + }{ + operation: step.Operation.String(), + path: step.Path, + subType: step.Type.String(), + }) + } + } + } + } + + // Get all unique view paths (from both direct view changes and sub-resources) + allViews := make(map[string]bool) + for viewPath := range viewOperations { + allViews[viewPath] = true + } + for viewPath := range subResources { + allViews[viewPath] = true + } + + // Sort view paths for consistent output + var sortedViews []string + for viewPath := range allViews { + sortedViews = append(sortedViews, viewPath) + } + sort.Strings(sortedViews) + + // Display each view once with all its changes + for _, viewPath := range sortedViews { + var symbol string + if operation, hasDirectChange := viewOperations[viewPath]; hasDirectChange { + // View has direct changes, use the operation to determine symbol + switch operation { + case "create": + symbol = c.PlanSymbol("add") + case "alter": + symbol = c.PlanSymbol("change") + case "drop": + symbol = c.PlanSymbol("destroy") + default: + symbol = c.PlanSymbol("change") + } + } else { + // View has no direct changes, only sub-resource changes + // Sub-resource changes to existing views should always be considered modifications + symbol = c.PlanSymbol("change") + } + + fmt.Fprintf(summary, " %s %s\n", symbol, getLastPathComponent(viewPath)) + + // Show sub-resources for this view + if subResourceList, exists := subResources[viewPath]; exists { + // Sort sub-resources by type then path + sort.Slice(subResourceList, func(i, j int) bool { + if subResourceList[i].subType != subResourceList[j].subType { + return subResourceList[i].subType < subResourceList[j].subType + } + return subResourceList[i].path < subResourceList[j].path + }) + + for _, subRes := range subResourceList { + var subSymbol string + switch 
subRes.operation { + case "create": + subSymbol = c.PlanSymbol("add") + case "alter": + subSymbol = c.PlanSymbol("change") + case "drop": + subSymbol = c.PlanSymbol("destroy") + default: + subSymbol = c.PlanSymbol("change") + } + // Clean up sub-resource type for display (remove "view." prefix) + displaySubType := strings.TrimPrefix(subRes.subType, "view.") + fmt.Fprintf(summary, " %s %s (%s)\n", subSymbol, getLastPathComponent(subRes.path), displaySubType) + } + } + } +} + +// writeMaterializedViewChanges handles materialized view-specific output with proper grouping +func (sp *SchemaPlan) writeMaterializedViewChanges(summary *strings.Builder, c *color.Color) { + // Group all changes by materialized view path and track operations + mvOperations := make(map[string]string) // mv_path -> operation + subResources := make(map[string][]struct { + operation string + path string + subType string + }) + + // Track all seen operations globally to avoid duplicates across groups + seenOperations := make(map[string]bool) // "path.operation.subType" -> true + + // Track materialized views that have "recreate" operations + mvsRecreating := make(map[string]bool) + + // Use source diffs for summary calculation + for _, step := range sp.SourceDiffs { + // Normalize object type + stepObjTypeStr := step.Type.String() + if !strings.HasSuffix(stepObjTypeStr, "s") { + stepObjTypeStr += "s" + } + + if stepObjTypeStr == "materialized_views" { + // Track recreate operations so subsequent create is treated as modify + if step.Operation.String() == "recreate" { + mvsRecreating[step.Path] = true + } + // This is a materialized view-level change, record the operation + mvOperations[step.Path] = step.Operation.String() + } else if isSubResource(step.Type.String()) && strings.HasPrefix(step.Type.String(), "materialized_view.") { + // This is a materialized view sub-resource change + mvPath := extractTablePathFromSubResource(step.Path, step.Type.String()) + if mvPath != "" { + // Deduplicate all 
operations based on (type, operation, path) triplet + operationKey := step.Path + "." + step.Operation.String() + "." + step.Type.String() + if !seenOperations[operationKey] { + seenOperations[operationKey] = true + subResources[mvPath] = append(subResources[mvPath], struct { + operation string + path string + subType string + }{ + operation: step.Operation.String(), + path: step.Path, + subType: step.Type.String(), + }) + } + } + } + } + + // Get all unique materialized view paths (from both direct changes and sub-resources) + allMVs := make(map[string]bool) + for mvPath := range mvOperations { + allMVs[mvPath] = true + } + for mvPath := range subResources { + allMVs[mvPath] = true + } + + // Sort materialized view paths for consistent output + var sortedMVs []string + for mvPath := range allMVs { + sortedMVs = append(sortedMVs, mvPath) + } + sort.Strings(sortedMVs) + + // Display each materialized view once with all its changes + for _, mvPath := range sortedMVs { + var symbol string + if operation, hasDirectChange := mvOperations[mvPath]; hasDirectChange { + // If this path had a "recreate" and now shows "create", treat as modify + if mvsRecreating[mvPath] && operation == "create" { + operation = "alter" + } + // Materialized view has direct changes, use the operation to determine symbol + switch operation { + case "create": + symbol = c.PlanSymbol("add") + case "alter", "recreate": + // Both "alter" and "recreate" are modifications + symbol = c.PlanSymbol("change") + case "drop": + symbol = c.PlanSymbol("destroy") + default: + symbol = c.PlanSymbol("change") + } + } else { + // Materialized view has no direct changes, only sub-resource changes + // Sub-resource changes to existing materialized views should always be considered modifications + symbol = c.PlanSymbol("change") + } + + fmt.Fprintf(summary, " %s %s\n", symbol, getLastPathComponent(mvPath)) + + // Show sub-resources for this materialized view + if subResourceList, exists := subResources[mvPath]; 
exists { + // Sort sub-resources by type then path + sort.Slice(subResourceList, func(i, j int) bool { + if subResourceList[i].subType != subResourceList[j].subType { + return subResourceList[i].subType < subResourceList[j].subType + } + return subResourceList[i].path < subResourceList[j].path + }) + + for _, subRes := range subResourceList { + // Handle online index replacement display + if subRes.subType == diff.DiffTypeMaterializedViewIndex.String() && subRes.operation == diff.DiffOperationAlter.String() { + subSymbol := c.PlanSymbol("change") + displaySubType := strings.TrimPrefix(subRes.subType, "materialized_view.") + fmt.Fprintf(summary, " %s %s (%s - concurrent rebuild)\n", subSymbol, getLastPathComponent(subRes.path), displaySubType) + continue + } + + var subSymbol string + switch subRes.operation { + case "create": + subSymbol = c.PlanSymbol("add") + case "alter": + subSymbol = c.PlanSymbol("change") + case "drop": + subSymbol = c.PlanSymbol("destroy") + default: + subSymbol = c.PlanSymbol("change") + } + // Clean up sub-resource type for display (remove "materialized_view." 
prefix) + displaySubType := strings.TrimPrefix(subRes.subType, "materialized_view.") + fmt.Fprintf(summary, " %s %s (%s)\n", subSymbol, getLastPathComponent(subRes.path), displaySubType) + } + } + } +} + +// writeNonTableChanges handles non-table objects with the original logic +func (sp *SchemaPlan) writeNonTableChanges(summary *strings.Builder, objType string, c *color.Color) { + // Collect changes for this object type + var changes []struct { + operation string + path string + } + + // Use source diffs for summary calculation + for _, step := range sp.SourceDiffs { + // Normalize object type + stepObjTypeStr := step.Type.String() + if !strings.HasSuffix(stepObjTypeStr, "s") { + stepObjTypeStr += "s" + } + // Normalize underscores to spaces to match Type constants + stepObjTypeStr = strings.ReplaceAll(stepObjTypeStr, "_", " ") + + if stepObjTypeStr == objType { + changes = append(changes, struct { + operation string + path string + }{ + operation: step.Operation.String(), + path: step.Path, + }) + } + } + + // Sort changes by path for consistent output + sort.Slice(changes, func(i, j int) bool { + return changes[i].path < changes[j].path + }) + + // Write changes with appropriate symbols + for _, change := range changes { + var symbol string + switch change.operation { + case "create": + symbol = c.PlanSymbol("add") + case "alter": + symbol = c.PlanSymbol("change") + case "drop": + symbol = c.PlanSymbol("destroy") + default: + symbol = c.PlanSymbol("change") + } + + fmt.Fprintf(summary, " %s %s\n", symbol, getLastPathComponent(change.path)) + } +} + +// isSubResource checks if the given type is a sub-resource of tables, views, or materialized views +func isSubResource(objType string) bool { + return (strings.HasPrefix(objType, "table.") && objType != "table") || + (strings.HasPrefix(objType, "view.") && objType != "view") || + (strings.HasPrefix(objType, "materialized_view.") && objType != "materialized_view") +} + +// getLastPathComponent extracts the last 
component from a dot-separated path +func getLastPathComponent(path string) string { + parts := strings.Split(path, ".") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return path +} + +// getObjectNameFromSource extracts the object name from the source object. +// This preserves object names that contain dots (e.g., "public.idx_users") +func getObjectNameFromSource(source diff.DiffSource) string { + if source == nil { + return "" + } + return source.GetObjectName() +} + +// extractTablePathFromSubResource extracts the parent table, view, or materialized view path from a sub-resource path +func extractTablePathFromSubResource(subResourcePath, subResourceType string) string { + if strings.HasPrefix(subResourceType, "table.") { + // For sub-resources, the path format depends on the sub-resource type: + // - "schema.table.resource_name" -> "schema.table" (indexes, policies, columns) + // - "schema.table" -> "schema.table" (RLS, table comments) + parts := strings.Split(subResourcePath, ".") + + // Special handling for RLS and table-level changes + if subResourceType == "table.rls" || subResourceType == "table.comment" { + // For RLS and table comments, the path is already the table path + return subResourcePath + } + + if len(parts) >= 2 { + // For other sub-resources, return the first two parts as table path + if len(parts) >= 3 { + return parts[0] + "." 
+ parts[1] + } + // If only 2 parts, it's likely "schema.table" already + return subResourcePath + } + } else if strings.HasPrefix(subResourceType, "view.") { + // For view sub-resources, the path format is similar: + // - "schema.view.resource_name" -> "schema.view" (indexes, comments) + // - "schema.view" -> "schema.view" (view-level comments) + parts := strings.Split(subResourcePath, ".") + + // Special handling for view-level changes + if subResourceType == "view.comment" { + // For view comments, the path is already the view path + return subResourcePath + } + + if len(parts) >= 2 { + // For other sub-resources, return the first two parts as view path + if len(parts) >= 3 { + return parts[0] + "." + parts[1] + } + // If only 2 parts, it's likely "schema.view" already + return subResourcePath + } + } else if strings.HasPrefix(subResourceType, "materialized_view.") { + // For materialized view sub-resources, the path format is similar: + // - "schema.mv.resource_name" -> "schema.mv" (indexes, comments) + // - "schema.mv" -> "schema.mv" (materialized view-level comments) + parts := strings.Split(subResourcePath, ".") + + // Special handling for materialized view-level changes + if subResourceType == "materialized_view.comment" { + // For materialized view comments, the path is already the materialized view path + return subResourcePath + } + + if len(parts) >= 2 { + // For other sub-resources, return the first two parts as materialized view path + if len(parts) >= 3 { + return parts[0] + "." 
+ parts[1] + } + // If only 2 parts, it's likely "schema.materialized_view" already + return subResourcePath + } + } + return "" +} diff --git a/internal/plan/schema_plan_test.go b/internal/plan/schema_plan_test.go new file mode 100644 index 00000000..86fed53c --- /dev/null +++ b/internal/plan/schema_plan_test.go @@ -0,0 +1,344 @@ +package plan + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/pgplex/pgschema/internal/diff" + "github.com/pgplex/pgschema/internal/postgres" + "github.com/pgplex/pgschema/ir" + "github.com/pgplex/pgschema/testutil" +) + +// sharedTestPostgres is the shared embedded postgres instance for all tests in this package +var sharedTestPostgres *postgres.EmbeddedPostgres + +// TestMain sets up shared resources for all tests in this package +func TestMain(m *testing.M) { + // Create shared embedded postgres for all tests to dramatically improve performance + sharedTestPostgres = testutil.SetupPostgres(nil) + defer sharedTestPostgres.Stop() + + m.Run() +} + +// discoverTestDataVersions discovers available test data versions in the testdata directory +func discoverTestDataVersions(testdataDir string) ([]string, error) { + entries, err := os.ReadDir(testdataDir) + if err != nil { + return nil, fmt.Errorf("failed to read testdata directory: %w", err) + } + var versions []string + for _, entry := range entries { + if entry.IsDir() { + // Check if the directory contains a plan.json file + planFile := filepath.Join(testdataDir, entry.Name(), "plan.json") + if _, err := os.Stat(planFile); err == nil { + versions = append(versions, entry.Name()) + } + } + } + // Sort versions to ensure deterministic test execution order + sort.Strings(versions) + return versions, nil +} + +// parseSQL is a helper function to convert SQL string to IR for tests +// Uses embedded PostgreSQL to ensure tests use the same code path as production +func parseSQL(t *testing.T, 
sql string) *ir.IR { + t.Helper() + return testutil.ParseSQLToIR(t, sharedTestPostgres, sql, "public") +} + +func TestSchemaPlanSummary(t *testing.T) { + oldSQL := `CREATE TABLE users ( + id integer NOT NULL + );` + + newSQL := `CREATE TABLE users ( + id integer NOT NULL, + name text NOT NULL + ); + CREATE TABLE posts ( + id integer NOT NULL, + title text NOT NULL + );` + + oldIR := parseSQL(t, oldSQL) + newIR := parseSQL(t, newSQL) + diffs := diff.GenerateMigration(oldIR, newIR, "public") + + sp := NewSchemaPlan(diffs) + summary := sp.HumanColored(false) + + // Debug: print the summary to see what it looks like + t.Logf("Summary output:\n%s", summary) + + if !strings.Contains(summary, "1 to add") { + t.Error("Summary should mention 1 resource to add") + } + + if !strings.Contains(summary, "1 to modify") { + t.Error("Summary should mention 1 resource to modify") + } + + // The colored output doesn't show "0 to drop" when there are no drops + if strings.Contains(summary, "to drop") && !strings.Contains(summary, "1 to add, 1 to modify") { + t.Error("Summary should not mention drops when there are none") + } +} + +func TestPlanJSONRoundTrip(t *testing.T) { + testDataDir := "../../testdata/diff/migrate" + + // Discover available test data versions dynamically + versions, err := discoverTestDataVersions(testDataDir) + if err != nil { + t.Fatalf("Failed to discover test data versions: %v", err) + } + + if len(versions) == 0 { + t.Skip("No test data versions found") + } + + for _, version := range versions { + t.Run(fmt.Sprintf("version_%s", version), func(t *testing.T) { + planFilePath := filepath.Join(testDataDir, version, "plan.json") + + // Read the original plan.json file + originalJSON, err := os.ReadFile(planFilePath) + if err != nil { + t.Fatalf("Failed to read %s: %v", planFilePath, err) + } + + // First FromJSON: Load plan from JSON + plan1, err := FromJSON(originalJSON) + if err != nil { + t.Fatalf("Failed to parse JSON from %s: %v", planFilePath, err) + } + + 
// Check if original JSON has source fields to determine debug mode + hasSourceFields := strings.Contains(string(originalJSON), `"source":`) + + // First ToJSON: Convert plan back to JSON with same debug mode as original + json1, err := plan1.ToJSONWithDebug(hasSourceFields) + if err != nil { + t.Fatalf("Failed to convert plan to JSON (first): %v", err) + } + + // Compare original JSON with first round-trip JSON + // Parse both JSON strings into maps to compare structure + var originalMap, roundTripMap map[string]interface{} + if err := json.Unmarshal(originalJSON, &originalMap); err != nil { + t.Fatalf("Failed to unmarshal original JSON: %v", err) + } + if err := json.Unmarshal([]byte(json1), &roundTripMap); err != nil { + t.Fatalf("Failed to unmarshal round-trip JSON: %v", err) + } + + // Use go-cmp to show detailed differences + if diff := cmp.Diff(originalMap, roundTripMap); diff != "" { + t.Errorf("JSON round-trip failed for %s: mismatch (-original +roundtrip):\n%s", version, diff) + } + + // Second round-trip: FromJSON -> ToJSON again + // This should produce identical string output + plan2, err := FromJSON([]byte(json1)) + if err != nil { + t.Fatalf("Failed to parse JSON from round-trip: %v", err) + } + + json2, err := plan2.ToJSONWithDebug(hasSourceFields) + if err != nil { + t.Fatalf("Failed to convert plan to JSON (second): %v", err) + } + + // After first round-trip, subsequent round-trips should produce identical strings + if json1 != json2 { + t.Errorf("JSON not stable after first round-trip for %s", version) + t.Logf("First round-trip length: %d", len(json1)) + t.Logf("Second round-trip length: %d", len(json2)) + + // Show structural differences if any + var map1, map2 map[string]interface{} + json.Unmarshal([]byte(json1), &map1) + json.Unmarshal([]byte(json2), &map2) + if diff := cmp.Diff(map1, map2); diff != "" { + t.Errorf("Structural difference in second round-trip (-first +second):\n%s", diff) + } + } + }) + } +} + +func TestSchemaPlanNoChanges(t 
*testing.T) { + sql := `CREATE TABLE users ( + id integer NOT NULL + );` + + oldIR := parseSQL(t, sql) + newIR := parseSQL(t, sql) + diffs := diff.GenerateMigration(oldIR, newIR, "public") + + sp := NewSchemaPlan(diffs) + summary := strings.TrimSpace(sp.HumanColored(false)) + + if summary != "No changes detected." { + t.Errorf("expected %q, got %q", "No changes detected.", summary) + } +} + +func TestPlanJSONLoadedSummary(t *testing.T) { + // Test that plans loaded from JSON can generate summaries using Steps metadata + t.Setenv("PGSCHEMA_TEST_TIME", "2025-01-01T00:00:00Z") + + // Create a plan with steps that have metadata + p := NewPlan() + p.AddSchema("public", &SchemaPlan{ + Groups: []ExecutionGroup{ + { + Steps: []Step{ + { + SQL: "CREATE TABLE users (id serial primary key);", + Type: "table", + Operation: "create", + Path: "public.users", + }, + { + SQL: "ALTER TABLE posts ADD COLUMN title text;", + Type: "table.column", + Operation: "create", + Path: "public.posts.title", + }, + }, + }, + }, + }) + + // Serialize to JSON (without SourceDiffs) + jsonData, err := p.ToJSON() + if err != nil { + t.Fatalf("Failed to serialize plan to JSON: %v", err) + } + + // Load plan from JSON + loadedPlan, err := FromJSON([]byte(jsonData)) + if err != nil { + t.Fatalf("Failed to load plan from JSON: %v", err) + } + + // Verify SourceDiffs is empty (as expected for JSON-loaded plans) + loadedSP := loadedPlan.Schemas["public"] + if len(loadedSP.SourceDiffs) != 0 { + t.Errorf("Expected empty SourceDiffs, got %d", len(loadedSP.SourceDiffs)) + } + + // Generate summary - this should work using Steps metadata + summary := loadedSP.HumanColored(false) + + // Verify summary contains expected information + if !strings.Contains(summary, "1 to add") { + t.Error("Summary should mention 1 resource to add") + } + + if !strings.Contains(summary, "Tables:") { + t.Error("Summary should contain Tables section") + } + + if !strings.Contains(summary, "users") { + t.Error("Summary should mention 
users table") + } + + if strings.Contains(summary, "No changes detected") { + t.Error("Summary should not say \"No changes detected\" when there are changes") + } +} + +func TestPlanDebugJSONRoundTrip(t *testing.T) { + // Issue #305: Plans generated with --debug produce JSON that cannot be + // deserialized by FromJSON() because the Diff.Source field is a Go interface + // (DiffSource) that json.Unmarshal cannot reconstruct. + t.Setenv("PGSCHEMA_TEST_TIME", "2025-01-01T00:00:00Z") + + oldSQL := `CREATE TABLE users ( + id integer NOT NULL + );` + + newSQL := `CREATE TABLE users ( + id integer NOT NULL, + name text NOT NULL + ); + CREATE TABLE posts ( + id integer NOT NULL, + title text NOT NULL + );` + + oldIR := parseSQL(t, oldSQL) + newIR := parseSQL(t, newSQL) + diffs := diff.GenerateMigration(oldIR, newIR, "public") + + sp := NewSchemaPlan(diffs) + p := NewPlan() + p.AddSchema("public", sp) + + // Serialize with debug mode (includes SourceDiffs; Diff.Source is excluded via json:"-") + debugJSON, err := p.ToJSONWithDebug(true) + if err != nil { + t.Fatalf("Failed to serialize plan with debug: %v", err) + } + + // Deserialize - this should succeed + loaded, err := FromJSON([]byte(debugJSON)) + if err != nil { + t.Fatalf("Failed to deserialize debug plan JSON: %v", err) + } + + // Verify debug mode actually included SourceDiffs + loadedSP := loaded.Schemas["public"] + if len(loadedSP.SourceDiffs) == 0 { + t.Error("Debug plan should include SourceDiffs") + } + + // Verify the loaded plan has valid groups and steps + if len(loadedSP.Groups) == 0 { + t.Error("Loaded plan should have at least one execution group") + } + + // Re-serialize without debug and verify round-trip stability + normalJSON, err := loaded.ToJSON() + if err != nil { + t.Fatalf("Failed to re-serialize loaded plan: %v", err) + } + + loaded2, err := FromJSON([]byte(normalJSON)) + if err != nil { + t.Fatalf("Failed to deserialize re-serialized plan: %v", err) + } + + loadedSP2 := 
loaded2.Schemas["public"] + if len(loadedSP2.Groups) != len(loadedSP.Groups) { + t.Errorf("Group count mismatch: got %d, want %d", len(loadedSP2.Groups), len(loadedSP.Groups)) + } +} + +// TestPlanSingleSchemaOmitsHeader verifies that single-schema plans +// render without a "Schema: ..." header line. +func TestPlanSingleSchemaOmitsHeader(t *testing.T) { + p := NewPlan() + p.AddSchema("public", NewSchemaPlan(nil)) + + output := p.HumanColored(false) + if strings.Contains(output, "Schema:") { + t.Error("single-schema plan should not contain schema header") + } +} + +// ignore unused import warning for time +var _ = time.Now diff --git a/testdata/diff/comment/add_column_comments/plan.json b/testdata/diff/comment/add_column_comments/plan.json index 75a28429..cbe9951d 100644 --- a/testdata/diff/comment/add_column_comments/plan.json +++ b/testdata/diff/comment/add_column_comments/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "1351e4ca7db945af39a49da2b23273d5b33a8b1b9bd3b6a45f3cb4cf2cfce1a2" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "1351e4ca7db945af39a49da2b23273d5b33a8b1b9bd3b6a45f3cb4cf2cfce1a2" + }, + "groups": [ { - "sql": "COMMENT ON COLUMN products.id IS 'Unique product identifier';", - "type": "table.column.comment", - "operation": "alter", - "path": "public.products.id" - }, - { - "sql": "COMMENT ON COLUMN products.name IS 'Product display name';", - "type": "table.column.comment", - "operation": "alter", - "path": "public.products.name" - }, - { - "sql": "COMMENT ON COLUMN products.price IS 'Product price in USD';", - "type": "table.column.comment", - "operation": "alter", - "path": "public.products.price" - }, - { - "sql": "COMMENT ON COLUMN products.created_at IS 'Timestamp when product was added';", - "type": "table.column.comment", - "operation": "alter", - "path": "public.products.created_at" + 
"steps": [ + { + "sql": "COMMENT ON COLUMN products.id IS 'Unique product identifier';", + "type": "table.column.comment", + "operation": "alter", + "path": "public.products.id" + }, + { + "sql": "COMMENT ON COLUMN products.name IS 'Product display name';", + "type": "table.column.comment", + "operation": "alter", + "path": "public.products.name" + }, + { + "sql": "COMMENT ON COLUMN products.price IS 'Product price in USD';", + "type": "table.column.comment", + "operation": "alter", + "path": "public.products.price" + }, + { + "sql": "COMMENT ON COLUMN products.created_at IS 'Timestamp when product was added';", + "type": "table.column.comment", + "operation": "alter", + "path": "public.products.created_at" + } + ] } ] } - ] + } } diff --git a/testdata/diff/comment/add_function_comment/plan.json b/testdata/diff/comment/add_function_comment/plan.json index a12030d2..05c2fdc1 100644 --- a/testdata/diff/comment/add_function_comment/plan.json +++ b/testdata/diff/comment/add_function_comment/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "b6bab766c934e98996959773eed9f4a536ad858a300beac3fc74b5edc0359228" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "b6bab766c934e98996959773eed9f4a536ad858a300beac3fc74b5edc0359228" + }, + "groups": [ { - "sql": "COMMENT ON FUNCTION calculate_total(numeric, integer) IS 'Calculates total price from unit price and quantity';", - "type": "function", - "operation": "alter", - "path": "public.calculate_total" + "steps": [ + { + "sql": "COMMENT ON FUNCTION calculate_total(numeric, integer) IS 'Calculates total price from unit price and quantity';", + "type": "function", + "operation": "alter", + "path": "public.calculate_total" + } + ] } ] } - ] + } } diff --git a/testdata/diff/comment/add_index_comment/plan.json b/testdata/diff/comment/add_index_comment/plan.json index 8a28ef83..6f2b19d8 
100644 --- a/testdata/diff/comment/add_index_comment/plan.json +++ b/testdata/diff/comment/add_index_comment/plan.json @@ -2,31 +2,35 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "a859ebafe82f0638592346ffb79d2bb11c1f0748d86308a87ff66c51abb68592" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "a859ebafe82f0638592346ffb79d2bb11c1f0748d86308a87ff66c51abb68592" + }, + "groups": [ { - "sql": "COMMENT ON INDEX idx_users_created_at IS 'Index for chronological user queries';", - "type": "table.index.comment", - "operation": "alter", - "path": "public.users.idx_users_created_at" - }, - { - "sql": "COMMENT ON INDEX idx_users_email IS 'Index for fast user lookup by email';", - "type": "table.index.comment", - "operation": "alter", - "path": "public.users.idx_users_email" - }, - { - "sql": "COMMENT ON INDEX idx_users_summary_email IS 'Index for email search on summary';", - "type": "materialized_view.index.comment", - "operation": "alter", - "path": "public.users_summary.idx_users_summary_email" + "steps": [ + { + "sql": "COMMENT ON INDEX idx_users_created_at IS 'Index for chronological user queries';", + "type": "table.index.comment", + "operation": "alter", + "path": "public.users.idx_users_created_at" + }, + { + "sql": "COMMENT ON INDEX idx_users_email IS 'Index for fast user lookup by email';", + "type": "table.index.comment", + "operation": "alter", + "path": "public.users.idx_users_email" + }, + { + "sql": "COMMENT ON INDEX idx_users_summary_email IS 'Index for email search on summary';", + "type": "materialized_view.index.comment", + "operation": "alter", + "path": "public.users_summary.idx_users_summary_email" + } + ] } ] } - ] + } } diff --git a/testdata/diff/comment/add_procedure_comment/plan.json b/testdata/diff/comment/add_procedure_comment/plan.json index 9c2ddf0d..14641375 100644 --- 
a/testdata/diff/comment/add_procedure_comment/plan.json +++ b/testdata/diff/comment/add_procedure_comment/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "f2a1199280f3ed3bb4f7dad3602c7f0fcacfce9d79e40201b48b5bf7325f5c64" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "f2a1199280f3ed3bb4f7dad3602c7f0fcacfce9d79e40201b48b5bf7325f5c64" + }, + "groups": [ { - "sql": "COMMENT ON PROCEDURE process_order(integer) IS 'Processes a single order by ID';", - "type": "procedure", - "operation": "alter", - "path": "public.process_order" + "steps": [ + { + "sql": "COMMENT ON PROCEDURE process_order(integer) IS 'Processes a single order by ID';", + "type": "procedure", + "operation": "alter", + "path": "public.process_order" + } + ] } ] } - ] + } } diff --git a/testdata/diff/comment/add_table_comment/plan.json b/testdata/diff/comment/add_table_comment/plan.json index 9d056dfe..7e976a62 100644 --- a/testdata/diff/comment/add_table_comment/plan.json +++ b/testdata/diff/comment/add_table_comment/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "f2623b8934b586c1ae51649bdfdcc295015334ce0d0cd6b7f4d6e2bc077030b3" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "f2623b8934b586c1ae51649bdfdcc295015334ce0d0cd6b7f4d6e2bc077030b3" + }, + "groups": [ { - "sql": "COMMENT ON TABLE users IS 'Stores user account information';", - "type": "table.comment", - "operation": "alter", - "path": "public.users" + "steps": [ + { + "sql": "COMMENT ON TABLE users IS 'Stores user account information';", + "type": "table.comment", + "operation": "alter", + "path": "public.users" + } + ] } ] } - ] + } } diff --git a/testdata/diff/comment/add_view_comment/plan.json 
b/testdata/diff/comment/add_view_comment/plan.json index db4e7a5d..e42b47a8 100644 --- a/testdata/diff/comment/add_view_comment/plan.json +++ b/testdata/diff/comment/add_view_comment/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "5f72cd96b40f589c3c326b1677cfe598fd18d335c67824a10a730f7179a7e79d" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "5f72cd96b40f589c3c326b1677cfe598fd18d335c67824a10a730f7179a7e79d" + }, + "groups": [ { - "sql": "COMMENT ON VIEW employee_view IS 'Shows all active employees';", - "type": "view", - "operation": "alter", - "path": "public.employee_view" + "steps": [ + { + "sql": "COMMENT ON VIEW employee_view IS 'Shows all active employees';", + "type": "view", + "operation": "alter", + "path": "public.employee_view" + } + ] } ] } - ] + } } diff --git a/testdata/diff/comment/alter_table_comment/plan.json b/testdata/diff/comment/alter_table_comment/plan.json index 58b9ce34..85821c6d 100644 --- a/testdata/diff/comment/alter_table_comment/plan.json +++ b/testdata/diff/comment/alter_table_comment/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "1f242a84c8de680321c9fc75dcc5a06760ac51cc74d2e6b6affe524e341745f9" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "1f242a84c8de680321c9fc75dcc5a06760ac51cc74d2e6b6affe524e341745f9" + }, + "groups": [ { - "sql": "COMMENT ON TABLE orders IS 'Customer orders with payment and shipping information';", - "type": "table.comment", - "operation": "alter", - "path": "public.orders" + "steps": [ + { + "sql": "COMMENT ON TABLE orders IS 'Customer orders with payment and shipping information';", + "type": "table.comment", + "operation": "alter", + "path": "public.orders" + } + ] } ] } - ] + } } diff --git 
a/testdata/diff/comment/column_comment_quoted_identifier/plan.json b/testdata/diff/comment/column_comment_quoted_identifier/plan.json index bc259316..f36e2de0 100644 --- a/testdata/diff/comment/column_comment_quoted_identifier/plan.json +++ b/testdata/diff/comment/column_comment_quoted_identifier/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "9d443bc536153eed8fce077bfacc3d7f42b1a94f02d33868bb78be3b9de05088" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "9d443bc536153eed8fce077bfacc3d7f42b1a94f02d33868bb78be3b9de05088" + }, + "groups": [ { - "sql": "COMMENT ON COLUMN ex.\"ID\" IS 'Primary identifier';", - "type": "table.column.comment", - "operation": "alter", - "path": "public.ex.ID" + "steps": [ + { + "sql": "COMMENT ON COLUMN ex.\"ID\" IS 'Primary identifier';", + "type": "table.column.comment", + "operation": "alter", + "path": "public.ex.ID" + } + ] } ] } - ] + } } diff --git a/testdata/diff/comment/drop_table_comment/plan.json b/testdata/diff/comment/drop_table_comment/plan.json index 6face916..5a83ede0 100644 --- a/testdata/diff/comment/drop_table_comment/plan.json +++ b/testdata/diff/comment/drop_table_comment/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "2ae68cfa8f7248d127b54e8c0ba366176b4e6ba698ed57883686f134a287ef16" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "2ae68cfa8f7248d127b54e8c0ba366176b4e6ba698ed57883686f134a287ef16" + }, + "groups": [ { - "sql": "COMMENT ON TABLE inventory IS NULL;", - "type": "table.comment", - "operation": "alter", - "path": "public.inventory" + "steps": [ + { + "sql": "COMMENT ON TABLE inventory IS NULL;", + "type": "table.comment", + "operation": "alter", + "path": "public.inventory" + } + ] } ] } - ] + } } 
diff --git a/testdata/diff/comment/mixed_comments/plan.json b/testdata/diff/comment/mixed_comments/plan.json index d61f58bf..8ffbcd67 100644 --- a/testdata/diff/comment/mixed_comments/plan.json +++ b/testdata/diff/comment/mixed_comments/plan.json @@ -2,115 +2,119 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "84e9a9b7c080fc6d686f528f11d070d030b1ab82e66a2ad98050868d66d2f98a" - }, - "groups": [ - { - "steps": [ - { - "sql": "COMMENT ON TABLE categories IS 'Hierarchical category system for posts';", - "type": "table.comment", - "operation": "alter", - "path": "public.categories" - }, - { - "sql": "COMMENT ON COLUMN categories.id IS 'Category unique identifier';", - "type": "table.column.comment", - "operation": "alter", - "path": "public.categories.id" - }, - { - "sql": "COMMENT ON COLUMN categories.name IS 'Category display name';", - "type": "table.column.comment", - "operation": "alter", - "path": "public.categories.name" - }, - { - "sql": "COMMENT ON COLUMN categories.description IS 'Optional category description';", - "type": "table.column.comment", - "operation": "alter", - "path": "public.categories.description" - }, - { - "sql": "COMMENT ON COLUMN categories.parent_id IS 'Parent category for hierarchical structure';", - "type": "table.column.comment", - "operation": "alter", - "path": "public.categories.parent_id" - }, - { - "sql": "COMMENT ON COLUMN categories.created_at IS 'Category creation timestamp';", - "type": "table.column.comment", - "operation": "alter", - "path": "public.categories.created_at" - }, - { - "sql": "COMMENT ON INDEX idx_categories_parent IS 'Index for hierarchical category queries';", - "type": "table.index.comment", - "operation": "alter", - "path": "public.categories.idx_categories_parent" - }, - { - "sql": "ALTER TABLE posts ADD COLUMN views integer DEFAULT 0;", - "type": "table.column", - "operation": "create", - "path": "public.posts.views" - }, - { - 
"sql": "COMMENT ON COLUMN posts.views IS 'Number of post views';", - "type": "table.column.comment", - "operation": "create", - "path": "public.posts.views" - }, - { - "sql": "COMMENT ON TABLE posts IS 'Blog posts and articles';", - "type": "table.comment", - "operation": "alter", - "path": "public.posts" - }, - { - "sql": "COMMENT ON COLUMN posts.id IS 'Unique post identifier';", - "type": "table.column.comment", - "operation": "alter", - "path": "public.posts.id" - }, - { - "sql": "COMMENT ON COLUMN posts.title IS 'Post title, max 200 characters';", - "type": "table.column.comment", - "operation": "alter", - "path": "public.posts.title" - }, - { - "sql": "COMMENT ON COLUMN posts.content IS 'Post body in markdown format';", - "type": "table.column.comment", - "operation": "alter", - "path": "public.posts.content" - }, - { - "sql": "COMMENT ON COLUMN posts.author_id IS 'Foreign key to users table';", - "type": "table.column.comment", - "operation": "alter", - "path": "public.posts.author_id" - }, - { - "sql": "COMMENT ON COLUMN posts.published_at IS 'Publication timestamp, NULL for drafts';", - "type": "table.column.comment", - "operation": "alter", - "path": "public.posts.published_at" - }, - { - "sql": "COMMENT ON INDEX idx_posts_author IS 'Index for finding posts by author';", - "type": "table.index.comment", - "operation": "alter", - "path": "public.posts.idx_posts_author" - }, - { - "sql": "COMMENT ON INDEX idx_posts_published IS 'Partial index for published posts only';", - "type": "table.index.comment", - "operation": "alter", - "path": "public.posts.idx_posts_published" + "schemas": { + "public": { + "source_fingerprint": { + "hash": "84e9a9b7c080fc6d686f528f11d070d030b1ab82e66a2ad98050868d66d2f98a" + }, + "groups": [ + { + "steps": [ + { + "sql": "COMMENT ON TABLE categories IS 'Hierarchical category system for posts';", + "type": "table.comment", + "operation": "alter", + "path": "public.categories" + }, + { + "sql": "COMMENT ON COLUMN categories.id IS 
'Category unique identifier';", + "type": "table.column.comment", + "operation": "alter", + "path": "public.categories.id" + }, + { + "sql": "COMMENT ON COLUMN categories.name IS 'Category display name';", + "type": "table.column.comment", + "operation": "alter", + "path": "public.categories.name" + }, + { + "sql": "COMMENT ON COLUMN categories.description IS 'Optional category description';", + "type": "table.column.comment", + "operation": "alter", + "path": "public.categories.description" + }, + { + "sql": "COMMENT ON COLUMN categories.parent_id IS 'Parent category for hierarchical structure';", + "type": "table.column.comment", + "operation": "alter", + "path": "public.categories.parent_id" + }, + { + "sql": "COMMENT ON COLUMN categories.created_at IS 'Category creation timestamp';", + "type": "table.column.comment", + "operation": "alter", + "path": "public.categories.created_at" + }, + { + "sql": "COMMENT ON INDEX idx_categories_parent IS 'Index for hierarchical category queries';", + "type": "table.index.comment", + "operation": "alter", + "path": "public.categories.idx_categories_parent" + }, + { + "sql": "ALTER TABLE posts ADD COLUMN views integer DEFAULT 0;", + "type": "table.column", + "operation": "create", + "path": "public.posts.views" + }, + { + "sql": "COMMENT ON COLUMN posts.views IS 'Number of post views';", + "type": "table.column.comment", + "operation": "create", + "path": "public.posts.views" + }, + { + "sql": "COMMENT ON TABLE posts IS 'Blog posts and articles';", + "type": "table.comment", + "operation": "alter", + "path": "public.posts" + }, + { + "sql": "COMMENT ON COLUMN posts.id IS 'Unique post identifier';", + "type": "table.column.comment", + "operation": "alter", + "path": "public.posts.id" + }, + { + "sql": "COMMENT ON COLUMN posts.title IS 'Post title, max 200 characters';", + "type": "table.column.comment", + "operation": "alter", + "path": "public.posts.title" + }, + { + "sql": "COMMENT ON COLUMN posts.content IS 'Post body in 
markdown format';", + "type": "table.column.comment", + "operation": "alter", + "path": "public.posts.content" + }, + { + "sql": "COMMENT ON COLUMN posts.author_id IS 'Foreign key to users table';", + "type": "table.column.comment", + "operation": "alter", + "path": "public.posts.author_id" + }, + { + "sql": "COMMENT ON COLUMN posts.published_at IS 'Publication timestamp, NULL for drafts';", + "type": "table.column.comment", + "operation": "alter", + "path": "public.posts.published_at" + }, + { + "sql": "COMMENT ON INDEX idx_posts_author IS 'Index for finding posts by author';", + "type": "table.index.comment", + "operation": "alter", + "path": "public.posts.idx_posts_author" + }, + { + "sql": "COMMENT ON INDEX idx_posts_published IS 'Partial index for published posts only';", + "type": "table.index.comment", + "operation": "alter", + "path": "public.posts.idx_posts_published" + } + ] } ] } - ] + } } diff --git a/testdata/diff/comment/noop_column_comments/plan.json b/testdata/diff/comment/noop_column_comments/plan.json index 8f631365..5504ae05 100644 --- a/testdata/diff/comment/noop_column_comments/plan.json +++ b/testdata/diff/comment/noop_column_comments/plan.json @@ -2,8 +2,12 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "06d2c3351398ca9ab591c3985cf2791ac3ff5b960eadac98470abbc0b611734a" - }, - "groups": null + "schemas": { + "public": { + "source_fingerprint": { + "hash": "06d2c3351398ca9ab591c3985cf2791ac3ff5b960eadac98470abbc0b611734a" + }, + "groups": null + } + } } diff --git a/testdata/diff/create_domain/add_domain/plan.json b/testdata/diff/create_domain/add_domain/plan.json index 0c574034..43313e84 100644 --- a/testdata/diff/create_domain/add_domain/plan.json +++ b/testdata/diff/create_domain/add_domain/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": 
"965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE DOMAIN email_address AS text\n DEFAULT 'example@acme.com'\n NOT NULL\n CONSTRAINT email_address_check CHECK (VALUE ~ '^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$');", - "type": "domain", - "operation": "create", - "path": "public.email_address" + "steps": [ + { + "sql": "CREATE DOMAIN email_address AS text\n DEFAULT 'example@acme.com'\n NOT NULL\n CONSTRAINT email_address_check CHECK (VALUE ~ '^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$');", + "type": "domain", + "operation": "create", + "path": "public.email_address" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_domain/alter_domain/plan.json b/testdata/diff/create_domain/alter_domain/plan.json index 42fbbe96..ecaab221 100644 --- a/testdata/diff/create_domain/alter_domain/plan.json +++ b/testdata/diff/create_domain/alter_domain/plan.json @@ -2,31 +2,35 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "d570de7a7448f1c04bf2bbb4e3a788a9a6242254255b68b60447dba542f34222" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "d570de7a7448f1c04bf2bbb4e3a788a9a6242254255b68b60447dba542f34222" + }, + "groups": [ { - "sql": "ALTER DOMAIN user_rating SET DEFAULT 3;", - "type": "domain", - "operation": "alter", - "path": "public.user_rating" - }, - { - "sql": "ALTER DOMAIN user_rating DROP CONSTRAINT user_rating_check;", - "type": "domain", - "operation": "alter", - "path": "public.user_rating" - }, - { - "sql": "ALTER DOMAIN user_rating ADD CONSTRAINT user_rating_check CHECK (VALUE >= 1 AND VALUE <= 10);", - "type": "domain", - "operation": "alter", - "path": "public.user_rating" + "steps": [ + { + "sql": "ALTER 
DOMAIN user_rating SET DEFAULT 3;", + "type": "domain", + "operation": "alter", + "path": "public.user_rating" + }, + { + "sql": "ALTER DOMAIN user_rating DROP CONSTRAINT user_rating_check;", + "type": "domain", + "operation": "alter", + "path": "public.user_rating" + }, + { + "sql": "ALTER DOMAIN user_rating ADD CONSTRAINT user_rating_check CHECK (VALUE >= 1 AND VALUE <= 10);", + "type": "domain", + "operation": "alter", + "path": "public.user_rating" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_domain/domain_function_check_dependency/plan.json b/testdata/diff/create_domain/domain_function_check_dependency/plan.json index 90810aa6..2cf5aeb2 100644 --- a/testdata/diff/create_domain/domain_function_check_dependency/plan.json +++ b/testdata/diff/create_domain/domain_function_check_dependency/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE OR REPLACE FUNCTION validate_custom_id(\n val text\n)\nRETURNS boolean\nLANGUAGE plpgsql\nIMMUTABLE\nAS $$\nBEGIN\n -- Simple validation: must start with 'id_' and be at least 5 characters\n RETURN val IS NOT NULL AND val LIKE 'id_%' AND length(val) >= 5;\nEND\n$$;", - "type": "function", - "operation": "create", - "path": "public.validate_custom_id" - }, - { - "sql": "CREATE DOMAIN custom_id AS text\n CONSTRAINT custom_id_check CHECK (validate_custom_id(VALUE));", - "type": "domain", - "operation": "create", - "path": "public.custom_id" + "steps": [ + { + "sql": "CREATE OR REPLACE FUNCTION validate_custom_id(\n val text\n)\nRETURNS boolean\nLANGUAGE plpgsql\nIMMUTABLE\nAS $$\nBEGIN\n -- Simple validation: must start with 'id_' and be at least 5 
characters\n RETURN val IS NOT NULL AND val LIKE 'id_%' AND length(val) >= 5;\nEND\n$$;", + "type": "function", + "operation": "create", + "path": "public.validate_custom_id" + }, + { + "sql": "CREATE DOMAIN custom_id AS text\n CONSTRAINT custom_id_check CHECK (validate_custom_id(VALUE));", + "type": "domain", + "operation": "create", + "path": "public.custom_id" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_domain/domain_function_table_dependency/plan.json b/testdata/diff/create_domain/domain_function_table_dependency/plan.json index 461f7535..67d99fed 100644 --- a/testdata/diff/create_domain/domain_function_table_dependency/plan.json +++ b/testdata/diff/create_domain/domain_function_table_dependency/plan.json @@ -2,31 +2,35 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE OR REPLACE FUNCTION validate_custom_id(\n val text\n)\nRETURNS boolean\nLANGUAGE plpgsql\nIMMUTABLE\nAS $$\nBEGIN\n RETURN val IS NOT NULL AND val LIKE 'id_%' AND length(val) >= 5;\nEND\n$$;", - "type": "function", - "operation": "create", - "path": "public.validate_custom_id" - }, - { - "sql": "CREATE DOMAIN custom_id AS text\n CONSTRAINT custom_id_check CHECK (validate_custom_id(VALUE));", - "type": "domain", - "operation": "create", - "path": "public.custom_id" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS example (\n id custom_id,\n CONSTRAINT example_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.example" + "steps": [ + { + "sql": "CREATE OR REPLACE FUNCTION validate_custom_id(\n val text\n)\nRETURNS boolean\nLANGUAGE plpgsql\nIMMUTABLE\nAS $$\nBEGIN\n RETURN val IS NOT NULL AND val LIKE 
'id_%' AND length(val) >= 5;\nEND\n$$;", + "type": "function", + "operation": "create", + "path": "public.validate_custom_id" + }, + { + "sql": "CREATE DOMAIN custom_id AS text\n CONSTRAINT custom_id_check CHECK (validate_custom_id(VALUE));", + "type": "domain", + "operation": "create", + "path": "public.custom_id" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS example (\n id custom_id,\n CONSTRAINT example_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.example" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_domain/drop_domain/plan.json b/testdata/diff/create_domain/drop_domain/plan.json index 240fd892..d6172729 100644 --- a/testdata/diff/create_domain/drop_domain/plan.json +++ b/testdata/diff/create_domain/drop_domain/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "88b0ac5110398d27cc662d90483bb8a1616d8bc0a9034e3e57b2ce1d7550a406" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "88b0ac5110398d27cc662d90483bb8a1616d8bc0a9034e3e57b2ce1d7550a406" + }, + "groups": [ { - "sql": "DROP DOMAIN IF EXISTS product_code RESTRICT;", - "type": "domain", - "operation": "drop", - "path": "public.product_code" + "steps": [ + { + "sql": "DROP DOMAIN IF EXISTS product_code RESTRICT;", + "type": "domain", + "operation": "drop", + "path": "public.product_code" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_function/add_function/plan.json b/testdata/diff/create_function/add_function/plan.json index 79203e9f..7f95608a 100644 --- a/testdata/diff/create_function/add_function/plan.json +++ b/testdata/diff/create_function/add_function/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - 
"steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE OR REPLACE FUNCTION add_with_tax(\n amount numeric,\n tax_rate numeric DEFAULT 0.1\n)\nRETURNS numeric\nLANGUAGE sql\nVOLATILE\nBEGIN ATOMIC\n SELECT (amount + (amount * tax_rate));\nEND;", - "type": "function", - "operation": "create", - "path": "public.add_with_tax" - }, - { - "sql": "CREATE OR REPLACE FUNCTION calculate_tax(\n amount numeric,\n rate numeric\n)\nRETURNS numeric\nLANGUAGE sql\nIMMUTABLE\nPARALLEL SAFE\nAS $$\n SELECT amount * rate;\n$$;", - "type": "function", - "operation": "create", - "path": "public.calculate_tax" - }, - { - "sql": "CREATE OR REPLACE FUNCTION mask_sensitive_data(\n input text\n)\nRETURNS text\nLANGUAGE sql\nSTABLE\nLEAKPROOF\nAS $$\n SELECT '***' || substring(input from 4);\n$$;", - "type": "function", - "operation": "create", - "path": "public.mask_sensitive_data" - }, - { - "sql": "CREATE OR REPLACE FUNCTION process_order(\n order_id integer,\n discount_percent numeric DEFAULT 0,\n priority_level integer DEFAULT 1,\n note varchar DEFAULT '',\n status text DEFAULT 'pending',\n apply_tax boolean DEFAULT true,\n is_priority boolean DEFAULT false,\n expiry_date date DEFAULT (CURRENT_DATE + '1 year'::interval)\n)\nRETURNS numeric\nLANGUAGE plpgsql\nVOLATILE\nSTRICT\nSECURITY DEFINER\nLEAKPROOF\nPARALLEL RESTRICTED\nSET search_path = pg_catalog, public\nAS $$\nDECLARE\n total numeric;\nBEGIN\n SELECT amount INTO total FROM orders WHERE id = order_id;\n RETURN total - (total * discount_percent / 100);\nEND;\n$$;", - "type": "function", - "operation": "create", - "path": "public.process_order" + "steps": [ + { + "sql": "CREATE OR REPLACE FUNCTION add_with_tax(\n amount numeric,\n tax_rate numeric DEFAULT 0.1\n)\nRETURNS numeric\nLANGUAGE sql\nVOLATILE\nBEGIN ATOMIC\n SELECT (amount + (amount * tax_rate));\nEND;", + "type": "function", + "operation": 
"create", + "path": "public.add_with_tax" + }, + { + "sql": "CREATE OR REPLACE FUNCTION calculate_tax(\n amount numeric,\n rate numeric\n)\nRETURNS numeric\nLANGUAGE sql\nIMMUTABLE\nPARALLEL SAFE\nAS $$\n SELECT amount * rate;\n$$;", + "type": "function", + "operation": "create", + "path": "public.calculate_tax" + }, + { + "sql": "CREATE OR REPLACE FUNCTION mask_sensitive_data(\n input text\n)\nRETURNS text\nLANGUAGE sql\nSTABLE\nLEAKPROOF\nAS $$\n SELECT '***' || substring(input from 4);\n$$;", + "type": "function", + "operation": "create", + "path": "public.mask_sensitive_data" + }, + { + "sql": "CREATE OR REPLACE FUNCTION process_order(\n order_id integer,\n discount_percent numeric DEFAULT 0,\n priority_level integer DEFAULT 1,\n note varchar DEFAULT '',\n status text DEFAULT 'pending',\n apply_tax boolean DEFAULT true,\n is_priority boolean DEFAULT false,\n expiry_date date DEFAULT (CURRENT_DATE + '1 year'::interval)\n)\nRETURNS numeric\nLANGUAGE plpgsql\nVOLATILE\nSTRICT\nSECURITY DEFINER\nLEAKPROOF\nPARALLEL RESTRICTED\nSET search_path = pg_catalog, public\nAS $$\nDECLARE\n total numeric;\nBEGIN\n SELECT amount INTO total FROM orders WHERE id = order_id;\n RETURN total - (total * discount_percent / 100);\nEND;\n$$;", + "type": "function", + "operation": "create", + "path": "public.process_order" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_function/alter_function_attributes/plan.json b/testdata/diff/create_function/alter_function_attributes/plan.json index 653e5881..9eaa9752 100644 --- a/testdata/diff/create_function/alter_function_attributes/plan.json +++ b/testdata/diff/create_function/alter_function_attributes/plan.json @@ -2,43 +2,47 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "1f121ae09b8a9c9a88444396c16c27b8690f6ff7a123cf72c204103111a49649" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": 
"1f121ae09b8a9c9a88444396c16c27b8690f6ff7a123cf72c204103111a49649" + }, + "groups": [ { - "sql": "ALTER FUNCTION calculate_total(numeric, numeric) PARALLEL SAFE;", - "type": "function", - "operation": "alter", - "path": "public.calculate_total" - }, - { - "sql": "ALTER FUNCTION calculate_total(numeric, numeric) LEAKPROOF;", - "type": "function", - "operation": "alter", - "path": "public.calculate_total" - }, - { - "sql": "ALTER FUNCTION process_data(text) PARALLEL SAFE;", - "type": "function", - "operation": "alter", - "path": "public.process_data" - }, - { - "sql": "ALTER FUNCTION process_data(text) LEAKPROOF;", - "type": "function", - "operation": "alter", - "path": "public.process_data" - }, - { - "sql": "CREATE OR REPLACE FUNCTION secure_lookup(\n id integer\n)\nRETURNS text\nLANGUAGE plpgsql\nVOLATILE\nSET search_path = pg_catalog\nAS $$\nBEGIN\n RETURN 'result';\nEND;\n$$;", - "type": "function", - "operation": "alter", - "path": "public.secure_lookup" + "steps": [ + { + "sql": "ALTER FUNCTION calculate_total(numeric, numeric) PARALLEL SAFE;", + "type": "function", + "operation": "alter", + "path": "public.calculate_total" + }, + { + "sql": "ALTER FUNCTION calculate_total(numeric, numeric) LEAKPROOF;", + "type": "function", + "operation": "alter", + "path": "public.calculate_total" + }, + { + "sql": "ALTER FUNCTION process_data(text) PARALLEL SAFE;", + "type": "function", + "operation": "alter", + "path": "public.process_data" + }, + { + "sql": "ALTER FUNCTION process_data(text) LEAKPROOF;", + "type": "function", + "operation": "alter", + "path": "public.process_data" + }, + { + "sql": "CREATE OR REPLACE FUNCTION secure_lookup(\n id integer\n)\nRETURNS text\nLANGUAGE plpgsql\nVOLATILE\nSET search_path = pg_catalog\nAS $$\nBEGIN\n RETURN 'result';\nEND;\n$$;", + "type": "function", + "operation": "alter", + "path": "public.secure_lookup" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_function/alter_function_different_signature/plan.json 
b/testdata/diff/create_function/alter_function_different_signature/plan.json index 6b0b25e4..65a75f55 100644 --- a/testdata/diff/create_function/alter_function_different_signature/plan.json +++ b/testdata/diff/create_function/alter_function_different_signature/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "897bceabded15a8e2f91cc1081213c355d1c54584f1787dd4f7a81f9aa038636" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "897bceabded15a8e2f91cc1081213c355d1c54584f1787dd4f7a81f9aa038636" + }, + "groups": [ { - "sql": "DROP FUNCTION IF EXISTS process_order(integer, numeric);", - "type": "function", - "operation": "drop", - "path": "public.process_order" - }, - { - "sql": "CREATE OR REPLACE FUNCTION process_order(\n customer_email text,\n priority boolean\n)\nRETURNS TABLE(status text, processed_at timestamp)\nLANGUAGE plpgsql\nSTABLE\nSECURITY DEFINER\nAS $$\nBEGIN\n RETURN QUERY\n SELECT 'completed'::text, NOW()\n WHERE priority = true;\nEND;\n$$;", - "type": "function", - "operation": "create", - "path": "public.process_order" + "steps": [ + { + "sql": "DROP FUNCTION IF EXISTS process_order(integer, numeric);", + "type": "function", + "operation": "drop", + "path": "public.process_order" + }, + { + "sql": "CREATE OR REPLACE FUNCTION process_order(\n customer_email text,\n priority boolean\n)\nRETURNS TABLE(status text, processed_at timestamp)\nLANGUAGE plpgsql\nSTABLE\nSECURITY DEFINER\nAS $$\nBEGIN\n RETURN QUERY\n SELECT 'completed'::text, NOW()\n WHERE priority = true;\nEND;\n$$;", + "type": "function", + "operation": "create", + "path": "public.process_order" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_function/alter_function_same_signature/plan.json b/testdata/diff/create_function/alter_function_same_signature/plan.json index ac056687..caea4ea3 100644 --- 
a/testdata/diff/create_function/alter_function_same_signature/plan.json +++ b/testdata/diff/create_function/alter_function_same_signature/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "fc335bb328a0b47f89b922eadc006adbeadd205c59890bb5523a57bb00e854b9" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "fc335bb328a0b47f89b922eadc006adbeadd205c59890bb5523a57bb00e854b9" + }, + "groups": [ { - "sql": "CREATE OR REPLACE FUNCTION process_order(\n order_id integer,\n discount_percent numeric DEFAULT 0,\n status order_status DEFAULT 'pending'::order_status,\n priority utils.priority_level DEFAULT 'medium'::utils.priority_level\n)\nRETURNS numeric\nLANGUAGE plpgsql\nSTABLE\nAS $$\nDECLARE\n base_price numeric;\n tax_rate numeric := 0.08;\nBEGIN\n -- Different logic: calculate with tax instead of just discount\n -- Status and priority parameters are available but not used in this simplified version\n SELECT price INTO base_price FROM products WHERE id = order_id;\n RETURN base_price * (1 - discount_percent / 100) * (1 + tax_rate);\nEND;\n$$;", - "type": "function", - "operation": "alter", - "path": "public.process_order" + "steps": [ + { + "sql": "CREATE OR REPLACE FUNCTION process_order(\n order_id integer,\n discount_percent numeric DEFAULT 0,\n status order_status DEFAULT 'pending'::order_status,\n priority utils.priority_level DEFAULT 'medium'::utils.priority_level\n)\nRETURNS numeric\nLANGUAGE plpgsql\nSTABLE\nAS $$\nDECLARE\n base_price numeric;\n tax_rate numeric := 0.08;\nBEGIN\n -- Different logic: calculate with tax instead of just discount\n -- Status and priority parameters are available but not used in this simplified version\n SELECT price INTO base_price FROM products WHERE id = order_id;\n RETURN base_price * (1 - discount_percent / 100) * (1 + tax_rate);\nEND;\n$$;", + "type": "function", + "operation": 
"alter", + "path": "public.process_order" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_function/drop_function/plan.json b/testdata/diff/create_function/drop_function/plan.json index e8289721..99e2db4c 100644 --- a/testdata/diff/create_function/drop_function/plan.json +++ b/testdata/diff/create_function/drop_function/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "755b64b40a0ad2e6918bb3ff23f9b9ac6936c38ff588b5ad7ddc98bbf36315de" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "755b64b40a0ad2e6918bb3ff23f9b9ac6936c38ff588b5ad7ddc98bbf36315de" + }, + "groups": [ { - "sql": "REVOKE EXECUTE ON FUNCTION process_order(order_id integer, discount_percent numeric) FROM api_role;", - "type": "privilege", - "operation": "drop", - "path": "privileges.FUNCTION.process_order(order_id integer, discount_percent numeric).api_role" - }, - { - "sql": "DROP FUNCTION IF EXISTS process_payment(integer, text);", - "type": "function", - "operation": "drop", - "path": "public.process_payment" - }, - { - "sql": "DROP FUNCTION IF EXISTS process_order(integer, numeric);", - "type": "function", - "operation": "drop", - "path": "public.process_order" - }, - { - "sql": "DROP FUNCTION IF EXISTS get_user_stats(integer);", - "type": "function", - "operation": "drop", - "path": "public.get_user_stats" + "steps": [ + { + "sql": "REVOKE EXECUTE ON FUNCTION process_order(order_id integer, discount_percent numeric) FROM api_role;", + "type": "privilege", + "operation": "drop", + "path": "privileges.FUNCTION.process_order(order_id integer, discount_percent numeric).api_role" + }, + { + "sql": "DROP FUNCTION IF EXISTS process_payment(integer, text);", + "type": "function", + "operation": "drop", + "path": "public.process_payment" + }, + { + "sql": "DROP FUNCTION IF EXISTS process_order(integer, numeric);", + "type": "function", + 
"operation": "drop", + "path": "public.process_order" + }, + { + "sql": "DROP FUNCTION IF EXISTS get_user_stats(integer);", + "type": "function", + "operation": "drop", + "path": "public.get_user_stats" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_function/issue_326_param_name_change/plan.json b/testdata/diff/create_function/issue_326_param_name_change/plan.json index 317e0d69..9e188ae5 100644 --- a/testdata/diff/create_function/issue_326_param_name_change/plan.json +++ b/testdata/diff/create_function/issue_326_param_name_change/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "d87f2cfffc1d1273ca588466e14557b2698607c55dc7c8a0e44317046e3c95a9" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "d87f2cfffc1d1273ca588466e14557b2698607c55dc7c8a0e44317046e3c95a9" + }, + "groups": [ { - "sql": "DROP FUNCTION IF EXISTS somefunction(text);", - "type": "function", - "operation": "alter", - "path": "public.somefunction" - }, - { - "sql": "CREATE OR REPLACE FUNCTION somefunction(\n new_name text\n)\nRETURNS text\nLANGUAGE sql\nVOLATILE\nAS $$ SELECT new_name;\n$$;", - "type": "function", - "operation": "alter", - "path": "public.somefunction" + "steps": [ + { + "sql": "DROP FUNCTION IF EXISTS somefunction(text);", + "type": "function", + "operation": "alter", + "path": "public.somefunction" + }, + { + "sql": "CREATE OR REPLACE FUNCTION somefunction(\n new_name text\n)\nRETURNS text\nLANGUAGE sql\nVOLATILE\nAS $$ SELECT new_name;\n$$;", + "type": "function", + "operation": "alter", + "path": "public.somefunction" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_function/issue_326_param_type_change/plan.json b/testdata/diff/create_function/issue_326_param_type_change/plan.json index fbada25e..845a26b0 100644 --- a/testdata/diff/create_function/issue_326_param_type_change/plan.json +++ 
b/testdata/diff/create_function/issue_326_param_type_change/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "59a96fc0ed0cbfa32f92a9d869bbbc6e38359d50afcd5ea6eb4f388717c48135" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "59a96fc0ed0cbfa32f92a9d869bbbc6e38359d50afcd5ea6eb4f388717c48135" + }, + "groups": [ { - "sql": "DROP FUNCTION IF EXISTS somefunction(text);", - "type": "function", - "operation": "drop", - "path": "public.somefunction" - }, - { - "sql": "CREATE OR REPLACE FUNCTION somefunction(\n param2 uuid\n)\nRETURNS uuid\nLANGUAGE sql\nVOLATILE\nAS $$ SELECT param2;\n$$;", - "type": "function", - "operation": "create", - "path": "public.somefunction" + "steps": [ + { + "sql": "DROP FUNCTION IF EXISTS somefunction(text);", + "type": "function", + "operation": "drop", + "path": "public.somefunction" + }, + { + "sql": "CREATE OR REPLACE FUNCTION somefunction(\n param2 uuid\n)\nRETURNS uuid\nLANGUAGE sql\nVOLATILE\nAS $$ SELECT param2;\n$$;", + "type": "function", + "operation": "create", + "path": "public.somefunction" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_function/issue_326_return_type_change/plan.json b/testdata/diff/create_function/issue_326_return_type_change/plan.json index b42bd902..c39e40d9 100644 --- a/testdata/diff/create_function/issue_326_return_type_change/plan.json +++ b/testdata/diff/create_function/issue_326_return_type_change/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "59a96fc0ed0cbfa32f92a9d869bbbc6e38359d50afcd5ea6eb4f388717c48135" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "59a96fc0ed0cbfa32f92a9d869bbbc6e38359d50afcd5ea6eb4f388717c48135" + }, + "groups": [ { - "sql": "DROP FUNCTION IF EXISTS 
somefunction(text);", - "type": "function", - "operation": "alter", - "path": "public.somefunction" - }, - { - "sql": "CREATE OR REPLACE FUNCTION somefunction(\n param1 text\n)\nRETURNS integer\nLANGUAGE sql\nVOLATILE\nAS $$ SELECT length(param1);\n$$;", - "type": "function", - "operation": "alter", - "path": "public.somefunction" + "steps": [ + { + "sql": "DROP FUNCTION IF EXISTS somefunction(text);", + "type": "function", + "operation": "alter", + "path": "public.somefunction" + }, + { + "sql": "CREATE OR REPLACE FUNCTION somefunction(\n param1 text\n)\nRETURNS integer\nLANGUAGE sql\nVOLATILE\nAS $$ SELECT length(param1);\n$$;", + "type": "function", + "operation": "alter", + "path": "public.somefunction" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_function/issue_335_search_path_rewrite/plan.json b/testdata/diff/create_function/issue_335_search_path_rewrite/plan.json index f1004e89..ed37c816 100644 --- a/testdata/diff/create_function/issue_335_search_path_rewrite/plan.json +++ b/testdata/diff/create_function/issue_335_search_path_rewrite/plan.json @@ -2,31 +2,35 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS person_accounts (\n id uuid DEFAULT gen_random_uuid(),\n first_name text,\n last_name text,\n email_address text NOT NULL,\n created_at timestamptz DEFAULT now() NOT NULL,\n modified_at timestamptz DEFAULT now() NOT NULL,\n CONSTRAINT person_accounts_pkey PRIMARY KEY (id),\n CONSTRAINT person_accounts_email_address_key UNIQUE (email_address)\n);", - "type": "table", - "operation": "create", - "path": "public.person_accounts" - }, - { - "sql": "CREATE OR REPLACE FUNCTION 
auth_lookup_account_by_email(\n input_email text\n)\nRETURNS text\nLANGUAGE sql\nSTABLE\nSECURITY DEFINER\nSET search_path = public, pg_temp\nAS $$\n SELECT\n pa.id::text AS person_account_id\n FROM person_accounts pa\n WHERE lower(pa.email_address) = lower(trim(input_email))\n LIMIT 1;\n$$;", - "type": "function", - "operation": "create", - "path": "public.auth_lookup_account_by_email" - }, - { - "sql": "REVOKE EXECUTE ON FUNCTION auth_lookup_account_by_email(input_email text) FROM PUBLIC;", - "type": "revoked_default_privilege", - "operation": "create", - "path": "revoked_default.FUNCTION.auth_lookup_account_by_email(input_email text)" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS person_accounts (\n id uuid DEFAULT gen_random_uuid(),\n first_name text,\n last_name text,\n email_address text NOT NULL,\n created_at timestamptz DEFAULT now() NOT NULL,\n modified_at timestamptz DEFAULT now() NOT NULL,\n CONSTRAINT person_accounts_pkey PRIMARY KEY (id),\n CONSTRAINT person_accounts_email_address_key UNIQUE (email_address)\n);", + "type": "table", + "operation": "create", + "path": "public.person_accounts" + }, + { + "sql": "CREATE OR REPLACE FUNCTION auth_lookup_account_by_email(\n input_email text\n)\nRETURNS text\nLANGUAGE sql\nSTABLE\nSECURITY DEFINER\nSET search_path = public, pg_temp\nAS $$\n SELECT\n pa.id::text AS person_account_id\n FROM person_accounts pa\n WHERE lower(pa.email_address) = lower(trim(input_email))\n LIMIT 1;\n$$;", + "type": "function", + "operation": "create", + "path": "public.auth_lookup_account_by_email" + }, + { + "sql": "REVOKE EXECUTE ON FUNCTION auth_lookup_account_by_email(input_email text) FROM PUBLIC;", + "type": "revoked_default_privilege", + "operation": "create", + "path": "revoked_default.FUNCTION.auth_lookup_account_by_email(input_email text)" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_function/issue_354_empty_search_path/plan.json b/testdata/diff/create_function/issue_354_empty_search_path/plan.json 
index 8e2305e4..a1113239 100644 --- a/testdata/diff/create_function/issue_354_empty_search_path/plan.json +++ b/testdata/diff/create_function/issue_354_empty_search_path/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "29f02983bf9ecf6f5f1ec38377f7209ec60f4fe4051d371227ace7d93bddf381" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "29f02983bf9ecf6f5f1ec38377f7209ec60f4fe4051d371227ace7d93bddf381" + }, + "groups": [ { - "sql": "CREATE OR REPLACE FUNCTION create_hello(\n p_title text\n)\nRETURNS void\nLANGUAGE plpgsql\nVOLATILE\nSET search_path = ''\nAS $$\nBEGIN\n INSERT INTO public.test (title) VALUES (p_title);\nEND;\n$$;", - "type": "function", - "operation": "create", - "path": "public.create_hello" + "steps": [ + { + "sql": "CREATE OR REPLACE FUNCTION create_hello(\n p_title text\n)\nRETURNS void\nLANGUAGE plpgsql\nVOLATILE\nSET search_path = ''\nAS $$\nBEGIN\n INSERT INTO public.test (title) VALUES (p_title);\nEND;\n$$;", + "type": "function", + "operation": "create", + "path": "public.create_hello" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_function/issue_360_returns_table_custom_type/plan.json b/testdata/diff/create_function/issue_360_returns_table_custom_type/plan.json index 7ea749f0..d4907fab 100644 --- a/testdata/diff/create_function/issue_360_returns_table_custom_type/plan.json +++ b/testdata/diff/create_function/issue_360_returns_table_custom_type/plan.json @@ -2,8 +2,12 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "bc4fc478f2d7ae4cc204de3447d992dface8f485a9227504fed99b21817cb888" - }, - "groups": null + "schemas": { + "public": { + "source_fingerprint": { + "hash": "bc4fc478f2d7ae4cc204de3447d992dface8f485a9227504fed99b21817cb888" + }, + "groups": null + } + } } diff --git 
a/testdata/diff/create_function/issue_399_schema_qualified_body/plan.json b/testdata/diff/create_function/issue_399_schema_qualified_body/plan.json index eb68c191..b99f01fd 100644 --- a/testdata/diff/create_function/issue_399_schema_qualified_body/plan.json +++ b/testdata/diff/create_function/issue_399_schema_qualified_body/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "eb148b37b7b6325bdd5f0c1c120dfe0bd71a062ce69951aa946c452aff2dc662" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "eb148b37b7b6325bdd5f0c1c120dfe0bd71a062ce69951aa946c452aff2dc662" + }, + "groups": [ { - "sql": "CREATE OR REPLACE FUNCTION role_has_cap(\n p_role role_type,\n p_cap text\n)\nRETURNS boolean\nLANGUAGE sql\nSTABLE\nAS $$\n SELECT EXISTS (\n SELECT 1\n FROM public.role_caps rc\n WHERE rc.role = p_role\n AND rc.capability = p_cap\n );\n$$;", - "type": "function", - "operation": "create", - "path": "public.role_has_cap" + "steps": [ + { + "sql": "CREATE OR REPLACE FUNCTION role_has_cap(\n p_role role_type,\n p_cap text\n)\nRETURNS boolean\nLANGUAGE sql\nSTABLE\nAS $$\n SELECT EXISTS (\n SELECT 1\n FROM public.role_caps rc\n WHERE rc.role = p_role\n AND rc.capability = p_cap\n );\n$$;", + "type": "function", + "operation": "create", + "path": "public.role_has_cap" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_index/add_index/plan.json b/testdata/diff/create_index/add_index/plan.json index 434751d1..4fbf944e 100644 --- a/testdata/diff/create_index/add_index/plan.json +++ b/testdata/diff/create_index/add_index/plan.json @@ -2,55 +2,59 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": 
"965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS users (\n id integer,\n email varchar(255) NOT NULL,\n name varchar(100),\n CONSTRAINT users_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.users" - }, - { - "sql": "CREATE INDEX IF NOT EXISTS idx_users_email ON users (email varchar_pattern_ops);", - "type": "table.index", - "operation": "create", - "path": "public.users.idx_users_email" - }, - { - "sql": "CREATE INDEX IF NOT EXISTS idx_users_email_include ON users (email) INCLUDE (name);", - "type": "table.index", - "operation": "create", - "path": "public.users.idx_users_email_include" - }, - { - "sql": "CREATE UNIQUE INDEX IF NOT EXISTS idx_users_email_unique ON users (email) NULLS NOT DISTINCT;", - "type": "table.index", - "operation": "create", - "path": "public.users.idx_users_email_unique" - }, - { - "sql": "CREATE INDEX IF NOT EXISTS idx_users_id ON users (id);", - "type": "table.index", - "operation": "create", - "path": "public.users.idx_users_id" - }, - { - "sql": "CREATE INDEX IF NOT EXISTS idx_users_name ON users (name);", - "type": "table.index", - "operation": "create", - "path": "public.users.idx_users_name" - }, - { - "sql": "CREATE INDEX IF NOT EXISTS \"public.idx_users\" ON users (email, name);", - "type": "table.index", - "operation": "create", - "path": "public.users.public.idx_users" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS users (\n id integer,\n email varchar(255) NOT NULL,\n name varchar(100),\n CONSTRAINT users_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.users" + }, + { + "sql": "CREATE INDEX IF NOT EXISTS idx_users_email ON users (email varchar_pattern_ops);", + "type": "table.index", + "operation": "create", + "path": "public.users.idx_users_email" + }, + { + "sql": "CREATE INDEX IF NOT EXISTS idx_users_email_include ON users (email) INCLUDE (name);", + "type": 
"table.index", + "operation": "create", + "path": "public.users.idx_users_email_include" + }, + { + "sql": "CREATE UNIQUE INDEX IF NOT EXISTS idx_users_email_unique ON users (email) NULLS NOT DISTINCT;", + "type": "table.index", + "operation": "create", + "path": "public.users.idx_users_email_unique" + }, + { + "sql": "CREATE INDEX IF NOT EXISTS idx_users_id ON users (id);", + "type": "table.index", + "operation": "create", + "path": "public.users.idx_users_id" + }, + { + "sql": "CREATE INDEX IF NOT EXISTS idx_users_name ON users (name);", + "type": "table.index", + "operation": "create", + "path": "public.users.idx_users_name" + }, + { + "sql": "CREATE INDEX IF NOT EXISTS \"public.idx_users\" ON users (email, name);", + "type": "table.index", + "operation": "create", + "path": "public.users.public.idx_users" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_index/drop_index/plan.json b/testdata/diff/create_index/drop_index/plan.json index 51d84c35..88b4a644 100644 --- a/testdata/diff/create_index/drop_index/plan.json +++ b/testdata/diff/create_index/drop_index/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "f2873843c4de053af739e6d0641037ede53c6c78a5c6b7887f30826a4f6dfe34" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "f2873843c4de053af739e6d0641037ede53c6c78a5c6b7887f30826a4f6dfe34" + }, + "groups": [ { - "sql": "DROP INDEX IF EXISTS idx_products_category_price;", - "type": "table.index", - "operation": "drop", - "path": "public.products.idx_products_category_price" - }, - { - "sql": "DROP INDEX IF EXISTS idx_product_summary_price;", - "type": "materialized_view.index", - "operation": "drop", - "path": "public.product_summary.idx_product_summary_price" + "steps": [ + { + "sql": "DROP INDEX IF EXISTS idx_products_category_price;", + "type": "table.index", + "operation": "drop", + "path": 
"public.products.idx_products_category_price" + }, + { + "sql": "DROP INDEX IF EXISTS idx_product_summary_price;", + "type": "materialized_view.index", + "operation": "drop", + "path": "public.product_summary.idx_product_summary_price" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_materialized_view/add_materialized_view/plan.json b/testdata/diff/create_materialized_view/add_materialized_view/plan.json index 7d57b258..b4326ace 100644 --- a/testdata/diff/create_materialized_view/add_materialized_view/plan.json +++ b/testdata/diff/create_materialized_view/add_materialized_view/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "c080880eeed5c864d9039e5087e56335177c19b37ace103267da30a2ef36775b" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "c080880eeed5c864d9039e5087e56335177c19b37ace103267da30a2ef36775b" + }, + "groups": [ { - "sql": "CREATE MATERIALIZED VIEW IF NOT EXISTS active_employees AS\n SELECT id,\n name,\n salary\n FROM employees\n WHERE status::text = 'active'::text;", - "type": "materialized_view", - "operation": "create", - "path": "public.active_employees" + "steps": [ + { + "sql": "CREATE MATERIALIZED VIEW IF NOT EXISTS active_employees AS\n SELECT id,\n name,\n salary\n FROM employees\n WHERE status::text = 'active'::text;", + "type": "materialized_view", + "operation": "create", + "path": "public.active_employees" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_materialized_view/alter_materialized_view/plan.json b/testdata/diff/create_materialized_view/alter_materialized_view/plan.json index be66a80d..cb266df3 100644 --- a/testdata/diff/create_materialized_view/alter_materialized_view/plan.json +++ b/testdata/diff/create_materialized_view/alter_materialized_view/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - 
"source_fingerprint": { - "hash": "d7265cc266dac8551a3b9f37cf2293f45c601b13dafb6bb301915976389a3927" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "d7265cc266dac8551a3b9f37cf2293f45c601b13dafb6bb301915976389a3927" + }, + "groups": [ { - "sql": "DROP MATERIALIZED VIEW active_employees RESTRICT;", - "type": "materialized_view", - "operation": "alter", - "path": "public.active_employees" - }, - { - "sql": "CREATE MATERIALIZED VIEW IF NOT EXISTS active_employees AS\n SELECT id,\n name,\n salary,\n status\n FROM employees\n WHERE status::text = 'active'::text;", - "type": "materialized_view", - "operation": "alter", - "path": "public.active_employees" + "steps": [ + { + "sql": "DROP MATERIALIZED VIEW active_employees RESTRICT;", + "type": "materialized_view", + "operation": "alter", + "path": "public.active_employees" + }, + { + "sql": "CREATE MATERIALIZED VIEW IF NOT EXISTS active_employees AS\n SELECT id,\n name,\n salary,\n status\n FROM employees\n WHERE status::text = 'active'::text;", + "type": "materialized_view", + "operation": "alter", + "path": "public.active_employees" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_materialized_view/drop_materialized_view/plan.json b/testdata/diff/create_materialized_view/drop_materialized_view/plan.json index cd45d5e7..8b384f45 100644 --- a/testdata/diff/create_materialized_view/drop_materialized_view/plan.json +++ b/testdata/diff/create_materialized_view/drop_materialized_view/plan.json @@ -2,73 +2,77 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "a90a90090750b18a9aaffb6de253bd12234fe30ccf9cfc35b82c34ac834f1360" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "a90a90090750b18a9aaffb6de253bd12234fe30ccf9cfc35b82c34ac834f1360" + }, + "groups": [ { - "sql": "DROP VIEW IF EXISTS employee_summary RESTRICT;", - "type": "view", - 
"operation": "recreate", - "path": "public.employee_summary" - }, - { - "sql": "DROP VIEW IF EXISTS employee_ids RESTRICT;", - "type": "view", - "operation": "recreate", - "path": "public.employee_ids" - }, - { - "sql": "DROP VIEW IF EXISTS employee_names RESTRICT;", - "type": "view", - "operation": "recreate", - "path": "public.employee_names" - }, - { - "sql": "DROP MATERIALIZED VIEW active_employees RESTRICT;", - "type": "materialized_view", - "operation": "alter", - "path": "public.active_employees" - }, - { - "sql": "CREATE MATERIALIZED VIEW IF NOT EXISTS active_employees AS\n SELECT id,\n name,\n salary,\n 'active'::text AS status_label\n FROM employees\n WHERE status::text = 'active'::text;", - "type": "materialized_view", - "operation": "alter", - "path": "public.active_employees" - }, - { - "sql": "DROP MATERIALIZED VIEW dept_stats RESTRICT;", - "type": "materialized_view", - "operation": "alter", - "path": "public.dept_stats" - }, - { - "sql": "CREATE MATERIALIZED VIEW IF NOT EXISTS dept_stats AS\n SELECT department,\n count(*) AS employee_count,\n avg(salary) AS avg_salary\n FROM employees\n GROUP BY department;", - "type": "materialized_view", - "operation": "alter", - "path": "public.dept_stats" - }, - { - "sql": "CREATE OR REPLACE VIEW employee_names AS\n SELECT id,\n name\n FROM active_employees;", - "type": "view", - "operation": "recreate", - "path": "public.employee_names" - }, - { - "sql": "CREATE OR REPLACE VIEW employee_ids AS\n SELECT id\n FROM employee_names;", - "type": "view", - "operation": "recreate", - "path": "public.employee_ids" - }, - { - "sql": "CREATE OR REPLACE VIEW employee_summary AS\n SELECT ae.id,\n ae.name,\n ds.employee_count AS dept_size\n FROM active_employees ae\n CROSS JOIN dept_stats ds\n LIMIT 10;", - "type": "view", - "operation": "recreate", - "path": "public.employee_summary" + "steps": [ + { + "sql": "DROP VIEW IF EXISTS employee_summary RESTRICT;", + "type": "view", + "operation": "recreate", + "path": 
"public.employee_summary" + }, + { + "sql": "DROP VIEW IF EXISTS employee_ids RESTRICT;", + "type": "view", + "operation": "recreate", + "path": "public.employee_ids" + }, + { + "sql": "DROP VIEW IF EXISTS employee_names RESTRICT;", + "type": "view", + "operation": "recreate", + "path": "public.employee_names" + }, + { + "sql": "DROP MATERIALIZED VIEW active_employees RESTRICT;", + "type": "materialized_view", + "operation": "alter", + "path": "public.active_employees" + }, + { + "sql": "CREATE MATERIALIZED VIEW IF NOT EXISTS active_employees AS\n SELECT id,\n name,\n salary,\n 'active'::text AS status_label\n FROM employees\n WHERE status::text = 'active'::text;", + "type": "materialized_view", + "operation": "alter", + "path": "public.active_employees" + }, + { + "sql": "DROP MATERIALIZED VIEW dept_stats RESTRICT;", + "type": "materialized_view", + "operation": "alter", + "path": "public.dept_stats" + }, + { + "sql": "CREATE MATERIALIZED VIEW IF NOT EXISTS dept_stats AS\n SELECT department,\n count(*) AS employee_count,\n avg(salary) AS avg_salary\n FROM employees\n GROUP BY department;", + "type": "materialized_view", + "operation": "alter", + "path": "public.dept_stats" + }, + { + "sql": "CREATE OR REPLACE VIEW employee_names AS\n SELECT id,\n name\n FROM active_employees;", + "type": "view", + "operation": "recreate", + "path": "public.employee_names" + }, + { + "sql": "CREATE OR REPLACE VIEW employee_ids AS\n SELECT id\n FROM employee_names;", + "type": "view", + "operation": "recreate", + "path": "public.employee_ids" + }, + { + "sql": "CREATE OR REPLACE VIEW employee_summary AS\n SELECT ae.id,\n ae.name,\n ds.employee_count AS dept_size\n FROM active_employees ae\n CROSS JOIN dept_stats ds\n LIMIT 10;", + "type": "view", + "operation": "recreate", + "path": "public.employee_summary" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_policy/add_policy/plan.json b/testdata/diff/create_policy/add_policy/plan.json index 167b995a..2aca27d4 100644 --- 
a/testdata/diff/create_policy/add_policy/plan.json +++ b/testdata/diff/create_policy/add_policy/plan.json @@ -2,55 +2,59 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "476200c6123d04c01b912d2935ac4ec2d355278cc5d67529af8ab8ad1c7d2af0" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "476200c6123d04c01b912d2935ac4ec2d355278cc5d67529af8ab8ad1c7d2af0" + }, + "groups": [ { - "sql": "ALTER TABLE orders ENABLE ROW LEVEL SECURITY;", - "type": "table.rls", - "operation": "create", - "path": "public.orders" - }, - { - "sql": "CREATE POLICY orders_user_access ON orders FOR SELECT TO PUBLIC USING (user_id IN ( SELECT users.id FROM users));", - "type": "table.policy", - "operation": "create", - "path": "public.orders.orders_user_access" - }, - { - "sql": "CREATE POLICY \"UserPolicy\" ON users TO PUBLIC USING (tenant_id = (current_setting('app.current_tenant'))::integer);", - "type": "table.policy", - "operation": "create", - "path": "public.users.UserPolicy" - }, - { - "sql": "CREATE POLICY admin_only ON users FOR DELETE TO PUBLIC USING (is_admin());", - "type": "table.policy", - "operation": "create", - "path": "public.users.admin_only" - }, - { - "sql": "CREATE POLICY \"my-policy\" ON users FOR INSERT TO PUBLIC WITH CHECK ((role)::text = 'user');", - "type": "table.policy", - "operation": "create", - "path": "public.users.my-policy" - }, - { - "sql": "CREATE POLICY \"select\" ON users FOR SELECT TO PUBLIC USING (true);", - "type": "table.policy", - "operation": "create", - "path": "public.users.select" - }, - { - "sql": "CREATE POLICY user_tenant_isolation ON users FOR UPDATE TO PUBLIC USING (tenant_id = (current_setting('app.current_tenant'))::integer);", - "type": "table.policy", - "operation": "create", - "path": "public.users.user_tenant_isolation" + "steps": [ + { + "sql": "ALTER TABLE orders ENABLE ROW LEVEL SECURITY;", + "type": 
"table.rls", + "operation": "create", + "path": "public.orders" + }, + { + "sql": "CREATE POLICY orders_user_access ON orders FOR SELECT TO PUBLIC USING (user_id IN ( SELECT users.id FROM users));", + "type": "table.policy", + "operation": "create", + "path": "public.orders.orders_user_access" + }, + { + "sql": "CREATE POLICY \"UserPolicy\" ON users TO PUBLIC USING (tenant_id = (current_setting('app.current_tenant'))::integer);", + "type": "table.policy", + "operation": "create", + "path": "public.users.UserPolicy" + }, + { + "sql": "CREATE POLICY admin_only ON users FOR DELETE TO PUBLIC USING (is_admin());", + "type": "table.policy", + "operation": "create", + "path": "public.users.admin_only" + }, + { + "sql": "CREATE POLICY \"my-policy\" ON users FOR INSERT TO PUBLIC WITH CHECK ((role)::text = 'user');", + "type": "table.policy", + "operation": "create", + "path": "public.users.my-policy" + }, + { + "sql": "CREATE POLICY \"select\" ON users FOR SELECT TO PUBLIC USING (true);", + "type": "table.policy", + "operation": "create", + "path": "public.users.select" + }, + { + "sql": "CREATE POLICY user_tenant_isolation ON users FOR UPDATE TO PUBLIC USING (tenant_id = (current_setting('app.current_tenant'))::integer);", + "type": "table.policy", + "operation": "create", + "path": "public.users.user_tenant_isolation" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_policy/alter_policy_command/plan.json b/testdata/diff/create_policy/alter_policy_command/plan.json index 05c59755..7de76ca0 100644 --- a/testdata/diff/create_policy/alter_policy_command/plan.json +++ b/testdata/diff/create_policy/alter_policy_command/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "829d5dbe4a19b9f96927bfa221e10199ebe0f41d70fdaec9004f1ef8b8c9c73f" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": 
"829d5dbe4a19b9f96927bfa221e10199ebe0f41d70fdaec9004f1ef8b8c9c73f" + }, + "groups": [ { - "sql": "DROP POLICY IF EXISTS user_tenant_isolation ON users;", - "type": "table.policy", - "operation": "drop", - "path": "public.users.user_tenant_isolation" - }, - { - "sql": "CREATE POLICY user_tenant_isolation ON users FOR SELECT TO PUBLIC USING (tenant_id = 1);", - "type": "table.policy", - "operation": "create", - "path": "public.users.user_tenant_isolation" + "steps": [ + { + "sql": "DROP POLICY IF EXISTS user_tenant_isolation ON users;", + "type": "table.policy", + "operation": "drop", + "path": "public.users.user_tenant_isolation" + }, + { + "sql": "CREATE POLICY user_tenant_isolation ON users FOR SELECT TO PUBLIC USING (tenant_id = 1);", + "type": "table.policy", + "operation": "create", + "path": "public.users.user_tenant_isolation" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_policy/alter_policy_name/plan.json b/testdata/diff/create_policy/alter_policy_name/plan.json index 67b08249..5a72cd4f 100644 --- a/testdata/diff/create_policy/alter_policy_name/plan.json +++ b/testdata/diff/create_policy/alter_policy_name/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "829d5dbe4a19b9f96927bfa221e10199ebe0f41d70fdaec9004f1ef8b8c9c73f" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "829d5dbe4a19b9f96927bfa221e10199ebe0f41d70fdaec9004f1ef8b8c9c73f" + }, + "groups": [ { - "sql": "DROP POLICY IF EXISTS user_tenant_isolation ON users;", - "type": "table.policy", - "operation": "drop", - "path": "public.users.user_tenant_isolation" - }, - { - "sql": "CREATE POLICY tenant_access_policy ON users TO PUBLIC USING (tenant_id = 1);", - "type": "table.policy", - "operation": "create", - "path": "public.users.tenant_access_policy" + "steps": [ + { + "sql": "DROP POLICY IF EXISTS user_tenant_isolation ON users;", + "type": 
"table.policy", + "operation": "drop", + "path": "public.users.user_tenant_isolation" + }, + { + "sql": "CREATE POLICY tenant_access_policy ON users TO PUBLIC USING (tenant_id = 1);", + "type": "table.policy", + "operation": "create", + "path": "public.users.tenant_access_policy" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_policy/alter_policy_roles/plan.json b/testdata/diff/create_policy/alter_policy_roles/plan.json index fa4763c2..6a5cdafa 100644 --- a/testdata/diff/create_policy/alter_policy_roles/plan.json +++ b/testdata/diff/create_policy/alter_policy_roles/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "ca023a9b3035810c567ac9399fe047ac2a2ac322d00351fe13e143d26bad1b9b" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "ca023a9b3035810c567ac9399fe047ac2a2ac322d00351fe13e143d26bad1b9b" + }, + "groups": [ { - "sql": "ALTER POLICY user_tenant_isolation ON users TO testuser;", - "type": "table.policy", - "operation": "alter", - "path": "public.users.user_tenant_isolation" + "steps": [ + { + "sql": "ALTER POLICY user_tenant_isolation ON users TO testuser;", + "type": "table.policy", + "operation": "alter", + "path": "public.users.user_tenant_isolation" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_policy/alter_policy_using/plan.json b/testdata/diff/create_policy/alter_policy_using/plan.json index 5ddd4551..0e21333e 100644 --- a/testdata/diff/create_policy/alter_policy_using/plan.json +++ b/testdata/diff/create_policy/alter_policy_using/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "d7858c54fa0b38bfa63c613f6bba0eaaa5827388d53e4548bd9ff419ea357046" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": 
"d7858c54fa0b38bfa63c613f6bba0eaaa5827388d53e4548bd9ff419ea357046" + }, + "groups": [ { - "sql": "ALTER POLICY user_tenant_isolation ON users USING (tenant_id = 2);", - "type": "table.policy", - "operation": "alter", - "path": "public.users.user_tenant_isolation" + "steps": [ + { + "sql": "ALTER POLICY user_tenant_isolation ON users USING (tenant_id = 2);", + "type": "table.policy", + "operation": "alter", + "path": "public.users.user_tenant_isolation" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_policy/disable_rls/plan.json b/testdata/diff/create_policy/disable_rls/plan.json index 6725cf51..05c92c2c 100644 --- a/testdata/diff/create_policy/disable_rls/plan.json +++ b/testdata/diff/create_policy/disable_rls/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "ca023a9b3035810c567ac9399fe047ac2a2ac322d00351fe13e143d26bad1b9b" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "ca023a9b3035810c567ac9399fe047ac2a2ac322d00351fe13e143d26bad1b9b" + }, + "groups": [ { - "sql": "ALTER TABLE users DISABLE ROW LEVEL SECURITY;", - "type": "table.rls", - "operation": "drop", - "path": "public.users" - }, - { - "sql": "DROP POLICY IF EXISTS user_tenant_isolation ON users;", - "type": "table.policy", - "operation": "drop", - "path": "public.users.user_tenant_isolation" + "steps": [ + { + "sql": "ALTER TABLE users DISABLE ROW LEVEL SECURITY;", + "type": "table.rls", + "operation": "drop", + "path": "public.users" + }, + { + "sql": "DROP POLICY IF EXISTS user_tenant_isolation ON users;", + "type": "table.policy", + "operation": "drop", + "path": "public.users.user_tenant_isolation" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_policy/drop_policy/plan.json b/testdata/diff/create_policy/drop_policy/plan.json index 3cf00cbe..7d6b47d6 100644 --- a/testdata/diff/create_policy/drop_policy/plan.json +++ 
b/testdata/diff/create_policy/drop_policy/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "ca023a9b3035810c567ac9399fe047ac2a2ac322d00351fe13e143d26bad1b9b" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "ca023a9b3035810c567ac9399fe047ac2a2ac322d00351fe13e143d26bad1b9b" + }, + "groups": [ { - "sql": "DROP POLICY IF EXISTS user_tenant_isolation ON users;", - "type": "table.policy", - "operation": "drop", - "path": "public.users.user_tenant_isolation" + "steps": [ + { + "sql": "DROP POLICY IF EXISTS user_tenant_isolation ON users;", + "type": "table.policy", + "operation": "drop", + "path": "public.users.user_tenant_isolation" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_policy/enable_rls/plan.json b/testdata/diff/create_policy/enable_rls/plan.json index a4d21a85..740b6244 100644 --- a/testdata/diff/create_policy/enable_rls/plan.json +++ b/testdata/diff/create_policy/enable_rls/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "bc1c9e0c7cb35d93e4a07f8f1490f1fa8ca27e9f7de430d271ab0caa7b4e1690" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "bc1c9e0c7cb35d93e4a07f8f1490f1fa8ca27e9f7de430d271ab0caa7b4e1690" + }, + "groups": [ { - "sql": "ALTER TABLE users ENABLE ROW LEVEL SECURITY;", - "type": "table.rls", - "operation": "create", - "path": "public.users" - }, - { - "sql": "CREATE POLICY user_tenant_isolation ON users TO PUBLIC USING (tenant_id = 1);", - "type": "table.policy", - "operation": "create", - "path": "public.users.user_tenant_isolation" + "steps": [ + { + "sql": "ALTER TABLE users ENABLE ROW LEVEL SECURITY;", + "type": "table.rls", + "operation": "create", + "path": "public.users" + }, + { + "sql": "CREATE POLICY user_tenant_isolation ON 
users TO PUBLIC USING (tenant_id = 1);", + "type": "table.policy", + "operation": "create", + "path": "public.users.user_tenant_isolation" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_policy/force_rls/plan.json b/testdata/diff/create_policy/force_rls/plan.json index acf266ae..6ea7fdad 100644 --- a/testdata/diff/create_policy/force_rls/plan.json +++ b/testdata/diff/create_policy/force_rls/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "4301e0e8cbfb9465b41874d81220ea76d6b7696c79dd0f79b94fee7cb75ca372" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "4301e0e8cbfb9465b41874d81220ea76d6b7696c79dd0f79b94fee7cb75ca372" + }, + "groups": [ { - "sql": "ALTER TABLE users FORCE ROW LEVEL SECURITY;", - "type": "table.rls", - "operation": "alter", - "path": "public.users" + "steps": [ + { + "sql": "ALTER TABLE users FORCE ROW LEVEL SECURITY;", + "type": "table.rls", + "operation": "alter", + "path": "public.users" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_policy/issue_377_nested_function_in_policy/plan.json b/testdata/diff/create_policy/issue_377_nested_function_in_policy/plan.json index 30b458ed..486fb13b 100644 --- a/testdata/diff/create_policy/issue_377_nested_function_in_policy/plan.json +++ b/testdata/diff/create_policy/issue_377_nested_function_in_policy/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "e5af492401964f08c79bb81cc010968b0ed2bd2f12081e194ed15e579dea2aff" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "e5af492401964f08c79bb81cc010968b0ed2bd2f12081e194ed15e579dea2aff" + }, + "groups": [ { - "sql": "ALTER TABLE projects ADD COLUMN description text;", - "type": "table.column", - "operation": "create", - "path": 
"public.projects.description" + "steps": [ + { + "sql": "ALTER TABLE projects ADD COLUMN description text;", + "type": "table.column", + "operation": "create", + "path": "public.projects.description" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_policy/remove_force_rls/plan.json b/testdata/diff/create_policy/remove_force_rls/plan.json index 4cace905..a244442e 100644 --- a/testdata/diff/create_policy/remove_force_rls/plan.json +++ b/testdata/diff/create_policy/remove_force_rls/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "7c4d36f4b642982defe19b82256148413f32e6dd9c9d11bfe350bf0beb2375e3" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "7c4d36f4b642982defe19b82256148413f32e6dd9c9d11bfe350bf0beb2375e3" + }, + "groups": [ { - "sql": "ALTER TABLE users NO FORCE ROW LEVEL SECURITY;", - "type": "table.rls", - "operation": "alter", - "path": "public.users" + "steps": [ + { + "sql": "ALTER TABLE users NO FORCE ROW LEVEL SECURITY;", + "type": "table.rls", + "operation": "alter", + "path": "public.users" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_procedure/add_procedure/plan.json b/testdata/diff/create_procedure/add_procedure/plan.json index 91da5056..34ef25c5 100644 --- a/testdata/diff/create_procedure/add_procedure/plan.json +++ b/testdata/diff/create_procedure/add_procedure/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE OR REPLACE PROCEDURE example_procedure(\n IN input_value integer,\n OUT output_value 
integer\n)\nLANGUAGE plpgsql\nAS $$\nBEGIN\n RAISE NOTICE 'Input value is: %', input_value;\n output_value := input_value + 1;\nEND;\n$$;", - "type": "procedure", - "operation": "create", - "path": "public.example_procedure" - }, - { - "sql": "CREATE OR REPLACE PROCEDURE validate_input(\n IN input_value integer\n)\nLANGUAGE sql\nBEGIN ATOMIC\n SELECT (input_value * 2);\nEND;", - "type": "procedure", - "operation": "create", - "path": "public.validate_input" + "steps": [ + { + "sql": "CREATE OR REPLACE PROCEDURE example_procedure(\n IN input_value integer,\n OUT output_value integer\n)\nLANGUAGE plpgsql\nAS $$\nBEGIN\n RAISE NOTICE 'Input value is: %', input_value;\n output_value := input_value + 1;\nEND;\n$$;", + "type": "procedure", + "operation": "create", + "path": "public.example_procedure" + }, + { + "sql": "CREATE OR REPLACE PROCEDURE validate_input(\n IN input_value integer\n)\nLANGUAGE sql\nBEGIN ATOMIC\n SELECT (input_value * 2);\nEND;", + "type": "procedure", + "operation": "create", + "path": "public.validate_input" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_procedure/alter_procedure/plan.json b/testdata/diff/create_procedure/alter_procedure/plan.json index 257eb455..a5bb382e 100644 --- a/testdata/diff/create_procedure/alter_procedure/plan.json +++ b/testdata/diff/create_procedure/alter_procedure/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "e4687125bf0145e37b9703d74c5cf09f0e69733284549b1f28498afd7753a102" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "e4687125bf0145e37b9703d74c5cf09f0e69733284549b1f28498afd7753a102" + }, + "groups": [ { - "sql": "DROP PROCEDURE IF EXISTS process_payment(IN order_id integer, IN amount numeric);", - "type": "procedure", - "operation": "drop", - "path": "public.process_payment" - }, - { - "sql": "CREATE OR REPLACE PROCEDURE process_payment(\n IN 
order_id integer,\n IN amount numeric,\n IN payment_method text DEFAULT 'credit_card'\n)\nLANGUAGE plpgsql\nAS $$\nBEGIN\n UPDATE orders \n SET status = 'paid', \n payment_amount = amount,\n payment_method = payment_method,\n processed_at = NOW()\n WHERE id = order_id;\n \n INSERT INTO payment_history (order_id, amount, method, processed_at)\n VALUES (order_id, amount, payment_method, NOW());\n \n COMMIT;\nEND;\n$$;", - "type": "procedure", - "operation": "create", - "path": "public.process_payment" + "steps": [ + { + "sql": "DROP PROCEDURE IF EXISTS process_payment(IN order_id integer, IN amount numeric);", + "type": "procedure", + "operation": "drop", + "path": "public.process_payment" + }, + { + "sql": "CREATE OR REPLACE PROCEDURE process_payment(\n IN order_id integer,\n IN amount numeric,\n IN payment_method text DEFAULT 'credit_card'\n)\nLANGUAGE plpgsql\nAS $$\nBEGIN\n UPDATE orders \n SET status = 'paid', \n payment_amount = amount,\n payment_method = payment_method,\n processed_at = NOW()\n WHERE id = order_id;\n \n INSERT INTO payment_history (order_id, amount, method, processed_at)\n VALUES (order_id, amount, payment_method, NOW());\n \n COMMIT;\nEND;\n$$;", + "type": "procedure", + "operation": "create", + "path": "public.process_payment" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_procedure/drop_procedure/plan.json b/testdata/diff/create_procedure/drop_procedure/plan.json index cbc4aafa..75bbb953 100644 --- a/testdata/diff/create_procedure/drop_procedure/plan.json +++ b/testdata/diff/create_procedure/drop_procedure/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "da1177810da568202dbdcc89a5b9979ff01c589ddcd7618f76f8debccb5f2283" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "da1177810da568202dbdcc89a5b9979ff01c589ddcd7618f76f8debccb5f2283" + }, + "groups": [ { - "sql": "DROP PROCEDURE 
IF EXISTS cleanup_old_data(IN days_old integer);", - "type": "procedure", - "operation": "drop", - "path": "public.cleanup_old_data" + "steps": [ + { + "sql": "DROP PROCEDURE IF EXISTS cleanup_old_data(IN days_old integer);", + "type": "procedure", + "operation": "drop", + "path": "public.cleanup_old_data" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_sequence/add_sequence/plan.json b/testdata/diff/create_sequence/add_sequence/plan.json index 81833d98..c28efc5c 100644 --- a/testdata/diff/create_sequence/add_sequence/plan.json +++ b/testdata/diff/create_sequence/add_sequence/plan.json @@ -2,43 +2,47 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE SEQUENCE IF NOT EXISTS big_seq AS bigint MAXVALUE 1000000 CACHE 10;", - "type": "sequence", - "operation": "create", - "path": "public.big_seq" - }, - { - "sql": "CREATE SEQUENCE IF NOT EXISTS int_seq AS integer START WITH 100 CACHE 5;", - "type": "sequence", - "operation": "create", - "path": "public.int_seq" - }, - { - "sql": "CREATE SEQUENCE IF NOT EXISTS order_seq INCREMENT BY 10 CYCLE;", - "type": "sequence", - "operation": "create", - "path": "public.order_seq" - }, - { - "sql": "CREATE SEQUENCE IF NOT EXISTS small_seq AS smallint CACHE 20;", - "type": "sequence", - "operation": "create", - "path": "public.small_seq" - }, - { - "sql": "CREATE SEQUENCE IF NOT EXISTS user_id_seq;", - "type": "sequence", - "operation": "create", - "path": "public.user_id_seq" + "steps": [ + { + "sql": "CREATE SEQUENCE IF NOT EXISTS big_seq AS bigint MAXVALUE 1000000 CACHE 10;", + "type": "sequence", + "operation": "create", + "path": "public.big_seq" + }, + { + "sql": "CREATE 
SEQUENCE IF NOT EXISTS int_seq AS integer START WITH 100 CACHE 5;", + "type": "sequence", + "operation": "create", + "path": "public.int_seq" + }, + { + "sql": "CREATE SEQUENCE IF NOT EXISTS order_seq INCREMENT BY 10 CYCLE;", + "type": "sequence", + "operation": "create", + "path": "public.order_seq" + }, + { + "sql": "CREATE SEQUENCE IF NOT EXISTS small_seq AS smallint CACHE 20;", + "type": "sequence", + "operation": "create", + "path": "public.small_seq" + }, + { + "sql": "CREATE SEQUENCE IF NOT EXISTS user_id_seq;", + "type": "sequence", + "operation": "create", + "path": "public.user_id_seq" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_sequence/alter_sequence/plan.json b/testdata/diff/create_sequence/alter_sequence/plan.json index a7650335..b819bed9 100644 --- a/testdata/diff/create_sequence/alter_sequence/plan.json +++ b/testdata/diff/create_sequence/alter_sequence/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "a6d455e0695bf779e36a81592e6b9e2cbb29cd0d7778dc021d19ed76feacfe53" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "a6d455e0695bf779e36a81592e6b9e2cbb29cd0d7778dc021d19ed76feacfe53" + }, + "groups": [ { - "sql": "ALTER SEQUENCE user_id_seq INCREMENT BY 5 CYCLE;", - "type": "sequence", - "operation": "alter", - "path": "public.user_id_seq" + "steps": [ + { + "sql": "ALTER SEQUENCE user_id_seq INCREMENT BY 5 CYCLE;", + "type": "sequence", + "operation": "alter", + "path": "public.user_id_seq" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_sequence/drop_sequence/plan.json b/testdata/diff/create_sequence/drop_sequence/plan.json index 6e363821..037c99e8 100644 --- a/testdata/diff/create_sequence/drop_sequence/plan.json +++ b/testdata/diff/create_sequence/drop_sequence/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", 
- "source_fingerprint": { - "hash": "95fd2421269a8bc861554d6ebb648bdc3bfc77158848743977e233f933391cfd" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "95fd2421269a8bc861554d6ebb648bdc3bfc77158848743977e233f933391cfd" + }, + "groups": [ { - "sql": "DROP SEQUENCE IF EXISTS order_seq CASCADE;", - "type": "sequence", - "operation": "drop", - "path": "public.order_seq" + "steps": [ + { + "sql": "DROP SEQUENCE IF EXISTS order_seq CASCADE;", + "type": "sequence", + "operation": "drop", + "path": "public.order_seq" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_check/plan.json b/testdata/diff/create_table/add_check/plan.json index 9843b49a..9d99aadd 100644 --- a/testdata/diff/create_table/add_check/plan.json +++ b/testdata/diff/create_table/add_check/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "3b417f67f91d6a00681d82fa91a44bc786c781d4e45fa945af95bd8185f2e750" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "3b417f67f91d6a00681d82fa91a44bc786c781d4e45fa945af95bd8185f2e750" + }, + "groups": [ { - "sql": "ALTER TABLE code\nADD CONSTRAINT code_check CHECK (code > 0 AND code < 255) NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.code.code_check" - }, - { - "sql": "ALTER TABLE code VALIDATE CONSTRAINT code_check;", - "type": "table.constraint", - "operation": "create", - "path": "public.code.code_check" + "steps": [ + { + "sql": "ALTER TABLE code\nADD CONSTRAINT code_check CHECK (code > 0 AND code < 255) NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.code.code_check" + }, + { + "sql": "ALTER TABLE code VALIDATE CONSTRAINT code_check;", + "type": "table.constraint", + "operation": "create", + "path": "public.code.code_check" + } + ] } ] } - ] + } } diff --git 
a/testdata/diff/create_table/add_column_array/plan.json b/testdata/diff/create_table/add_column_array/plan.json index 48cd7c54..37b8246e 100644 --- a/testdata/diff/create_table/add_column_array/plan.json +++ b/testdata/diff/create_table/add_column_array/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "7b7158d4b11af7877f0d41ff337c9cd0b5b856665b689d9bda5ffde771eca15b" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "7b7158d4b11af7877f0d41ff337c9cd0b5b856665b689d9bda5ffde771eca15b" + }, + "groups": [ { - "sql": "ALTER TABLE articles ADD COLUMN tags text[];", - "type": "table.column", - "operation": "create", - "path": "public.articles.tags" - }, - { - "sql": "ALTER TABLE articles ADD COLUMN statuses status[];", - "type": "table.column", - "operation": "create", - "path": "public.articles.statuses" + "steps": [ + { + "sql": "ALTER TABLE articles ADD COLUMN tags text[];", + "type": "table.column", + "operation": "create", + "path": "public.articles.tags" + }, + { + "sql": "ALTER TABLE articles ADD COLUMN statuses status[];", + "type": "table.column", + "operation": "create", + "path": "public.articles.statuses" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_column_boolean/plan.json b/testdata/diff/create_table/add_column_boolean/plan.json index c0f1676c..8760b877 100644 --- a/testdata/diff/create_table/add_column_boolean/plan.json +++ b/testdata/diff/create_table/add_column_boolean/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "fb883747108147290c30c7102443c661074dde58eb54757980a466c99fc2547e" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "fb883747108147290c30c7102443c661074dde58eb54757980a466c99fc2547e" + }, + "groups": [ { - "sql": "ALTER 
TABLE orders ADD COLUMN is_paid boolean;", - "type": "table.column", - "operation": "create", - "path": "public.orders.is_paid" + "steps": [ + { + "sql": "ALTER TABLE orders ADD COLUMN is_paid boolean;", + "type": "table.column", + "operation": "create", + "path": "public.orders.is_paid" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_column_cross_schema_custom_type/plan.json b/testdata/diff/create_table/add_column_cross_schema_custom_type/plan.json index 215bca72..fa6321ac 100644 --- a/testdata/diff/create_table/add_column_cross_schema_custom_type/plan.json +++ b/testdata/diff/create_table/add_column_cross_schema_custom_type/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "0d7287d9b46eed0ab41e966b5101db966c24ad8f09f5b52f20c5c04a2132ea64" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "0d7287d9b46eed0ab41e966b5101db966c24ad8f09f5b52f20c5c04a2132ea64" + }, + "groups": [ { - "sql": "ALTER TABLE users ADD COLUMN fqdn citext NOT NULL;", - "type": "table.column", - "operation": "create", - "path": "public.users.fqdn" - }, - { - "sql": "ALTER TABLE users ADD COLUMN metadata utils.hstore;", - "type": "table.column", - "operation": "create", - "path": "public.users.metadata" - }, - { - "sql": "ALTER TABLE users ADD COLUMN description utils.custom_text;", - "type": "table.column", - "operation": "create", - "path": "public.users.description" - }, - { - "sql": "ALTER TABLE users ADD COLUMN status utils.custom_enum DEFAULT 'active'::utils.custom_enum;", - "type": "table.column", - "operation": "create", - "path": "public.users.status" + "steps": [ + { + "sql": "ALTER TABLE users ADD COLUMN fqdn citext NOT NULL;", + "type": "table.column", + "operation": "create", + "path": "public.users.fqdn" + }, + { + "sql": "ALTER TABLE users ADD COLUMN metadata utils.hstore;", + "type": "table.column", + 
"operation": "create", + "path": "public.users.metadata" + }, + { + "sql": "ALTER TABLE users ADD COLUMN description utils.custom_text;", + "type": "table.column", + "operation": "create", + "path": "public.users.description" + }, + { + "sql": "ALTER TABLE users ADD COLUMN status utils.custom_enum DEFAULT 'active'::utils.custom_enum;", + "type": "table.column", + "operation": "create", + "path": "public.users.status" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_column_default/plan.json b/testdata/diff/create_table/add_column_default/plan.json index 4f46bd78..4b2203ce 100644 --- a/testdata/diff/create_table/add_column_default/plan.json +++ b/testdata/diff/create_table/add_column_default/plan.json @@ -2,67 +2,71 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "2f64297440983715c85dd0e94e5e6990670d02d31d18fef2170a27df2e2176dc" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "2f64297440983715c85dd0e94e5e6990670d02d31d18fef2170a27df2e2176dc" + }, + "groups": [ { - "sql": "ALTER TABLE events ADD COLUMN status text DEFAULT 'active' NOT NULL;", - "type": "table.column", - "operation": "create", - "path": "public.events.status" - }, - { - "sql": "ALTER TABLE events ADD COLUMN priority integer DEFAULT 0;", - "type": "table.column", - "operation": "create", - "path": "public.events.priority" - }, - { - "sql": "ALTER TABLE events ADD COLUMN score numeric DEFAULT 0.0;", - "type": "table.column", - "operation": "create", - "path": "public.events.score" - }, - { - "sql": "ALTER TABLE events ADD COLUMN is_active boolean DEFAULT true;", - "type": "table.column", - "operation": "create", - "path": "public.events.is_active" - }, - { - "sql": "ALTER TABLE events ADD COLUMN created_at timestamp DEFAULT CURRENT_TIMESTAMP;", - "type": "table.column", - "operation": "create", - "path": "public.events.created_at" - }, - { - "sql": "ALTER 
TABLE events ADD COLUMN updated_at timestamp DEFAULT now();", - "type": "table.column", - "operation": "create", - "path": "public.events.updated_at" - }, - { - "sql": "ALTER TABLE events ADD COLUMN config jsonb DEFAULT '{}';", - "type": "table.column", - "operation": "create", - "path": "public.events.config" - }, - { - "sql": "ALTER TABLE events ADD COLUMN tags text[] DEFAULT '{}';", - "type": "table.column", - "operation": "create", - "path": "public.events.tags" - }, - { - "sql": "ALTER TABLE events ADD COLUMN created_at_utc timestamp DEFAULT (now() AT TIME ZONE 'utc') NOT NULL;", - "type": "table.column", - "operation": "create", - "path": "public.events.created_at_utc" + "steps": [ + { + "sql": "ALTER TABLE events ADD COLUMN status text DEFAULT 'active' NOT NULL;", + "type": "table.column", + "operation": "create", + "path": "public.events.status" + }, + { + "sql": "ALTER TABLE events ADD COLUMN priority integer DEFAULT 0;", + "type": "table.column", + "operation": "create", + "path": "public.events.priority" + }, + { + "sql": "ALTER TABLE events ADD COLUMN score numeric DEFAULT 0.0;", + "type": "table.column", + "operation": "create", + "path": "public.events.score" + }, + { + "sql": "ALTER TABLE events ADD COLUMN is_active boolean DEFAULT true;", + "type": "table.column", + "operation": "create", + "path": "public.events.is_active" + }, + { + "sql": "ALTER TABLE events ADD COLUMN created_at timestamp DEFAULT CURRENT_TIMESTAMP;", + "type": "table.column", + "operation": "create", + "path": "public.events.created_at" + }, + { + "sql": "ALTER TABLE events ADD COLUMN updated_at timestamp DEFAULT now();", + "type": "table.column", + "operation": "create", + "path": "public.events.updated_at" + }, + { + "sql": "ALTER TABLE events ADD COLUMN config jsonb DEFAULT '{}';", + "type": "table.column", + "operation": "create", + "path": "public.events.config" + }, + { + "sql": "ALTER TABLE events ADD COLUMN tags text[] DEFAULT '{}';", + "type": "table.column", + 
"operation": "create", + "path": "public.events.tags" + }, + { + "sql": "ALTER TABLE events ADD COLUMN created_at_utc timestamp DEFAULT (now() AT TIME ZONE 'utc') NOT NULL;", + "type": "table.column", + "operation": "create", + "path": "public.events.created_at_utc" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_column_generated/plan.json b/testdata/diff/create_table/add_column_generated/plan.json index b1b15f6c..5a4a191a 100644 --- a/testdata/diff/create_table/add_column_generated/plan.json +++ b/testdata/diff/create_table/add_column_generated/plan.json @@ -2,31 +2,35 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "6cda4aebfc751d3647bfc3516385820553a2268952b54c3bc8a378ac774d26af" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "6cda4aebfc751d3647bfc3516385820553a2268952b54c3bc8a378ac774d26af" + }, + "groups": [ { - "sql": "ALTER TABLE merge_request\nADD COLUMN iid integer GENERATED ALWAYS AS (((data ->> 'iid'::text))::integer) STORED CONSTRAINT pk_merge_request_iid PRIMARY KEY;", - "type": "table.column", - "operation": "create", - "path": "public.merge_request.iid" - }, - { - "sql": "ALTER TABLE merge_request ADD COLUMN title text GENERATED ALWAYS AS ((data ->> 'title'::text)) STORED;", - "type": "table.column", - "operation": "create", - "path": "public.merge_request.title" - }, - { - "sql": "ALTER TABLE merge_request ADD COLUMN cleaned_title varchar(255) GENERATED ALWAYS AS (lower((data ->> 'title'::text))) STORED NOT NULL;", - "type": "table.column", - "operation": "create", - "path": "public.merge_request.cleaned_title" + "steps": [ + { + "sql": "ALTER TABLE merge_request\nADD COLUMN iid integer GENERATED ALWAYS AS (((data ->> 'iid'::text))::integer) STORED CONSTRAINT pk_merge_request_iid PRIMARY KEY;", + "type": "table.column", + "operation": "create", + "path": "public.merge_request.iid" + }, + { + "sql": 
"ALTER TABLE merge_request ADD COLUMN title text GENERATED ALWAYS AS ((data ->> 'title'::text)) STORED;", + "type": "table.column", + "operation": "create", + "path": "public.merge_request.title" + }, + { + "sql": "ALTER TABLE merge_request ADD COLUMN cleaned_title varchar(255) GENERATED ALWAYS AS (lower((data ->> 'title'::text))) STORED NOT NULL;", + "type": "table.column", + "operation": "create", + "path": "public.merge_request.cleaned_title" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_column_identity/plan.json b/testdata/diff/create_table/add_column_identity/plan.json index 1b697f29..89a6205d 100644 --- a/testdata/diff/create_table/add_column_identity/plan.json +++ b/testdata/diff/create_table/add_column_identity/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "4223eb048f2842a48f628dc3c914934d49a272e12db7c30b0e9e8da2c4d8ea82" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "4223eb048f2842a48f628dc3c914934d49a272e12db7c30b0e9e8da2c4d8ea82" + }, + "groups": [ { - "sql": "ALTER TABLE products ADD COLUMN id bigint GENERATED ALWAYS AS IDENTITY;", - "type": "table.column", - "operation": "create", - "path": "public.products.id" + "steps": [ + { + "sql": "ALTER TABLE products ADD COLUMN id bigint GENERATED ALWAYS AS IDENTITY;", + "type": "table.column", + "operation": "create", + "path": "public.products.id" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_column_integer/plan.json b/testdata/diff/create_table/add_column_integer/plan.json index 39da7873..ad5d2535 100644 --- a/testdata/diff/create_table/add_column_integer/plan.json +++ b/testdata/diff/create_table/add_column_integer/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": 
"4223eb048f2842a48f628dc3c914934d49a272e12db7c30b0e9e8da2c4d8ea82" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "4223eb048f2842a48f628dc3c914934d49a272e12db7c30b0e9e8da2c4d8ea82" + }, + "groups": [ { - "sql": "ALTER TABLE products ADD COLUMN quantity integer;", - "type": "table.column", - "operation": "create", - "path": "public.products.quantity" + "steps": [ + { + "sql": "ALTER TABLE products ADD COLUMN quantity integer;", + "type": "table.column", + "operation": "create", + "path": "public.products.quantity" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_column_jsonb/plan.json b/testdata/diff/create_table/add_column_jsonb/plan.json index bb7b33c7..57da43bb 100644 --- a/testdata/diff/create_table/add_column_jsonb/plan.json +++ b/testdata/diff/create_table/add_column_jsonb/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "210e07c9faeb5a5172be65242cec049cfc48c6411495e5e600c6066f290714a0" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "210e07c9faeb5a5172be65242cec049cfc48c6411495e5e600c6066f290714a0" + }, + "groups": [ { - "sql": "ALTER TABLE documents ADD COLUMN metadata jsonb;", - "type": "table.column", - "operation": "create", - "path": "public.documents.metadata" + "steps": [ + { + "sql": "ALTER TABLE documents ADD COLUMN metadata jsonb;", + "type": "table.column", + "operation": "create", + "path": "public.documents.metadata" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_column_numeric/plan.json b/testdata/diff/create_table/add_column_numeric/plan.json index 72991f84..157580d8 100644 --- a/testdata/diff/create_table/add_column_numeric/plan.json +++ b/testdata/diff/create_table/add_column_numeric/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - 
"source_fingerprint": { - "hash": "af34cb4dcbb9213ae95beb30b519da9273bee69285edb598f85a91b3f7a6a3f5" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "af34cb4dcbb9213ae95beb30b519da9273bee69285edb598f85a91b3f7a6a3f5" + }, + "groups": [ { - "sql": "ALTER TABLE transactions ADD COLUMN amount numeric(15,4);", - "type": "table.column", - "operation": "create", - "path": "public.transactions.amount" + "steps": [ + { + "sql": "ALTER TABLE transactions ADD COLUMN amount numeric(15,4);", + "type": "table.column", + "operation": "create", + "path": "public.transactions.amount" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_column_serial/plan.json b/testdata/diff/create_table/add_column_serial/plan.json index 12171e4e..eea688fc 100644 --- a/testdata/diff/create_table/add_column_serial/plan.json +++ b/testdata/diff/create_table/add_column_serial/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "438e37c99fc4d0f72c8e7c11cfd85e1c2312835ef564a94c93d0793802df9b08" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "438e37c99fc4d0f72c8e7c11cfd85e1c2312835ef564a94c93d0793802df9b08" + }, + "groups": [ { - "sql": "ALTER TABLE employees ADD COLUMN employee_id serial;", - "type": "table.column", - "operation": "create", - "path": "public.employees.employee_id" + "steps": [ + { + "sql": "ALTER TABLE employees ADD COLUMN employee_id serial;", + "type": "table.column", + "operation": "create", + "path": "public.employees.employee_id" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_column_text/plan.json b/testdata/diff/create_table/add_column_text/plan.json index 3c9759ca..3cc368be 100644 --- a/testdata/diff/create_table/add_column_text/plan.json +++ b/testdata/diff/create_table/add_column_text/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", 
"pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "04591b0a2b787055914f690f4f6c8d5c12bc6b4455b61bc8687625b00c8166d9" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "04591b0a2b787055914f690f4f6c8d5c12bc6b4455b61bc8687625b00c8166d9" + }, + "groups": [ { - "sql": "ALTER TABLE users ADD COLUMN email text;", - "type": "table.column", - "operation": "create", - "path": "public.users.email" + "steps": [ + { + "sql": "ALTER TABLE users ADD COLUMN email text;", + "type": "table.column", + "operation": "create", + "path": "public.users.email" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_column_timestamp/plan.json b/testdata/diff/create_table/add_column_timestamp/plan.json index cf94343b..525f5719 100644 --- a/testdata/diff/create_table/add_column_timestamp/plan.json +++ b/testdata/diff/create_table/add_column_timestamp/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "2f64297440983715c85dd0e94e5e6990670d02d31d18fef2170a27df2e2176dc" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "2f64297440983715c85dd0e94e5e6990670d02d31d18fef2170a27df2e2176dc" + }, + "groups": [ { - "sql": "ALTER TABLE events ADD COLUMN created_at timestamptz;", - "type": "table.column", - "operation": "create", - "path": "public.events.created_at" + "steps": [ + { + "sql": "ALTER TABLE events ADD COLUMN created_at timestamptz;", + "type": "table.column", + "operation": "create", + "path": "public.events.created_at" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_column_uuid/plan.json b/testdata/diff/create_table/add_column_uuid/plan.json index 5531cb41..e338d336 100644 --- a/testdata/diff/create_table/add_column_uuid/plan.json +++ b/testdata/diff/create_table/add_column_uuid/plan.json @@ -2,19 +2,23 @@ 
"version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "a030caa57fd73850952135aeee1017103f24f248ca134a745c37c1f845b61946" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "a030caa57fd73850952135aeee1017103f24f248ca134a745c37c1f845b61946" + }, + "groups": [ { - "sql": "ALTER TABLE sessions ADD COLUMN token uuid;", - "type": "table.column", - "operation": "create", - "path": "public.sessions.token" + "steps": [ + { + "sql": "ALTER TABLE sessions ADD COLUMN token uuid;", + "type": "table.column", + "operation": "create", + "path": "public.sessions.token" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_column_varchar/plan.json b/testdata/diff/create_table/add_column_varchar/plan.json index 27b5999a..960b1091 100644 --- a/testdata/diff/create_table/add_column_varchar/plan.json +++ b/testdata/diff/create_table/add_column_varchar/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "3638e006d46b96e8180e757cfe55f3fca54d1bb7c827893e3082393394f71606" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "3638e006d46b96e8180e757cfe55f3fca54d1bb7c827893e3082393394f71606" + }, + "groups": [ { - "sql": "ALTER TABLE customers ADD COLUMN phone varchar(20);", - "type": "table.column", - "operation": "create", - "path": "public.customers.phone" + "steps": [ + { + "sql": "ALTER TABLE customers ADD COLUMN phone varchar(20);", + "type": "table.column", + "operation": "create", + "path": "public.customers.phone" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_default_not_null/plan.json b/testdata/diff/create_table/add_default_not_null/plan.json index 5fa07c00..0ef0e862 100644 --- a/testdata/diff/create_table/add_default_not_null/plan.json +++ 
b/testdata/diff/create_table/add_default_not_null/plan.json @@ -2,43 +2,47 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "01cf16fec8a51600f8cb61a4e18fb339de480aaa256c4203a10ce9448584d25d" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "01cf16fec8a51600f8cb61a4e18fb339de480aaa256c4203a10ce9448584d25d" + }, + "groups": [ { - "sql": "ALTER TABLE people ADD CONSTRAINT created_at_not_null CHECK (created_at IS NOT NULL) NOT VALID;", - "type": "table.column", - "operation": "alter", - "path": "public.people.created_at" - }, - { - "sql": "ALTER TABLE people VALIDATE CONSTRAINT created_at_not_null;", - "type": "table.column", - "operation": "alter", - "path": "public.people.created_at" - }, - { - "sql": "ALTER TABLE people ALTER COLUMN created_at SET NOT NULL;", - "type": "table.column", - "operation": "alter", - "path": "public.people.created_at" - }, - { - "sql": "ALTER TABLE people DROP CONSTRAINT created_at_not_null;", - "type": "table.column", - "operation": "alter", - "path": "public.people.created_at" - }, - { - "sql": "ALTER TABLE people ALTER COLUMN created_at SET DEFAULT now();", - "type": "table.column", - "operation": "alter", - "path": "public.people.created_at" + "steps": [ + { + "sql": "ALTER TABLE people ADD CONSTRAINT created_at_not_null CHECK (created_at IS NOT NULL) NOT VALID;", + "type": "table.column", + "operation": "alter", + "path": "public.people.created_at" + }, + { + "sql": "ALTER TABLE people VALIDATE CONSTRAINT created_at_not_null;", + "type": "table.column", + "operation": "alter", + "path": "public.people.created_at" + }, + { + "sql": "ALTER TABLE people ALTER COLUMN created_at SET NOT NULL;", + "type": "table.column", + "operation": "alter", + "path": "public.people.created_at" + }, + { + "sql": "ALTER TABLE people DROP CONSTRAINT created_at_not_null;", + "type": "table.column", + "operation": "alter", + 
"path": "public.people.created_at" + }, + { + "sql": "ALTER TABLE people ALTER COLUMN created_at SET DEFAULT now();", + "type": "table.column", + "operation": "alter", + "path": "public.people.created_at" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_fk/plan.json b/testdata/diff/create_table/add_fk/plan.json index 37357a83..c8dab9bb 100644 --- a/testdata/diff/create_table/add_fk/plan.json +++ b/testdata/diff/create_table/add_fk/plan.json @@ -2,145 +2,149 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "9b85e377059726a1c480a565f28fa2caf1e39ba1512de5c1adc8efbabc669dc1" - }, - "groups": [ - { - "steps": [ - { - "sql": "ALTER TABLE books\nADD CONSTRAINT books_author_id_fkey FOREIGN KEY (author_id) REFERENCES authors (id) ON DELETE CASCADE NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.books.books_author_id_fkey" - }, - { - "sql": "ALTER TABLE books VALIDATE CONSTRAINT books_author_id_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.books.books_author_id_fkey" - }, - { - "sql": "ALTER TABLE employees\nADD CONSTRAINT employees_department_id_fkey FOREIGN KEY (department_id) REFERENCES departments (id) NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.employees.employees_department_id_fkey" - }, - { - "sql": "ALTER TABLE employees VALIDATE CONSTRAINT employees_department_id_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.employees.employees_department_id_fkey" - }, - { - "sql": "ALTER TABLE nodes\nADD CONSTRAINT nodes_parent_id_fkey FOREIGN KEY (parent_id) REFERENCES nodes (id) NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.nodes.nodes_parent_id_fkey" - }, - { - "sql": "ALTER TABLE nodes VALIDATE CONSTRAINT nodes_parent_id_fkey;", - "type": "table.constraint", - "operation": "create", - "path": 
"public.nodes.nodes_parent_id_fkey" - }, - { - "sql": "ALTER TABLE orders\nADD CONSTRAINT orders_customer_id_fkey FOREIGN KEY (customer_id) REFERENCES customers (id) NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.orders.orders_customer_id_fkey" - }, - { - "sql": "ALTER TABLE orders VALIDATE CONSTRAINT orders_customer_id_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.orders.orders_customer_id_fkey" - }, - { - "sql": "ALTER TABLE orders\nADD CONSTRAINT orders_manager_id_fkey FOREIGN KEY (manager_id) REFERENCES managers (id) ON DELETE SET NULL NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.orders.orders_manager_id_fkey" - }, - { - "sql": "ALTER TABLE orders VALIDATE CONSTRAINT orders_manager_id_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.orders.orders_manager_id_fkey" - }, - { - "sql": "ALTER TABLE orders\nADD CONSTRAINT orders_product_id_fkey FOREIGN KEY (product_id) REFERENCES products (id) ON DELETE CASCADE NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.orders.orders_product_id_fkey" - }, - { - "sql": "ALTER TABLE orders VALIDATE CONSTRAINT orders_product_id_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.orders.orders_product_id_fkey" - }, - { - "sql": "ALTER TABLE price_adjustments\nADD CONSTRAINT price_adjustments_product_fkey FOREIGN KEY (product_id, PERIOD adjustment_period) REFERENCES price_history (product_id, PERIOD valid_period) NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.price_adjustments.price_adjustments_product_fkey" - }, - { - "sql": "ALTER TABLE price_adjustments VALIDATE CONSTRAINT price_adjustments_product_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.price_adjustments.price_adjustments_product_fkey" - }, - { - "sql": "ALTER TABLE products\nADD CONSTRAINT 
products_category_code_fkey FOREIGN KEY (category_code) REFERENCES categories (code) ON UPDATE CASCADE NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.products.products_category_code_fkey" - }, - { - "sql": "ALTER TABLE products VALIDATE CONSTRAINT products_category_code_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.products.products_category_code_fkey" - }, - { - "sql": "ALTER TABLE projects\nADD CONSTRAINT projects_tenant_id_org_id_fkey FOREIGN KEY (tenant_id, org_id) REFERENCES organizations (tenant_id, org_id) NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.projects.projects_tenant_id_org_id_fkey" - }, - { - "sql": "ALTER TABLE projects VALIDATE CONSTRAINT projects_tenant_id_org_id_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.projects.projects_tenant_id_org_id_fkey" - }, - { - "sql": "ALTER TABLE teams\nADD CONSTRAINT teams_manager_id_fkey FOREIGN KEY (manager_id) REFERENCES managers (id) ON DELETE SET NULL NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.teams.teams_manager_id_fkey" - }, - { - "sql": "ALTER TABLE teams VALIDATE CONSTRAINT teams_manager_id_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.teams.teams_manager_id_fkey" - }, - { - "sql": "ALTER TABLE user_profiles\nADD CONSTRAINT user_profiles_user_id_fkey FOREIGN KEY (user_id) REFERENCES users (id) DEFERRABLE INITIALLY DEFERRED NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.user_profiles.user_profiles_user_id_fkey" - }, - { - "sql": "ALTER TABLE user_profiles VALIDATE CONSTRAINT user_profiles_user_id_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.user_profiles.user_profiles_user_id_fkey" + "schemas": { + "public": { + "source_fingerprint": { + "hash": 
"9b85e377059726a1c480a565f28fa2caf1e39ba1512de5c1adc8efbabc669dc1" + }, + "groups": [ + { + "steps": [ + { + "sql": "ALTER TABLE books\nADD CONSTRAINT books_author_id_fkey FOREIGN KEY (author_id) REFERENCES authors (id) ON DELETE CASCADE NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.books.books_author_id_fkey" + }, + { + "sql": "ALTER TABLE books VALIDATE CONSTRAINT books_author_id_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.books.books_author_id_fkey" + }, + { + "sql": "ALTER TABLE employees\nADD CONSTRAINT employees_department_id_fkey FOREIGN KEY (department_id) REFERENCES departments (id) NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.employees.employees_department_id_fkey" + }, + { + "sql": "ALTER TABLE employees VALIDATE CONSTRAINT employees_department_id_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.employees.employees_department_id_fkey" + }, + { + "sql": "ALTER TABLE nodes\nADD CONSTRAINT nodes_parent_id_fkey FOREIGN KEY (parent_id) REFERENCES nodes (id) NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.nodes.nodes_parent_id_fkey" + }, + { + "sql": "ALTER TABLE nodes VALIDATE CONSTRAINT nodes_parent_id_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.nodes.nodes_parent_id_fkey" + }, + { + "sql": "ALTER TABLE orders\nADD CONSTRAINT orders_customer_id_fkey FOREIGN KEY (customer_id) REFERENCES customers (id) NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.orders.orders_customer_id_fkey" + }, + { + "sql": "ALTER TABLE orders VALIDATE CONSTRAINT orders_customer_id_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.orders.orders_customer_id_fkey" + }, + { + "sql": "ALTER TABLE orders\nADD CONSTRAINT orders_manager_id_fkey FOREIGN KEY (manager_id) REFERENCES managers (id) ON DELETE 
SET NULL NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.orders.orders_manager_id_fkey" + }, + { + "sql": "ALTER TABLE orders VALIDATE CONSTRAINT orders_manager_id_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.orders.orders_manager_id_fkey" + }, + { + "sql": "ALTER TABLE orders\nADD CONSTRAINT orders_product_id_fkey FOREIGN KEY (product_id) REFERENCES products (id) ON DELETE CASCADE NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.orders.orders_product_id_fkey" + }, + { + "sql": "ALTER TABLE orders VALIDATE CONSTRAINT orders_product_id_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.orders.orders_product_id_fkey" + }, + { + "sql": "ALTER TABLE price_adjustments\nADD CONSTRAINT price_adjustments_product_fkey FOREIGN KEY (product_id, PERIOD adjustment_period) REFERENCES price_history (product_id, PERIOD valid_period) NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.price_adjustments.price_adjustments_product_fkey" + }, + { + "sql": "ALTER TABLE price_adjustments VALIDATE CONSTRAINT price_adjustments_product_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.price_adjustments.price_adjustments_product_fkey" + }, + { + "sql": "ALTER TABLE products\nADD CONSTRAINT products_category_code_fkey FOREIGN KEY (category_code) REFERENCES categories (code) ON UPDATE CASCADE NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.products.products_category_code_fkey" + }, + { + "sql": "ALTER TABLE products VALIDATE CONSTRAINT products_category_code_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.products.products_category_code_fkey" + }, + { + "sql": "ALTER TABLE projects\nADD CONSTRAINT projects_tenant_id_org_id_fkey FOREIGN KEY (tenant_id, org_id) REFERENCES organizations (tenant_id, org_id) NOT VALID;", + 
"type": "table.constraint", + "operation": "create", + "path": "public.projects.projects_tenant_id_org_id_fkey" + }, + { + "sql": "ALTER TABLE projects VALIDATE CONSTRAINT projects_tenant_id_org_id_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.projects.projects_tenant_id_org_id_fkey" + }, + { + "sql": "ALTER TABLE teams\nADD CONSTRAINT teams_manager_id_fkey FOREIGN KEY (manager_id) REFERENCES managers (id) ON DELETE SET NULL NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.teams.teams_manager_id_fkey" + }, + { + "sql": "ALTER TABLE teams VALIDATE CONSTRAINT teams_manager_id_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.teams.teams_manager_id_fkey" + }, + { + "sql": "ALTER TABLE user_profiles\nADD CONSTRAINT user_profiles_user_id_fkey FOREIGN KEY (user_id) REFERENCES users (id) DEFERRABLE INITIALLY DEFERRED NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.user_profiles.user_profiles_user_id_fkey" + }, + { + "sql": "ALTER TABLE user_profiles VALIDATE CONSTRAINT user_profiles_user_id_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.user_profiles.user_profiles_user_id_fkey" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_pk/plan.json b/testdata/diff/create_table/add_pk/plan.json index 14a20357..fa8c7d4c 100644 --- a/testdata/diff/create_table/add_pk/plan.json +++ b/testdata/diff/create_table/add_pk/plan.json @@ -2,55 +2,59 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "aa2ee5caf6ab616cedd6523e73c94e4acadf4435da3f5f31f9a89c08ac17b289" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "aa2ee5caf6ab616cedd6523e73c94e4acadf4435da3f5f31f9a89c08ac17b289" + }, + "groups": [ { - "sql": "ALTER TABLE categories\nADD COLUMN code text CONSTRAINT 
categories_pkey PRIMARY KEY;", - "type": "table.column", - "operation": "create", - "path": "public.categories.code" - }, - { - "sql": "ALTER TABLE orders\nADD COLUMN id serial CONSTRAINT orders_pkey PRIMARY KEY;", - "type": "table.column", - "operation": "create", - "path": "public.orders.id" - }, - { - "sql": "ALTER TABLE products\nADD COLUMN id integer GENERATED ALWAYS AS IDENTITY CONSTRAINT products_pkey PRIMARY KEY;", - "type": "table.column", - "operation": "create", - "path": "public.products.id" - }, - { - "sql": "ALTER TABLE reservations\nADD CONSTRAINT reservations_pkey PRIMARY KEY (id, valid_period WITHOUT OVERLAPS);", - "type": "table.constraint", - "operation": "create", - "path": "public.reservations.reservations_pkey" - }, - { - "sql": "ALTER TABLE sessions\nADD COLUMN id uuid CONSTRAINT sessions_pkey PRIMARY KEY;", - "type": "table.column", - "operation": "create", - "path": "public.sessions.id" - }, - { - "sql": "ALTER TABLE user_permissions\nADD CONSTRAINT user_permissions_pkey PRIMARY KEY (user_id, resource_id, permission_type);", - "type": "table.constraint", - "operation": "create", - "path": "public.user_permissions.user_permissions_pkey" - }, - { - "sql": "ALTER TABLE users\nADD COLUMN id integer CONSTRAINT users_pkey PRIMARY KEY;", - "type": "table.column", - "operation": "create", - "path": "public.users.id" + "steps": [ + { + "sql": "ALTER TABLE categories\nADD COLUMN code text CONSTRAINT categories_pkey PRIMARY KEY;", + "type": "table.column", + "operation": "create", + "path": "public.categories.code" + }, + { + "sql": "ALTER TABLE orders\nADD COLUMN id serial CONSTRAINT orders_pkey PRIMARY KEY;", + "type": "table.column", + "operation": "create", + "path": "public.orders.id" + }, + { + "sql": "ALTER TABLE products\nADD COLUMN id integer GENERATED ALWAYS AS IDENTITY CONSTRAINT products_pkey PRIMARY KEY;", + "type": "table.column", + "operation": "create", + "path": "public.products.id" + }, + { + "sql": "ALTER TABLE reservations\nADD 
CONSTRAINT reservations_pkey PRIMARY KEY (id, valid_period WITHOUT OVERLAPS);", + "type": "table.constraint", + "operation": "create", + "path": "public.reservations.reservations_pkey" + }, + { + "sql": "ALTER TABLE sessions\nADD COLUMN id uuid CONSTRAINT sessions_pkey PRIMARY KEY;", + "type": "table.column", + "operation": "create", + "path": "public.sessions.id" + }, + { + "sql": "ALTER TABLE user_permissions\nADD CONSTRAINT user_permissions_pkey PRIMARY KEY (user_id, resource_id, permission_type);", + "type": "table.constraint", + "operation": "create", + "path": "public.user_permissions.user_permissions_pkey" + }, + { + "sql": "ALTER TABLE users\nADD COLUMN id integer CONSTRAINT users_pkey PRIMARY KEY;", + "type": "table.column", + "operation": "create", + "path": "public.users.id" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_table/plan.json b/testdata/diff/create_table/add_table/plan.json index c7d55635..8ef9f7e4 100644 --- a/testdata/diff/create_table/add_table/plan.json +++ b/testdata/diff/create_table/add_table/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS departments (\n id integer,\n name text NOT NULL,\n CONSTRAINT departments_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.departments" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS departments (\n id integer,\n name text NOT NULL,\n CONSTRAINT departments_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.departments" + } + ] } ] } - ] + } } diff --git 
a/testdata/diff/create_table/add_table_composite_keys/plan.json b/testdata/diff/create_table/add_table_composite_keys/plan.json index b44b0884..d88d3553 100644 --- a/testdata/diff/create_table/add_table_composite_keys/plan.json +++ b/testdata/diff/create_table/add_table_composite_keys/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS organizations (\n tenant_id integer,\n org_id integer,\n org_name text NOT NULL,\n org_type text NOT NULL,\n CONSTRAINT organizations_pkey PRIMARY KEY (tenant_id, org_id),\n CONSTRAINT organizations_org_type_org_id_tenant_id_key UNIQUE (org_type, org_id, tenant_id)\n);", - "type": "table", - "operation": "create", - "path": "public.organizations" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS projects (\n tenant_id integer,\n org_id integer,\n project_id integer,\n project_name text NOT NULL,\n project_code text NOT NULL,\n description text,\n CONSTRAINT projects_pkey PRIMARY KEY (tenant_id, org_id, project_id),\n CONSTRAINT projects_project_name_tenant_id_project_id_key UNIQUE (project_name, tenant_id, project_id),\n CONSTRAINT projects_tenant_id_org_id_fkey FOREIGN KEY (tenant_id, org_id) REFERENCES organizations (tenant_id, org_id)\n);", - "type": "table", - "operation": "create", - "path": "public.projects" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS organizations (\n tenant_id integer,\n org_id integer,\n org_name text NOT NULL,\n org_type text NOT NULL,\n CONSTRAINT organizations_pkey PRIMARY KEY (tenant_id, org_id),\n CONSTRAINT organizations_org_type_org_id_tenant_id_key UNIQUE (org_type, org_id, tenant_id)\n);", + "type": "table", 
+ "operation": "create", + "path": "public.organizations" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS projects (\n tenant_id integer,\n org_id integer,\n project_id integer,\n project_name text NOT NULL,\n project_code text NOT NULL,\n description text,\n CONSTRAINT projects_pkey PRIMARY KEY (tenant_id, org_id, project_id),\n CONSTRAINT projects_project_name_tenant_id_project_id_key UNIQUE (project_name, tenant_id, project_id),\n CONSTRAINT projects_tenant_id_org_id_fkey FOREIGN KEY (tenant_id, org_id) REFERENCES organizations (tenant_id, org_id)\n);", + "type": "table", + "operation": "create", + "path": "public.projects" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_table_like/plan.json b/testdata/diff/create_table/add_table_like/plan.json index 06035784..a2903b98 100644 --- a/testdata/diff/create_table/add_table_like/plan.json +++ b/testdata/diff/create_table/add_table_like/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "611525ee5214cced7a4f007fb4c9124e073a61481e232445a218c9cbece0f6ad" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "611525ee5214cced7a4f007fb4c9124e073a61481e232445a218c9cbece0f6ad" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS products (\n id SERIAL,\n created_at timestamptz DEFAULT now() NOT NULL,\n updated_at timestamptz DEFAULT now() NOT NULL,\n deleted_at timestamptz,\n CONSTRAINT products_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.products" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS users (\n id SERIAL,\n created_at timestamptz DEFAULT now() NOT NULL,\n updated_at timestamptz DEFAULT now() NOT NULL,\n deleted_at timestamptz,\n CONSTRAINT users_pkey PRIMARY KEY (id),\n CONSTRAINT _template_timestamps_check CHECK (created_at <= updated_at)\n);", - "type": "table", - "operation": "create", - "path": 
"public.users" - }, - { - "sql": "COMMENT ON COLUMN users.created_at IS 'Record creation time';", - "type": "table.column.comment", - "operation": "create", - "path": "public.users.created_at" - }, - { - "sql": "CREATE INDEX IF NOT EXISTS users_created_at_idx ON users (created_at);", - "type": "table.index", - "operation": "create", - "path": "public.users.users_created_at_idx" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS products (\n id SERIAL,\n created_at timestamptz DEFAULT now() NOT NULL,\n updated_at timestamptz DEFAULT now() NOT NULL,\n deleted_at timestamptz,\n CONSTRAINT products_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.products" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS users (\n id SERIAL,\n created_at timestamptz DEFAULT now() NOT NULL,\n updated_at timestamptz DEFAULT now() NOT NULL,\n deleted_at timestamptz,\n CONSTRAINT users_pkey PRIMARY KEY (id),\n CONSTRAINT _template_timestamps_check CHECK (created_at <= updated_at)\n);", + "type": "table", + "operation": "create", + "path": "public.users" + }, + { + "sql": "COMMENT ON COLUMN users.created_at IS 'Record creation time';", + "type": "table.column.comment", + "operation": "create", + "path": "public.users.created_at" + }, + { + "sql": "CREATE INDEX IF NOT EXISTS users_created_at_idx ON users (created_at);", + "type": "table.index", + "operation": "create", + "path": "public.users.users_created_at_idx" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_table_like_forward_ref/plan.json b/testdata/diff/create_table/add_table_like_forward_ref/plan.json index 4f8a9f78..76d0cb93 100644 --- a/testdata/diff/create_table/add_table_like_forward_ref/plan.json +++ b/testdata/diff/create_table/add_table_like_forward_ref/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" 
- }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS customers (\n customer_id integer NOT NULL,\n name varchar(100) NOT NULL,\n email varchar(255),\n created_at timestamp DEFAULT now(),\n updated_at timestamp DEFAULT now(),\n CONSTRAINT customers_email_key UNIQUE (email)\n);", - "type": "table", - "operation": "create", - "path": "public.customers" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS orders (\n id SERIAL,\n order_date date NOT NULL,\n customer_id integer NOT NULL,\n name varchar(100) NOT NULL,\n email varchar(255),\n created_at timestamp DEFAULT now(),\n updated_at timestamp DEFAULT now(),\n CONSTRAINT orders_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.orders" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS customers (\n customer_id integer NOT NULL,\n name varchar(100) NOT NULL,\n email varchar(255),\n created_at timestamp DEFAULT now(),\n updated_at timestamp DEFAULT now(),\n CONSTRAINT customers_email_key UNIQUE (email)\n);", + "type": "table", + "operation": "create", + "path": "public.customers" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS orders (\n id SERIAL,\n order_date date NOT NULL,\n customer_id integer NOT NULL,\n name varchar(100) NOT NULL,\n email varchar(255),\n created_at timestamp DEFAULT now(),\n updated_at timestamp DEFAULT now(),\n CONSTRAINT orders_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.orders" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_table_no_online_rewrite/plan.json b/testdata/diff/create_table/add_table_no_online_rewrite/plan.json index ae54191e..fe9d2e94 100644 --- a/testdata/diff/create_table/add_table_no_online_rewrite/plan.json +++ b/testdata/diff/create_table/add_table_no_online_rewrite/plan.json @@ -2,31 +2,35 @@ "version": 
"1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS companies (\n id integer,\n name text NOT NULL,\n CONSTRAINT companies_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.companies" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS departments (\n id integer,\n name text NOT NULL,\n company_id integer NOT NULL,\n budget numeric(10,2),\n created_at timestamp DEFAULT now(),\n CONSTRAINT departments_pkey PRIMARY KEY (id),\n CONSTRAINT departments_company_id_fkey FOREIGN KEY (company_id) REFERENCES companies (id),\n CONSTRAINT departments_budget_check CHECK (budget > 0::numeric)\n);", - "type": "table", - "operation": "create", - "path": "public.departments" - }, - { - "sql": "CREATE INDEX IF NOT EXISTS idx_departments_name ON departments (name);", - "type": "table.index", - "operation": "create", - "path": "public.departments.idx_departments_name" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS companies (\n id integer,\n name text NOT NULL,\n CONSTRAINT companies_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.companies" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS departments (\n id integer,\n name text NOT NULL,\n company_id integer NOT NULL,\n budget numeric(10,2),\n created_at timestamp DEFAULT now(),\n CONSTRAINT departments_pkey PRIMARY KEY (id),\n CONSTRAINT departments_company_id_fkey FOREIGN KEY (company_id) REFERENCES companies (id),\n CONSTRAINT departments_budget_check CHECK (budget > 0::numeric)\n);", + "type": "table", + "operation": "create", + "path": "public.departments" + }, + { + "sql": "CREATE INDEX IF NOT EXISTS 
idx_departments_name ON departments (name);", + "type": "table.index", + "operation": "create", + "path": "public.departments.idx_departments_name" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_table_partitioned/plan.json b/testdata/diff/create_table/add_table_partitioned/plan.json index 35c99b36..8acf2f50 100644 --- a/testdata/diff/create_table/add_table_partitioned/plan.json +++ b/testdata/diff/create_table/add_table_partitioned/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS orders (\n id integer NOT NULL,\n order_date date NOT NULL,\n customer_id integer\n) PARTITION BY RANGE (order_date);", - "type": "table", - "operation": "create", - "path": "public.orders" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS orders (\n id integer NOT NULL,\n order_date date NOT NULL,\n customer_id integer\n) PARTITION BY RANGE (order_date);", + "type": "table", + "operation": "create", + "path": "public.orders" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_table_serial_pk/plan.json b/testdata/diff/create_table/add_table_serial_pk/plan.json index c7c66aa0..51964fbd 100644 --- a/testdata/diff/create_table/add_table_serial_pk/plan.json +++ b/testdata/diff/create_table/add_table_serial_pk/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": 
"965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS users (\n id SERIAL,\n name text,\n email text,\n CONSTRAINT users_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.users" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS users (\n id SERIAL,\n name text,\n email text,\n CONSTRAINT users_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.users" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_table_unlogged/plan.json b/testdata/diff/create_table/add_table_unlogged/plan.json index 34600bd3..6c0276f0 100644 --- a/testdata/diff/create_table/add_table_unlogged/plan.json +++ b/testdata/diff/create_table/add_table_unlogged/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE UNLOGGED TABLE IF NOT EXISTS events (\n id integer,\n payload text NOT NULL,\n CONSTRAINT events_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.events" + "steps": [ + { + "sql": "CREATE UNLOGGED TABLE IF NOT EXISTS events (\n id integer,\n payload text NOT NULL,\n CONSTRAINT events_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.events" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_uk/plan.json b/testdata/diff/create_table/add_uk/plan.json index 2b838fe6..68008740 100644 --- a/testdata/diff/create_table/add_uk/plan.json +++ b/testdata/diff/create_table/add_uk/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": 
"1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "d3aee4e18d89d1f6e49492c3805b14ef9db5c94307292d66727bf75b3ce0a1e1" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "d3aee4e18d89d1f6e49492c3805b14ef9db5c94307292d66727bf75b3ce0a1e1" + }, + "groups": [ { - "sql": "ALTER TABLE orders\nADD COLUMN id serial CONSTRAINT orders_id_key UNIQUE;", - "type": "table.column", - "operation": "create", - "path": "public.orders.id" - }, - { - "sql": "ALTER TABLE products\nADD COLUMN id integer GENERATED ALWAYS AS IDENTITY CONSTRAINT products_id_key UNIQUE;", - "type": "table.column", - "operation": "create", - "path": "public.products.id" - }, - { - "sql": "ALTER TABLE user_permissions\nADD CONSTRAINT user_permissions_user_id_resource_id_permission_type_key UNIQUE (user_id, resource_id, permission_type);", - "type": "table.constraint", - "operation": "create", - "path": "public.user_permissions.user_permissions_user_id_resource_id_permission_type_key" - }, - { - "sql": "ALTER TABLE users\nADD COLUMN id integer CONSTRAINT users_id_key UNIQUE;", - "type": "table.column", - "operation": "create", - "path": "public.users.id" + "steps": [ + { + "sql": "ALTER TABLE orders\nADD COLUMN id serial CONSTRAINT orders_id_key UNIQUE;", + "type": "table.column", + "operation": "create", + "path": "public.orders.id" + }, + { + "sql": "ALTER TABLE products\nADD COLUMN id integer GENERATED ALWAYS AS IDENTITY CONSTRAINT products_id_key UNIQUE;", + "type": "table.column", + "operation": "create", + "path": "public.products.id" + }, + { + "sql": "ALTER TABLE user_permissions\nADD CONSTRAINT user_permissions_user_id_resource_id_permission_type_key UNIQUE (user_id, resource_id, permission_type);", + "type": "table.constraint", + "operation": "create", + "path": "public.user_permissions.user_permissions_user_id_resource_id_permission_type_key" + }, + { + "sql": "ALTER TABLE users\nADD COLUMN id integer CONSTRAINT users_id_key UNIQUE;", + 
"type": "table.column", + "operation": "create", + "path": "public.users.id" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_unique_constraint/plan.json b/testdata/diff/create_table/add_unique_constraint/plan.json index 0528665d..74d6cddc 100644 --- a/testdata/diff/create_table/add_unique_constraint/plan.json +++ b/testdata/diff/create_table/add_unique_constraint/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "5b049e926c4be3b7398c0f37d3f1c3b84c6721f3e07be244cfc34b568000e803" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "5b049e926c4be3b7398c0f37d3f1c3b84c6721f3e07be244cfc34b568000e803" + }, + "groups": [ { - "sql": "ALTER TABLE user_sessions\nADD CONSTRAINT user_sessions_token_device_key UNIQUE (session_token, device_fingerprint);", - "type": "table.constraint", - "operation": "create", - "path": "public.user_sessions.user_sessions_token_device_key" - }, - { - "sql": "ALTER TABLE user_sessions\nADD CONSTRAINT user_sessions_user_device_key UNIQUE (user_id, device_fingerprint);", - "type": "table.constraint", - "operation": "create", - "path": "public.user_sessions.user_sessions_user_device_key" + "steps": [ + { + "sql": "ALTER TABLE user_sessions\nADD CONSTRAINT user_sessions_token_device_key UNIQUE (session_token, device_fingerprint);", + "type": "table.constraint", + "operation": "create", + "path": "public.user_sessions.user_sessions_token_device_key" + }, + { + "sql": "ALTER TABLE user_sessions\nADD CONSTRAINT user_sessions_user_device_key UNIQUE (user_id, device_fingerprint);", + "type": "table.constraint", + "operation": "create", + "path": "public.user_sessions.user_sessions_user_device_key" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/add_unique_constraint_nulls_not_distinct/plan.json b/testdata/diff/create_table/add_unique_constraint_nulls_not_distinct/plan.json 
index 726fb0e3..6cb9a7c6 100644 --- a/testdata/diff/create_table/add_unique_constraint_nulls_not_distinct/plan.json +++ b/testdata/diff/create_table/add_unique_constraint_nulls_not_distinct/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "70a4465367d5d40d0149eadc73c423f9eb954838b6602f00ca3496b264baf2e9" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "70a4465367d5d40d0149eadc73c423f9eb954838b6602f00ca3496b264baf2e9" + }, + "groups": [ { - "sql": "ALTER TABLE pgschema_repro_nulls\nADD CONSTRAINT pgschema_repro_nulls_uniq UNIQUE NULLS NOT DISTINCT (a, b);", - "type": "table.constraint", - "operation": "create", - "path": "public.pgschema_repro_nulls.pgschema_repro_nulls_uniq" + "steps": [ + { + "sql": "ALTER TABLE pgschema_repro_nulls\nADD CONSTRAINT pgschema_repro_nulls_uniq UNIQUE NULLS NOT DISTINCT (a, b);", + "type": "table.constraint", + "operation": "create", + "path": "public.pgschema_repro_nulls.pgschema_repro_nulls_uniq" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/alter_column_quoted_identifier/plan.json b/testdata/diff/create_table/alter_column_quoted_identifier/plan.json index 6d093997..79030188 100644 --- a/testdata/diff/create_table/alter_column_quoted_identifier/plan.json +++ b/testdata/diff/create_table/alter_column_quoted_identifier/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "9d443bc536153eed8fce077bfacc3d7f42b1a94f02d33868bb78be3b9de05088" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "9d443bc536153eed8fce077bfacc3d7f42b1a94f02d33868bb78be3b9de05088" + }, + "groups": [ { - "sql": "ALTER TABLE ex ALTER COLUMN \"ID\" TYPE bigint;", - "type": "table.column", - "operation": "alter", - "path": "public.ex.ID" + "steps": [ + 
{ + "sql": "ALTER TABLE ex ALTER COLUMN \"ID\" TYPE bigint;", + "type": "table.column", + "operation": "alter", + "path": "public.ex.ID" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/alter_column_types/plan.json b/testdata/diff/create_table/alter_column_types/plan.json index 0dfa6be4..262c080e 100644 --- a/testdata/diff/create_table/alter_column_types/plan.json +++ b/testdata/diff/create_table/alter_column_types/plan.json @@ -2,67 +2,71 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "3af9d137d24bdf3ddf4b191cf2393ada9a71adb61e7fa88d779e2e751933a409" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "3af9d137d24bdf3ddf4b191cf2393ada9a71adb61e7fa88d779e2e751933a409" + }, + "groups": [ { - "sql": "CREATE TYPE action_type AS ENUM (\n 'pending',\n 'approved',\n 'rejected'\n);", - "type": "type", - "operation": "create", - "path": "public.action_type" - }, - { - "sql": "ALTER TABLE user_pending_permissions ALTER COLUMN id TYPE bigint;", - "type": "table.column", - "operation": "alter", - "path": "public.user_pending_permissions.id" - }, - { - "sql": "ALTER TABLE user_pending_permissions ALTER COLUMN user_id TYPE bigint;", - "type": "table.column", - "operation": "alter", - "path": "public.user_pending_permissions.user_id" - }, - { - "sql": "ALTER TABLE user_pending_permissions ALTER COLUMN object_ids_ints TYPE bigint[];", - "type": "table.column", - "operation": "alter", - "path": "public.user_pending_permissions.object_ids_ints" - }, - { - "sql": "ALTER TABLE user_pending_permissions ALTER COLUMN action TYPE action_type USING action::action_type;", - "type": "table.column", - "operation": "alter", - "path": "public.user_pending_permissions.action" - }, - { - "sql": "ALTER TABLE user_pending_permissions ALTER COLUMN status DROP DEFAULT;", - "type": "table.column", - "operation": "alter", - "path": 
"public.user_pending_permissions.status" - }, - { - "sql": "ALTER TABLE user_pending_permissions ALTER COLUMN status TYPE action_type USING status::action_type;", - "type": "table.column", - "operation": "alter", - "path": "public.user_pending_permissions.status" - }, - { - "sql": "ALTER TABLE user_pending_permissions ALTER COLUMN status SET DEFAULT 'pending'::action_type;", - "type": "table.column", - "operation": "alter", - "path": "public.user_pending_permissions.status" - }, - { - "sql": "ALTER TABLE user_pending_permissions ALTER COLUMN tags TYPE action_type[] USING tags::action_type[];", - "type": "table.column", - "operation": "alter", - "path": "public.user_pending_permissions.tags" + "steps": [ + { + "sql": "CREATE TYPE action_type AS ENUM (\n 'pending',\n 'approved',\n 'rejected'\n);", + "type": "type", + "operation": "create", + "path": "public.action_type" + }, + { + "sql": "ALTER TABLE user_pending_permissions ALTER COLUMN id TYPE bigint;", + "type": "table.column", + "operation": "alter", + "path": "public.user_pending_permissions.id" + }, + { + "sql": "ALTER TABLE user_pending_permissions ALTER COLUMN user_id TYPE bigint;", + "type": "table.column", + "operation": "alter", + "path": "public.user_pending_permissions.user_id" + }, + { + "sql": "ALTER TABLE user_pending_permissions ALTER COLUMN object_ids_ints TYPE bigint[];", + "type": "table.column", + "operation": "alter", + "path": "public.user_pending_permissions.object_ids_ints" + }, + { + "sql": "ALTER TABLE user_pending_permissions ALTER COLUMN action TYPE action_type USING action::action_type;", + "type": "table.column", + "operation": "alter", + "path": "public.user_pending_permissions.action" + }, + { + "sql": "ALTER TABLE user_pending_permissions ALTER COLUMN status DROP DEFAULT;", + "type": "table.column", + "operation": "alter", + "path": "public.user_pending_permissions.status" + }, + { + "sql": "ALTER TABLE user_pending_permissions ALTER COLUMN status TYPE action_type USING 
status::action_type;", + "type": "table.column", + "operation": "alter", + "path": "public.user_pending_permissions.status" + }, + { + "sql": "ALTER TABLE user_pending_permissions ALTER COLUMN status SET DEFAULT 'pending'::action_type;", + "type": "table.column", + "operation": "alter", + "path": "public.user_pending_permissions.status" + }, + { + "sql": "ALTER TABLE user_pending_permissions ALTER COLUMN tags TYPE action_type[] USING tags::action_type[];", + "type": "table.column", + "operation": "alter", + "path": "public.user_pending_permissions.tags" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/alter_defaults/plan.json b/testdata/diff/create_table/alter_defaults/plan.json index 6043241b..e31d04ac 100644 --- a/testdata/diff/create_table/alter_defaults/plan.json +++ b/testdata/diff/create_table/alter_defaults/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "39b71e962db9b406f7f1d8bb6fceefde31e08b6f30601e8858d8f20cb4efc22d" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "39b71e962db9b406f7f1d8bb6fceefde31e08b6f30601e8858d8f20cb4efc22d" + }, + "groups": [ { - "sql": "ALTER TABLE users ALTER COLUMN name SET DEFAULT 'Unknown';", - "type": "table.column", - "operation": "alter", - "path": "public.users.name" - }, - { - "sql": "ALTER TABLE users ALTER COLUMN created_at SET DEFAULT CURRENT_TIMESTAMP;", - "type": "table.column", - "operation": "alter", - "path": "public.users.created_at" + "steps": [ + { + "sql": "ALTER TABLE users ALTER COLUMN name SET DEFAULT 'Unknown';", + "type": "table.column", + "operation": "alter", + "path": "public.users.name" + }, + { + "sql": "ALTER TABLE users ALTER COLUMN created_at SET DEFAULT CURRENT_TIMESTAMP;", + "type": "table.column", + "operation": "alter", + "path": "public.users.created_at" + } + ] } ] } - ] + } } diff --git 
a/testdata/diff/create_table/alter_identity/plan.json b/testdata/diff/create_table/alter_identity/plan.json index b7413d52..19369a3c 100644 --- a/testdata/diff/create_table/alter_identity/plan.json +++ b/testdata/diff/create_table/alter_identity/plan.json @@ -2,61 +2,65 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "5b91475214f7a1b4e4928c9480533b61f841d70494784aff431f1f392fba1e58" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "5b91475214f7a1b4e4928c9480533b61f841d70494784aff431f1f392fba1e58" + }, + "groups": [ { - "sql": "DROP SEQUENCE IF EXISTS table1_c2_seq CASCADE;", - "type": "sequence", - "operation": "drop", - "path": "public.table1_c2_seq" - }, - { - "sql": "CREATE SEQUENCE IF NOT EXISTS table1_c1_seq AS integer OWNED BY table1.c1;", - "type": "sequence", - "operation": "create", - "path": "public.table1_c1_seq" - }, - { - "sql": "ALTER TABLE table1 ALTER COLUMN c1 SET DEFAULT nextval('table1_c1_seq'::regclass);", - "type": "table.column", - "operation": "alter", - "path": "public.table1.c1" - }, - { - "sql": "ALTER TABLE table1 ALTER COLUMN c2 ADD GENERATED ALWAYS AS IDENTITY;", - "type": "table.column", - "operation": "alter", - "path": "public.table1.c2" - }, - { - "sql": "SELECT setval(pg_get_serial_sequence('table1', 'c2'), COALESCE(MAX(c2), 0) + 1) FROM table1;", - "type": "table.column", - "operation": "alter", - "path": "public.table1.c2" - }, - { - "sql": "ALTER TABLE table1 ALTER COLUMN c3 DROP IDENTITY;", - "type": "table.column", - "operation": "alter", - "path": "public.table1.c3" - }, - { - "sql": "ALTER TABLE table1 ALTER COLUMN c3 ADD GENERATED BY DEFAULT AS IDENTITY;", - "type": "table.column", - "operation": "alter", - "path": "public.table1.c3" - }, - { - "sql": "SELECT setval(pg_get_serial_sequence('table1', 'c3'), COALESCE(MAX(c3), 0) + 1) FROM table1;", - "type": "table.column", - "operation": "alter", - 
"path": "public.table1.c3" + "steps": [ + { + "sql": "DROP SEQUENCE IF EXISTS table1_c2_seq CASCADE;", + "type": "sequence", + "operation": "drop", + "path": "public.table1_c2_seq" + }, + { + "sql": "CREATE SEQUENCE IF NOT EXISTS table1_c1_seq AS integer OWNED BY table1.c1;", + "type": "sequence", + "operation": "create", + "path": "public.table1_c1_seq" + }, + { + "sql": "ALTER TABLE table1 ALTER COLUMN c1 SET DEFAULT nextval('table1_c1_seq'::regclass);", + "type": "table.column", + "operation": "alter", + "path": "public.table1.c1" + }, + { + "sql": "ALTER TABLE table1 ALTER COLUMN c2 ADD GENERATED ALWAYS AS IDENTITY;", + "type": "table.column", + "operation": "alter", + "path": "public.table1.c2" + }, + { + "sql": "SELECT setval(pg_get_serial_sequence('table1', 'c2'), COALESCE(MAX(c2), 0) + 1) FROM table1;", + "type": "table.column", + "operation": "alter", + "path": "public.table1.c2" + }, + { + "sql": "ALTER TABLE table1 ALTER COLUMN c3 DROP IDENTITY;", + "type": "table.column", + "operation": "alter", + "path": "public.table1.c3" + }, + { + "sql": "ALTER TABLE table1 ALTER COLUMN c3 ADD GENERATED BY DEFAULT AS IDENTITY;", + "type": "table.column", + "operation": "alter", + "path": "public.table1.c3" + }, + { + "sql": "SELECT setval(pg_get_serial_sequence('table1', 'c3'), COALESCE(MAX(c3), 0) + 1) FROM table1;", + "type": "table.column", + "operation": "alter", + "path": "public.table1.c3" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/composite_fk_column_order/plan.json b/testdata/diff/create_table/composite_fk_column_order/plan.json index a0e2284f..2fe15feb 100644 --- a/testdata/diff/create_table/composite_fk_column_order/plan.json +++ b/testdata/diff/create_table/composite_fk_column_order/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "4a1d5994bf17ba1a92365f4aa2ee445bcd47f70f4f2f03ac0c92e1eea98e8628" - }, - "groups": [ - { - "steps": [ + 
"schemas": { + "public": { + "source_fingerprint": { + "hash": "4a1d5994bf17ba1a92365f4aa2ee445bcd47f70f4f2f03ac0c92e1eea98e8628" + }, + "groups": [ { - "sql": "ALTER TABLE order_items\nADD CONSTRAINT fk_order_items_order FOREIGN KEY (customer_id, order_id) REFERENCES orders (customer_id, order_id) NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.order_items.fk_order_items_order" - }, - { - "sql": "ALTER TABLE order_items VALIDATE CONSTRAINT fk_order_items_order;", - "type": "table.constraint", - "operation": "create", - "path": "public.order_items.fk_order_items_order" + "steps": [ + { + "sql": "ALTER TABLE order_items\nADD CONSTRAINT fk_order_items_order FOREIGN KEY (customer_id, order_id) REFERENCES orders (customer_id, order_id) NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.order_items.fk_order_items_order" + }, + { + "sql": "ALTER TABLE order_items VALIDATE CONSTRAINT fk_order_items_order;", + "type": "table.constraint", + "operation": "create", + "path": "public.order_items.fk_order_items_order" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/drop_column/plan.json b/testdata/diff/create_table/drop_column/plan.json index c5c97444..1465c570 100644 --- a/testdata/diff/create_table/drop_column/plan.json +++ b/testdata/diff/create_table/drop_column/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "de93945f2adbf913ad174b88f9a092ae770871ee6df38fa3b45fdbb83fdde371" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "de93945f2adbf913ad174b88f9a092ae770871ee6df38fa3b45fdbb83fdde371" + }, + "groups": [ { - "sql": "ALTER TABLE users DROP COLUMN email;", - "type": "table.column", - "operation": "drop", - "path": "public.users.email" + "steps": [ + { + "sql": "ALTER TABLE users DROP COLUMN email;", + "type": "table.column", + 
"operation": "drop", + "path": "public.users.email" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/issue_281_exclude_constraint/plan.json b/testdata/diff/create_table/issue_281_exclude_constraint/plan.json index d4ab5e8c..71d423d4 100644 --- a/testdata/diff/create_table/issue_281_exclude_constraint/plan.json +++ b/testdata/diff/create_table/issue_281_exclude_constraint/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "8c02dcded896566d3e27f6ce69aea524eed71de4ee798e3bc5e0e21dda5be9d6" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "8c02dcded896566d3e27f6ce69aea524eed71de4ee798e3bc5e0e21dda5be9d6" + }, + "groups": [ { - "sql": "ALTER TABLE test_table\nADD CONSTRAINT excl_no_overlap EXCLUDE USING gist (range_col WITH &&);", - "type": "table.constraint", - "operation": "create", - "path": "public.test_table.excl_no_overlap" + "steps": [ + { + "sql": "ALTER TABLE test_table\nADD CONSTRAINT excl_no_overlap EXCLUDE USING gist (range_col WITH &&);", + "type": "table.constraint", + "operation": "create", + "path": "public.test_table.excl_no_overlap" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/issue_283_function_default_schema_qualifier/plan.json b/testdata/diff/create_table/issue_283_function_default_schema_qualifier/plan.json index 9850800f..bc7e4e07 100644 --- a/testdata/diff/create_table/issue_283_function_default_schema_qualifier/plan.json +++ b/testdata/diff/create_table/issue_283_function_default_schema_qualifier/plan.json @@ -2,8 +2,12 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "420f93fc4d28a108b8b2b941d4745546a8620946e502e07311b540803d2bff1f" - }, - "groups": null + "schemas": { + "public": { + "source_fingerprint": { + "hash": "420f93fc4d28a108b8b2b941d4745546a8620946e502e07311b540803d2bff1f" + 
}, + "groups": null + } + } } diff --git a/testdata/diff/create_table/issue_295_pgvector_typmod/plan.json b/testdata/diff/create_table/issue_295_pgvector_typmod/plan.json index d6330d56..a9c013a2 100644 --- a/testdata/diff/create_table/issue_295_pgvector_typmod/plan.json +++ b/testdata/diff/create_table/issue_295_pgvector_typmod/plan.json @@ -2,43 +2,47 @@ "version": "1.0.0", "pgschema_version": "1.7.1", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "0000000000000000000000000000000000000000000000000000000000000000" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "0000000000000000000000000000000000000000000000000000000000000000" + }, + "groups": [ { - "sql": "ALTER TABLE activity ADD COLUMN embedding halfvec(384);", - "type": "table.column", - "operation": "create", - "path": "public.activity.embedding" - } - ] - }, - { - "steps": [ + "steps": [ + { + "sql": "ALTER TABLE activity ADD COLUMN embedding halfvec(384);", + "type": "table.column", + "operation": "create", + "path": "public.activity.embedding" + } + ] + }, { - "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS activity_embedding_idx ON activity USING hnsw (embedding halfvec_cosine_ops);", - "type": "table.index", - "operation": "create", - "path": "public.activity.activity_embedding_idx" - } - ] - }, - { - "steps": [ + "steps": [ + { + "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS activity_embedding_idx ON activity USING hnsw (embedding halfvec_cosine_ops);", + "type": "table.index", + "operation": "create", + "path": "public.activity.activity_embedding_idx" + } + ] + }, { - "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'activity_embedding_idx';", - "directive": { - 
"type": "wait", - "message": "Creating index activity_embedding_idx" - }, - "type": "table.index", - "operation": "create", - "path": "public.activity.activity_embedding_idx" + "steps": [ + { + "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'activity_embedding_idx';", + "directive": { + "type": "wait", + "message": "Creating index activity_embedding_idx" + }, + "type": "table.index", + "operation": "create", + "path": "public.activity.activity_embedding_idx" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/issue_382_drop_table_cascade_constraint/plan.json b/testdata/diff/create_table/issue_382_drop_table_cascade_constraint/plan.json index 421f4566..a4f4f3a3 100644 --- a/testdata/diff/create_table/issue_382_drop_table_cascade_constraint/plan.json +++ b/testdata/diff/create_table/issue_382_drop_table_cascade_constraint/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "0a32199426efead5dff4059c2fcb9eb77a49677202c5d381c43f98c2d73e54d9" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "0a32199426efead5dff4059c2fcb9eb77a49677202c5d381c43f98c2d73e54d9" + }, + "groups": [ { - "sql": "DROP TABLE IF EXISTS b CASCADE;", - "type": "table", - "operation": "drop", - "path": "public.b" - }, - { - "sql": "ALTER TABLE a DROP COLUMN b_id;", - "type": "table.column", - "operation": "drop", - "path": "public.a.b_id" + "steps": [ + { + "sql": "DROP TABLE IF EXISTS b CASCADE;", + "type": "table", + "operation": "drop", + "path": "public.b" + }, + { + "sql": "ALTER TABLE a DROP COLUMN b_id;", + "type": "table.column", + "operation": "drop", + "path": 
"public.a.b_id" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/issue_384_rename_column_constraint/plan.json b/testdata/diff/create_table/issue_384_rename_column_constraint/plan.json index dd166540..8361d671 100644 --- a/testdata/diff/create_table/issue_384_rename_column_constraint/plan.json +++ b/testdata/diff/create_table/issue_384_rename_column_constraint/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "7fcec59d1a0d260fce77ca55af1cad30ebc8768996825647c4002962d1a8f8ee" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "7fcec59d1a0d260fce77ca55af1cad30ebc8768996825647c4002962d1a8f8ee" + }, + "groups": [ { - "sql": "ALTER TABLE a DROP COLUMN revision;", - "type": "table.column", - "operation": "drop", - "path": "public.a.revision" - }, - { - "sql": "ALTER TABLE a ADD COLUMN current_revision bigint;", - "type": "table.column", - "operation": "create", - "path": "public.a.current_revision" - }, - { - "sql": "ALTER TABLE a\nADD CONSTRAINT a_revision_fkey FOREIGN KEY (current_revision) REFERENCES b (id) NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.a.a_revision_fkey" - }, - { - "sql": "ALTER TABLE a VALIDATE CONSTRAINT a_revision_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.a.a_revision_fkey" + "steps": [ + { + "sql": "ALTER TABLE a DROP COLUMN revision;", + "type": "table.column", + "operation": "drop", + "path": "public.a.revision" + }, + { + "sql": "ALTER TABLE a ADD COLUMN current_revision bigint;", + "type": "table.column", + "operation": "create", + "path": "public.a.current_revision" + }, + { + "sql": "ALTER TABLE a\nADD CONSTRAINT a_revision_fkey FOREIGN KEY (current_revision) REFERENCES b (id) NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.a.a_revision_fkey" + }, + { + "sql": 
"ALTER TABLE a VALIDATE CONSTRAINT a_revision_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.a.a_revision_fkey" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/remove_not_null/plan.json b/testdata/diff/create_table/remove_not_null/plan.json index bea4e91d..7cdf0630 100644 --- a/testdata/diff/create_table/remove_not_null/plan.json +++ b/testdata/diff/create_table/remove_not_null/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "ad30a66c1484efacafca59487ecb37827ecf2fb84d50c08d37e76bfccffa22fd" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "ad30a66c1484efacafca59487ecb37827ecf2fb84d50c08d37e76bfccffa22fd" + }, + "groups": [ { - "sql": "ALTER TABLE employees ALTER COLUMN name DROP NOT NULL;", - "type": "table.column", - "operation": "alter", - "path": "public.employees.name" + "steps": [ + { + "sql": "ALTER TABLE employees ALTER COLUMN name DROP NOT NULL;", + "type": "table.column", + "operation": "alter", + "path": "public.employees.name" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/set_logged/plan.json b/testdata/diff/create_table/set_logged/plan.json index d6c8e492..6bf6af26 100644 --- a/testdata/diff/create_table/set_logged/plan.json +++ b/testdata/diff/create_table/set_logged/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "b465915d72f5be87a29cb98e6808e1cb91db1525bbb7e17775df4774cd0737da" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "b465915d72f5be87a29cb98e6808e1cb91db1525bbb7e17775df4774cd0737da" + }, + "groups": [ { - "sql": "ALTER TABLE events SET LOGGED;", - "type": "table.persistence", - "operation": "alter", - "path": "public.events" + "steps": [ + { + "sql": "ALTER TABLE events 
SET LOGGED;", + "type": "table.persistence", + "operation": "alter", + "path": "public.events" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_table/set_unlogged/plan.json b/testdata/diff/create_table/set_unlogged/plan.json index 3de30970..df3a1c2d 100644 --- a/testdata/diff/create_table/set_unlogged/plan.json +++ b/testdata/diff/create_table/set_unlogged/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "cb84b30ef73ee96caeeef3901be4354d9f8aecf303f0fcb9ae70618c1d106761" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "cb84b30ef73ee96caeeef3901be4354d9f8aecf303f0fcb9ae70618c1d106761" + }, + "groups": [ { - "sql": "ALTER TABLE events SET UNLOGGED;", - "type": "table.persistence", - "operation": "alter", - "path": "public.events" + "steps": [ + { + "sql": "ALTER TABLE events SET UNLOGGED;", + "type": "table.persistence", + "operation": "alter", + "path": "public.events" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_trigger/add_trigger/plan.json b/testdata/diff/create_trigger/add_trigger/plan.json index 2df62ca4..6a50579e 100644 --- a/testdata/diff/create_trigger/add_trigger/plan.json +++ b/testdata/diff/create_trigger/add_trigger/plan.json @@ -2,43 +2,47 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "71591928eb9f4b89bfdb9fd291db500f36cf140bf6002a62c89de4a6c4d016be" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "71591928eb9f4b89bfdb9fd291db500f36cf140bf6002a62c89de4a6c4d016be" + }, + "groups": [ { - "sql": "CREATE OR REPLACE TRIGGER employees_insert_timestamp_trigger\n AFTER INSERT ON employees\n FOR EACH ROW\n EXECUTE FUNCTION update_last_modified();", - "type": "table.trigger", - "operation": "create", - "path": 
"public.employees.employees_insert_timestamp_trigger" - }, - { - "sql": "CREATE OR REPLACE TRIGGER employees_last_modified_trigger\n BEFORE UPDATE ON employees\n FOR EACH ROW\n EXECUTE FUNCTION update_last_modified();", - "type": "table.trigger", - "operation": "create", - "path": "public.employees.employees_last_modified_trigger" - }, - { - "sql": "CREATE OR REPLACE TRIGGER employees_salary_update_trigger\n BEFORE UPDATE OF salary ON employees\n FOR EACH ROW\n EXECUTE FUNCTION update_last_modified();", - "type": "table.trigger", - "operation": "create", - "path": "public.employees.employees_salary_update_trigger" - }, - { - "sql": "CREATE OR REPLACE TRIGGER employees_truncate_log_trigger\n AFTER TRUNCATE ON employees\n FOR EACH STATEMENT\n EXECUTE FUNCTION update_last_modified();", - "type": "table.trigger", - "operation": "create", - "path": "public.employees.employees_truncate_log_trigger" - }, - { - "sql": "CREATE OR REPLACE TRIGGER trg_employee_emails_insert\n INSTEAD OF INSERT ON employee_emails\n FOR EACH ROW\n EXECUTE FUNCTION insert_employee_emails();", - "type": "view.trigger", - "operation": "create", - "path": "public.employee_emails.trg_employee_emails_insert" + "steps": [ + { + "sql": "CREATE OR REPLACE TRIGGER employees_insert_timestamp_trigger\n AFTER INSERT ON employees\n FOR EACH ROW\n EXECUTE FUNCTION update_last_modified();", + "type": "table.trigger", + "operation": "create", + "path": "public.employees.employees_insert_timestamp_trigger" + }, + { + "sql": "CREATE OR REPLACE TRIGGER employees_last_modified_trigger\n BEFORE UPDATE ON employees\n FOR EACH ROW\n EXECUTE FUNCTION update_last_modified();", + "type": "table.trigger", + "operation": "create", + "path": "public.employees.employees_last_modified_trigger" + }, + { + "sql": "CREATE OR REPLACE TRIGGER employees_salary_update_trigger\n BEFORE UPDATE OF salary ON employees\n FOR EACH ROW\n EXECUTE FUNCTION update_last_modified();", + "type": "table.trigger", + "operation": "create", + 
"path": "public.employees.employees_salary_update_trigger" + }, + { + "sql": "CREATE OR REPLACE TRIGGER employees_truncate_log_trigger\n AFTER TRUNCATE ON employees\n FOR EACH STATEMENT\n EXECUTE FUNCTION update_last_modified();", + "type": "table.trigger", + "operation": "create", + "path": "public.employees.employees_truncate_log_trigger" + }, + { + "sql": "CREATE OR REPLACE TRIGGER trg_employee_emails_insert\n INSTEAD OF INSERT ON employee_emails\n FOR EACH ROW\n EXECUTE FUNCTION insert_employee_emails();", + "type": "view.trigger", + "operation": "create", + "path": "public.employee_emails.trg_employee_emails_insert" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_trigger/add_trigger_constraint/plan.json b/testdata/diff/create_trigger/add_trigger_constraint/plan.json index 206a4d5b..aafb5b2c 100644 --- a/testdata/diff/create_trigger/add_trigger_constraint/plan.json +++ b/testdata/diff/create_trigger/add_trigger_constraint/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "a47459950b6b41998921f1284295631251e890d54891c8ab378c1d3c7c7abff4" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "a47459950b6b41998921f1284295631251e890d54891c8ab378c1d3c7c7abff4" + }, + "groups": [ { - "sql": "CREATE CONSTRAINT TRIGGER prevent_code_update_trigger\n AFTER UPDATE ON products\n DEFERRABLE INITIALLY IMMEDIATE\n FOR EACH ROW\n EXECUTE FUNCTION prevent_code_update();", - "type": "table.trigger", - "operation": "create", - "path": "public.products.prevent_code_update_trigger" + "steps": [ + { + "sql": "CREATE CONSTRAINT TRIGGER prevent_code_update_trigger\n AFTER UPDATE ON products\n DEFERRABLE INITIALLY IMMEDIATE\n FOR EACH ROW\n EXECUTE FUNCTION prevent_code_update();", + "type": "table.trigger", + "operation": "create", + "path": "public.products.prevent_code_update_trigger" + } + ] } ] } - ] + } } diff --git 
a/testdata/diff/create_trigger/add_trigger_old_table/plan.json b/testdata/diff/create_trigger/add_trigger_old_table/plan.json index 02594c80..c6c30c7b 100644 --- a/testdata/diff/create_trigger/add_trigger_old_table/plan.json +++ b/testdata/diff/create_trigger/add_trigger_old_table/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "c72c98ff78c98bf6d04e41698ea8be67e045350586e0286ec945980d70cf72f3" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "c72c98ff78c98bf6d04e41698ea8be67e045350586e0286ec945980d70cf72f3" + }, + "groups": [ { - "sql": "CREATE OR REPLACE TRIGGER orders_delete_trigger\n AFTER DELETE ON orders\n REFERENCING OLD TABLE AS old_orders\n FOR EACH STATEMENT\n EXECUTE FUNCTION archive_deleted_orders();", - "type": "table.trigger", - "operation": "create", - "path": "public.orders.orders_delete_trigger" + "steps": [ + { + "sql": "CREATE OR REPLACE TRIGGER orders_delete_trigger\n AFTER DELETE ON orders\n REFERENCING OLD TABLE AS old_orders\n FOR EACH STATEMENT\n EXECUTE FUNCTION archive_deleted_orders();", + "type": "table.trigger", + "operation": "create", + "path": "public.orders.orders_delete_trigger" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_trigger/add_trigger_system_catalog/plan.json b/testdata/diff/create_trigger/add_trigger_system_catalog/plan.json index f3a02b40..ab8265db 100644 --- a/testdata/diff/create_trigger/add_trigger_system_catalog/plan.json +++ b/testdata/diff/create_trigger/add_trigger_system_catalog/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "43d9524fe7d4f30f7e7dc91616162b92fb4833343be7ac06d3c35f9cb60ccc40" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": 
"43d9524fe7d4f30f7e7dc91616162b92fb4833343be7ac06d3c35f9cb60ccc40" + }, + "groups": [ { - "sql": "CREATE OR REPLACE TRIGGER employees_update_check\n BEFORE UPDATE ON employees\n FOR EACH ROW\n EXECUTE FUNCTION suppress_redundant_updates_trigger();", - "type": "table.trigger", - "operation": "create", - "path": "public.employees.employees_update_check" + "steps": [ + { + "sql": "CREATE OR REPLACE TRIGGER employees_update_check\n BEFORE UPDATE ON employees\n FOR EACH ROW\n EXECUTE FUNCTION suppress_redundant_updates_trigger();", + "type": "table.trigger", + "operation": "create", + "path": "public.employees.employees_update_check" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_trigger/add_trigger_when_distinct/plan.json b/testdata/diff/create_trigger/add_trigger_when_distinct/plan.json index 61e1c602..2415f8b0 100644 --- a/testdata/diff/create_trigger/add_trigger_when_distinct/plan.json +++ b/testdata/diff/create_trigger/add_trigger_when_distinct/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "29cdffe034879897517bf2048a11e13b3fb93bd0247072f19ab67cff58cd4ae5" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "29cdffe034879897517bf2048a11e13b3fb93bd0247072f19ab67cff58cd4ae5" + }, + "groups": [ { - "sql": "CREATE OR REPLACE TRIGGER products_description_trigger\n BEFORE UPDATE ON products\n FOR EACH ROW\n WHEN (((NEW.description IS DISTINCT FROM OLD.description)))\n EXECUTE FUNCTION log_description_change();", - "type": "table.trigger", - "operation": "create", - "path": "public.products.products_description_trigger" - }, - { - "sql": "CREATE OR REPLACE TRIGGER products_status_trigger\n BEFORE UPDATE ON products\n FOR EACH ROW\n WHEN (((NEW.status IS NOT DISTINCT FROM OLD.status)))\n EXECUTE FUNCTION skip_status_change();", - "type": "table.trigger", - "operation": "create", - "path": 
"public.products.products_status_trigger" + "steps": [ + { + "sql": "CREATE OR REPLACE TRIGGER products_description_trigger\n BEFORE UPDATE ON products\n FOR EACH ROW\n WHEN (((NEW.description IS DISTINCT FROM OLD.description)))\n EXECUTE FUNCTION log_description_change();", + "type": "table.trigger", + "operation": "create", + "path": "public.products.products_description_trigger" + }, + { + "sql": "CREATE OR REPLACE TRIGGER products_status_trigger\n BEFORE UPDATE ON products\n FOR EACH ROW\n WHEN (((NEW.status IS NOT DISTINCT FROM OLD.status)))\n EXECUTE FUNCTION skip_status_change();", + "type": "table.trigger", + "operation": "create", + "path": "public.products.products_status_trigger" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_trigger/alter_trigger/plan.json b/testdata/diff/create_trigger/alter_trigger/plan.json index 34fcdb34..b8e62657 100644 --- a/testdata/diff/create_trigger/alter_trigger/plan.json +++ b/testdata/diff/create_trigger/alter_trigger/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "e80a9a1556e42c582c28840b5597b70cc7ee51f34627e869efd7b5d1ed2a2c49" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "e80a9a1556e42c582c28840b5597b70cc7ee51f34627e869efd7b5d1ed2a2c49" + }, + "groups": [ { - "sql": "CREATE OR REPLACE TRIGGER employees_last_modified_trigger\n BEFORE INSERT OR UPDATE OF salary ON employees\n FOR EACH ROW\n WHEN (((NEW.salary IS NOT NULL)))\n EXECUTE FUNCTION update_last_modified();", - "type": "table.trigger", - "operation": "alter", - "path": "public.employees.employees_last_modified_trigger" + "steps": [ + { + "sql": "CREATE OR REPLACE TRIGGER employees_last_modified_trigger\n BEFORE INSERT OR UPDATE OF salary ON employees\n FOR EACH ROW\n WHEN (((NEW.salary IS NOT NULL)))\n EXECUTE FUNCTION update_last_modified();", + "type": "table.trigger", + "operation": "alter", + 
"path": "public.employees.employees_last_modified_trigger" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_trigger/drop_trigger/plan.json b/testdata/diff/create_trigger/drop_trigger/plan.json index f93302cf..44179df5 100644 --- a/testdata/diff/create_trigger/drop_trigger/plan.json +++ b/testdata/diff/create_trigger/drop_trigger/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "e80a9a1556e42c582c28840b5597b70cc7ee51f34627e869efd7b5d1ed2a2c49" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "e80a9a1556e42c582c28840b5597b70cc7ee51f34627e869efd7b5d1ed2a2c49" + }, + "groups": [ { - "sql": "DROP TRIGGER IF EXISTS employees_last_modified_trigger ON employees;", - "type": "table.trigger", - "operation": "drop", - "path": "public.employees.employees_last_modified_trigger" + "steps": [ + { + "sql": "DROP TRIGGER IF EXISTS employees_last_modified_trigger ON employees;", + "type": "table.trigger", + "operation": "drop", + "path": "public.employees.employees_last_modified_trigger" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_type/add_type/plan.json b/testdata/diff/create_type/add_type/plan.json index 1c698e0a..6c0113c8 100644 --- a/testdata/diff/create_type/add_type/plan.json +++ b/testdata/diff/create_type/add_type/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "fbbc3bde12e51ab79c0bc379f1ffea7adea61a422b411965728218795aff819d" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "fbbc3bde12e51ab79c0bc379f1ffea7adea61a422b411965728218795aff819d" + }, + "groups": [ { - "sql": "ALTER TYPE status ADD VALUE 'archived' AFTER 'pending';", - "type": "type", - "operation": "alter", - "path": "public.status" + "steps": [ + { + "sql": "ALTER TYPE status ADD VALUE 
'archived' AFTER 'pending';", + "type": "type", + "operation": "alter", + "path": "public.status" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_type/add_value/plan.json b/testdata/diff/create_type/add_value/plan.json index 879ebf5a..1ea39a18 100644 --- a/testdata/diff/create_type/add_value/plan.json +++ b/testdata/diff/create_type/add_value/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TYPE status AS ENUM (\n 'active',\n 'inactive',\n 'pending'\n);", - "type": "type", - "operation": "create", - "path": "public.status" + "steps": [ + { + "sql": "CREATE TYPE status AS ENUM (\n 'active',\n 'inactive',\n 'pending'\n);", + "type": "type", + "operation": "create", + "path": "public.status" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_type/drop_type/plan.json b/testdata/diff/create_type/drop_type/plan.json index 7e1e7d21..4eb51629 100644 --- a/testdata/diff/create_type/drop_type/plan.json +++ b/testdata/diff/create_type/drop_type/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "7739601f0084f192ccf2d01e2d6602820b2f4e39b5acdd7c4dd14c6d3b8a3da1" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "7739601f0084f192ccf2d01e2d6602820b2f4e39b5acdd7c4dd14c6d3b8a3da1" + }, + "groups": [ { - "sql": "DROP TYPE IF EXISTS priority RESTRICT;", - "type": "type", - "operation": "drop", - "path": "public.priority" + "steps": [ + { + "sql": "DROP TYPE IF EXISTS priority RESTRICT;", + "type": "type", + "operation": "drop", + "path": 
"public.priority" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_view/add_view/plan.json b/testdata/diff/create_view/add_view/plan.json index 35ca3078..151cb500 100644 --- a/testdata/diff/create_view/add_view/plan.json +++ b/testdata/diff/create_view/add_view/plan.json @@ -2,49 +2,53 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "cee0753877527b3ec8028ecd1b41ab73dae6c726a91f3efdde66d8569e6fb103" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "cee0753877527b3ec8028ecd1b41ab73dae6c726a91f3efdde66d8569e6fb103" + }, + "groups": [ { - "sql": "CREATE OR REPLACE VIEW array_operators_view AS\n SELECT id,\n priority,\n CASE\n WHEN priority = ANY (ARRAY[10, 20, 30]) THEN 'matched'::text\n ELSE 'not_matched'::text\n END AS equal_any_test,\n CASE\n WHEN priority > ANY (ARRAY[10, 20, 30]) THEN 'high'::text\n ELSE 'low'::text\n END AS greater_any_test,\n CASE\n WHEN priority < ANY (ARRAY[5, 15, 25]) THEN 'found_lower'::text\n ELSE 'all_higher'::text\n END AS less_any_test,\n CASE\n WHEN priority <> ANY (ARRAY[1, 2, 3]) THEN 'different'::text\n ELSE 'same'::text\n END AS not_equal_any_test\n FROM employees;", - "type": "view", - "operation": "create", - "path": "public.array_operators_view" - }, - { - "sql": "CREATE OR REPLACE VIEW cte_with_case_view AS\n WITH monthly_stats AS (\n SELECT date_trunc('month'::text, CURRENT_DATE - ((n.n || ' months'::text)::interval)) AS month_start,\n n.n AS month_offset\n FROM generate_series(0, 11) n(n)\n ), employee_summary AS (\n SELECT employees.department_id,\n count(*) AS employee_count,\n avg(employees.priority) AS avg_priority\n FROM employees\n WHERE employees.status::text = 'active'::text\n GROUP BY employees.department_id\n )\n SELECT ms.month_start,\n ms.month_offset,\n d.name AS department_name,\n COALESCE(es.employee_count, 0::bigint) AS employee_count,\n CASE\n WHEN es.avg_priority > 
50::numeric THEN 'high'::text\n WHEN es.avg_priority > 25::numeric THEN 'medium'::text\n WHEN es.avg_priority IS NOT NULL THEN 'low'::text\n ELSE 'no_data'::text\n END AS priority_level,\n CASE\n WHEN ms.month_offset = 0 THEN 'current'::text\n WHEN ms.month_offset <= 3 THEN 'recent'::text\n ELSE 'historical'::text\n END AS period_type\n FROM monthly_stats ms\n CROSS JOIN departments d\n LEFT JOIN employee_summary es ON d.id = es.department_id\n ORDER BY ms.month_start DESC, d.name;", - "type": "view", - "operation": "create", - "path": "public.cte_with_case_view" - }, - { - "sql": "CREATE OR REPLACE VIEW nullif_functions_view AS\n SELECT e.id,\n e.name AS employee_name,\n d.name AS department_name,\n (e.priority - d.manager_id) / NULLIF(d.manager_id, 0) AS priority_ratio,\n NULLIF(e.status::text, 'inactive'::text) AS active_status,\n NULLIF(e.email::text, ''::text) AS valid_email,\n GREATEST(e.priority, 0) AS min_priority,\n LEAST(e.priority, 100) AS max_priority,\n GREATEST(e.id, d.id, e.department_id) AS max_id,\n CASE\n WHEN NULLIF(e.department_id, 0) IS NOT NULL THEN 'assigned'::text\n ELSE 'unassigned'::text\n END AS assignment_status\n FROM employees e\n JOIN departments d USING (id)\n WHERE e.priority > 0;", - "type": "view", - "operation": "create", - "path": "public.nullif_functions_view" - }, - { - "sql": "CREATE OR REPLACE VIEW secure_employee_view WITH (security_invoker=true) AS\n SELECT id,\n name,\n email,\n status\n FROM employees\n WHERE status::text = 'active'::text;", - "type": "view", - "operation": "create", - "path": "public.secure_employee_view" - }, - { - "sql": "CREATE OR REPLACE VIEW text_search_view AS\n SELECT id,\n COALESCE((first_name::text || ' '::text) || last_name::text, 'Anonymous'::text) AS display_name,\n COALESCE(email, ''::character varying) AS email,\n COALESCE(bio, 'No description available'::text) AS description,\n to_tsvector('english'::regconfig, (((COALESCE(first_name, ''::character varying)::text || ' '::text) || 
COALESCE(last_name, ''::character varying)::text) || ' '::text) || COALESCE(bio, ''::text)) AS search_vector\n FROM employees\n WHERE status::text = 'active'::text;", - "type": "view", - "operation": "create", - "path": "public.text_search_view" - }, - { - "sql": "CREATE OR REPLACE VIEW union_subquery_view AS\n SELECT id,\n name,\n source_type,\n CASE\n WHEN source_type = 'employee'::text THEN 'active'::text\n WHEN source_type = 'department'::text THEN 'organizational'::text\n ELSE 'unknown'::text\n END AS category\n FROM ((\n SELECT employees.id,\n employees.name,\n 'employee'::text AS source_type\n FROM employees\n WHERE employees.status::text = 'active'::text\n UNION\n SELECT departments.id,\n departments.name,\n 'department'::text AS source_type\n FROM departments\n WHERE departments.manager_id IS NOT NULL\n ) UNION ALL\n SELECT employees.id,\n COALESCE((employees.first_name::text || ' '::text) || employees.last_name::text, employees.name::text) AS name,\n 'employee_full'::text AS source_type\n FROM employees\n WHERE employees.priority > 10) combined_data\n WHERE id IS NOT NULL\n ORDER BY source_type, id;", - "type": "view", - "operation": "create", - "path": "public.union_subquery_view" + "steps": [ + { + "sql": "CREATE OR REPLACE VIEW array_operators_view AS\n SELECT id,\n priority,\n CASE\n WHEN priority = ANY (ARRAY[10, 20, 30]) THEN 'matched'::text\n ELSE 'not_matched'::text\n END AS equal_any_test,\n CASE\n WHEN priority > ANY (ARRAY[10, 20, 30]) THEN 'high'::text\n ELSE 'low'::text\n END AS greater_any_test,\n CASE\n WHEN priority < ANY (ARRAY[5, 15, 25]) THEN 'found_lower'::text\n ELSE 'all_higher'::text\n END AS less_any_test,\n CASE\n WHEN priority <> ANY (ARRAY[1, 2, 3]) THEN 'different'::text\n ELSE 'same'::text\n END AS not_equal_any_test\n FROM employees;", + "type": "view", + "operation": "create", + "path": "public.array_operators_view" + }, + { + "sql": "CREATE OR REPLACE VIEW cte_with_case_view AS\n WITH monthly_stats AS (\n SELECT 
date_trunc('month'::text, CURRENT_DATE - ((n.n || ' months'::text)::interval)) AS month_start,\n n.n AS month_offset\n FROM generate_series(0, 11) n(n)\n ), employee_summary AS (\n SELECT employees.department_id,\n count(*) AS employee_count,\n avg(employees.priority) AS avg_priority\n FROM employees\n WHERE employees.status::text = 'active'::text\n GROUP BY employees.department_id\n )\n SELECT ms.month_start,\n ms.month_offset,\n d.name AS department_name,\n COALESCE(es.employee_count, 0::bigint) AS employee_count,\n CASE\n WHEN es.avg_priority > 50::numeric THEN 'high'::text\n WHEN es.avg_priority > 25::numeric THEN 'medium'::text\n WHEN es.avg_priority IS NOT NULL THEN 'low'::text\n ELSE 'no_data'::text\n END AS priority_level,\n CASE\n WHEN ms.month_offset = 0 THEN 'current'::text\n WHEN ms.month_offset <= 3 THEN 'recent'::text\n ELSE 'historical'::text\n END AS period_type\n FROM monthly_stats ms\n CROSS JOIN departments d\n LEFT JOIN employee_summary es ON d.id = es.department_id\n ORDER BY ms.month_start DESC, d.name;", + "type": "view", + "operation": "create", + "path": "public.cte_with_case_view" + }, + { + "sql": "CREATE OR REPLACE VIEW nullif_functions_view AS\n SELECT e.id,\n e.name AS employee_name,\n d.name AS department_name,\n (e.priority - d.manager_id) / NULLIF(d.manager_id, 0) AS priority_ratio,\n NULLIF(e.status::text, 'inactive'::text) AS active_status,\n NULLIF(e.email::text, ''::text) AS valid_email,\n GREATEST(e.priority, 0) AS min_priority,\n LEAST(e.priority, 100) AS max_priority,\n GREATEST(e.id, d.id, e.department_id) AS max_id,\n CASE\n WHEN NULLIF(e.department_id, 0) IS NOT NULL THEN 'assigned'::text\n ELSE 'unassigned'::text\n END AS assignment_status\n FROM employees e\n JOIN departments d USING (id)\n WHERE e.priority > 0;", + "type": "view", + "operation": "create", + "path": "public.nullif_functions_view" + }, + { + "sql": "CREATE OR REPLACE VIEW secure_employee_view WITH (security_invoker=true) AS\n SELECT id,\n name,\n email,\n 
status\n FROM employees\n WHERE status::text = 'active'::text;", + "type": "view", + "operation": "create", + "path": "public.secure_employee_view" + }, + { + "sql": "CREATE OR REPLACE VIEW text_search_view AS\n SELECT id,\n COALESCE((first_name::text || ' '::text) || last_name::text, 'Anonymous'::text) AS display_name,\n COALESCE(email, ''::character varying) AS email,\n COALESCE(bio, 'No description available'::text) AS description,\n to_tsvector('english'::regconfig, (((COALESCE(first_name, ''::character varying)::text || ' '::text) || COALESCE(last_name, ''::character varying)::text) || ' '::text) || COALESCE(bio, ''::text)) AS search_vector\n FROM employees\n WHERE status::text = 'active'::text;", + "type": "view", + "operation": "create", + "path": "public.text_search_view" + }, + { + "sql": "CREATE OR REPLACE VIEW union_subquery_view AS\n SELECT id,\n name,\n source_type,\n CASE\n WHEN source_type = 'employee'::text THEN 'active'::text\n WHEN source_type = 'department'::text THEN 'organizational'::text\n ELSE 'unknown'::text\n END AS category\n FROM ((\n SELECT employees.id,\n employees.name,\n 'employee'::text AS source_type\n FROM employees\n WHERE employees.status::text = 'active'::text\n UNION\n SELECT departments.id,\n departments.name,\n 'department'::text AS source_type\n FROM departments\n WHERE departments.manager_id IS NOT NULL\n ) UNION ALL\n SELECT employees.id,\n COALESCE((employees.first_name::text || ' '::text) || employees.last_name::text, employees.name::text) AS name,\n 'employee_full'::text AS source_type\n FROM employees\n WHERE employees.priority > 10) combined_data\n WHERE id IS NOT NULL\n ORDER BY source_type, id;", + "type": "view", + "operation": "create", + "path": "public.union_subquery_view" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_view/add_view_join/plan.json b/testdata/diff/create_view/add_view_join/plan.json index ed51e2f6..769e2665 100644 --- a/testdata/diff/create_view/add_view_join/plan.json +++ 
b/testdata/diff/create_view/add_view_join/plan.json @@ -2,43 +2,47 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "13b0da782fac53005595f43139ce2283283f1bc1fa326ee2c92c4629875253c6" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "13b0da782fac53005595f43139ce2283283f1bc1fa326ee2c92c4629875253c6" + }, + "groups": [ { - "sql": "CREATE OR REPLACE VIEW all_departments_with_emp AS\n SELECT d.id,\n d.name AS dept_name,\n e.name AS emp_name\n FROM employees e\n RIGHT JOIN departments d ON e.department_id = d.id;", - "type": "view", - "operation": "create", - "path": "public.all_departments_with_emp" - }, - { - "sql": "CREATE OR REPLACE VIEW all_employees_with_dept AS\n SELECT e.id,\n e.name,\n d.name AS dept_name\n FROM employees e\n LEFT JOIN departments d ON e.department_id = d.id;", - "type": "view", - "operation": "create", - "path": "public.all_employees_with_dept" - }, - { - "sql": "CREATE OR REPLACE VIEW complete_employee_dept AS\n SELECT e.id AS emp_id,\n e.name AS emp_name,\n d.id AS dept_id,\n d.name AS dept_name\n FROM employees e\n FULL JOIN departments d ON e.department_id = d.id;", - "type": "view", - "operation": "create", - "path": "public.complete_employee_dept" - }, - { - "sql": "CREATE OR REPLACE VIEW employee_department_view AS\n SELECT e.id AS employee_id,\n e.name AS employee_name,\n d.name AS department_name,\n d.location\n FROM employees e\n JOIN departments d ON e.department_id = d.id;", - "type": "view", - "operation": "create", - "path": "public.employee_department_view" - }, - { - "sql": "CREATE OR REPLACE VIEW employee_dept_cross AS\n SELECT e.name AS employee_name,\n d.name AS department_name\n FROM employees e\n CROSS JOIN departments d;", - "type": "view", - "operation": "create", - "path": "public.employee_dept_cross" + "steps": [ + { + "sql": "CREATE OR REPLACE VIEW all_departments_with_emp AS\n SELECT d.id,\n 
d.name AS dept_name,\n e.name AS emp_name\n FROM employees e\n RIGHT JOIN departments d ON e.department_id = d.id;", + "type": "view", + "operation": "create", + "path": "public.all_departments_with_emp" + }, + { + "sql": "CREATE OR REPLACE VIEW all_employees_with_dept AS\n SELECT e.id,\n e.name,\n d.name AS dept_name\n FROM employees e\n LEFT JOIN departments d ON e.department_id = d.id;", + "type": "view", + "operation": "create", + "path": "public.all_employees_with_dept" + }, + { + "sql": "CREATE OR REPLACE VIEW complete_employee_dept AS\n SELECT e.id AS emp_id,\n e.name AS emp_name,\n d.id AS dept_id,\n d.name AS dept_name\n FROM employees e\n FULL JOIN departments d ON e.department_id = d.id;", + "type": "view", + "operation": "create", + "path": "public.complete_employee_dept" + }, + { + "sql": "CREATE OR REPLACE VIEW employee_department_view AS\n SELECT e.id AS employee_id,\n e.name AS employee_name,\n d.name AS department_name,\n d.location\n FROM employees e\n JOIN departments d ON e.department_id = d.id;", + "type": "view", + "operation": "create", + "path": "public.employee_department_view" + }, + { + "sql": "CREATE OR REPLACE VIEW employee_dept_cross AS\n SELECT e.name AS employee_name,\n d.name AS department_name\n FROM employees e\n CROSS JOIN departments d;", + "type": "view", + "operation": "create", + "path": "public.employee_dept_cross" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_view/alter_view/plan.json b/testdata/diff/create_view/alter_view/plan.json index 9bbe0e13..85aa2fd8 100644 --- a/testdata/diff/create_view/alter_view/plan.json +++ b/testdata/diff/create_view/alter_view/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "e416c57a7e97843652d92d544f409ff025128ce45358273b5bffd768c6f29cc7" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": 
"e416c57a7e97843652d92d544f409ff025128ce45358273b5bffd768c6f29cc7" + }, + "groups": [ { - "sql": "CREATE OR REPLACE VIEW active_employees AS\n SELECT status,\n count(*) AS employee_count,\n avg(salary) AS avg_salary\n FROM employees\n WHERE status::text = 'active'::text\n GROUP BY status\n HAVING avg(salary) > 50000::numeric\n ORDER BY (count(*)), (avg(salary)) DESC;", - "type": "view", - "operation": "alter", - "path": "public.active_employees" + "steps": [ + { + "sql": "CREATE OR REPLACE VIEW active_employees AS\n SELECT status,\n count(*) AS employee_count,\n avg(salary) AS avg_salary\n FROM employees\n WHERE status::text = 'active'::text\n GROUP BY status\n HAVING avg(salary) > 50000::numeric\n ORDER BY (count(*)), (avg(salary)) DESC;", + "type": "view", + "operation": "alter", + "path": "public.active_employees" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_view/drop_view/plan.json b/testdata/diff/create_view/drop_view/plan.json index 90bb148d..7c9ade6f 100644 --- a/testdata/diff/create_view/drop_view/plan.json +++ b/testdata/diff/create_view/drop_view/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "5bddbd174fba7e0c39fb1ffbea0f8ca185f676b54286b18d5cf7350d81c4224a" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "5bddbd174fba7e0c39fb1ffbea0f8ca185f676b54286b18d5cf7350d81c4224a" + }, + "groups": [ { - "sql": "DROP VIEW IF EXISTS active_employees CASCADE;", - "type": "view", - "operation": "drop", - "path": "public.active_employees" + "steps": [ + { + "sql": "DROP VIEW IF EXISTS active_employees CASCADE;", + "type": "view", + "operation": "drop", + "path": "public.active_employees" + } + ] } ] } - ] + } } diff --git a/testdata/diff/create_view/issue_350_view_options/plan.json b/testdata/diff/create_view/issue_350_view_options/plan.json index 05682b50..84e3c72b 100644 --- 
a/testdata/diff/create_view/issue_350_view_options/plan.json +++ b/testdata/diff/create_view/issue_350_view_options/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "360832e875cab1bfc83c551db1100fc7c3ba71d034ded73495053fb63adfbc83" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "360832e875cab1bfc83c551db1100fc7c3ba71d034ded73495053fb63adfbc83" + }, + "groups": [ { - "sql": "ALTER VIEW employee_emails SET (security_invoker=true);", - "type": "view", - "operation": "alter", - "path": "public.employee_emails" - }, - { - "sql": "ALTER VIEW employee_names SET (security_invoker=true);", - "type": "view", - "operation": "alter", - "path": "public.employee_names" - }, - { - "sql": "ALTER VIEW employee_secure RESET (security_invoker);", - "type": "view", - "operation": "alter", - "path": "public.employee_secure" - }, - { - "sql": "ALTER MATERIALIZED VIEW employee_summary SET (fillfactor=80);", - "type": "materialized_view", - "operation": "alter", - "path": "public.employee_summary" + "steps": [ + { + "sql": "ALTER VIEW employee_emails SET (security_invoker=true);", + "type": "view", + "operation": "alter", + "path": "public.employee_emails" + }, + { + "sql": "ALTER VIEW employee_names SET (security_invoker=true);", + "type": "view", + "operation": "alter", + "path": "public.employee_names" + }, + { + "sql": "ALTER VIEW employee_secure RESET (security_invoker);", + "type": "view", + "operation": "alter", + "path": "public.employee_secure" + }, + { + "sql": "ALTER MATERIALIZED VIEW employee_summary SET (fillfactor=80);", + "type": "materialized_view", + "operation": "alter", + "path": "public.employee_summary" + } + ] } ] } - ] + } } diff --git a/testdata/diff/default_privilege/add_function_privilege/plan.json b/testdata/diff/default_privilege/add_function_privilege/plan.json index b240d2ea..ff5c9a39 100644 --- 
a/testdata/diff/default_privilege/add_function_privilege/plan.json +++ b/testdata/diff/default_privilege/add_function_privilege/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT EXECUTE ON FUNCTIONS TO api_user;", - "type": "default_privilege", - "operation": "create", - "path": "default_privileges.testuser.FUNCTIONS.api_user" + "steps": [ + { + "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT EXECUTE ON FUNCTIONS TO api_user;", + "type": "default_privilege", + "operation": "create", + "path": "default_privileges.testuser.FUNCTIONS.api_user" + } + ] } ] } - ] + } } diff --git a/testdata/diff/default_privilege/add_privilege_with_grant_option/plan.json b/testdata/diff/default_privilege/add_privilege_with_grant_option/plan.json index 50f11f94..a2d298c3 100644 --- a/testdata/diff/default_privilege/add_privilege_with_grant_option/plan.json +++ b/testdata/diff/default_privilege/add_privilege_with_grant_option/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT INSERT, SELECT ON TABLES TO admin_user WITH GRANT OPTION;", - "type": "default_privilege", - "operation": "create", - "path": 
"default_privileges.testuser.TABLES.admin_user" + "steps": [ + { + "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT INSERT, SELECT ON TABLES TO admin_user WITH GRANT OPTION;", + "type": "default_privilege", + "operation": "create", + "path": "default_privileges.testuser.TABLES.admin_user" + } + ] } ] } - ] + } } diff --git a/testdata/diff/default_privilege/add_sequence_privilege/plan.json b/testdata/diff/default_privilege/add_sequence_privilege/plan.json index 6539b979..db2497ba 100644 --- a/testdata/diff/default_privilege/add_sequence_privilege/plan.json +++ b/testdata/diff/default_privilege/add_sequence_privilege/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT SELECT, USAGE ON SEQUENCES TO app_user;", - "type": "default_privilege", - "operation": "create", - "path": "default_privileges.testuser.SEQUENCES.app_user" + "steps": [ + { + "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT SELECT, USAGE ON SEQUENCES TO app_user;", + "type": "default_privilege", + "operation": "create", + "path": "default_privileges.testuser.SEQUENCES.app_user" + } + ] } ] } - ] + } } diff --git a/testdata/diff/default_privilege/add_table_privilege/plan.json b/testdata/diff/default_privilege/add_table_privilege/plan.json index 515b4dc4..5e15d829 100644 --- a/testdata/diff/default_privilege/add_table_privilege/plan.json +++ b/testdata/diff/default_privilege/add_table_privilege/plan.json @@ -2,31 +2,35 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { 
- "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT SELECT ON TABLES TO PUBLIC;", - "type": "default_privilege", - "operation": "create", - "path": "default_privileges.testuser.TABLES.PUBLIC" - }, - { - "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT INSERT, UPDATE ON TABLES TO app_user;", - "type": "default_privilege", - "operation": "create", - "path": "default_privileges.testuser.TABLES.app_user" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS users (\n id integer,\n name text NOT NULL,\n CONSTRAINT users_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.users" + "steps": [ + { + "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT SELECT ON TABLES TO PUBLIC;", + "type": "default_privilege", + "operation": "create", + "path": "default_privileges.testuser.TABLES.PUBLIC" + }, + { + "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT INSERT, UPDATE ON TABLES TO app_user;", + "type": "default_privilege", + "operation": "create", + "path": "default_privileges.testuser.TABLES.app_user" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS users (\n id integer,\n name text NOT NULL,\n CONSTRAINT users_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.users" + } + ] } ] } - ] + } } diff --git a/testdata/diff/default_privilege/add_type_privilege/plan.json b/testdata/diff/default_privilege/add_type_privilege/plan.json index 61dc1eee..0d4e3f7c 100644 --- a/testdata/diff/default_privilege/add_type_privilege/plan.json +++ b/testdata/diff/default_privilege/add_type_privilege/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", 
"created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT USAGE ON TYPES TO app_user;", - "type": "default_privilege", - "operation": "create", - "path": "default_privileges.testuser.TYPES.app_user" + "steps": [ + { + "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT USAGE ON TYPES TO app_user;", + "type": "default_privilege", + "operation": "create", + "path": "default_privileges.testuser.TYPES.app_user" + } + ] } ] } - ] + } } diff --git a/testdata/diff/default_privilege/alter_privilege/plan.json b/testdata/diff/default_privilege/alter_privilege/plan.json index 61d32d99..b1469e12 100644 --- a/testdata/diff/default_privilege/alter_privilege/plan.json +++ b/testdata/diff/default_privilege/alter_privilege/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "e8f50b636ad809e723b6c4911b9af2cc6a9b55b2f63ef598aeeee7ba4dcc7167" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "e8f50b636ad809e723b6c4911b9af2cc6a9b55b2f63ef598aeeee7ba4dcc7167" + }, + "groups": [ { - "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public REVOKE USAGE ON SEQUENCES FROM app_user;", - "type": "default_privilege", - "operation": "drop", - "path": "default_privileges.testuser.SEQUENCES.app_user" - }, - { - "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT INSERT, UPDATE ON TABLES TO app_user;", - "type": "default_privilege", - "operation": "alter", - "path": "default_privileges.testuser.TABLES.app_user" + "steps": [ + { + "sql": "ALTER DEFAULT 
PRIVILEGES FOR ROLE testuser IN SCHEMA public REVOKE USAGE ON SEQUENCES FROM app_user;", + "type": "default_privilege", + "operation": "drop", + "path": "default_privileges.testuser.SEQUENCES.app_user" + }, + { + "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT INSERT, UPDATE ON TABLES TO app_user;", + "type": "default_privilege", + "operation": "alter", + "path": "default_privileges.testuser.TABLES.app_user" + } + ] } ] } - ] + } } diff --git a/testdata/diff/default_privilege/alter_privilege_and_grant_option/plan.json b/testdata/diff/default_privilege/alter_privilege_and_grant_option/plan.json index fdc531eb..a367afa2 100644 --- a/testdata/diff/default_privilege/alter_privilege_and_grant_option/plan.json +++ b/testdata/diff/default_privilege/alter_privilege_and_grant_option/plan.json @@ -2,31 +2,35 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "eecde391d3d75636f96f2f70fea8e5deb5804fd90aaa1c04be028b573c7ca11b" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "eecde391d3d75636f96f2f70fea8e5deb5804fd90aaa1c04be028b573c7ca11b" + }, + "groups": [ { - "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT INSERT, UPDATE ON TABLES TO app_user WITH GRANT OPTION;", - "type": "default_privilege", - "operation": "alter", - "path": "default_privileges.testuser.TABLES.app_user" - }, - { - "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public REVOKE SELECT ON TABLES FROM app_user;", - "type": "default_privilege", - "operation": "alter", - "path": "default_privileges.testuser.TABLES.app_user" - }, - { - "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT SELECT ON TABLES TO app_user WITH GRANT OPTION;", - "type": "default_privilege", - "operation": "alter", - "path": "default_privileges.testuser.TABLES.app_user" + "steps": [ + { + "sql": "ALTER DEFAULT PRIVILEGES FOR 
ROLE testuser IN SCHEMA public GRANT INSERT, UPDATE ON TABLES TO app_user WITH GRANT OPTION;", + "type": "default_privilege", + "operation": "alter", + "path": "default_privileges.testuser.TABLES.app_user" + }, + { + "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public REVOKE SELECT ON TABLES FROM app_user;", + "type": "default_privilege", + "operation": "alter", + "path": "default_privileges.testuser.TABLES.app_user" + }, + { + "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT SELECT ON TABLES TO app_user WITH GRANT OPTION;", + "type": "default_privilege", + "operation": "alter", + "path": "default_privileges.testuser.TABLES.app_user" + } + ] } ] } - ] + } } diff --git a/testdata/diff/default_privilege/auto_grant_idempotent/plan.json b/testdata/diff/default_privilege/auto_grant_idempotent/plan.json index 49efe865..3639b197 100644 --- a/testdata/diff/default_privilege/auto_grant_idempotent/plan.json +++ b/testdata/diff/default_privilege/auto_grant_idempotent/plan.json @@ -2,8 +2,12 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "bc57a912a491b0def11bf6e4b9b4de9d1b27fed796bf0988e1ed492cadd64493" - }, - "groups": null + "schemas": { + "public": { + "source_fingerprint": { + "hash": "bc57a912a491b0def11bf6e4b9b4de9d1b27fed796bf0988e1ed492cadd64493" + }, + "groups": null + } + } } diff --git a/testdata/diff/default_privilege/drop_privilege/plan.json b/testdata/diff/default_privilege/drop_privilege/plan.json index a88e1274..5c97a573 100644 --- a/testdata/diff/default_privilege/drop_privilege/plan.json +++ b/testdata/diff/default_privilege/drop_privilege/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "70ccd3a27b733e69da2235e2c444cd4c6b327eb2e75b4df82d4b8096f7b79194" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": 
{ + "hash": "70ccd3a27b733e69da2235e2c444cd4c6b327eb2e75b4df82d4b8096f7b79194" + }, + "groups": [ { - "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE test_admin IN SCHEMA public REVOKE DELETE, INSERT, UPDATE ON TABLES FROM app_user;", - "type": "default_privilege", - "operation": "drop", - "path": "default_privileges.test_admin.TABLES.app_user" - }, - { - "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE test_admin IN SCHEMA public REVOKE SELECT ON TABLES FROM readonly_user;", - "type": "default_privilege", - "operation": "drop", - "path": "default_privileges.test_admin.TABLES.readonly_user" + "steps": [ + { + "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE test_admin IN SCHEMA public REVOKE DELETE, INSERT, UPDATE ON TABLES FROM app_user;", + "type": "default_privilege", + "operation": "drop", + "path": "default_privileges.test_admin.TABLES.app_user" + }, + { + "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE test_admin IN SCHEMA public REVOKE SELECT ON TABLES FROM readonly_user;", + "type": "default_privilege", + "operation": "drop", + "path": "default_privileges.test_admin.TABLES.readonly_user" + } + ] } ] } - ] + } } diff --git a/testdata/diff/default_privilege/issue_303_for_role/plan.json b/testdata/diff/default_privilege/issue_303_for_role/plan.json index fd41cbfd..38ccbee7 100644 --- a/testdata/diff/default_privilege/issue_303_for_role/plan.json +++ b/testdata/diff/default_privilege/issue_303_for_role/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT SELECT ON TABLES TO demouser;", - "type": "default_privilege", - "operation": "create", - "path": 
"default_privileges.testuser.TABLES.demouser" + "steps": [ + { + "sql": "ALTER DEFAULT PRIVILEGES FOR ROLE testuser IN SCHEMA public GRANT SELECT ON TABLES TO demouser;", + "type": "default_privilege", + "operation": "create", + "path": "default_privileges.testuser.TABLES.demouser" + } + ] } ] } - ] + } } diff --git a/testdata/diff/dependency/function_to_function/plan.json b/testdata/diff/dependency/function_to_function/plan.json index f55e3db7..ad3b3b0e 100644 --- a/testdata/diff/dependency/function_to_function/plan.json +++ b/testdata/diff/dependency/function_to_function/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE OR REPLACE FUNCTION get_raw_result()\nRETURNS integer\nLANGUAGE sql\nVOLATILE\nRETURN 42;", - "type": "function", - "operation": "create", - "path": "public.get_raw_result" - }, - { - "sql": "CREATE OR REPLACE FUNCTION process_result(\n val integer DEFAULT get_raw_result()\n)\nRETURNS text\nLANGUAGE sql\nVOLATILE\nRETURN ('Processed: '::text || (val)::text);", - "type": "function", - "operation": "create", - "path": "public.process_result" + "steps": [ + { + "sql": "CREATE OR REPLACE FUNCTION get_raw_result()\nRETURNS integer\nLANGUAGE sql\nVOLATILE\nRETURN 42;", + "type": "function", + "operation": "create", + "path": "public.get_raw_result" + }, + { + "sql": "CREATE OR REPLACE FUNCTION process_result(\n val integer DEFAULT get_raw_result()\n)\nRETURNS text\nLANGUAGE sql\nVOLATILE\nRETURN ('Processed: '::text || (val)::text);", + "type": "function", + "operation": "create", + "path": "public.process_result" + } + ] } ] } - ] + } } diff --git 
a/testdata/diff/dependency/function_to_table/plan.json b/testdata/diff/dependency/function_to_table/plan.json index 0014c45e..061e2266 100644 --- a/testdata/diff/dependency/function_to_table/plan.json +++ b/testdata/diff/dependency/function_to_table/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE OR REPLACE FUNCTION get_default_status()\nRETURNS text\nLANGUAGE plpgsql\nVOLATILE\nAS $$\nBEGIN\n RETURN 'active';\nEND;\n$$;", - "type": "function", - "operation": "create", - "path": "public.get_default_status" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS users (\n id SERIAL,\n name text NOT NULL,\n status text DEFAULT get_default_status(),\n CONSTRAINT users_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.users" + "steps": [ + { + "sql": "CREATE OR REPLACE FUNCTION get_default_status()\nRETURNS text\nLANGUAGE plpgsql\nVOLATILE\nAS $$\nBEGIN\n RETURN 'active';\nEND;\n$$;", + "type": "function", + "operation": "create", + "path": "public.get_default_status" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS users (\n id SERIAL,\n name text NOT NULL,\n status text DEFAULT get_default_status(),\n CONSTRAINT users_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.users" + } + ] } ] } - ] + } } diff --git a/testdata/diff/dependency/function_to_trigger/plan.json b/testdata/diff/dependency/function_to_trigger/plan.json index 77a9b66f..dc8431cb 100644 --- a/testdata/diff/dependency/function_to_trigger/plan.json +++ b/testdata/diff/dependency/function_to_trigger/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": 
"1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "e1ad9e7efedd98f518dfa2edbb6f5b4f7d2b5f4ddfee9cfe0f99cc1c2a866eb4" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "e1ad9e7efedd98f518dfa2edbb6f5b4f7d2b5f4ddfee9cfe0f99cc1c2a866eb4" + }, + "groups": [ { - "sql": "DROP TRIGGER IF EXISTS update_users_modified_time ON users;", - "type": "table.trigger", - "operation": "drop", - "path": "public.users.update_users_modified_time" - }, - { - "sql": "DROP FUNCTION IF EXISTS update_modified_time();", - "type": "function", - "operation": "drop", - "path": "public.update_modified_time" - }, - { - "sql": "CREATE OR REPLACE FUNCTION log_user_changes()\nRETURNS trigger\nLANGUAGE plpgsql\nVOLATILE\nAS $$\nBEGIN\n RAISE NOTICE 'User record changed: %', NEW.id;\n RETURN NEW;\nEND;\n$$;", - "type": "function", - "operation": "create", - "path": "public.log_user_changes" - }, - { - "sql": "CREATE OR REPLACE TRIGGER log_users_trigger\n AFTER INSERT OR UPDATE ON users\n FOR EACH ROW\n EXECUTE FUNCTION log_user_changes();", - "type": "table.trigger", - "operation": "create", - "path": "public.users.log_users_trigger" + "steps": [ + { + "sql": "DROP TRIGGER IF EXISTS update_users_modified_time ON users;", + "type": "table.trigger", + "operation": "drop", + "path": "public.users.update_users_modified_time" + }, + { + "sql": "DROP FUNCTION IF EXISTS update_modified_time();", + "type": "function", + "operation": "drop", + "path": "public.update_modified_time" + }, + { + "sql": "CREATE OR REPLACE FUNCTION log_user_changes()\nRETURNS trigger\nLANGUAGE plpgsql\nVOLATILE\nAS $$\nBEGIN\n RAISE NOTICE 'User record changed: %', NEW.id;\n RETURN NEW;\nEND;\n$$;", + "type": "function", + "operation": "create", + "path": "public.log_user_changes" + }, + { + "sql": "CREATE OR REPLACE TRIGGER log_users_trigger\n AFTER INSERT OR UPDATE ON users\n FOR EACH ROW\n EXECUTE FUNCTION log_user_changes();", + "type": 
"table.trigger", + "operation": "create", + "path": "public.users.log_users_trigger" + } + ] } ] } - ] + } } diff --git a/testdata/diff/dependency/issue_300_function_table_composite_type/plan.json b/testdata/diff/dependency/issue_300_function_table_composite_type/plan.json index 109239e9..3429d8c7 100644 --- a/testdata/diff/dependency/issue_300_function_table_composite_type/plan.json +++ b/testdata/diff/dependency/issue_300_function_table_composite_type/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS activity (\n id uuid,\n author_id uuid,\n CONSTRAINT activity_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.activity" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS contact (\n id uuid,\n name text NOT NULL,\n CONSTRAINT contact_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.contact" - }, - { - "sql": "CREATE OR REPLACE VIEW actor AS\n SELECT id,\n name\n FROM contact;", - "type": "view", - "operation": "create", - "path": "public.actor" - }, - { - "sql": "CREATE OR REPLACE FUNCTION get_actor(\n activity activity\n)\nRETURNS SETOF actor\nLANGUAGE sql\nSTABLE\nAS $$ SELECT actor.* FROM actor WHERE actor.id = activity.author_id\n$$;", - "type": "function", - "operation": "create", - "path": "public.get_actor" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS activity (\n id uuid,\n author_id uuid,\n CONSTRAINT activity_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.activity" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS contact (\n id uuid,\n name text 
NOT NULL,\n CONSTRAINT contact_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.contact" + }, + { + "sql": "CREATE OR REPLACE VIEW actor AS\n SELECT id,\n name\n FROM contact;", + "type": "view", + "operation": "create", + "path": "public.actor" + }, + { + "sql": "CREATE OR REPLACE FUNCTION get_actor(\n activity activity\n)\nRETURNS SETOF actor\nLANGUAGE sql\nSTABLE\nAS $$ SELECT actor.* FROM actor WHERE actor.id = activity.author_id\n$$;", + "type": "function", + "operation": "create", + "path": "public.get_actor" + } + ] } ] } - ] + } } diff --git a/testdata/diff/dependency/issue_300_view_depends_on_view/plan.json b/testdata/diff/dependency/issue_300_view_depends_on_view/plan.json index 7215aa64..d67d89a9 100644 --- a/testdata/diff/dependency/issue_300_view_depends_on_view/plan.json +++ b/testdata/diff/dependency/issue_300_view_depends_on_view/plan.json @@ -2,43 +2,47 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS activity_x (\n id integer,\n title text NOT NULL,\n priority_user_id integer,\n CONSTRAINT activity_x_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.activity_x" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS priority (\n id integer,\n name text NOT NULL,\n level integer NOT NULL,\n CONSTRAINT priority_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.priority" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS priority_user (\n id integer,\n user_id integer NOT NULL,\n priority_id integer,\n CONSTRAINT priority_user_pkey PRIMARY KEY (id),\n CONSTRAINT 
priority_user_priority_id_fkey FOREIGN KEY (priority_id) REFERENCES priority (id)\n);", - "type": "table", - "operation": "create", - "path": "public.priority_user" - }, - { - "sql": "CREATE OR REPLACE VIEW priority_expanded AS\n SELECT pu.id,\n pu.user_id,\n p.name AS priority_name,\n p.level\n FROM priority_user pu\n JOIN priority p ON p.id = pu.priority_id;", - "type": "view", - "operation": "create", - "path": "public.priority_expanded" - }, - { - "sql": "CREATE OR REPLACE VIEW activity AS\n SELECT a.id,\n a.title,\n upe.priority_name\n FROM activity_x a\n JOIN priority_expanded upe ON upe.id = a.priority_user_id;", - "type": "view", - "operation": "create", - "path": "public.activity" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS activity_x (\n id integer,\n title text NOT NULL,\n priority_user_id integer,\n CONSTRAINT activity_x_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.activity_x" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS priority (\n id integer,\n name text NOT NULL,\n level integer NOT NULL,\n CONSTRAINT priority_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.priority" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS priority_user (\n id integer,\n user_id integer NOT NULL,\n priority_id integer,\n CONSTRAINT priority_user_pkey PRIMARY KEY (id),\n CONSTRAINT priority_user_priority_id_fkey FOREIGN KEY (priority_id) REFERENCES priority (id)\n);", + "type": "table", + "operation": "create", + "path": "public.priority_user" + }, + { + "sql": "CREATE OR REPLACE VIEW priority_expanded AS\n SELECT pu.id,\n pu.user_id,\n p.name AS priority_name,\n p.level\n FROM priority_user pu\n JOIN priority p ON p.id = pu.priority_id;", + "type": "view", + "operation": "create", + "path": "public.priority_expanded" + }, + { + "sql": "CREATE OR REPLACE VIEW activity AS\n SELECT a.id,\n a.title,\n upe.priority_name\n FROM activity_x a\n JOIN priority_expanded upe ON upe.id = 
a.priority_user_id;", + "type": "view", + "operation": "create", + "path": "public.activity" + } + ] } ] } - ] + } } diff --git a/testdata/diff/dependency/issue_308_view_select_star_column_reorder/plan.json b/testdata/diff/dependency/issue_308_view_select_star_column_reorder/plan.json index 5addf313..eec69d25 100644 --- a/testdata/diff/dependency/issue_308_view_select_star_column_reorder/plan.json +++ b/testdata/diff/dependency/issue_308_view_select_star_column_reorder/plan.json @@ -2,31 +2,35 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "584e78ed59cc9fc48eae0b3f7fb8951623a81775c011bafcb4c27e781ed5f170" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "584e78ed59cc9fc48eae0b3f7fb8951623a81775c011bafcb4c27e781ed5f170" + }, + "groups": [ { - "sql": "ALTER TABLE item ADD COLUMN new_col text;", - "type": "table.column", - "operation": "create", - "path": "public.item.new_col" - }, - { - "sql": "DROP VIEW IF EXISTS item_extended RESTRICT;", - "type": "view", - "operation": "alter", - "path": "public.item_extended" - }, - { - "sql": "CREATE OR REPLACE VIEW item_extended AS\n SELECT i.id,\n i.title,\n i.status,\n i.new_col,\n c.name AS category_name\n FROM item i\n JOIN category c ON c.id = i.id;", - "type": "view", - "operation": "alter", - "path": "public.item_extended" + "steps": [ + { + "sql": "ALTER TABLE item ADD COLUMN new_col text;", + "type": "table.column", + "operation": "create", + "path": "public.item.new_col" + }, + { + "sql": "DROP VIEW IF EXISTS item_extended RESTRICT;", + "type": "view", + "operation": "alter", + "path": "public.item_extended" + }, + { + "sql": "CREATE OR REPLACE VIEW item_extended AS\n SELECT i.id,\n i.title,\n i.status,\n i.new_col,\n c.name AS category_name\n FROM item i\n JOIN category c ON c.id = i.id;", + "type": "view", + "operation": "alter", + "path": "public.item_extended" + } + ] } ] } - ] + } } 
diff --git a/testdata/diff/dependency/issue_373_policy_references_other_table/plan.json b/testdata/diff/dependency/issue_373_policy_references_other_table/plan.json index 8890a135..63154e76 100644 --- a/testdata/diff/dependency/issue_373_policy_references_other_table/plan.json +++ b/testdata/diff/dependency/issue_373_policy_references_other_table/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS manager (\n id SERIAL,\n user_id uuid NOT NULL\n);", - "type": "table", - "operation": "create", - "path": "public.manager" - }, - { - "sql": "ALTER TABLE manager ENABLE ROW LEVEL SECURITY;", - "type": "table.rls", - "operation": "alter", - "path": "public.manager" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS project_manager (\n id SERIAL,\n project_id integer NOT NULL,\n manager_id integer NOT NULL,\n is_deleted boolean DEFAULT false NOT NULL\n);", - "type": "table", - "operation": "create", - "path": "public.project_manager" - }, - { - "sql": "CREATE POLICY employee_manager_select ON manager FOR SELECT TO PUBLIC USING (id IN ( SELECT pam.manager_id FROM project_manager pam WHERE ((pam.project_id IN ( SELECT unnest(ARRAY[1, 2, 3]) AS unnest)) AND (pam.is_deleted = false))));", - "type": "table.policy", - "operation": "create", - "path": "public.manager.employee_manager_select" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS manager (\n id SERIAL,\n user_id uuid NOT NULL\n);", + "type": "table", + "operation": "create", + "path": "public.manager" + }, + { + "sql": "ALTER TABLE manager ENABLE ROW LEVEL SECURITY;", + "type": "table.rls", + "operation": "alter", + "path": 
"public.manager" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS project_manager (\n id SERIAL,\n project_id integer NOT NULL,\n manager_id integer NOT NULL,\n is_deleted boolean DEFAULT false NOT NULL\n);", + "type": "table", + "operation": "create", + "path": "public.project_manager" + }, + { + "sql": "CREATE POLICY employee_manager_select ON manager FOR SELECT TO PUBLIC USING (id IN ( SELECT pam.manager_id FROM project_manager pam WHERE ((pam.project_id IN ( SELECT unnest(ARRAY[1, 2, 3]) AS unnest)) AND (pam.is_deleted = false))));", + "type": "table.policy", + "operation": "create", + "path": "public.manager.employee_manager_select" + } + ] } ] } - ] + } } diff --git a/testdata/diff/dependency/sql_function_body_reference/plan.json b/testdata/diff/dependency/sql_function_body_reference/plan.json index 1c4133b5..68033fc6 100644 --- a/testdata/diff/dependency/sql_function_body_reference/plan.json +++ b/testdata/diff/dependency/sql_function_body_reference/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE OR REPLACE FUNCTION z_helper(\n input text\n)\nRETURNS text\nLANGUAGE sql\nIMMUTABLE\nAS $$ SELECT upper(input)\n$$;", - "type": "function", - "operation": "create", - "path": "public.z_helper" - }, - { - "sql": "CREATE OR REPLACE FUNCTION a_wrapper(\n input text\n)\nRETURNS text\nLANGUAGE sql\nIMMUTABLE\nAS $$ SELECT z_helper(input)\n$$;", - "type": "function", - "operation": "create", - "path": "public.a_wrapper" + "steps": [ + { + "sql": "CREATE OR REPLACE FUNCTION z_helper(\n input text\n)\nRETURNS text\nLANGUAGE sql\nIMMUTABLE\nAS $$ SELECT upper(input)\n$$;", + "type": "function", + 
"operation": "create", + "path": "public.z_helper" + }, + { + "sql": "CREATE OR REPLACE FUNCTION a_wrapper(\n input text\n)\nRETURNS text\nLANGUAGE sql\nIMMUTABLE\nAS $$ SELECT z_helper(input)\n$$;", + "type": "function", + "operation": "create", + "path": "public.a_wrapper" + } + ] } ] } - ] + } } diff --git a/testdata/diff/dependency/table_fk_to_generated_column/plan.json b/testdata/diff/dependency/table_fk_to_generated_column/plan.json index 673f0416..d264d835 100644 --- a/testdata/diff/dependency/table_fk_to_generated_column/plan.json +++ b/testdata/diff/dependency/table_fk_to_generated_column/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS articlesource (\n id integer,\n article_id integer NOT NULL,\n source_url text,\n CONSTRAINT articlesource_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.articlesource" - }, - { - "sql": "CREATE OR REPLACE FUNCTION calc_priority()\nRETURNS integer\nLANGUAGE sql\nIMMUTABLE\nAS $$SELECT 1\n$$;", - "type": "function", - "operation": "create", - "path": "public.calc_priority" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS article (\n id integer,\n title text NOT NULL,\n priority integer GENERATED ALWAYS AS (calc_priority()) STORED,\n CONSTRAINT article_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.article" - }, - { - "sql": "ALTER TABLE articlesource\nADD CONSTRAINT articlesource_article_id_fkey FOREIGN KEY (article_id) REFERENCES article (id);", - "type": "table.constraint", - "operation": "create", - "path": 
"public.articlesource.articlesource_article_id_fkey" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS articlesource (\n id integer,\n article_id integer NOT NULL,\n source_url text,\n CONSTRAINT articlesource_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.articlesource" + }, + { + "sql": "CREATE OR REPLACE FUNCTION calc_priority()\nRETURNS integer\nLANGUAGE sql\nIMMUTABLE\nAS $$SELECT 1\n$$;", + "type": "function", + "operation": "create", + "path": "public.calc_priority" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS article (\n id integer,\n title text NOT NULL,\n priority integer GENERATED ALWAYS AS (calc_priority()) STORED,\n CONSTRAINT article_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.article" + }, + { + "sql": "ALTER TABLE articlesource\nADD CONSTRAINT articlesource_article_id_fkey FOREIGN KEY (article_id) REFERENCES article (id);", + "type": "table.constraint", + "operation": "create", + "path": "public.articlesource.articlesource_article_id_fkey" + } + ] } ] } - ] + } } diff --git a/testdata/diff/dependency/table_to_function/plan.json b/testdata/diff/dependency/table_to_function/plan.json index 323b8343..b5cd600b 100644 --- a/testdata/diff/dependency/table_to_function/plan.json +++ b/testdata/diff/dependency/table_to_function/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS documents (\n id SERIAL,\n title text NOT NULL,\n content text,\n created_at timestamp DEFAULT CURRENT_TIMESTAMP,\n CONSTRAINT documents_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": 
"create", - "path": "public.documents" - }, - { - "sql": "CREATE OR REPLACE FUNCTION get_document_count()\nRETURNS integer\nLANGUAGE plpgsql\nVOLATILE\nAS $$\nBEGIN\n RETURN (SELECT COUNT(*) FROM public.documents);\nEND;\n$$;", - "type": "function", - "operation": "create", - "path": "public.get_document_count" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS documents (\n id SERIAL,\n title text NOT NULL,\n content text,\n created_at timestamp DEFAULT CURRENT_TIMESTAMP,\n CONSTRAINT documents_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.documents" + }, + { + "sql": "CREATE OR REPLACE FUNCTION get_document_count()\nRETURNS integer\nLANGUAGE plpgsql\nVOLATILE\nAS $$\nBEGIN\n RETURN (SELECT COUNT(*) FROM public.documents);\nEND;\n$$;", + "type": "function", + "operation": "create", + "path": "public.get_document_count" + } + ] } ] } - ] + } } diff --git a/testdata/diff/dependency/table_to_materialized_view/plan.json b/testdata/diff/dependency/table_to_materialized_view/plan.json index 1d9b373a..5023778e 100644 --- a/testdata/diff/dependency/table_to_materialized_view/plan.json +++ b/testdata/diff/dependency/table_to_materialized_view/plan.json @@ -2,31 +2,35 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "f9203703a19466fbbef2ce5a7e5d63422c831ab0b28aee0ee5b52331ea50bd27" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "f9203703a19466fbbef2ce5a7e5d63422c831ab0b28aee0ee5b52331ea50bd27" + }, + "groups": [ { - "sql": "DROP MATERIALIZED VIEW expensive_products RESTRICT;", - "type": "materialized_view", - "operation": "recreate", - "path": "public.expensive_products" - }, - { - "sql": "ALTER TABLE products ADD COLUMN category text;", - "type": "table.column", - "operation": "create", - "path": "public.products.category" - }, - { - "sql": "CREATE MATERIALIZED VIEW IF NOT EXISTS expensive_products 
AS\n SELECT id,\n name,\n price,\n category\n FROM products\n WHERE price > 100::numeric;", - "type": "materialized_view", - "operation": "create", - "path": "public.expensive_products" + "steps": [ + { + "sql": "DROP MATERIALIZED VIEW expensive_products RESTRICT;", + "type": "materialized_view", + "operation": "recreate", + "path": "public.expensive_products" + }, + { + "sql": "ALTER TABLE products ADD COLUMN category text;", + "type": "table.column", + "operation": "create", + "path": "public.products.category" + }, + { + "sql": "CREATE MATERIALIZED VIEW IF NOT EXISTS expensive_products AS\n SELECT id,\n name,\n price,\n category\n FROM products\n WHERE price > 100::numeric;", + "type": "materialized_view", + "operation": "create", + "path": "public.expensive_products" + } + ] } ] } - ] + } } diff --git a/testdata/diff/dependency/table_to_table/plan.json b/testdata/diff/dependency/table_to_table/plan.json index dc821737..708a2e2d 100644 --- a/testdata/diff/dependency/table_to_table/plan.json +++ b/testdata/diff/dependency/table_to_table/plan.json @@ -2,31 +2,35 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS departments (\n id integer,\n name text NOT NULL,\n manager_id integer,\n CONSTRAINT departments_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.departments" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS users (\n id integer,\n name text,\n email text,\n department_id integer,\n CONSTRAINT users_pkey PRIMARY KEY (id),\n CONSTRAINT users_email_key UNIQUE (email),\n CONSTRAINT users_department_id_fkey FOREIGN KEY (department_id) REFERENCES departments 
(id)\n);", - "type": "table", - "operation": "create", - "path": "public.users" - }, - { - "sql": "ALTER TABLE departments\nADD CONSTRAINT departments_manager_id_fkey FOREIGN KEY (manager_id) REFERENCES users (id);", - "type": "table.constraint", - "operation": "create", - "path": "public.departments.departments_manager_id_fkey" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS departments (\n id integer,\n name text NOT NULL,\n manager_id integer,\n CONSTRAINT departments_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.departments" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS users (\n id integer,\n name text,\n email text,\n department_id integer,\n CONSTRAINT users_pkey PRIMARY KEY (id),\n CONSTRAINT users_email_key UNIQUE (email),\n CONSTRAINT users_department_id_fkey FOREIGN KEY (department_id) REFERENCES departments (id)\n);", + "type": "table", + "operation": "create", + "path": "public.users" + }, + { + "sql": "ALTER TABLE departments\nADD CONSTRAINT departments_manager_id_fkey FOREIGN KEY (manager_id) REFERENCES users (id);", + "type": "table.constraint", + "operation": "create", + "path": "public.departments.departments_manager_id_fkey" + } + ] } ] } - ] + } } diff --git a/testdata/diff/dependency/table_to_view/plan.json b/testdata/diff/dependency/table_to_view/plan.json index 3c84b6f0..fdc74d72 100644 --- a/testdata/diff/dependency/table_to_view/plan.json +++ b/testdata/diff/dependency/table_to_view/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "edd3e06b150f88d743bd989d71daa0fd24b834c4a9e901aed640fa0fd19462dc" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "edd3e06b150f88d743bd989d71daa0fd24b834c4a9e901aed640fa0fd19462dc" + }, + "groups": [ { - "sql": "ALTER TABLE products ADD COLUMN category text;", - "type": "table.column", - "operation": "create", - 
"path": "public.products.category" - }, - { - "sql": "CREATE OR REPLACE VIEW expensive_products AS\n SELECT id,\n name,\n price,\n category\n FROM products\n WHERE price > 100::numeric;", - "type": "view", - "operation": "alter", - "path": "public.expensive_products" + "steps": [ + { + "sql": "ALTER TABLE products ADD COLUMN category text;", + "type": "table.column", + "operation": "create", + "path": "public.products.category" + }, + { + "sql": "CREATE OR REPLACE VIEW expensive_products AS\n SELECT id,\n name,\n price,\n category\n FROM products\n WHERE price > 100::numeric;", + "type": "view", + "operation": "alter", + "path": "public.expensive_products" + } + ] } ] } - ] + } } diff --git a/testdata/diff/dependency/type_to_type/plan.json b/testdata/diff/dependency/type_to_type/plan.json index e5d1c1e8..cc47c32c 100644 --- a/testdata/diff/dependency/type_to_type/plan.json +++ b/testdata/diff/dependency/type_to_type/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TYPE status_type AS ENUM (\n 'active',\n 'inactive'\n);", - "type": "type", - "operation": "create", - "path": "public.status_type" - }, - { - "sql": "CREATE TYPE record_type AS (id integer, status status_type);", - "type": "type", - "operation": "create", - "path": "public.record_type" + "steps": [ + { + "sql": "CREATE TYPE status_type AS ENUM (\n 'active',\n 'inactive'\n);", + "type": "type", + "operation": "create", + "path": "public.status_type" + }, + { + "sql": "CREATE TYPE record_type AS (id integer, status status_type);", + "type": "type", + "operation": "create", + "path": "public.record_type" + } + ] } ] } - ] + } } diff --git 
a/testdata/diff/migrate/v1/plan.json b/testdata/diff/migrate/v1/plan.json index a3badf69..b416763d 100644 --- a/testdata/diff/migrate/v1/plan.json +++ b/testdata/diff/migrate/v1/plan.json @@ -2,49 +2,53 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "965b1131737c955e24c7f827c55bd78e4cb49a75adfd04229e0ba297376f5085" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS department (\n dept_no text,\n dept_name text NOT NULL,\n CONSTRAINT department_pkey PRIMARY KEY (dept_no)\n);", - "type": "table", - "operation": "create", - "path": "public.department" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS employee (\n emp_no SERIAL,\n birth_date date NOT NULL,\n first_name text NOT NULL,\n last_name text NOT NULL,\n gender text NOT NULL,\n hire_date date NOT NULL,\n CONSTRAINT employee_pkey PRIMARY KEY (emp_no)\n);", - "type": "table", - "operation": "create", - "path": "public.employee" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS dept_emp (\n emp_no integer,\n dept_no text,\n from_date date NOT NULL,\n to_date date NOT NULL,\n CONSTRAINT dept_emp_pkey PRIMARY KEY (emp_no, dept_no),\n CONSTRAINT dept_emp_dept_no_fkey FOREIGN KEY (dept_no) REFERENCES department (dept_no),\n CONSTRAINT dept_emp_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no)\n);", - "type": "table", - "operation": "create", - "path": "public.dept_emp" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS dept_manager (\n emp_no integer,\n dept_no text,\n from_date date NOT NULL,\n to_date date NOT NULL,\n CONSTRAINT dept_manager_pkey PRIMARY KEY (emp_no, dept_no),\n CONSTRAINT dept_manager_dept_no_fkey FOREIGN KEY (dept_no) REFERENCES department (dept_no),\n CONSTRAINT dept_manager_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no)\n);", - 
"type": "table", - "operation": "create", - "path": "public.dept_manager" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS salary (\n emp_no integer,\n amount integer NOT NULL,\n from_date date,\n to_date date NOT NULL,\n CONSTRAINT salary_pkey PRIMARY KEY (emp_no, from_date),\n CONSTRAINT salary_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no)\n);", - "type": "table", - "operation": "create", - "path": "public.salary" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS title (\n emp_no integer,\n title text,\n from_date date,\n to_date date,\n CONSTRAINT title_pkey PRIMARY KEY (emp_no, title, from_date),\n CONSTRAINT title_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no)\n);", - "type": "table", - "operation": "create", - "path": "public.title" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS department (\n dept_no text,\n dept_name text NOT NULL,\n CONSTRAINT department_pkey PRIMARY KEY (dept_no)\n);", + "type": "table", + "operation": "create", + "path": "public.department" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS employee (\n emp_no SERIAL,\n birth_date date NOT NULL,\n first_name text NOT NULL,\n last_name text NOT NULL,\n gender text NOT NULL,\n hire_date date NOT NULL,\n CONSTRAINT employee_pkey PRIMARY KEY (emp_no)\n);", + "type": "table", + "operation": "create", + "path": "public.employee" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS dept_emp (\n emp_no integer,\n dept_no text,\n from_date date NOT NULL,\n to_date date NOT NULL,\n CONSTRAINT dept_emp_pkey PRIMARY KEY (emp_no, dept_no),\n CONSTRAINT dept_emp_dept_no_fkey FOREIGN KEY (dept_no) REFERENCES department (dept_no),\n CONSTRAINT dept_emp_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no)\n);", + "type": "table", + "operation": "create", + "path": "public.dept_emp" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS dept_manager (\n emp_no integer,\n dept_no text,\n from_date date NOT NULL,\n to_date date NOT NULL,\n CONSTRAINT dept_manager_pkey PRIMARY KEY (emp_no, 
dept_no),\n CONSTRAINT dept_manager_dept_no_fkey FOREIGN KEY (dept_no) REFERENCES department (dept_no),\n CONSTRAINT dept_manager_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no)\n);", + "type": "table", + "operation": "create", + "path": "public.dept_manager" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS salary (\n emp_no integer,\n amount integer NOT NULL,\n from_date date,\n to_date date NOT NULL,\n CONSTRAINT salary_pkey PRIMARY KEY (emp_no, from_date),\n CONSTRAINT salary_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no)\n);", + "type": "table", + "operation": "create", + "path": "public.salary" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS title (\n emp_no integer,\n title text,\n from_date date,\n to_date date,\n CONSTRAINT title_pkey PRIMARY KEY (emp_no, title, from_date),\n CONSTRAINT title_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no)\n);", + "type": "table", + "operation": "create", + "path": "public.title" + } + ] } ] } - ] + } } diff --git a/testdata/diff/migrate/v2/plan.json b/testdata/diff/migrate/v2/plan.json index 86f3c5a7..0e600564 100644 --- a/testdata/diff/migrate/v2/plan.json +++ b/testdata/diff/migrate/v2/plan.json @@ -2,195 +2,199 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "b6ae92653cfa9d3b3e5ac5c0415518e81b96326b5236a9c8829e278a62b89efb" - }, - "groups": [ - { - "steps": [ - { - "sql": "ALTER TABLE department\nADD CONSTRAINT department_dept_name_key UNIQUE (dept_name);", - "type": "table.constraint", - "operation": "create", - "path": "public.department.department_dept_name_key" - }, - { - "sql": "ALTER TABLE dept_emp DROP CONSTRAINT dept_emp_dept_no_fkey;", - "type": "table.constraint", - "operation": "drop", - "path": "public.dept_emp.dept_emp_dept_no_fkey" - }, - { - "sql": "ALTER TABLE dept_emp\nADD CONSTRAINT dept_emp_dept_no_fkey FOREIGN KEY (dept_no) REFERENCES department (dept_no) ON DELETE CASCADE NOT VALID;", 
- "type": "table.constraint", - "operation": "create", - "path": "public.dept_emp.dept_emp_dept_no_fkey" - }, - { - "sql": "ALTER TABLE dept_emp VALIDATE CONSTRAINT dept_emp_dept_no_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.dept_emp.dept_emp_dept_no_fkey" - }, - { - "sql": "ALTER TABLE dept_emp DROP CONSTRAINT dept_emp_emp_no_fkey;", - "type": "table.constraint", - "operation": "drop", - "path": "public.dept_emp.dept_emp_emp_no_fkey" - }, - { - "sql": "ALTER TABLE dept_emp\nADD CONSTRAINT dept_emp_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no) ON DELETE CASCADE NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.dept_emp.dept_emp_emp_no_fkey" - }, - { - "sql": "ALTER TABLE dept_emp VALIDATE CONSTRAINT dept_emp_emp_no_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.dept_emp.dept_emp_emp_no_fkey" - }, - { - "sql": "ALTER TABLE dept_manager DROP CONSTRAINT dept_manager_dept_no_fkey;", - "type": "table.constraint", - "operation": "drop", - "path": "public.dept_manager.dept_manager_dept_no_fkey" - }, - { - "sql": "ALTER TABLE dept_manager\nADD CONSTRAINT dept_manager_dept_no_fkey FOREIGN KEY (dept_no) REFERENCES department (dept_no) ON DELETE CASCADE NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.dept_manager.dept_manager_dept_no_fkey" - }, - { - "sql": "ALTER TABLE dept_manager VALIDATE CONSTRAINT dept_manager_dept_no_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.dept_manager.dept_manager_dept_no_fkey" - }, - { - "sql": "ALTER TABLE dept_manager DROP CONSTRAINT dept_manager_emp_no_fkey;", - "type": "table.constraint", - "operation": "drop", - "path": "public.dept_manager.dept_manager_emp_no_fkey" - }, - { - "sql": "ALTER TABLE dept_manager\nADD CONSTRAINT dept_manager_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no) ON DELETE CASCADE NOT VALID;", - "type": 
"table.constraint", - "operation": "create", - "path": "public.dept_manager.dept_manager_emp_no_fkey" - }, - { - "sql": "ALTER TABLE dept_manager VALIDATE CONSTRAINT dept_manager_emp_no_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.dept_manager.dept_manager_emp_no_fkey" - }, - { - "sql": "ALTER TABLE employee\nADD CONSTRAINT employee_gender_check CHECK (gender IN ('M'::text, 'F'::text)) NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.employee.employee_gender_check" - }, - { - "sql": "ALTER TABLE employee VALIDATE CONSTRAINT employee_gender_check;", - "type": "table.constraint", - "operation": "create", - "path": "public.employee.employee_gender_check" - } - ] - }, - { - "steps": [ - { - "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_employee_hire_date ON employee (hire_date);", - "type": "table.index", - "operation": "create", - "path": "public.employee.idx_employee_hire_date" - } - ] - }, - { - "steps": [ - { - "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_employee_hire_date';", - "directive": { - "type": "wait", - "message": "Creating index idx_employee_hire_date" - }, - "type": "table.index", - "operation": "create", - "path": "public.employee.idx_employee_hire_date" - } - ] - }, - { - "steps": [ - { - "sql": "ALTER TABLE salary DROP CONSTRAINT salary_emp_no_fkey;", - "type": "table.constraint", - "operation": "drop", - "path": "public.salary.salary_emp_no_fkey" - }, - { - "sql": "ALTER TABLE salary\nADD CONSTRAINT salary_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no) ON DELETE CASCADE NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.salary.salary_emp_no_fkey" - }, - { - 
"sql": "ALTER TABLE salary VALIDATE CONSTRAINT salary_emp_no_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.salary.salary_emp_no_fkey" - } - ] - }, - { - "steps": [ - { - "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_salary_amount ON salary (amount);", - "type": "table.index", - "operation": "create", - "path": "public.salary.idx_salary_amount" - } - ] - }, - { - "steps": [ - { - "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_salary_amount';", - "directive": { - "type": "wait", - "message": "Creating index idx_salary_amount" - }, - "type": "table.index", - "operation": "create", - "path": "public.salary.idx_salary_amount" - } - ] - }, - { - "steps": [ - { - "sql": "ALTER TABLE title DROP CONSTRAINT title_emp_no_fkey;", - "type": "table.constraint", - "operation": "drop", - "path": "public.title.title_emp_no_fkey" - }, - { - "sql": "ALTER TABLE title\nADD CONSTRAINT title_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no) ON DELETE CASCADE NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.title.title_emp_no_fkey" - }, - { - "sql": "ALTER TABLE title VALIDATE CONSTRAINT title_emp_no_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.title.title_emp_no_fkey" + "schemas": { + "public": { + "source_fingerprint": { + "hash": "b6ae92653cfa9d3b3e5ac5c0415518e81b96326b5236a9c8829e278a62b89efb" + }, + "groups": [ + { + "steps": [ + { + "sql": "ALTER TABLE department\nADD CONSTRAINT department_dept_name_key UNIQUE (dept_name);", + "type": "table.constraint", + "operation": "create", + "path": "public.department.department_dept_name_key" + }, + { + "sql": "ALTER TABLE dept_emp DROP CONSTRAINT 
dept_emp_dept_no_fkey;", + "type": "table.constraint", + "operation": "drop", + "path": "public.dept_emp.dept_emp_dept_no_fkey" + }, + { + "sql": "ALTER TABLE dept_emp\nADD CONSTRAINT dept_emp_dept_no_fkey FOREIGN KEY (dept_no) REFERENCES department (dept_no) ON DELETE CASCADE NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.dept_emp.dept_emp_dept_no_fkey" + }, + { + "sql": "ALTER TABLE dept_emp VALIDATE CONSTRAINT dept_emp_dept_no_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.dept_emp.dept_emp_dept_no_fkey" + }, + { + "sql": "ALTER TABLE dept_emp DROP CONSTRAINT dept_emp_emp_no_fkey;", + "type": "table.constraint", + "operation": "drop", + "path": "public.dept_emp.dept_emp_emp_no_fkey" + }, + { + "sql": "ALTER TABLE dept_emp\nADD CONSTRAINT dept_emp_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no) ON DELETE CASCADE NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.dept_emp.dept_emp_emp_no_fkey" + }, + { + "sql": "ALTER TABLE dept_emp VALIDATE CONSTRAINT dept_emp_emp_no_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.dept_emp.dept_emp_emp_no_fkey" + }, + { + "sql": "ALTER TABLE dept_manager DROP CONSTRAINT dept_manager_dept_no_fkey;", + "type": "table.constraint", + "operation": "drop", + "path": "public.dept_manager.dept_manager_dept_no_fkey" + }, + { + "sql": "ALTER TABLE dept_manager\nADD CONSTRAINT dept_manager_dept_no_fkey FOREIGN KEY (dept_no) REFERENCES department (dept_no) ON DELETE CASCADE NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.dept_manager.dept_manager_dept_no_fkey" + }, + { + "sql": "ALTER TABLE dept_manager VALIDATE CONSTRAINT dept_manager_dept_no_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.dept_manager.dept_manager_dept_no_fkey" + }, + { + "sql": "ALTER TABLE dept_manager DROP CONSTRAINT dept_manager_emp_no_fkey;", + 
"type": "table.constraint", + "operation": "drop", + "path": "public.dept_manager.dept_manager_emp_no_fkey" + }, + { + "sql": "ALTER TABLE dept_manager\nADD CONSTRAINT dept_manager_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no) ON DELETE CASCADE NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.dept_manager.dept_manager_emp_no_fkey" + }, + { + "sql": "ALTER TABLE dept_manager VALIDATE CONSTRAINT dept_manager_emp_no_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.dept_manager.dept_manager_emp_no_fkey" + }, + { + "sql": "ALTER TABLE employee\nADD CONSTRAINT employee_gender_check CHECK (gender IN ('M'::text, 'F'::text)) NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.employee.employee_gender_check" + }, + { + "sql": "ALTER TABLE employee VALIDATE CONSTRAINT employee_gender_check;", + "type": "table.constraint", + "operation": "create", + "path": "public.employee.employee_gender_check" + } + ] + }, + { + "steps": [ + { + "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_employee_hire_date ON employee (hire_date);", + "type": "table.index", + "operation": "create", + "path": "public.employee.idx_employee_hire_date" + } + ] + }, + { + "steps": [ + { + "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_employee_hire_date';", + "directive": { + "type": "wait", + "message": "Creating index idx_employee_hire_date" + }, + "type": "table.index", + "operation": "create", + "path": "public.employee.idx_employee_hire_date" + } + ] + }, + { + "steps": [ + { + "sql": "ALTER TABLE salary DROP CONSTRAINT salary_emp_no_fkey;", + "type": "table.constraint", + "operation": "drop", + "path": 
"public.salary.salary_emp_no_fkey" + }, + { + "sql": "ALTER TABLE salary\nADD CONSTRAINT salary_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no) ON DELETE CASCADE NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.salary.salary_emp_no_fkey" + }, + { + "sql": "ALTER TABLE salary VALIDATE CONSTRAINT salary_emp_no_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.salary.salary_emp_no_fkey" + } + ] + }, + { + "steps": [ + { + "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_salary_amount ON salary (amount);", + "type": "table.index", + "operation": "create", + "path": "public.salary.idx_salary_amount" + } + ] + }, + { + "steps": [ + { + "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_salary_amount';", + "directive": { + "type": "wait", + "message": "Creating index idx_salary_amount" + }, + "type": "table.index", + "operation": "create", + "path": "public.salary.idx_salary_amount" + } + ] + }, + { + "steps": [ + { + "sql": "ALTER TABLE title DROP CONSTRAINT title_emp_no_fkey;", + "type": "table.constraint", + "operation": "drop", + "path": "public.title.title_emp_no_fkey" + }, + { + "sql": "ALTER TABLE title\nADD CONSTRAINT title_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no) ON DELETE CASCADE NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.title.title_emp_no_fkey" + }, + { + "sql": "ALTER TABLE title VALIDATE CONSTRAINT title_emp_no_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.title.title_emp_no_fkey" + } + ] } ] } - ] + } } diff --git a/testdata/diff/migrate/v3/plan.json b/testdata/diff/migrate/v3/plan.json index 3424d7e4..a750d81d 100644 
--- a/testdata/diff/migrate/v3/plan.json +++ b/testdata/diff/migrate/v3/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "9e37fb057af32b98c899ee94f42f4916403b6ddf361b29beccb9b657b58abc1f" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "9e37fb057af32b98c899ee94f42f4916403b6ddf361b29beccb9b657b58abc1f" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS audit (\n id SERIAL,\n operation text NOT NULL,\n query text,\n user_name text NOT NULL,\n changed_at timestamptz DEFAULT CURRENT_TIMESTAMP,\n CONSTRAINT audit_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.audit" - }, - { - "sql": "CREATE INDEX IF NOT EXISTS idx_audit_changed_at ON audit (changed_at);", - "type": "table.index", - "operation": "create", - "path": "public.audit.idx_audit_changed_at" - }, - { - "sql": "CREATE OR REPLACE FUNCTION log_dml_operations()\nRETURNS trigger\nLANGUAGE plpgsql\nVOLATILE\nAS $$\nBEGIN\n IF (TG_OP = 'INSERT') THEN\n INSERT INTO audit (operation, query, user_name)\n VALUES ('INSERT', current_query(), current_user);\n RETURN NEW;\n ELSIF (TG_OP = 'UPDATE') THEN\n INSERT INTO audit (operation, query, user_name)\n VALUES ('UPDATE', current_query(), current_user);\n RETURN NEW;\n ELSIF (TG_OP = 'DELETE') THEN\n INSERT INTO audit (operation, query, user_name)\n VALUES ('DELETE', current_query(), current_user);\n RETURN OLD;\n END IF;\n RETURN NULL;\nEND;\n$$;", - "type": "function", - "operation": "create", - "path": "public.log_dml_operations" - }, - { - "sql": "CREATE OR REPLACE TRIGGER salary_log_trigger\n AFTER UPDATE OR DELETE ON salary\n FOR EACH ROW\n EXECUTE FUNCTION log_dml_operations();", - "type": "table.trigger", - "operation": "create", - "path": "public.salary.salary_log_trigger" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS audit (\n id SERIAL,\n operation text 
NOT NULL,\n query text,\n user_name text NOT NULL,\n changed_at timestamptz DEFAULT CURRENT_TIMESTAMP,\n CONSTRAINT audit_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.audit" + }, + { + "sql": "CREATE INDEX IF NOT EXISTS idx_audit_changed_at ON audit (changed_at);", + "type": "table.index", + "operation": "create", + "path": "public.audit.idx_audit_changed_at" + }, + { + "sql": "CREATE OR REPLACE FUNCTION log_dml_operations()\nRETURNS trigger\nLANGUAGE plpgsql\nVOLATILE\nAS $$\nBEGIN\n IF (TG_OP = 'INSERT') THEN\n INSERT INTO audit (operation, query, user_name)\n VALUES ('INSERT', current_query(), current_user);\n RETURN NEW;\n ELSIF (TG_OP = 'UPDATE') THEN\n INSERT INTO audit (operation, query, user_name)\n VALUES ('UPDATE', current_query(), current_user);\n RETURN NEW;\n ELSIF (TG_OP = 'DELETE') THEN\n INSERT INTO audit (operation, query, user_name)\n VALUES ('DELETE', current_query(), current_user);\n RETURN OLD;\n END IF;\n RETURN NULL;\nEND;\n$$;", + "type": "function", + "operation": "create", + "path": "public.log_dml_operations" + }, + { + "sql": "CREATE OR REPLACE TRIGGER salary_log_trigger\n AFTER UPDATE OR DELETE ON salary\n FOR EACH ROW\n EXECUTE FUNCTION log_dml_operations();", + "type": "table.trigger", + "operation": "create", + "path": "public.salary.salary_log_trigger" + } + ] } ] } - ] + } } diff --git a/testdata/diff/migrate/v4/plan.json b/testdata/diff/migrate/v4/plan.json index f9bcbca1..972cb66e 100644 --- a/testdata/diff/migrate/v4/plan.json +++ b/testdata/diff/migrate/v4/plan.json @@ -2,107 +2,111 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "bdbae04ce681eb5be9912131759a7b327273fb07c3d1ad5648fd160cda572454" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "bdbae04ce681eb5be9912131759a7b327273fb07c3d1ad5648fd160cda572454" + }, + "groups": [ { - "sql": "CREATE OR 
REPLACE PROCEDURE simple_salary_update(\n IN p_emp_no integer,\n IN p_amount integer\n)\nLANGUAGE plpgsql\nAS $$\nBEGIN\n -- Simple update of salary amount\n UPDATE salary \n SET amount = p_amount \n WHERE emp_no = p_emp_no \n AND to_date = '9999-01-01';\n \n RAISE NOTICE 'Updated salary for employee % to $%', p_emp_no, p_amount;\nEND;\n$$;", - "type": "procedure", - "operation": "create", - "path": "public.simple_salary_update" + "steps": [ + { + "sql": "CREATE OR REPLACE PROCEDURE simple_salary_update(\n IN p_emp_no integer,\n IN p_amount integer\n)\nLANGUAGE plpgsql\nAS $$\nBEGIN\n -- Simple update of salary amount\n UPDATE salary \n SET amount = p_amount \n WHERE emp_no = p_emp_no \n AND to_date = '9999-01-01';\n \n RAISE NOTICE 'Updated salary for employee % to $%', p_emp_no, p_amount;\nEND;\n$$;", + "type": "procedure", + "operation": "create", + "path": "public.simple_salary_update" + }, + { + "sql": "CREATE OR REPLACE VIEW dept_emp_latest_date AS\n SELECT emp_no,\n max(from_date) AS from_date,\n max(to_date) AS to_date\n FROM dept_emp\n GROUP BY emp_no;", + "type": "view", + "operation": "create", + "path": "public.dept_emp_latest_date" + }, + { + "sql": "CREATE OR REPLACE VIEW current_dept_emp AS\n SELECT l.emp_no,\n d.dept_no,\n l.from_date,\n l.to_date\n FROM dept_emp d\n JOIN dept_emp_latest_date l ON d.emp_no = l.emp_no AND d.from_date = l.from_date AND l.to_date = d.to_date;", + "type": "view", + "operation": "create", + "path": "public.current_dept_emp" + }, + { + "sql": "CREATE MATERIALIZED VIEW IF NOT EXISTS employee_salary_summary AS\n SELECT d.dept_no,\n d.dept_name,\n count(DISTINCT e.emp_no) AS employee_count,\n avg(s.amount) AS avg_salary,\n max(s.amount) AS max_salary,\n min(s.amount) AS min_salary\n FROM employee e\n JOIN dept_emp de ON e.emp_no = de.emp_no\n JOIN department d ON de.dept_no = d.dept_no\n JOIN salary s ON e.emp_no = s.emp_no\n WHERE de.to_date = '9999-01-01'::date AND s.to_date = '9999-01-01'::date\n GROUP BY d.dept_no, 
d.dept_name;", + "type": "materialized_view", + "operation": "create", + "path": "public.employee_salary_summary" + }, + { + "sql": "CREATE INDEX IF NOT EXISTS idx_emp_salary_summary_dept_no ON employee_salary_summary (dept_no);", + "type": "materialized_view.index", + "operation": "create", + "path": "public.employee_salary_summary.idx_emp_salary_summary_dept_no" + } + ] }, { - "sql": "CREATE OR REPLACE VIEW dept_emp_latest_date AS\n SELECT emp_no,\n max(from_date) AS from_date,\n max(to_date) AS to_date\n FROM dept_emp\n GROUP BY emp_no;", - "type": "view", - "operation": "create", - "path": "public.dept_emp_latest_date" + "steps": [ + { + "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_audit_operation ON audit (operation);", + "type": "table.index", + "operation": "create", + "path": "public.audit.idx_audit_operation" + } + ] }, { - "sql": "CREATE OR REPLACE VIEW current_dept_emp AS\n SELECT l.emp_no,\n d.dept_no,\n l.from_date,\n l.to_date\n FROM dept_emp d\n JOIN dept_emp_latest_date l ON d.emp_no = l.emp_no AND d.from_date = l.from_date AND l.to_date = d.to_date;", - "type": "view", - "operation": "create", - "path": "public.current_dept_emp" + "steps": [ + { + "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_audit_operation';", + "directive": { + "type": "wait", + "message": "Creating index idx_audit_operation" + }, + "type": "table.index", + "operation": "create", + "path": "public.audit.idx_audit_operation" + } + ] }, { - "sql": "CREATE MATERIALIZED VIEW IF NOT EXISTS employee_salary_summary AS\n SELECT d.dept_no,\n d.dept_name,\n count(DISTINCT e.emp_no) AS employee_count,\n avg(s.amount) AS avg_salary,\n max(s.amount) AS max_salary,\n min(s.amount) AS min_salary\n FROM employee e\n JOIN 
dept_emp de ON e.emp_no = de.emp_no\n JOIN department d ON de.dept_no = d.dept_no\n JOIN salary s ON e.emp_no = s.emp_no\n WHERE de.to_date = '9999-01-01'::date AND s.to_date = '9999-01-01'::date\n GROUP BY d.dept_no, d.dept_name;", - "type": "materialized_view", - "operation": "create", - "path": "public.employee_salary_summary" + "steps": [ + { + "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_audit_username ON audit (user_name);", + "type": "table.index", + "operation": "create", + "path": "public.audit.idx_audit_username" + } + ] }, { - "sql": "CREATE INDEX IF NOT EXISTS idx_emp_salary_summary_dept_no ON employee_salary_summary (dept_no);", - "type": "materialized_view.index", - "operation": "create", - "path": "public.employee_salary_summary.idx_emp_salary_summary_dept_no" - } - ] - }, - { - "steps": [ - { - "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_audit_operation ON audit (operation);", - "type": "table.index", - "operation": "create", - "path": "public.audit.idx_audit_operation" - } - ] - }, - { - "steps": [ - { - "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_audit_operation';", - "directive": { - "type": "wait", - "message": "Creating index idx_audit_operation" - }, - "type": "table.index", - "operation": "create", - "path": "public.audit.idx_audit_operation" - } - ] - }, - { - "steps": [ - { - "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_audit_username ON audit (user_name);", - "type": "table.index", - "operation": "create", - "path": "public.audit.idx_audit_username" - } - ] - }, - { - "steps": [ - { - "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM 
pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_audit_username';", - "directive": { - "type": "wait", - "message": "Creating index idx_audit_username" - }, - "type": "table.index", - "operation": "create", - "path": "public.audit.idx_audit_username" - } - ] - }, - { - "steps": [ - { - "sql": "CREATE OR REPLACE TRIGGER salary_log_trigger\n AFTER UPDATE OR DELETE ON salary\n FOR EACH ROW\n EXECUTE FUNCTION log_dml_operations('payroll', 'high');", - "type": "table.trigger", - "operation": "alter", - "path": "public.salary.salary_log_trigger" + "steps": [ + { + "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_audit_username';", + "directive": { + "type": "wait", + "message": "Creating index idx_audit_username" + }, + "type": "table.index", + "operation": "create", + "path": "public.audit.idx_audit_username" + } + ] }, { - "sql": "CREATE OR REPLACE FUNCTION log_dml_operations()\nRETURNS trigger\nLANGUAGE plpgsql\nVOLATILE\nAS $$\nDECLARE\n table_category TEXT;\n log_level TEXT;\nBEGIN\n -- Get arguments passed from trigger (if any)\n -- TG_ARGV[0] is the first argument, TG_ARGV[1] is the second\n table_category := COALESCE(TG_ARGV[0], 'default');\n log_level := COALESCE(TG_ARGV[1], 'standard');\n\n IF (TG_OP = 'INSERT') THEN\n INSERT INTO audit (operation, query, user_name)\n VALUES (\n 'INSERT [' || table_category || ':' || log_level || ']',\n current_query(),\n current_user\n );\n RETURN NEW;\n ELSIF (TG_OP = 'UPDATE') THEN\n INSERT INTO audit (operation, query, user_name)\n VALUES (\n 'UPDATE [' || table_category || ':' || log_level || ']',\n current_query(),\n current_user\n );\n RETURN NEW;\n ELSIF (TG_OP 
= 'DELETE') THEN\n INSERT INTO audit (operation, query, user_name)\n VALUES (\n 'DELETE [' || table_category || ':' || log_level || ']',\n current_query(),\n current_user\n );\n RETURN OLD;\n END IF;\n RETURN NULL;\nEND;\n$$;", - "type": "function", - "operation": "alter", - "path": "public.log_dml_operations" + "steps": [ + { + "sql": "CREATE OR REPLACE TRIGGER salary_log_trigger\n AFTER UPDATE OR DELETE ON salary\n FOR EACH ROW\n EXECUTE FUNCTION log_dml_operations('payroll', 'high');", + "type": "table.trigger", + "operation": "alter", + "path": "public.salary.salary_log_trigger" + }, + { + "sql": "CREATE OR REPLACE FUNCTION log_dml_operations()\nRETURNS trigger\nLANGUAGE plpgsql\nVOLATILE\nAS $$\nDECLARE\n table_category TEXT;\n log_level TEXT;\nBEGIN\n -- Get arguments passed from trigger (if any)\n -- TG_ARGV[0] is the first argument, TG_ARGV[1] is the second\n table_category := COALESCE(TG_ARGV[0], 'default');\n log_level := COALESCE(TG_ARGV[1], 'standard');\n\n IF (TG_OP = 'INSERT') THEN\n INSERT INTO audit (operation, query, user_name)\n VALUES (\n 'INSERT [' || table_category || ':' || log_level || ']',\n current_query(),\n current_user\n );\n RETURN NEW;\n ELSIF (TG_OP = 'UPDATE') THEN\n INSERT INTO audit (operation, query, user_name)\n VALUES (\n 'UPDATE [' || table_category || ':' || log_level || ']',\n current_query(),\n current_user\n );\n RETURN NEW;\n ELSIF (TG_OP = 'DELETE') THEN\n INSERT INTO audit (operation, query, user_name)\n VALUES (\n 'DELETE [' || table_category || ':' || log_level || ']',\n current_query(),\n current_user\n );\n RETURN OLD;\n END IF;\n RETURN NULL;\nEND;\n$$;", + "type": "function", + "operation": "alter", + "path": "public.log_dml_operations" + } + ] } ] } - ] + } } diff --git a/testdata/diff/migrate/v5/plan.json b/testdata/diff/migrate/v5/plan.json index 7e095f2a..77172073 100644 --- a/testdata/diff/migrate/v5/plan.json +++ b/testdata/diff/migrate/v5/plan.json @@ -2,85 +2,89 @@ "version": "1.0.0", "pgschema_version": 
"1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "1bdbd0c7d1e5902ba4bab0e98b09d3ec89efaa6286d785c96c8803befd111941" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "1bdbd0c7d1e5902ba4bab0e98b09d3ec89efaa6286d785c96c8803befd111941" + }, + "groups": [ { - "sql": "DROP PROCEDURE IF EXISTS simple_salary_update(IN p_emp_no integer, IN p_amount integer);", - "type": "procedure", - "operation": "drop", - "path": "public.simple_salary_update" - }, - { - "sql": "DROP TABLE IF EXISTS title CASCADE;", - "type": "table", - "operation": "drop", - "path": "public.title" - }, - { - "sql": "DROP TABLE IF EXISTS dept_manager CASCADE;", - "type": "table", - "operation": "drop", - "path": "public.dept_manager" - }, - { - "sql": "CREATE TYPE employee_status AS ENUM (\n 'active',\n 'inactive',\n 'terminated'\n);", - "type": "type", - "operation": "create", - "path": "public.employee_status" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS employee_status_log (\n id SERIAL,\n emp_no integer NOT NULL,\n status employee_status NOT NULL,\n effective_date date DEFAULT CURRENT_DATE NOT NULL,\n notes text,\n CONSTRAINT employee_status_log_pkey PRIMARY KEY (id),\n CONSTRAINT employee_status_log_emp_no_fkey FOREIGN KEY (emp_no) REFERENCES employee (emp_no) ON DELETE CASCADE\n);", - "type": "table", - "operation": "create", - "path": "public.employee_status_log" - }, - { - "sql": "CREATE INDEX IF NOT EXISTS idx_employee_status_log_effective_date ON employee_status_log (effective_date);", - "type": "table.index", - "operation": "create", - "path": "public.employee_status_log.idx_employee_status_log_effective_date" - }, - { - "sql": "CREATE INDEX IF NOT EXISTS idx_employee_status_log_emp_no ON employee_status_log (emp_no);", - "type": "table.index", - "operation": "create", - "path": "public.employee_status_log.idx_employee_status_log_emp_no" - }, - { - "sql": "CREATE OR REPLACE TRIGGER 
employee_status_log_trigger\n AFTER INSERT OR UPDATE ON employee_status_log\n FOR EACH ROW\n EXECUTE FUNCTION log_dml_operations('hr', 'medium');", - "type": "table.trigger", - "operation": "create", - "path": "public.employee_status_log.employee_status_log_trigger" - }, - { - "sql": "ALTER TABLE audit ENABLE ROW LEVEL SECURITY;", - "type": "table.rls", - "operation": "create", - "path": "public.audit" - }, - { - "sql": "CREATE POLICY audit_insert_system ON audit FOR INSERT TO PUBLIC WITH CHECK (true);", - "type": "table.policy", - "operation": "create", - "path": "public.audit.audit_insert_system" - }, - { - "sql": "CREATE POLICY audit_user_isolation ON audit TO PUBLIC USING (user_name = CURRENT_USER);", - "type": "table.policy", - "operation": "create", - "path": "public.audit.audit_user_isolation" - }, - { - "sql": "ALTER TABLE employee ADD COLUMN status employee_status DEFAULT 'active'::employee_status NOT NULL;", - "type": "table.column", - "operation": "create", - "path": "public.employee.status" + "steps": [ + { + "sql": "DROP PROCEDURE IF EXISTS simple_salary_update(IN p_emp_no integer, IN p_amount integer);", + "type": "procedure", + "operation": "drop", + "path": "public.simple_salary_update" + }, + { + "sql": "DROP TABLE IF EXISTS title CASCADE;", + "type": "table", + "operation": "drop", + "path": "public.title" + }, + { + "sql": "DROP TABLE IF EXISTS dept_manager CASCADE;", + "type": "table", + "operation": "drop", + "path": "public.dept_manager" + }, + { + "sql": "CREATE TYPE employee_status AS ENUM (\n 'active',\n 'inactive',\n 'terminated'\n);", + "type": "type", + "operation": "create", + "path": "public.employee_status" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS employee_status_log (\n id SERIAL,\n emp_no integer NOT NULL,\n status employee_status NOT NULL,\n effective_date date DEFAULT CURRENT_DATE NOT NULL,\n notes text,\n CONSTRAINT employee_status_log_pkey PRIMARY KEY (id),\n CONSTRAINT employee_status_log_emp_no_fkey FOREIGN KEY (emp_no) 
REFERENCES employee (emp_no) ON DELETE CASCADE\n);", + "type": "table", + "operation": "create", + "path": "public.employee_status_log" + }, + { + "sql": "CREATE INDEX IF NOT EXISTS idx_employee_status_log_effective_date ON employee_status_log (effective_date);", + "type": "table.index", + "operation": "create", + "path": "public.employee_status_log.idx_employee_status_log_effective_date" + }, + { + "sql": "CREATE INDEX IF NOT EXISTS idx_employee_status_log_emp_no ON employee_status_log (emp_no);", + "type": "table.index", + "operation": "create", + "path": "public.employee_status_log.idx_employee_status_log_emp_no" + }, + { + "sql": "CREATE OR REPLACE TRIGGER employee_status_log_trigger\n AFTER INSERT OR UPDATE ON employee_status_log\n FOR EACH ROW\n EXECUTE FUNCTION log_dml_operations('hr', 'medium');", + "type": "table.trigger", + "operation": "create", + "path": "public.employee_status_log.employee_status_log_trigger" + }, + { + "sql": "ALTER TABLE audit ENABLE ROW LEVEL SECURITY;", + "type": "table.rls", + "operation": "create", + "path": "public.audit" + }, + { + "sql": "CREATE POLICY audit_insert_system ON audit FOR INSERT TO PUBLIC WITH CHECK (true);", + "type": "table.policy", + "operation": "create", + "path": "public.audit.audit_insert_system" + }, + { + "sql": "CREATE POLICY audit_user_isolation ON audit TO PUBLIC USING (user_name = CURRENT_USER);", + "type": "table.policy", + "operation": "create", + "path": "public.audit.audit_user_isolation" + }, + { + "sql": "ALTER TABLE employee ADD COLUMN status employee_status DEFAULT 'active'::employee_status NOT NULL;", + "type": "table.column", + "operation": "create", + "path": "public.employee.status" + } + ] } ] } - ] + } } diff --git a/testdata/diff/online/add_composite_index/plan.json b/testdata/diff/online/add_composite_index/plan.json index fbf9120c..ce1e08d0 100644 --- a/testdata/diff/online/add_composite_index/plan.json +++ b/testdata/diff/online/add_composite_index/plan.json @@ -2,33 +2,37 @@ 
"version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "4174f5cda975543aad9cf30ba2b3a2bd5ace1e79185c119db587df0795195c33" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "4174f5cda975543aad9cf30ba2b3a2bd5ace1e79185c119db587df0795195c33" + }, + "groups": [ { - "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_email_status ON users (email, status DESC);", - "type": "table.index", - "operation": "create", - "path": "public.users.idx_users_email_status" - } - ] - }, - { - "steps": [ + "steps": [ + { + "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_email_status ON users (email, status DESC);", + "type": "table.index", + "operation": "create", + "path": "public.users.idx_users_email_status" + } + ] + }, { - "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_users_email_status';", - "directive": { - "type": "wait", - "message": "Creating index idx_users_email_status" - }, - "type": "table.index", - "operation": "create", - "path": "public.users.idx_users_email_status" + "steps": [ + { + "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_users_email_status';", + "directive": { + "type": "wait", + "message": "Creating index idx_users_email_status" + }, + "type": "table.index", + "operation": "create", + "path": "public.users.idx_users_email_status" + } + ] } ] } - ] + } } diff --git 
a/testdata/diff/online/add_constraint/plan.json b/testdata/diff/online/add_constraint/plan.json index b696793e..10b13b70 100644 --- a/testdata/diff/online/add_constraint/plan.json +++ b/testdata/diff/online/add_constraint/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "a3bcb075d28a8e3584a89f8644ecdc54b7f0861216ae959392e9078128545620" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "a3bcb075d28a8e3584a89f8644ecdc54b7f0861216ae959392e9078128545620" + }, + "groups": [ { - "sql": "ALTER TABLE orders\nADD CONSTRAINT check_amount_positive CHECK (amount > 0::numeric) NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.orders.check_amount_positive" - }, - { - "sql": "ALTER TABLE orders VALIDATE CONSTRAINT check_amount_positive;", - "type": "table.constraint", - "operation": "create", - "path": "public.orders.check_amount_positive" - }, - { - "sql": "ALTER TABLE orders\nADD CONSTRAINT check_valid_status CHECK (status::text IN ('pending'::character varying, 'shipped'::character varying, 'delivered'::character varying)) NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.orders.check_valid_status" - }, - { - "sql": "ALTER TABLE orders VALIDATE CONSTRAINT check_valid_status;", - "type": "table.constraint", - "operation": "create", - "path": "public.orders.check_valid_status" + "steps": [ + { + "sql": "ALTER TABLE orders\nADD CONSTRAINT check_amount_positive CHECK (amount > 0::numeric) NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.orders.check_amount_positive" + }, + { + "sql": "ALTER TABLE orders VALIDATE CONSTRAINT check_amount_positive;", + "type": "table.constraint", + "operation": "create", + "path": "public.orders.check_amount_positive" + }, + { + "sql": "ALTER TABLE orders\nADD CONSTRAINT check_valid_status 
CHECK (status::text IN ('pending'::character varying, 'shipped'::character varying, 'delivered'::character varying)) NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.orders.check_valid_status" + }, + { + "sql": "ALTER TABLE orders VALIDATE CONSTRAINT check_valid_status;", + "type": "table.constraint", + "operation": "create", + "path": "public.orders.check_valid_status" + } + ] } ] } - ] + } } diff --git a/testdata/diff/online/add_fk/plan.json b/testdata/diff/online/add_fk/plan.json index e70c4f3f..00106ed1 100644 --- a/testdata/diff/online/add_fk/plan.json +++ b/testdata/diff/online/add_fk/plan.json @@ -2,43 +2,47 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "e67f1222093d3d30328f364981301ac4aa2c0c7678df5494fbf07bd3a01096ff" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "e67f1222093d3d30328f364981301ac4aa2c0c7678df5494fbf07bd3a01096ff" + }, + "groups": [ { - "sql": "ALTER TABLE z_companies\nADD CONSTRAINT z_companies_company_id_name_key UNIQUE (company_id, company_name);", - "type": "table.constraint", - "operation": "create", - "path": "public.z_companies.z_companies_company_id_name_key" - }, - { - "sql": "ALTER TABLE a_employees\nADD CONSTRAINT a_employees_company_fkey FOREIGN KEY (tenant_id, company_id) REFERENCES z_companies (tenant_id, company_id) ON UPDATE CASCADE ON DELETE RESTRICT DEFERRABLE NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.a_employees.a_employees_company_fkey" - }, - { - "sql": "ALTER TABLE a_employees VALIDATE CONSTRAINT a_employees_company_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.a_employees.a_employees_company_fkey" - }, - { - "sql": "ALTER TABLE a_employees\nADD CONSTRAINT a_employees_company_name_fkey FOREIGN KEY (company_id, company_name) REFERENCES z_companies (company_id, company_name) 
ON UPDATE CASCADE ON DELETE RESTRICT DEFERRABLE NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.a_employees.a_employees_company_name_fkey" - }, - { - "sql": "ALTER TABLE a_employees VALIDATE CONSTRAINT a_employees_company_name_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.a_employees.a_employees_company_name_fkey" + "steps": [ + { + "sql": "ALTER TABLE z_companies\nADD CONSTRAINT z_companies_company_id_name_key UNIQUE (company_id, company_name);", + "type": "table.constraint", + "operation": "create", + "path": "public.z_companies.z_companies_company_id_name_key" + }, + { + "sql": "ALTER TABLE a_employees\nADD CONSTRAINT a_employees_company_fkey FOREIGN KEY (tenant_id, company_id) REFERENCES z_companies (tenant_id, company_id) ON UPDATE CASCADE ON DELETE RESTRICT DEFERRABLE NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.a_employees.a_employees_company_fkey" + }, + { + "sql": "ALTER TABLE a_employees VALIDATE CONSTRAINT a_employees_company_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.a_employees.a_employees_company_fkey" + }, + { + "sql": "ALTER TABLE a_employees\nADD CONSTRAINT a_employees_company_name_fkey FOREIGN KEY (company_id, company_name) REFERENCES z_companies (company_id, company_name) ON UPDATE CASCADE ON DELETE RESTRICT DEFERRABLE NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.a_employees.a_employees_company_name_fkey" + }, + { + "sql": "ALTER TABLE a_employees VALIDATE CONSTRAINT a_employees_company_name_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.a_employees.a_employees_company_name_fkey" + } + ] } ] } - ] + } } diff --git a/testdata/diff/online/add_functional_index/plan.json b/testdata/diff/online/add_functional_index/plan.json index 7d8d96a6..2a4036a0 100644 --- a/testdata/diff/online/add_functional_index/plan.json +++ 
b/testdata/diff/online/add_functional_index/plan.json @@ -2,33 +2,37 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "406ccedbcdd9e1ddf84ea76e77ab7f801ff082a60dbf3067264ea6e3510885dc" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "406ccedbcdd9e1ddf84ea76e77ab7f801ff082a60dbf3067264ea6e3510885dc" + }, + "groups": [ { - "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_fullname_search ON users (lower(first_name), lower(last_name), lower(email));", - "type": "table.index", - "operation": "create", - "path": "public.users.idx_users_fullname_search" - } - ] - }, - { - "steps": [ + "steps": [ + { + "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_fullname_search ON users (lower(first_name), lower(last_name), lower(email));", + "type": "table.index", + "operation": "create", + "path": "public.users.idx_users_fullname_search" + } + ] + }, { - "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_users_fullname_search';", - "directive": { - "type": "wait", - "message": "Creating index idx_users_fullname_search" - }, - "type": "table.index", - "operation": "create", - "path": "public.users.idx_users_fullname_search" + "steps": [ + { + "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_users_fullname_search';", + "directive": { + "type": "wait", + "message": "Creating index idx_users_fullname_search" + }, + 
"type": "table.index", + "operation": "create", + "path": "public.users.idx_users_fullname_search" + } + ] } ] } - ] + } } diff --git a/testdata/diff/online/add_materialized_view_index/plan.json b/testdata/diff/online/add_materialized_view_index/plan.json index bdfa3ab7..df5a0bdb 100644 --- a/testdata/diff/online/add_materialized_view_index/plan.json +++ b/testdata/diff/online/add_materialized_view_index/plan.json @@ -2,33 +2,37 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "dfe0bcd1267c9c0da4dd48acf8336d699ccec6529b706ed8786d76fcf2c8f43a" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "dfe0bcd1267c9c0da4dd48acf8336d699ccec6529b706ed8786d76fcf2c8f43a" + }, + "groups": [ { - "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_user_summary_created_at ON user_summary (created_at);", - "type": "materialized_view.index", - "operation": "create", - "path": "public.user_summary.idx_user_summary_created_at" - } - ] - }, - { - "steps": [ + "steps": [ + { + "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_user_summary_created_at ON user_summary (created_at);", + "type": "materialized_view.index", + "operation": "create", + "path": "public.user_summary.idx_user_summary_created_at" + } + ] + }, { - "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_user_summary_created_at';", - "directive": { - "type": "wait", - "message": "Creating index idx_user_summary_created_at" - }, - "type": "materialized_view.index", - "operation": "create", - "path": "public.user_summary.idx_user_summary_created_at" + "steps": [ + { + "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN 
p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_user_summary_created_at';", + "directive": { + "type": "wait", + "message": "Creating index idx_user_summary_created_at" + }, + "type": "materialized_view.index", + "operation": "create", + "path": "public.user_summary.idx_user_summary_created_at" + } + ] } ] } - ] + } } diff --git a/testdata/diff/online/add_not_null/plan.json b/testdata/diff/online/add_not_null/plan.json index 0266bca0..9dedde82 100644 --- a/testdata/diff/online/add_not_null/plan.json +++ b/testdata/diff/online/add_not_null/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "653938aaa4f39adc46e8b751c1d67ef7229a57a407833953c0b0176e33e70a58" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "653938aaa4f39adc46e8b751c1d67ef7229a57a407833953c0b0176e33e70a58" + }, + "groups": [ { - "sql": "ALTER TABLE users ADD CONSTRAINT email_not_null CHECK (email IS NOT NULL) NOT VALID;", - "type": "table.column", - "operation": "alter", - "path": "public.users.email" - }, - { - "sql": "ALTER TABLE users VALIDATE CONSTRAINT email_not_null;", - "type": "table.column", - "operation": "alter", - "path": "public.users.email" - }, - { - "sql": "ALTER TABLE users ALTER COLUMN email SET NOT NULL;", - "type": "table.column", - "operation": "alter", - "path": "public.users.email" - }, - { - "sql": "ALTER TABLE users DROP CONSTRAINT email_not_null;", - "type": "table.column", - "operation": "alter", - "path": "public.users.email" + "steps": [ + { + "sql": "ALTER TABLE users ADD CONSTRAINT email_not_null CHECK (email IS NOT NULL) NOT VALID;", + "type": "table.column", + "operation": "alter", + "path": "public.users.email" + }, + { + "sql": 
"ALTER TABLE users VALIDATE CONSTRAINT email_not_null;", + "type": "table.column", + "operation": "alter", + "path": "public.users.email" + }, + { + "sql": "ALTER TABLE users ALTER COLUMN email SET NOT NULL;", + "type": "table.column", + "operation": "alter", + "path": "public.users.email" + }, + { + "sql": "ALTER TABLE users DROP CONSTRAINT email_not_null;", + "type": "table.column", + "operation": "alter", + "path": "public.users.email" + } + ] } ] } - ] + } } diff --git a/testdata/diff/online/add_partial_index/plan.json b/testdata/diff/online/add_partial_index/plan.json index 7bf557db..69622945 100644 --- a/testdata/diff/online/add_partial_index/plan.json +++ b/testdata/diff/online/add_partial_index/plan.json @@ -2,33 +2,37 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "d3818157c6994cfe669982e5174d7e36ada8c106bb8d340c392c7cbe64ebc135" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "d3818157c6994cfe669982e5174d7e36ada8c106bb8d340c392c7cbe64ebc135" + }, + "groups": [ { - "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_active_orders_customer_date ON orders (customer_id, order_date DESC, total_amount) WHERE (status IN ('pending'::public.order_status, 'processing'::public.order_status, 'confirmed'::public.order_status)) AND (is_active IS NOT NULL);", - "type": "table.index", - "operation": "create", - "path": "public.orders.idx_active_orders_customer_date" - } - ] - }, - { - "steps": [ + "steps": [ + { + "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_active_orders_customer_date ON orders (customer_id, order_date DESC, total_amount) WHERE (status IN ('pending'::public.order_status, 'processing'::public.order_status, 'confirmed'::public.order_status)) AND (is_active IS NOT NULL);", + "type": "table.index", + "operation": "create", + "path": "public.orders.idx_active_orders_customer_date" + } + ] + }, { - "sql": "SELECT \n 
COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_active_orders_customer_date';", - "directive": { - "type": "wait", - "message": "Creating index idx_active_orders_customer_date" - }, - "type": "table.index", - "operation": "create", - "path": "public.orders.idx_active_orders_customer_date" + "steps": [ + { + "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_active_orders_customer_date';", + "directive": { + "type": "wait", + "message": "Creating index idx_active_orders_customer_date" + }, + "type": "table.index", + "operation": "create", + "path": "public.orders.idx_active_orders_customer_date" + } + ] } ] } - ] + } } diff --git a/testdata/diff/online/add_unique_multi_column_index/plan.json b/testdata/diff/online/add_unique_multi_column_index/plan.json index 7b00a925..910bb281 100644 --- a/testdata/diff/online/add_unique_multi_column_index/plan.json +++ b/testdata/diff/online/add_unique_multi_column_index/plan.json @@ -2,33 +2,37 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "303f8c6f8d737bc168f33334810330f44f03a768358452ea1150dcffb9ccfdfa" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "303f8c6f8d737bc168f33334810330f44f03a768358452ea1150dcffb9ccfdfa" + }, + "groups": [ { - "sql": "CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS idx_unique_email_org ON user_profiles (email, organization_id) WHERE (deleted_at IS NULL);", - "type": 
"table.index", - "operation": "create", - "path": "public.user_profiles.idx_unique_email_org" - } - ] - }, - { - "steps": [ + "steps": [ + { + "sql": "CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS idx_unique_email_org ON user_profiles (email, organization_id) WHERE (deleted_at IS NULL);", + "type": "table.index", + "operation": "create", + "path": "public.user_profiles.idx_unique_email_org" + } + ] + }, { - "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_unique_email_org';", - "directive": { - "type": "wait", - "message": "Creating index idx_unique_email_org" - }, - "type": "table.index", - "operation": "create", - "path": "public.user_profiles.idx_unique_email_org" + "steps": [ + { + "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_unique_email_org';", + "directive": { + "type": "wait", + "message": "Creating index idx_unique_email_org" + }, + "type": "table.index", + "operation": "create", + "path": "public.user_profiles.idx_unique_email_org" + } + ] } ] } - ] + } } diff --git a/testdata/diff/online/alter_composite_index/plan.json b/testdata/diff/online/alter_composite_index/plan.json index b5075f4c..9a201d76 100644 --- a/testdata/diff/online/alter_composite_index/plan.json +++ b/testdata/diff/online/alter_composite_index/plan.json @@ -2,89 +2,93 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "cd08bb1240e90143c0edfd9caf0adf02d37d2542afd705a4274eb9b287e180a4" - 
}, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "cd08bb1240e90143c0edfd9caf0adf02d37d2542afd705a4274eb9b287e180a4" + }, + "groups": [ { - "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_email_pgschema_new ON users (email, status);", - "type": "table.index", - "operation": "alter", - "path": "public.users.idx_users_email" - } - ] - }, - { - "steps": [ - { - "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_users_email_pgschema_new';", - "directive": { - "type": "wait", - "message": "Creating index idx_users_email_pgschema_new" - }, - "type": "table.index", - "operation": "alter", - "path": "public.users.idx_users_email" - } - ] - }, - { - "steps": [ - { - "sql": "DROP INDEX idx_users_email;", - "type": "table.index", - "operation": "alter", - "path": "public.users.idx_users_email" + "steps": [ + { + "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_email_pgschema_new ON users (email, status);", + "type": "table.index", + "operation": "alter", + "path": "public.users.idx_users_email" + } + ] }, { - "sql": "ALTER INDEX idx_users_email_pgschema_new RENAME TO idx_users_email;", - "type": "table.index", - "operation": "alter", - "path": "public.users.idx_users_email" - } - ] - }, - { - "steps": [ + "steps": [ + { + "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_users_email_pgschema_new';", + "directive": { + "type": "wait", + "message": "Creating index 
idx_users_email_pgschema_new" + }, + "type": "table.index", + "operation": "alter", + "path": "public.users.idx_users_email" + } + ] + }, { - "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_status_pgschema_new ON users (status, department);", - "type": "table.index", - "operation": "alter", - "path": "public.users.idx_users_status" - } - ] - }, - { - "steps": [ + "steps": [ + { + "sql": "DROP INDEX idx_users_email;", + "type": "table.index", + "operation": "alter", + "path": "public.users.idx_users_email" + }, + { + "sql": "ALTER INDEX idx_users_email_pgschema_new RENAME TO idx_users_email;", + "type": "table.index", + "operation": "alter", + "path": "public.users.idx_users_email" + } + ] + }, { - "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_users_status_pgschema_new';", - "directive": { - "type": "wait", - "message": "Creating index idx_users_status_pgschema_new" - }, - "type": "table.index", - "operation": "alter", - "path": "public.users.idx_users_status" - } - ] - }, - { - "steps": [ + "steps": [ + { + "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_status_pgschema_new ON users (status, department);", + "type": "table.index", + "operation": "alter", + "path": "public.users.idx_users_status" + } + ] + }, { - "sql": "DROP INDEX idx_users_status;", - "type": "table.index", - "operation": "alter", - "path": "public.users.idx_users_status" + "steps": [ + { + "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 
'idx_users_status_pgschema_new';", + "directive": { + "type": "wait", + "message": "Creating index idx_users_status_pgschema_new" + }, + "type": "table.index", + "operation": "alter", + "path": "public.users.idx_users_status" + } + ] }, { - "sql": "ALTER INDEX idx_users_status_pgschema_new RENAME TO idx_users_status;", - "type": "table.index", - "operation": "alter", - "path": "public.users.idx_users_status" + "steps": [ + { + "sql": "DROP INDEX idx_users_status;", + "type": "table.index", + "operation": "alter", + "path": "public.users.idx_users_status" + }, + { + "sql": "ALTER INDEX idx_users_status_pgschema_new RENAME TO idx_users_status;", + "type": "table.index", + "operation": "alter", + "path": "public.users.idx_users_status" + } + ] } ] } - ] + } } diff --git a/testdata/diff/online/alter_constraint/plan.json b/testdata/diff/online/alter_constraint/plan.json index dfb9fb19..d88074c6 100644 --- a/testdata/diff/online/alter_constraint/plan.json +++ b/testdata/diff/online/alter_constraint/plan.json @@ -2,31 +2,35 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "0af470b5f74a5e9c2fc3f750512d275cb61cde0a9c61eb1ab69f2e505c4cb3fd" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "0af470b5f74a5e9c2fc3f750512d275cb61cde0a9c61eb1ab69f2e505c4cb3fd" + }, + "groups": [ { - "sql": "ALTER TABLE orders DROP CONSTRAINT check_amount_positive;", - "type": "table.constraint", - "operation": "drop", - "path": "public.orders.check_amount_positive" - }, - { - "sql": "ALTER TABLE orders\nADD CONSTRAINT check_amount_positive CHECK (amount > 0::numeric) NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.orders.check_amount_positive" - }, - { - "sql": "ALTER TABLE orders VALIDATE CONSTRAINT check_amount_positive;", - "type": "table.constraint", - "operation": "create", - "path": "public.orders.check_amount_positive" + 
"steps": [ + { + "sql": "ALTER TABLE orders DROP CONSTRAINT check_amount_positive;", + "type": "table.constraint", + "operation": "drop", + "path": "public.orders.check_amount_positive" + }, + { + "sql": "ALTER TABLE orders\nADD CONSTRAINT check_amount_positive CHECK (amount > 0::numeric) NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.orders.check_amount_positive" + }, + { + "sql": "ALTER TABLE orders VALIDATE CONSTRAINT check_amount_positive;", + "type": "table.constraint", + "operation": "create", + "path": "public.orders.check_amount_positive" + } + ] } ] } - ] + } } diff --git a/testdata/diff/online/alter_fk/plan.json b/testdata/diff/online/alter_fk/plan.json index 174a5552..95264d01 100644 --- a/testdata/diff/online/alter_fk/plan.json +++ b/testdata/diff/online/alter_fk/plan.json @@ -2,31 +2,35 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "eb60afcd651f636a432f85e7ec5571c6f9074518bc06d3a5fbc87df688e534ca" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "eb60afcd651f636a432f85e7ec5571c6f9074518bc06d3a5fbc87df688e534ca" + }, + "groups": [ { - "sql": "ALTER TABLE employees DROP CONSTRAINT employees_company_fkey;", - "type": "table.constraint", - "operation": "drop", - "path": "public.employees.employees_company_fkey" - }, - { - "sql": "ALTER TABLE employees\nADD CONSTRAINT employees_company_fkey FOREIGN KEY (tenant_id, company_id) REFERENCES companies (tenant_id, company_id) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE NOT VALID;", - "type": "table.constraint", - "operation": "create", - "path": "public.employees.employees_company_fkey" - }, - { - "sql": "ALTER TABLE employees VALIDATE CONSTRAINT employees_company_fkey;", - "type": "table.constraint", - "operation": "create", - "path": "public.employees.employees_company_fkey" + "steps": [ + { + "sql": "ALTER TABLE employees DROP CONSTRAINT 
employees_company_fkey;", + "type": "table.constraint", + "operation": "drop", + "path": "public.employees.employees_company_fkey" + }, + { + "sql": "ALTER TABLE employees\nADD CONSTRAINT employees_company_fkey FOREIGN KEY (tenant_id, company_id) REFERENCES companies (tenant_id, company_id) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.employees.employees_company_fkey" + }, + { + "sql": "ALTER TABLE employees VALIDATE CONSTRAINT employees_company_fkey;", + "type": "table.constraint", + "operation": "create", + "path": "public.employees.employees_company_fkey" + } + ] } ] } - ] + } } diff --git a/testdata/diff/online/alter_materialized_view_index/plan.json b/testdata/diff/online/alter_materialized_view_index/plan.json index eb9ea155..1f900417 100644 --- a/testdata/diff/online/alter_materialized_view_index/plan.json +++ b/testdata/diff/online/alter_materialized_view_index/plan.json @@ -2,49 +2,53 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "bc770ddaf5f2a6240a64435aded0de44672a1e62b8f2d4702ea2833d2074c825" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "bc770ddaf5f2a6240a64435aded0de44672a1e62b8f2d4702ea2833d2074c825" + }, + "groups": [ { - "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_user_summary_email_pgschema_new ON user_summary (email, status);", - "type": "materialized_view.index", - "operation": "alter", - "path": "public.user_summary.idx_user_summary_email" - } - ] - }, - { - "steps": [ - { - "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 
'idx_user_summary_email_pgschema_new';", - "directive": { - "type": "wait", - "message": "Creating index idx_user_summary_email_pgschema_new" - }, - "type": "materialized_view.index", - "operation": "alter", - "path": "public.user_summary.idx_user_summary_email" - } - ] - }, - { - "steps": [ + "steps": [ + { + "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_user_summary_email_pgschema_new ON user_summary (email, status);", + "type": "materialized_view.index", + "operation": "alter", + "path": "public.user_summary.idx_user_summary_email" + } + ] + }, { - "sql": "DROP INDEX idx_user_summary_email;", - "type": "materialized_view.index", - "operation": "alter", - "path": "public.user_summary.idx_user_summary_email" + "steps": [ + { + "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'idx_user_summary_email_pgschema_new';", + "directive": { + "type": "wait", + "message": "Creating index idx_user_summary_email_pgschema_new" + }, + "type": "materialized_view.index", + "operation": "alter", + "path": "public.user_summary.idx_user_summary_email" + } + ] }, { - "sql": "ALTER INDEX idx_user_summary_email_pgschema_new RENAME TO idx_user_summary_email;", - "type": "materialized_view.index", - "operation": "alter", - "path": "public.user_summary.idx_user_summary_email" + "steps": [ + { + "sql": "DROP INDEX idx_user_summary_email;", + "type": "materialized_view.index", + "operation": "alter", + "path": "public.user_summary.idx_user_summary_email" + }, + { + "sql": "ALTER INDEX idx_user_summary_email_pgschema_new RENAME TO idx_user_summary_email;", + "type": "materialized_view.index", + "operation": "alter", + "path": "public.user_summary.idx_user_summary_email" + } + ] } ] } - ] + } } diff --git 
a/testdata/diff/online/issue_286_reserved_keyword_quoting/plan.json b/testdata/diff/online/issue_286_reserved_keyword_quoting/plan.json index c241202a..4d74d842 100644 --- a/testdata/diff/online/issue_286_reserved_keyword_quoting/plan.json +++ b/testdata/diff/online/issue_286_reserved_keyword_quoting/plan.json @@ -2,43 +2,47 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "7f1a921c5f4e77dff3078b3834e0a0e64270239601847f586817ce981b3f3d14" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "7f1a921c5f4e77dff3078b3834e0a0e64270239601847f586817ce981b3f3d14" + }, + "groups": [ { - "sql": "ALTER TABLE \"order\"\nADD COLUMN tenant_id uuid CONSTRAINT \"FK_order_tenant\" REFERENCES tenant (id);", - "type": "table.column", - "operation": "create", - "path": "public.order.tenant_id" - } - ] - }, - { - "steps": [ + "steps": [ + { + "sql": "ALTER TABLE \"order\"\nADD COLUMN tenant_id uuid CONSTRAINT \"FK_order_tenant\" REFERENCES tenant (id);", + "type": "table.column", + "operation": "create", + "path": "public.order.tenant_id" + } + ] + }, { - "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS \"IDX_order_tenant_order_number\" ON \"order\" (tenant_id, order_number);", - "type": "table.index", - "operation": "create", - "path": "public.order.IDX_order_tenant_order_number" - } - ] - }, - { - "steps": [ + "steps": [ + { + "sql": "CREATE INDEX CONCURRENTLY IF NOT EXISTS \"IDX_order_tenant_order_number\" ON \"order\" (tenant_id, order_number);", + "type": "table.index", + "operation": "create", + "path": "public.order.IDX_order_tenant_order_number" + } + ] + }, { - "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE 
c.relname = 'IDX_order_tenant_order_number';", - "directive": { - "type": "wait", - "message": "Creating index IDX_order_tenant_order_number" - }, - "type": "table.index", - "operation": "create", - "path": "public.order.IDX_order_tenant_order_number" + "steps": [ + { + "sql": "SELECT \n COALESCE(i.indisvalid, false) as done,\n CASE \n WHEN p.blocks_total > 0 THEN p.blocks_done * 100 / p.blocks_total\n ELSE 0\n END as progress\nFROM pg_class c\nLEFT JOIN pg_index i ON c.oid = i.indexrelid\nLEFT JOIN pg_stat_progress_create_index p ON c.oid = p.index_relid\nWHERE c.relname = 'IDX_order_tenant_order_number';", + "directive": { + "type": "wait", + "message": "Creating index IDX_order_tenant_order_number" + }, + "type": "table.index", + "operation": "create", + "path": "public.order.IDX_order_tenant_order_number" + } + ] } ] } - ] + } } diff --git a/testdata/diff/online/issue_313_camelcase_column_not_null/plan.json b/testdata/diff/online/issue_313_camelcase_column_not_null/plan.json index 126d4e2b..79ca15b5 100644 --- a/testdata/diff/online/issue_313_camelcase_column_not_null/plan.json +++ b/testdata/diff/online/issue_313_camelcase_column_not_null/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "c151d6b38e413e2a0fea4628892eb22ff76c48f1b933d03143cc3b0fde97e4f8" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "c151d6b38e413e2a0fea4628892eb22ff76c48f1b933d03143cc3b0fde97e4f8" + }, + "groups": [ { - "sql": "ALTER TABLE \"Planning\" ADD CONSTRAINT \"offersValidUntil_not_null\" CHECK (\"offersValidUntil\" IS NOT NULL) NOT VALID;", - "type": "table.column", - "operation": "alter", - "path": "public.Planning.offersValidUntil" - }, - { - "sql": "ALTER TABLE \"Planning\" VALIDATE CONSTRAINT \"offersValidUntil_not_null\";", - "type": "table.column", - "operation": "alter", - "path": "public.Planning.offersValidUntil" - }, - { 
- "sql": "ALTER TABLE \"Planning\" ALTER COLUMN \"offersValidUntil\" SET NOT NULL;", - "type": "table.column", - "operation": "alter", - "path": "public.Planning.offersValidUntil" - }, - { - "sql": "ALTER TABLE \"Planning\" DROP CONSTRAINT \"offersValidUntil_not_null\";", - "type": "table.column", - "operation": "alter", - "path": "public.Planning.offersValidUntil" + "steps": [ + { + "sql": "ALTER TABLE \"Planning\" ADD CONSTRAINT \"offersValidUntil_not_null\" CHECK (\"offersValidUntil\" IS NOT NULL) NOT VALID;", + "type": "table.column", + "operation": "alter", + "path": "public.Planning.offersValidUntil" + }, + { + "sql": "ALTER TABLE \"Planning\" VALIDATE CONSTRAINT \"offersValidUntil_not_null\";", + "type": "table.column", + "operation": "alter", + "path": "public.Planning.offersValidUntil" + }, + { + "sql": "ALTER TABLE \"Planning\" ALTER COLUMN \"offersValidUntil\" SET NOT NULL;", + "type": "table.column", + "operation": "alter", + "path": "public.Planning.offersValidUntil" + }, + { + "sql": "ALTER TABLE \"Planning\" DROP CONSTRAINT \"offersValidUntil_not_null\";", + "type": "table.column", + "operation": "alter", + "path": "public.Planning.offersValidUntil" + } + ] } ] } - ] + } } diff --git a/testdata/diff/online/issue_386_check_no_inherit/plan.json b/testdata/diff/online/issue_386_check_no_inherit/plan.json index a05c5e21..fcc528aa 100644 --- a/testdata/diff/online/issue_386_check_no_inherit/plan.json +++ b/testdata/diff/online/issue_386_check_no_inherit/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "7a986c71ded1858e3a4c287f6f6b08f08719008f237eff921df9b3b52371867b" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "7a986c71ded1858e3a4c287f6f6b08f08719008f237eff921df9b3b52371867b" + }, + "groups": [ { - "sql": "ALTER TABLE parent_base\nADD CONSTRAINT no_direct_insert CHECK (false) NO INHERIT NOT VALID;", 
- "type": "table.constraint", - "operation": "create", - "path": "public.parent_base.no_direct_insert" - }, - { - "sql": "ALTER TABLE parent_base VALIDATE CONSTRAINT no_direct_insert;", - "type": "table.constraint", - "operation": "create", - "path": "public.parent_base.no_direct_insert" + "steps": [ + { + "sql": "ALTER TABLE parent_base\nADD CONSTRAINT no_direct_insert CHECK (false) NO INHERIT NOT VALID;", + "type": "table.constraint", + "operation": "create", + "path": "public.parent_base.no_direct_insert" + }, + { + "sql": "ALTER TABLE parent_base VALIDATE CONSTRAINT no_direct_insert;", + "type": "table.constraint", + "operation": "create", + "path": "public.parent_base.no_direct_insert" + } + ] } ] } - ] + } } diff --git a/testdata/diff/privilege/alter_privilege/plan.json b/testdata/diff/privilege/alter_privilege/plan.json index 03a2223a..a8379e92 100644 --- a/testdata/diff/privilege/alter_privilege/plan.json +++ b/testdata/diff/privilege/alter_privilege/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "ce677037813532390cc0179dd03fa2e40961ec4dbbb86c957b83322744ab5a46" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "ce677037813532390cc0179dd03fa2e40961ec4dbbb86c957b83322744ab5a46" + }, + "groups": [ { - "sql": "REVOKE INSERT ON TABLE inventory FROM app_role;", - "type": "privilege", - "operation": "alter", - "path": "privileges.TABLE.inventory.app_role" - }, - { - "sql": "GRANT DELETE, UPDATE ON TABLE inventory TO app_role;", - "type": "privilege", - "operation": "alter", - "path": "privileges.TABLE.inventory.app_role" + "steps": [ + { + "sql": "REVOKE INSERT ON TABLE inventory FROM app_role;", + "type": "privilege", + "operation": "alter", + "path": "privileges.TABLE.inventory.app_role" + }, + { + "sql": "GRANT DELETE, UPDATE ON TABLE inventory TO app_role;", + "type": "privilege", + "operation": "alter", + 
"path": "privileges.TABLE.inventory.app_role" + } + ] } ] } - ] + } } diff --git a/testdata/diff/privilege/grant_function_execute/plan.json b/testdata/diff/privilege/grant_function_execute/plan.json index 295b32db..8be5ab77 100644 --- a/testdata/diff/privilege/grant_function_execute/plan.json +++ b/testdata/diff/privilege/grant_function_execute/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "ccf790899245f18bb779a82938ba62c137eb1a3807644be4a278b8e999a8fe30" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "ccf790899245f18bb779a82938ba62c137eb1a3807644be4a278b8e999a8fe30" + }, + "groups": [ { - "sql": "GRANT EXECUTE ON FUNCTION calculate_total(quantity integer, unit_price numeric) TO api_role;", - "type": "privilege", - "operation": "create", - "path": "privileges.FUNCTION.calculate_total(quantity integer, unit_price numeric).api_role" + "steps": [ + { + "sql": "GRANT EXECUTE ON FUNCTION calculate_total(quantity integer, unit_price numeric) TO api_role;", + "type": "privilege", + "operation": "create", + "path": "privileges.FUNCTION.calculate_total(quantity integer, unit_price numeric).api_role" + } + ] } ] } - ] + } } diff --git a/testdata/diff/privilege/grant_sequence/plan.json b/testdata/diff/privilege/grant_sequence/plan.json index b62d007e..439bdc6b 100644 --- a/testdata/diff/privilege/grant_sequence/plan.json +++ b/testdata/diff/privilege/grant_sequence/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "7027e39d7e59e0103d597e7cd942af1e42217adff9a2c893a46d9b16c6a2ee25" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "7027e39d7e59e0103d597e7cd942af1e42217adff9a2c893a46d9b16c6a2ee25" + }, + "groups": [ { - "sql": "GRANT SELECT, USAGE ON SEQUENCE order_id_seq 
TO app_role;", - "type": "privilege", - "operation": "create", - "path": "privileges.SEQUENCE.order_id_seq.app_role" + "steps": [ + { + "sql": "GRANT SELECT, USAGE ON SEQUENCE order_id_seq TO app_role;", + "type": "privilege", + "operation": "create", + "path": "privileges.SEQUENCE.order_id_seq.app_role" + } + ] } ] } - ] + } } diff --git a/testdata/diff/privilege/grant_table_multiple/plan.json b/testdata/diff/privilege/grant_table_multiple/plan.json index ed13f497..ea45c9e0 100644 --- a/testdata/diff/privilege/grant_table_multiple/plan.json +++ b/testdata/diff/privilege/grant_table_multiple/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "ed31ba27099edeef640871532dbf79cc408e417a5e1feeecc43df88c21447f53" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "ed31ba27099edeef640871532dbf79cc408e417a5e1feeecc43df88c21447f53" + }, + "groups": [ { - "sql": "GRANT DELETE, INSERT, SELECT, UPDATE ON TABLE orders TO app_role;", - "type": "privilege", - "operation": "create", - "path": "privileges.TABLE.orders.app_role" + "steps": [ + { + "sql": "GRANT DELETE, INSERT, SELECT, UPDATE ON TABLE orders TO app_role;", + "type": "privilege", + "operation": "create", + "path": "privileges.TABLE.orders.app_role" + } + ] } ] } - ] + } } diff --git a/testdata/diff/privilege/grant_table_select/plan.json b/testdata/diff/privilege/grant_table_select/plan.json index 1b42edfe..92de4873 100644 --- a/testdata/diff/privilege/grant_table_select/plan.json +++ b/testdata/diff/privilege/grant_table_select/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "61c520955d9df95c9d233b93c7500d90fe0a5aabed7dbe61ab394a02cef98940" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": 
"61c520955d9df95c9d233b93c7500d90fe0a5aabed7dbe61ab394a02cef98940" + }, + "groups": [ { - "sql": "GRANT SELECT ON TABLE users TO readonly_role;", - "type": "privilege", - "operation": "create", - "path": "privileges.TABLE.users.readonly_role" - }, - { - "sql": "GRANT SELECT (id) ON TABLE users TO column_reader;", - "type": "column_privilege", - "operation": "create", - "path": "column_privileges.TABLE.users.id.column_reader" + "steps": [ + { + "sql": "GRANT SELECT ON TABLE users TO readonly_role;", + "type": "privilege", + "operation": "create", + "path": "privileges.TABLE.users.readonly_role" + }, + { + "sql": "GRANT SELECT (id) ON TABLE users TO column_reader;", + "type": "column_privilege", + "operation": "create", + "path": "column_privileges.TABLE.users.id.column_reader" + } + ] } ] } - ] + } } diff --git a/testdata/diff/privilege/grant_type/plan.json b/testdata/diff/privilege/grant_type/plan.json index 60109fcb..675aad5f 100644 --- a/testdata/diff/privilege/grant_type/plan.json +++ b/testdata/diff/privilege/grant_type/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "23c1e89c461f82f0fe9f07f1090fd3a9ca0da2300fb48a6f9300b688f2eb31b8" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "23c1e89c461f82f0fe9f07f1090fd3a9ca0da2300fb48a6f9300b688f2eb31b8" + }, + "groups": [ { - "sql": "GRANT USAGE ON TYPE email_address TO app_role;", - "type": "privilege", - "operation": "create", - "path": "privileges.TYPE.email_address.app_role" + "steps": [ + { + "sql": "GRANT USAGE ON TYPE email_address TO app_role;", + "type": "privilege", + "operation": "create", + "path": "privileges.TYPE.email_address.app_role" + } + ] } ] } - ] + } } diff --git a/testdata/diff/privilege/grant_with_grant_option/plan.json b/testdata/diff/privilege/grant_with_grant_option/plan.json index 0ca783ad..1d72e736 100644 --- 
a/testdata/diff/privilege/grant_with_grant_option/plan.json +++ b/testdata/diff/privilege/grant_with_grant_option/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "ebd29a0231408c46d6b2733a4671e806b669cb4d27d79a4e7afb81db15f12445" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "ebd29a0231408c46d6b2733a4671e806b669cb4d27d79a4e7afb81db15f12445" + }, + "groups": [ { - "sql": "GRANT SELECT ON TABLE products TO admin_role WITH GRANT OPTION;", - "type": "privilege", - "operation": "create", - "path": "privileges.TABLE.products.admin_role" + "steps": [ + { + "sql": "GRANT SELECT ON TABLE products TO admin_role WITH GRANT OPTION;", + "type": "privilege", + "operation": "create", + "path": "privileges.TABLE.products.admin_role" + } + ] } ] } - ] + } } diff --git a/testdata/diff/privilege/issue_324_grant_revoke_order/plan.json b/testdata/diff/privilege/issue_324_grant_revoke_order/plan.json index 405347f7..4aae89b5 100644 --- a/testdata/diff/privilege/issue_324_grant_revoke_order/plan.json +++ b/testdata/diff/privilege/issue_324_grant_revoke_order/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "2f50f8550c48f6ac58cde417097cdd7c293b3ae0d9bd8861b197edb73e80b148" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "2f50f8550c48f6ac58cde417097cdd7c293b3ae0d9bd8861b197edb73e80b148" + }, + "groups": [ { - "sql": "REVOKE UPDATE ON TABLE sometable FROM app_user;", - "type": "privilege", - "operation": "drop", - "path": "privileges.TABLE.sometable.app_user" - }, - { - "sql": "GRANT UPDATE (somecolumn) ON TABLE sometable TO app_user;", - "type": "column_privilege", - "operation": "create", - "path": "column_privileges.TABLE.sometable.somecolumn.app_user" + "steps": [ + { + "sql": 
"REVOKE UPDATE ON TABLE sometable FROM app_user;", + "type": "privilege", + "operation": "drop", + "path": "privileges.TABLE.sometable.app_user" + }, + { + "sql": "GRANT UPDATE (somecolumn) ON TABLE sometable TO app_user;", + "type": "column_privilege", + "operation": "create", + "path": "column_privileges.TABLE.sometable.somecolumn.app_user" + } + ] } ] } - ] + } } diff --git a/testdata/diff/privilege/issue_324_modify_grant_to_column/plan.json b/testdata/diff/privilege/issue_324_modify_grant_to_column/plan.json index f061414a..fb7678da 100644 --- a/testdata/diff/privilege/issue_324_modify_grant_to_column/plan.json +++ b/testdata/diff/privilege/issue_324_modify_grant_to_column/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "1dec24426019fcb5e34cd89432fe55c1f3128e871f5b770263874c4ada106d6f" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "1dec24426019fcb5e34cd89432fe55c1f3128e871f5b770263874c4ada106d6f" + }, + "groups": [ { - "sql": "REVOKE UPDATE ON TABLE sometable FROM app_user;", - "type": "privilege", - "operation": "alter", - "path": "privileges.TABLE.sometable.app_user" - }, - { - "sql": "GRANT UPDATE (somecolumn) ON TABLE sometable TO app_user;", - "type": "column_privilege", - "operation": "create", - "path": "column_privileges.TABLE.sometable.somecolumn.app_user" + "steps": [ + { + "sql": "REVOKE UPDATE ON TABLE sometable FROM app_user;", + "type": "privilege", + "operation": "alter", + "path": "privileges.TABLE.sometable.app_user" + }, + { + "sql": "GRANT UPDATE (somecolumn) ON TABLE sometable TO app_user;", + "type": "column_privilege", + "operation": "create", + "path": "column_privileges.TABLE.sometable.somecolumn.app_user" + } + ] } ] } - ] + } } diff --git a/testdata/diff/privilege/issue_376_grant_revoke_temp_schema_leak/plan.json 
b/testdata/diff/privilege/issue_376_grant_revoke_temp_schema_leak/plan.json index ce780290..b3611636 100644 --- a/testdata/diff/privilege/issue_376_grant_revoke_temp_schema_leak/plan.json +++ b/testdata/diff/privilege/issue_376_grant_revoke_temp_schema_leak/plan.json @@ -2,25 +2,29 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "27f3398a052dfc62c099d3054981ea0e5493b954bff2ffd763222a4fc21de01c" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "27f3398a052dfc62c099d3054981ea0e5493b954bff2ffd763222a4fc21de01c" + }, + "groups": [ { - "sql": "REVOKE EXECUTE ON FUNCTION f_test(p_items my_input[]) FROM PUBLIC;", - "type": "revoked_default_privilege", - "operation": "create", - "path": "revoked_default.FUNCTION.f_test(p_items my_input[])" - }, - { - "sql": "GRANT EXECUTE ON FUNCTION f_test(p_items my_input[]) TO appname_apiuser;", - "type": "privilege", - "operation": "create", - "path": "privileges.FUNCTION.f_test(p_items my_input[]).appname_apiuser" + "steps": [ + { + "sql": "REVOKE EXECUTE ON FUNCTION f_test(p_items my_input[]) FROM PUBLIC;", + "type": "revoked_default_privilege", + "operation": "create", + "path": "revoked_default.FUNCTION.f_test(p_items my_input[])" + }, + { + "sql": "GRANT EXECUTE ON FUNCTION f_test(p_items my_input[]) TO appname_apiuser;", + "type": "privilege", + "operation": "create", + "path": "privileges.FUNCTION.f_test(p_items my_input[]).appname_apiuser" + } + ] } ] } - ] + } } diff --git a/testdata/diff/privilege/revoke_default_privilege/plan.json b/testdata/diff/privilege/revoke_default_privilege/plan.json index 53db03b1..dfcc57c6 100644 --- a/testdata/diff/privilege/revoke_default_privilege/plan.json +++ b/testdata/diff/privilege/revoke_default_privilege/plan.json @@ -2,37 +2,41 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": 
"60a01e6c2c18215bcce70b3d9d1153e7ca0371ef8e12252c152547462d1d92bd" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "60a01e6c2c18215bcce70b3d9d1153e7ca0371ef8e12252c152547462d1d92bd" + }, + "groups": [ { - "sql": "CREATE TABLE IF NOT EXISTS readonly_data (\n id integer,\n value text,\n CONSTRAINT readonly_data_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.readonly_data" - }, - { - "sql": "CREATE TABLE IF NOT EXISTS secrets (\n id integer,\n data text,\n CONSTRAINT secrets_pkey PRIMARY KEY (id)\n);", - "type": "table", - "operation": "create", - "path": "public.secrets" - }, - { - "sql": "REVOKE DELETE, INSERT, UPDATE ON TABLE readonly_data FROM app_user;", - "type": "privilege", - "operation": "drop", - "path": "privileges.TABLE.readonly_data.app_user" - }, - { - "sql": "REVOKE SELECT ON TABLE secrets FROM reader;", - "type": "privilege", - "operation": "drop", - "path": "privileges.TABLE.secrets.reader" + "steps": [ + { + "sql": "CREATE TABLE IF NOT EXISTS readonly_data (\n id integer,\n value text,\n CONSTRAINT readonly_data_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.readonly_data" + }, + { + "sql": "CREATE TABLE IF NOT EXISTS secrets (\n id integer,\n data text,\n CONSTRAINT secrets_pkey PRIMARY KEY (id)\n);", + "type": "table", + "operation": "create", + "path": "public.secrets" + }, + { + "sql": "REVOKE DELETE, INSERT, UPDATE ON TABLE readonly_data FROM app_user;", + "type": "privilege", + "operation": "drop", + "path": "privileges.TABLE.readonly_data.app_user" + }, + { + "sql": "REVOKE SELECT ON TABLE secrets FROM reader;", + "type": "privilege", + "operation": "drop", + "path": "privileges.TABLE.secrets.reader" + } + ] } ] } - ] + } } diff --git a/testdata/diff/privilege/revoke_grant_option/plan.json b/testdata/diff/privilege/revoke_grant_option/plan.json index 8b4b081e..a58065af 100644 --- 
a/testdata/diff/privilege/revoke_grant_option/plan.json +++ b/testdata/diff/privilege/revoke_grant_option/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "06cc802df43a14a2fb3c8ae79646ae78edca7e87ec1e1267bc2591c73973e2b2" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "06cc802df43a14a2fb3c8ae79646ae78edca7e87ec1e1267bc2591c73973e2b2" + }, + "groups": [ { - "sql": "REVOKE GRANT OPTION FOR SELECT ON TABLE employees FROM manager_role;", - "type": "privilege", - "operation": "alter", - "path": "privileges.TABLE.employees.manager_role" + "steps": [ + { + "sql": "REVOKE GRANT OPTION FOR SELECT ON TABLE employees FROM manager_role;", + "type": "privilege", + "operation": "alter", + "path": "privileges.TABLE.employees.manager_role" + } + ] } ] } - ] + } } diff --git a/testdata/diff/privilege/revoke_public_function/plan.json b/testdata/diff/privilege/revoke_public_function/plan.json index 54fc1d8e..4c3fc02a 100644 --- a/testdata/diff/privilege/revoke_public_function/plan.json +++ b/testdata/diff/privilege/revoke_public_function/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "8f430acd242ac0e21c80ea77aaab0b48ad04a543aeb04dee3c19ac2c89f5dd8c" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "8f430acd242ac0e21c80ea77aaab0b48ad04a543aeb04dee3c19ac2c89f5dd8c" + }, + "groups": [ { - "sql": "REVOKE EXECUTE ON FUNCTION get_user_data(user_id integer) FROM PUBLIC;", - "type": "revoked_default_privilege", - "operation": "create", - "path": "revoked_default.FUNCTION.get_user_data(user_id integer)" + "steps": [ + { + "sql": "REVOKE EXECUTE ON FUNCTION get_user_data(user_id integer) FROM PUBLIC;", + "type": "revoked_default_privilege", + "operation": "create", + "path": 
"revoked_default.FUNCTION.get_user_data(user_id integer)" + } + ] } ] } - ] + } } diff --git a/testdata/diff/privilege/revoke_table_privilege/plan.json b/testdata/diff/privilege/revoke_table_privilege/plan.json index a459a4c3..4cd5a33a 100644 --- a/testdata/diff/privilege/revoke_table_privilege/plan.json +++ b/testdata/diff/privilege/revoke_table_privilege/plan.json @@ -2,19 +2,23 @@ "version": "1.0.0", "pgschema_version": "1.9.0", "created_at": "1970-01-01T00:00:00Z", - "source_fingerprint": { - "hash": "645f57ade0670f1bd4b95964bd9e0b5b04facb355b66cd4fb675256aeaf35c5b" - }, - "groups": [ - { - "steps": [ + "schemas": { + "public": { + "source_fingerprint": { + "hash": "645f57ade0670f1bd4b95964bd9e0b5b04facb355b66cd4fb675256aeaf35c5b" + }, + "groups": [ { - "sql": "REVOKE SELECT ON TABLE audit_log FROM old_role;", - "type": "privilege", - "operation": "drop", - "path": "privileges.TABLE.audit_log.old_role" + "steps": [ + { + "sql": "REVOKE SELECT ON TABLE audit_log FROM old_role;", + "type": "privilege", + "operation": "drop", + "path": "privileges.TABLE.audit_log.old_role" + } + ] } ] } - ] + } }