Migrations & Versioning

Database migration strategies, version control, and schema evolution for Smart Shelf.

Introduction

Comprehensive database migration and versioning strategy for Smart Shelf, covering schema evolution, deployment processes, and data migration patterns using Supabase migrations.

Migration Strategy Overview

Smart Shelf implements a robust migration strategy that ensures:

  1. Version Control - All schema changes are tracked and versioned
  2. Rollback Support - Safe rollback procedures for failed migrations
  3. Zero-Downtime Deployments - Migrations that don't interrupt service
  4. Data Integrity - Validation and backup procedures
  5. Environment Consistency - Identical schemas across environments

Migration Structure

Migration File Organization

migrations/
├── 001_initial_schema.sql
├── 002_add_user_profiles.sql
├── 003_create_products_table.sql
├── 004_add_inventory_system.sql
├── 005_implement_rls_policies.sql
├── 006_add_audit_logging.sql
├── 007_optimize_indexes.sql
├── 008_add_stock_movements.sql
├── rollbacks/
│   ├── 001_rollback_initial_schema.sql
│   ├── 002_rollback_user_profiles.sql
│   └── ...
└── seeds/
    ├── 001_seed_categories.sql
    ├── 002_seed_test_data.sql
    └── ...

Migration Naming Convention

{version}_{descriptive_name}.sql

Examples:
- 001_initial_schema.sql
- 002_add_user_profiles.sql
- 003_create_products_table.sql
- 004_modify_inventory_constraints.sql

Core Migration Examples

Initial Schema Migration

-- migrations/001_initial_schema.sql
-- Version: 001
-- Description: Initial database schema setup
-- Author: Smart Shelf Team
-- Date: 2024-01-01
-- Dependencies: 000_migration_system.sql (provides the migrations tracking table)

BEGIN;

-- Create custom enum types shared across the schema.
CREATE TYPE user_role AS ENUM ('admin', 'manager', 'staff', 'viewer');
CREATE TYPE unit_type AS ENUM ('piece', 'kg', 'gram', 'liter', 'ml', 'box', 'pack', 'dozen');
CREATE TYPE movement_type AS ENUM ('IN', 'OUT', 'TRANSFER', 'ADJUSTMENT');

-- Create warehouses table first (no dependencies on other tables)
CREATE TABLE warehouses (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  name VARCHAR(255) NOT NULL,
  code VARCHAR(20) UNIQUE NOT NULL,
  address JSONB,
  contact_info JSONB,
  capacity INTEGER,
  is_active BOOLEAN DEFAULT true,
  created_at TIMESTAMPTZ DEFAULT NOW(),
  updated_at TIMESTAMPTZ DEFAULT NOW()
);

-- Create product categories table (self-referencing hierarchy via parent_id)
CREATE TABLE product_categories (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  name VARCHAR(255) NOT NULL,
  description TEXT,
  parent_id UUID REFERENCES product_categories(id),
  sort_order INTEGER DEFAULT 0,
  is_active BOOLEAN DEFAULT true,
  created_at TIMESTAMPTZ DEFAULT NOW(),

  -- A category must not be its own parent (deeper cycles need app-level checks).
  CONSTRAINT no_self_parent CHECK (id != parent_id)
);

-- Shared trigger function: keep updated_at current on every UPDATE.
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
  NEW.updated_at = NOW();
  RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Apply triggers
CREATE TRIGGER update_warehouses_updated_at
  BEFORE UPDATE ON warehouses
  FOR EACH ROW
  EXECUTE FUNCTION update_updated_at_column();

-- Record this migration. FIX: also record the rollback script path —
-- rollback_migration() raises an exception when rollback_file IS NULL,
-- so omitting it made every rollback attempt fail.
INSERT INTO migrations (version, name, applied_at, rollback_file)
VALUES ('001', 'initial_schema', NOW(), 'rollbacks/001_rollback_initial_schema.sql');

COMMIT;

User Management Migration

-- migrations/002_add_user_profiles.sql
-- Version: 002
-- Description: Add user profiles and role management
-- Dependencies: 001_initial_schema.sql

BEGIN;

-- Create user profiles table; rows are keyed 1:1 to Supabase auth.users
-- and removed automatically when the auth account is deleted.
CREATE TABLE user_profiles (
  id UUID PRIMARY KEY REFERENCES auth.users(id) ON DELETE CASCADE,
  full_name VARCHAR(255) NOT NULL,
  avatar_url TEXT,
  role user_role NOT NULL DEFAULT 'staff',
  warehouse_id UUID REFERENCES warehouses(id),
  is_active BOOLEAN DEFAULT true,
  created_at TIMESTAMPTZ DEFAULT NOW(),
  updated_at TIMESTAMPTZ DEFAULT NOW()
);

-- Create user roles table for granular, per-user permission grants.
CREATE TABLE user_roles (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  user_id UUID NOT NULL REFERENCES user_profiles(id) ON DELETE CASCADE,
  role VARCHAR(50) NOT NULL,
  permissions JSONB DEFAULT '{}',
  created_at TIMESTAMPTZ DEFAULT NOW(),
  UNIQUE(user_id, role)
);

-- Enable RLS on user tables
ALTER TABLE user_profiles ENABLE ROW LEVEL SECURITY;
ALTER TABLE user_roles ENABLE ROW LEVEL SECURITY;

-- Basic RLS policies: users may read and update only their own profile row.
CREATE POLICY "Users can view own profile" ON user_profiles
  FOR SELECT USING (auth.uid() = id);

CREATE POLICY "Users can update own profile" ON user_profiles
  FOR UPDATE USING (auth.uid() = id);

-- Add indexes
CREATE INDEX idx_user_profiles_warehouse_id ON user_profiles(warehouse_id);
CREATE INDEX idx_user_profiles_role ON user_profiles(role);
CREATE INDEX idx_user_roles_user_id ON user_roles(user_id);

-- Apply updated_at trigger (function created in 001_initial_schema.sql).
CREATE TRIGGER update_user_profiles_updated_at
  BEFORE UPDATE ON user_profiles
  FOR EACH ROW
  EXECUTE FUNCTION update_updated_at_column();

-- Record this migration. FIX: include rollback_file — rollback_migration()
-- refuses to run when it is NULL.
INSERT INTO migrations (version, name, applied_at, rollback_file)
VALUES ('002', 'add_user_profiles', NOW(), 'rollbacks/002_rollback_user_profiles.sql');

COMMIT;

Product Management Migration

-- migrations/003_create_products_table.sql
-- Version: 003
-- Description: Create products and related tables
-- Dependencies: 001_initial_schema.sql

BEGIN;

-- Create products table (catalog master data).
CREATE TABLE products (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  name VARCHAR(255) NOT NULL,
  description TEXT,
  sku VARCHAR(100) UNIQUE NOT NULL,
  category_id UUID REFERENCES product_categories(id),
  brand VARCHAR(100),
  unit_of_measure unit_type NOT NULL DEFAULT 'piece',
  cost_price DECIMAL(10,2) NOT NULL DEFAULT 0.00,
  selling_price DECIMAL(10,2) NOT NULL DEFAULT 0.00,
  min_stock_level INTEGER DEFAULT 0,
  max_stock_level INTEGER,
  is_active BOOLEAN DEFAULT true,
  created_at TIMESTAMPTZ DEFAULT NOW(),
  updated_at TIMESTAMPTZ DEFAULT NOW(),

  -- Constraints: prices and stock bounds must be non-negative and consistent.
  CONSTRAINT positive_cost_price CHECK (cost_price >= 0),
  CONSTRAINT positive_selling_price CHECK (selling_price >= 0),
  CONSTRAINT positive_min_stock CHECK (min_stock_level >= 0),
  CONSTRAINT valid_max_stock CHECK (max_stock_level IS NULL OR max_stock_level >= min_stock_level)
);

-- Create product barcodes table (one product may carry several barcodes).
CREATE TABLE product_barcodes (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  product_id UUID NOT NULL REFERENCES products(id) ON DELETE CASCADE,
  barcode VARCHAR(50) UNIQUE NOT NULL,
  barcode_type VARCHAR(20) NOT NULL DEFAULT 'EAN13',
  is_primary BOOLEAN DEFAULT false,
  created_at TIMESTAMPTZ DEFAULT NOW()
);

-- Create inventory table: one row per (product, warehouse) pair;
-- quantity_available is derived and kept consistent by the database.
CREATE TABLE inventory (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  product_id UUID NOT NULL REFERENCES products(id) ON DELETE CASCADE,
  warehouse_id UUID NOT NULL REFERENCES warehouses(id) ON DELETE CASCADE,
  quantity_on_hand INTEGER NOT NULL DEFAULT 0,
  quantity_allocated INTEGER NOT NULL DEFAULT 0,
  quantity_available INTEGER GENERATED ALWAYS AS (quantity_on_hand - quantity_allocated) STORED,
  last_counted TIMESTAMPTZ,
  updated_at TIMESTAMPTZ DEFAULT NOW(),

  -- Constraints
  CONSTRAINT positive_on_hand CHECK (quantity_on_hand >= 0),
  CONSTRAINT positive_allocated CHECK (quantity_allocated >= 0),
  CONSTRAINT allocated_not_exceed_on_hand CHECK (quantity_allocated <= quantity_on_hand),

  -- Unique constraint
  UNIQUE(product_id, warehouse_id)
);

-- Create indexes for performance
CREATE INDEX idx_products_sku ON products(sku);
CREATE INDEX idx_products_category ON products(category_id);
CREATE INDEX idx_products_active ON products(name) WHERE is_active = true;
CREATE INDEX idx_product_barcodes_barcode ON product_barcodes(barcode);
CREATE INDEX idx_inventory_product ON inventory(product_id);
CREATE INDEX idx_inventory_warehouse ON inventory(warehouse_id);
CREATE INDEX idx_inventory_product_warehouse ON inventory(product_id, warehouse_id);

-- Partial unique index: at most one primary barcode per product.
CREATE UNIQUE INDEX idx_product_primary_barcode
ON product_barcodes(product_id)
WHERE is_primary = true;

-- Enable RLS
ALTER TABLE products ENABLE ROW LEVEL SECURITY;
ALTER TABLE inventory ENABLE ROW LEVEL SECURITY;

-- Add updated_at triggers (function created in 001_initial_schema.sql).
CREATE TRIGGER update_products_updated_at
  BEFORE UPDATE ON products
  FOR EACH ROW
  EXECUTE FUNCTION update_updated_at_column();

CREATE TRIGGER update_inventory_updated_at
  BEFORE UPDATE ON inventory
  FOR EACH ROW
  EXECUTE FUNCTION update_updated_at_column();

-- Record this migration. FIX: include rollback_file — rollback_migration()
-- refuses to run when it is NULL.
INSERT INTO migrations (version, name, applied_at, rollback_file)
VALUES ('003', 'create_products_table', NOW(), 'rollbacks/003_rollback_create_products_table.sql');

COMMIT;

Migration Management System

Migration Tracking Table

-- migrations/000_migration_system.sql
-- Create migration tracking system. Must run before any numbered migration:
-- each of them INSERTs its own record into the migrations table.

CREATE TABLE IF NOT EXISTS migrations (
  id SERIAL PRIMARY KEY,
  version VARCHAR(10) UNIQUE NOT NULL,
  name VARCHAR(100) NOT NULL,
  applied_at TIMESTAMPTZ DEFAULT NOW(),
  rollback_file VARCHAR(255),   -- path to the rollback script; rollback_migration() requires it
  checksum VARCHAR(64),         -- checksum of the migration file, for drift detection
  execution_time_ms INTEGER
);

-- FIX: audit table written by rollback_migration() was referenced but never
-- created anywhere; create it here so rollbacks can be logged.
CREATE TABLE IF NOT EXISTS migration_rollbacks (
  id SERIAL PRIMARY KEY,
  version VARCHAR(10) NOT NULL,
  rollback_at TIMESTAMPTZ DEFAULT NOW(),
  forced BOOLEAN NOT NULL DEFAULT FALSE
);

-- Single-row lock table to prevent concurrent migration runs
-- (the CHECK pins id to 1, so only one lock row can ever exist).
CREATE TABLE IF NOT EXISTS migration_lock (
  id INTEGER PRIMARY KEY DEFAULT 1,
  locked_at TIMESTAMPTZ,
  locked_by VARCHAR(100),
  CONSTRAINT single_lock CHECK (id = 1)
);

-- Acquire the global migration lock.
-- Returns TRUE when this caller obtained the lock; FALSE when another run
-- already holds it (the conflicting INSERT then inserts zero rows).
CREATE OR REPLACE FUNCTION acquire_migration_lock(locker_name VARCHAR DEFAULT 'migration')
RETURNS BOOLEAN AS $$
DECLARE
  inserted_rows INTEGER;
BEGIN
  INSERT INTO migration_lock (locked_at, locked_by)
  VALUES (NOW(), locker_name)
  ON CONFLICT (id) DO NOTHING;

  -- BUG FIX: "GET DIAGNOSTICS x = FOUND" is invalid plpgsql — GET DIAGNOSTICS
  -- only exposes items such as ROW_COUNT, not FOUND. Read the number of rows
  -- the INSERT actually affected instead.
  GET DIAGNOSTICS inserted_rows = ROW_COUNT;
  RETURN inserted_rows > 0;
END;
$$ LANGUAGE plpgsql;

-- Release the global migration lock so the next migration run can proceed.
-- Deleting a row that is already gone is a harmless no-op.
CREATE OR REPLACE FUNCTION release_migration_lock()
RETURNS VOID AS $$
BEGIN
  DELETE FROM migration_lock
  WHERE id = 1;
END;
$$ LANGUAGE plpgsql;

Migration Validation Functions

-- Check whether a prerequisite migration has already been applied.
-- Returns TRUE when the given version exists in the migrations table.
CREATE OR REPLACE FUNCTION validate_migration_dependencies(
  required_version VARCHAR
)
RETURNS BOOLEAN AS $$
DECLARE
  dependency_applied BOOLEAN;
BEGIN
  SELECT EXISTS (
    SELECT 1
    FROM migrations m
    WHERE m.version = required_version
  )
  INTO dependency_applied;

  RETURN dependency_applied;
END;
$$ LANGUAGE plpgsql;

-- Report every applied migration, ordered by version (oldest first).
CREATE OR REPLACE FUNCTION get_migration_status()
RETURNS TABLE (
  version VARCHAR,
  name VARCHAR,
  applied_at TIMESTAMPTZ,
  execution_time_ms INTEGER
) AS $$
BEGIN
  -- Qualify every column with the table alias: the output parameters above
  -- share the same names and would otherwise be ambiguous.
  RETURN QUERY
  SELECT
    applied.version,
    applied.name,
    applied.applied_at,
    applied.execution_time_ms
  FROM migrations AS applied
  ORDER BY applied.version;
END;
$$ LANGUAGE plpgsql;

-- Validate schema integrity: report public tables that violate a basic
-- structural rule, with one row per offending table.
--
-- BUG FIX: the original FILTER required tc.constraint_type = 'FOREIGN KEY'
-- AND tc.constraint_name IS NULL simultaneously; after the LEFT JOIN those
-- columns are NULL (or non-NULL) together, so the predicate could never
-- match and the function reported nothing. Replaced with a concrete,
-- checkable rule: flag base tables that lack a PRIMARY KEY.
CREATE OR REPLACE FUNCTION validate_schema_integrity()
RETURNS TABLE (
  table_name NAME,
  constraint_violations TEXT[]
) AS $$
BEGIN
  RETURN QUERY
  SELECT
    t.table_name::NAME,
    ARRAY['Missing primary key constraint']::TEXT[]
  FROM information_schema.tables t
  WHERE t.table_schema = 'public'
    AND t.table_type = 'BASE TABLE'
    AND NOT EXISTS (
      SELECT 1
      FROM information_schema.table_constraints tc
      WHERE tc.table_schema = t.table_schema
        AND tc.table_name = t.table_name
        AND tc.constraint_type = 'PRIMARY KEY'
    );
END;
$$ LANGUAGE plpgsql;

Rollback Procedures

Rollback Migration Example

-- rollbacks/003_rollback_create_products_table.sql
-- Rollback for migration 003_create_products_table.sql
-- NOTE(review): the original header named this file 003_rollback_products_table.sql,
-- which does not match the {version}_rollback_{migration_name}.sql convention used
-- in the rollbacks/ listing — confirm the filename stored in migrations.rollback_file.

BEGIN;

-- Safety gate: abort unless both tables are empty. product_barcodes needs no
-- separate count — its rows reference products via a NOT NULL FK with
-- ON DELETE CASCADE, so zero products implies zero barcodes.
DO $$
DECLARE
  product_count INTEGER;
  inventory_count INTEGER;
BEGIN
  SELECT COUNT(*) INTO product_count FROM products;
  SELECT COUNT(*) INTO inventory_count FROM inventory;
  
  IF product_count > 0 OR inventory_count > 0 THEN
    RAISE EXCEPTION 'Cannot rollback: Critical data exists. Manual intervention required.';
  END IF;
END $$;

-- Drop triggers first, before the tables they are attached to.
DROP TRIGGER IF EXISTS update_products_updated_at ON products;
DROP TRIGGER IF EXISTS update_inventory_updated_at ON inventory;

-- Drop indexes explicitly. DROP TABLE would remove them implicitly, but
-- listing them keeps the rollback symmetric with the forward migration.
DROP INDEX IF EXISTS idx_products_sku;
DROP INDEX IF EXISTS idx_products_category;
DROP INDEX IF EXISTS idx_products_active;
DROP INDEX IF EXISTS idx_product_barcodes_barcode;
DROP INDEX IF EXISTS idx_inventory_product;
DROP INDEX IF EXISTS idx_inventory_warehouse;
DROP INDEX IF EXISTS idx_inventory_product_warehouse;
DROP INDEX IF EXISTS idx_product_primary_barcode;

-- Drop tables in reverse dependency order (children before parents).
DROP TABLE IF EXISTS inventory;
DROP TABLE IF EXISTS product_barcodes;
DROP TABLE IF EXISTS products;

-- Remove the tracking record so migration 003 can be re-applied later.
DELETE FROM migrations WHERE version = '003';

COMMIT;

Safe Rollback Function

-- Safely roll back a previously applied migration.
--
-- Parameters:
--   target_version  version string of the migration to roll back
--   force_rollback  when TRUE, skip the data-safety checks
--
-- Raises an exception when the migration is unknown or has no rollback
-- script registered. Logs the rollback to the migration_rollbacks audit
-- table (created in 000_migration_system.sql).
CREATE OR REPLACE FUNCTION rollback_migration(
  target_version VARCHAR,
  force_rollback BOOLEAN DEFAULT FALSE
)
RETURNS VOID AS $$
DECLARE
  migration_exists BOOLEAN;
  -- FIX: renamed from "rollback_file" — the old name shadowed the
  -- migrations.rollback_file column, a classic source of plpgsql
  -- ambiguity errors. Also dropped the unused data_check_result variable.
  rollback_script VARCHAR;
BEGIN
  -- Check if migration exists
  SELECT EXISTS(SELECT 1 FROM migrations WHERE version = target_version)
  INTO migration_exists;

  IF NOT migration_exists THEN
    RAISE EXCEPTION 'Migration version % not found', target_version;
  END IF;

  -- Get the registered rollback script for this migration.
  SELECT m.rollback_file INTO rollback_script
  FROM migrations m
  WHERE m.version = target_version;

  IF rollback_script IS NULL THEN
    RAISE EXCEPTION 'No rollback file specified for migration %', target_version;
  END IF;

  -- Perform data safety checks unless forced
  IF NOT force_rollback THEN
    -- Add custom data validation logic here
    RAISE NOTICE 'Performing safety checks before rollback...';
  END IF;

  -- Execute rollback (this would be handled by migration system)
  RAISE NOTICE 'Executing rollback file: %', rollback_script;

  -- Log rollback in the audit table.
  INSERT INTO migration_rollbacks (version, rollback_at, forced)
  VALUES (target_version, NOW(), force_rollback);
END;
$$ LANGUAGE plpgsql;

Data Migration Patterns

Large Data Migration with Batching

-- Example: migrate a large dataset in batches.
--
-- BUG FIX: the original issued two independent "WHERE migrated = FALSE
-- LIMIT batch_size" scans — one feeding the INSERT, one feeding the UPDATE.
-- With no ORDER BY, the two scans may select DIFFERENT rows: some rows get
-- copied but never marked (duplicated on the next pass) while others get
-- marked but never copied (silently lost). A single data-modifying CTE now
-- claims a batch with UPDATE ... RETURNING and inserts exactly those rows.
CREATE OR REPLACE FUNCTION migrate_inventory_data_batched(
  batch_size INTEGER DEFAULT 1000
)
RETURNS VOID AS $$
DECLARE
  processed_count INTEGER := 0;
  total_count INTEGER;
  batch_start_time TIMESTAMPTZ;
BEGIN
  -- Get total count for progress tracking
  SELECT COUNT(*) INTO total_count FROM old_inventory_table;
  RAISE NOTICE 'Starting migration of % records', total_count;

  LOOP
    batch_start_time := NOW();

    -- Claim one batch atomically, then copy exactly the claimed rows.
    WITH picked AS (
      SELECT id
      FROM old_inventory_table
      WHERE migrated = FALSE
      ORDER BY id          -- deterministic batch selection
      LIMIT batch_size
      FOR UPDATE           -- lock claimed rows against concurrent runs
    ),
    claimed AS (
      UPDATE old_inventory_table oit
      SET migrated = TRUE
      FROM picked
      WHERE oit.id = picked.id
      RETURNING oit.product_id, oit.warehouse_id, oit.quantity
    )
    INSERT INTO inventory (product_id, warehouse_id, quantity_on_hand)
    SELECT c.product_id, c.warehouse_id, c.quantity
    FROM claimed c;

    GET DIAGNOSTICS processed_count = ROW_COUNT;

    -- Log progress
    RAISE NOTICE 'Migrated batch of % records in % ms',
      processed_count,
      EXTRACT(EPOCH FROM (NOW() - batch_start_time)) * 1000;

    -- Exit once a pass claims nothing.
    EXIT WHEN processed_count = 0;

    -- Small delay to prevent overwhelming the system
    PERFORM pg_sleep(0.1);
  END LOOP;

  RAISE NOTICE 'Migration completed. Total records: %', total_count;
END;
$$ LANGUAGE plpgsql;

Zero-Downtime Migration Strategy

-- Example: Add new column with zero downtime
-- Step 1: Add the column as nullable — a nullable ADD COLUMN with no default
-- needs only a brief metadata change, not a full table rewrite, so writers
-- are not blocked. Existing rows read the new column as NULL until backfilled.
ALTER TABLE products ADD COLUMN new_field VARCHAR(100);

-- Step 2: Backfill the new column in small batches so no single long-running
-- UPDATE holds locks or bloats a transaction while the application stays live.
CREATE OR REPLACE FUNCTION populate_new_field_background()
RETURNS VOID AS $$
DECLARE
  chunk_size CONSTANT INTEGER := 1000;
  rows_updated INTEGER;
BEGIN
  LOOP
    -- Touch at most chunk_size not-yet-populated rows per pass.
    UPDATE products
    SET new_field = calculate_new_field_value(old_field)
    WHERE new_field IS NULL
      AND id IN (
        SELECT id
        FROM products
        WHERE new_field IS NULL
        LIMIT chunk_size
      );

    GET DIAGNOSTICS rows_updated = ROW_COUNT;
    EXIT WHEN rows_updated = 0;

    -- Progress tracking, then a short pause between passes.
    RAISE NOTICE 'Processed % records', rows_updated;
    PERFORM pg_sleep(0.1);
  END LOOP;
END;
$$ LANGUAGE plpgsql;

-- Step 3: Make column NOT NULL after population
-- This would be in a subsequent migration
-- ALTER TABLE products ALTER COLUMN new_field SET NOT NULL;

Migration Best Practices

Pre-Migration Validation

-- Pre-flight checks to run before applying a migration.
-- Returns one row per check with PASS/WARN/FAIL status and details.
-- NOTE: migration_version is kept for interface compatibility and reserved
-- for version-specific checks; it is not consulted yet.
CREATE OR REPLACE FUNCTION validate_migration_prerequisites(
  migration_version VARCHAR
)
RETURNS TABLE (
  check_name VARCHAR,
  status VARCHAR,
  details TEXT
) AS $$
BEGIN
  RETURN QUERY

  -- Check database version.
  -- BUG FIX: the original pattern version() LIKE '%PostgreSQL 1%' matched ANY
  -- 1x release (including long-EOL PostgreSQL 10) and will fail for 2x
  -- releases. Compare server_version_num numerically (140000 = PostgreSQL 14).
  SELECT
    'PostgreSQL Version'::VARCHAR,
    (CASE WHEN current_setting('server_version_num')::INTEGER >= 140000
          THEN 'PASS' ELSE 'FAIL' END)::VARCHAR,
    version()::TEXT

  UNION ALL

  -- Check available disk space
  SELECT
    'Disk Space'::VARCHAR,
    'PASS'::VARCHAR,  -- Would implement actual check
    'Sufficient space available'::TEXT

  UNION ALL

  -- Ungranted lock requests signal contention that could stall migration DDL.
  SELECT
    'Active Locks'::VARCHAR,
    (CASE WHEN COUNT(*) = 0 THEN 'PASS' ELSE 'WARN' END)::VARCHAR,
    ('Found ' || COUNT(*) || ' active locks')::TEXT
  FROM pg_locks
  WHERE granted = false

  UNION ALL

  -- Check replication lag (if applicable)
  SELECT
    'Replication Status'::VARCHAR,
    'PASS'::VARCHAR,
    'No replication configured'::TEXT;
END;
$$ LANGUAGE plpgsql;

Post-Migration Validation

-- Post-migration checks confirming a migration applied cleanly.
-- Returns one row per validation with PASS/FAIL status and a message.
CREATE OR REPLACE FUNCTION validate_migration_success(
  migration_version VARCHAR
)
RETURNS TABLE (
  validation_name VARCHAR,
  status VARCHAR,
  message TEXT
) AS $$
BEGIN
  RETURN QUERY

  -- Check that migration was recorded in the tracking table.
  SELECT
    'Migration Record'::VARCHAR,
    (CASE WHEN EXISTS(SELECT 1 FROM migrations WHERE version = migration_version)
          THEN 'PASS' ELSE 'FAIL' END)::VARCHAR,
    ('Migration ' || migration_version || ' recorded')::TEXT

  UNION ALL

  -- Check table integrity
  SELECT
    'Table Integrity'::VARCHAR,
    'PASS'::VARCHAR,  -- Would implement actual integrity checks
    'All tables have valid structure'::TEXT

  UNION ALL

  -- Check constraints.
  -- BUG FIX: the original counted EVERY row of
  -- information_schema.constraint_column_usage in the public schema and
  -- reported FAIL whenever any constraint existed at all — inverted logic,
  -- since a healthy schema is full of constraints. Count constraints still
  -- pending validation (created NOT VALID) instead; those are the actual
  -- leftovers a migration can leave behind.
  SELECT
    'Constraint Validation'::VARCHAR,
    (CASE WHEN COUNT(*) = 0 THEN 'PASS' ELSE 'FAIL' END)::VARCHAR,
    ('Found ' || COUNT(*) || ' unvalidated constraints')::TEXT
  FROM pg_constraint
  WHERE NOT convalidated;
END;
$$ LANGUAGE plpgsql;

This comprehensive migration and versioning strategy ensures safe, reliable database schema evolution for Smart Shelf while maintaining data integrity and system availability.