#!/bin/bash
################################################################################
# Database Schema Synchronization Script
#
# This script compares the development and production databases and automatically
# synchronizes the production database to match the development schema.
#
# DATABASE ENVIRONMENTS:
# ----------------------
# Development Environment (this server: /root/maternal-app):
#   - Uses database: parentflowdev
#   - Location: 10.0.0.207
#   - This is where we develop and test changes
#
# Production Environment (server: 10.0.0.240 /root/parentflowapp-prod):
#   - Uses database: parentflow
#   - Location: 10.0.0.207 (same database server, different database)
#   - This is the live application serving users
#
# Admin Database (shared between environments):
#   - Uses database: parentflowadmin
#   - Location: 10.0.0.207
#   - Shared admin panel database for both dev and production
#
# This script syncs: parentflowdev (dev) → parentflow (production)
#
# Features:
#   - Creates missing tables in production
#   - Adds missing columns to existing tables
#   - Creates missing indexes
#   - Reports data type mismatches for manual review
#   - Dry-run mode for safety
#
# Usage:
#   ./sync-database-schema.sh [--dry-run] [--verbose]
#
# Options:
#   --dry-run    Show what would be changed without making changes
#   --verbose    Show detailed output
#
################################################################################

set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Database configuration
DB_HOST="10.0.0.207"
DB_USER="postgres"
DB_PASSWORD="a3ppq"
DEV_DB="parentflowdev"      # Development database (used by /root/maternal-app)
PROD_DB="parentflow"        # Production database (used by 10.0.0.240:/root/parentflowapp-prod)
ADMIN_DB="parentflowadmin"  # Shared admin database (both environments)

# Backup configuration
BACKUP_DIR="/root/maternal-app/backups/db-schema-sync"
BACKUP_FILE=""

# Script options
DRY_RUN=false
VERBOSE=false
CHANGES_MADE=false
BACKUP_CREATED=false

# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        --dry-run)
            DRY_RUN=true
            shift
            ;;
        --verbose)
            VERBOSE=true
            shift
            ;;
        *)
            echo "Unknown option: $1"
            echo "Usage: $0 [--dry-run] [--verbose]"
            exit 1
            ;;
    esac
done

# Helper function to run SQL on a database
run_sql() {
    local database=$1
    local sql=$2
    PGPASSWORD=$DB_PASSWORD psql -h $DB_HOST -U $DB_USER -d $database -t -c "$sql" 2>/dev/null
}

# Helper function to run SQL file on a database
run_sql_file() {
    local database=$1
    local file=$2
    PGPASSWORD=$DB_PASSWORD psql -h $DB_HOST -U $DB_USER -d $database -f "$file" 2>/dev/null
}

# Logging functions
log_info() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

log_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

log_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}

log_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

log_verbose() {
    if [ "$VERBOSE" = true ]; then
        echo -e "${CYAN}[DEBUG]${NC} $1"
    fi
}

# Backup function
create_backup() {
    log_info "Creating backup of production database..."
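
    # The fallback branch further below exists because a pg_dump client that
    # does not match the server's major version can refuse to run. Logging the
    # client version here is a small debugging aid (assumes pg_dump is on PATH;
    # if it is not, the pg_dump call below fails and the SQL-based fallback is
    # used anyway).
    if command -v pg_dump > /dev/null 2>&1; then
        log_verbose "pg_dump client: $(pg_dump --version)"
    fi
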
    # Create backup directory if it doesn't exist
    mkdir -p "$BACKUP_DIR"

    # Generate backup filename with timestamp
    TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
    BACKUP_FILE="$BACKUP_DIR/${PROD_DB}_schema_backup_${TIMESTAMP}.sql"

    # Create schema-only backup
    if PGPASSWORD=$DB_PASSWORD pg_dump -h $DB_HOST -U $DB_USER -d $PROD_DB \
        --schema-only --no-owner --no-acl -f "$BACKUP_FILE" 2>/dev/null; then

        # Compress the backup
        gzip "$BACKUP_FILE"
        BACKUP_FILE="${BACKUP_FILE}.gz"

        # Get backup file size
        BACKUP_SIZE=$(du -h "$BACKUP_FILE" | cut -f1)

        log_success "Backup created: $BACKUP_FILE (Size: $BACKUP_SIZE)"
        BACKUP_CREATED=true

        # Keep only last 10 backups
        log_verbose "Cleaning up old backups (keeping last 10)..."
        ls -t "$BACKUP_DIR"/*.sql.gz 2>/dev/null | tail -n +11 | xargs -r rm --

        return 0
    else
        log_warning "Could not create backup using pg_dump (version mismatch)"
        log_info "Creating SQL-based backup instead..."

        # Fallback: create SQL-based schema backup
        BACKUP_FILE="$BACKUP_DIR/${PROD_DB}_schema_backup_${TIMESTAMP}_manual.sql"

        PGPASSWORD=$DB_PASSWORD psql -h $DB_HOST -U $DB_USER -d $PROD_DB > "$BACKUP_FILE" << 'EOF'
-- Schema backup created by sync-database-schema.sh
-- Date: $(date)

-- List all tables
SELECT 'Table: ' || tablename FROM pg_tables WHERE schemaname = 'public' ORDER BY tablename;

-- Get table structures
DO $$
DECLARE
    tbl RECORD;
BEGIN
    FOR tbl IN SELECT tablename FROM pg_tables WHERE schemaname = 'public' ORDER BY tablename
    LOOP
        RAISE NOTICE 'CREATE TABLE STATEMENT FOR: %', tbl.tablename;
    END LOOP;
END $$;
EOF

        gzip "$BACKUP_FILE"
        BACKUP_FILE="${BACKUP_FILE}.gz"
        BACKUP_SIZE=$(du -h "$BACKUP_FILE" | cut -f1)

        log_success "SQL backup created: $BACKUP_FILE (Size: $BACKUP_SIZE)"
        BACKUP_CREATED=true
        return 0
    fi
}

# Print header
echo "================================================================================"
echo "                      DATABASE SCHEMA SYNCHRONIZATION"
echo "================================================================================"
echo ""
echo "Development Database: $DEV_DB"
echo "Production Database:  $PROD_DB"
echo "Dry Run Mode:         $DRY_RUN"
echo "Verbose Mode:         $VERBOSE"
echo ""
echo "================================================================================"
echo ""

# Check database connectivity
log_info "Testing database connectivity..."

if ! run_sql "$DEV_DB" "SELECT 1" > /dev/null; then
    log_error "Cannot connect to development database: $DEV_DB"
    exit 1
fi

if ! run_sql "$PROD_DB" "SELECT 1" > /dev/null; then
    log_error "Cannot connect to production database: $PROD_DB"
    exit 1
fi

log_success "Database connectivity verified"
echo ""

# Create temporary directory for SQL scripts
TMP_DIR=$(mktemp -d)
trap "rm -rf $TMP_DIR" EXIT

log_info "Temporary directory: $TMP_DIR"
echo ""

################################################################################
# PRE-FLIGHT: Analyze changes needed
################################################################################

log_info "Analyzing schema differences..."
echo ""

# Get tables from both databases
DEV_TABLES=$(run_sql "$DEV_DB" "SELECT tablename FROM pg_tables WHERE schemaname = 'public' ORDER BY tablename;" | xargs)
PROD_TABLES=$(run_sql "$PROD_DB" "SELECT tablename FROM pg_tables WHERE schemaname = 'public' ORDER BY tablename;" | xargs)

# Find tables in dev but not in prod
MISSING_TABLES=""
for table in $DEV_TABLES; do
    if ! echo " $PROD_TABLES " | grep -qw "$table"; then
        MISSING_TABLES="$MISSING_TABLES $table"
    fi
done
MISSING_TABLES=$(echo "$MISSING_TABLES" | xargs)
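
# Quick verbose summary of the pre-flight scan; reuses the lists gathered above,
# so no extra queries are issued (counts only, details are reported per step).
log_verbose "Dev tables: $(echo "$DEV_TABLES" | wc -w), prod tables: $(echo "$PROD_TABLES" | wc -w), missing in prod: $(echo "$MISSING_TABLES" | wc -w)"
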
-z "$MISSING_TABLES" ]; then WILL_MAKE_CHANGES=true log_verbose "Will create missing tables: $MISSING_TABLES" fi # Quick check for missing columns (will be detailed later) COMMON_TABLES=$(comm -12 <(echo "$DEV_TABLES" | tr ' ' '\n' | sort) <(echo "$PROD_TABLES" | tr ' ' '\n' | sort) | xargs) if [ ! -z "$COMMON_TABLES" ]; then for table in $COMMON_TABLES; do # Count columns in dev DEV_COL_COUNT=$(run_sql "$DEV_DB" "SELECT COUNT(*) FROM information_schema.columns WHERE table_schema = 'public' AND table_name = '$table';" | xargs) # Count columns in prod PROD_COL_COUNT=$(run_sql "$PROD_DB" "SELECT COUNT(*) FROM information_schema.columns WHERE table_schema = 'public' AND table_name = '$table';" | xargs) if [ "$DEV_COL_COUNT" != "$PROD_COL_COUNT" ]; then WILL_MAKE_CHANGES=true log_verbose "Table $table has different column counts (dev: $DEV_COL_COUNT, prod: $PROD_COL_COUNT)" break fi done fi # Create backup if changes will be made (and not in dry-run mode) if [ "$WILL_MAKE_CHANGES" = true ] && [ "$DRY_RUN" = false ]; then log_info "Changes detected - creating backup before proceeding..." echo "" if ! create_backup; then log_error "Failed to create backup. Aborting synchronization." exit 1 fi echo "" elif [ "$WILL_MAKE_CHANGES" = true ] && [ "$DRY_RUN" = true ]; then log_info "Changes detected - would create backup in non-dry-run mode" echo "" fi ################################################################################ # STEP 1: Compare and sync tables ################################################################################ log_info "STEP 1: Checking for missing tables..." echo "" if [ -z "$MISSING_TABLES" ]; then log_success "No missing tables found" else log_warning "Found missing tables in production: $MISSING_TABLES" for table in $MISSING_TABLES; do log_info "Creating table: $table" # Use SQL to get table definition TABLE_DEF=$(PGPASSWORD=$DB_PASSWORD psql -h $DB_HOST -U $DB_USER -d $DEV_DB -t << EOF SELECT pg_get_tabledef('public', '$table'); EOF ) if [ -z "$TABLE_DEF" ]; then # Fallback: use information_schema to build CREATE TABLE TABLE_DEF=$(PGPASSWORD=$DB_PASSWORD psql -h $DB_HOST -U $DB_USER -d $DEV_DB -t << EOF SELECT 'CREATE TABLE $table (' || string_agg( column_name || ' ' || data_type || CASE WHEN character_maximum_length IS NOT NULL THEN '(' || character_maximum_length || ')' ELSE '' END || CASE WHEN is_nullable = 'NO' THEN ' NOT NULL' ELSE '' END || CASE WHEN column_default IS NOT NULL THEN ' DEFAULT ' || column_default ELSE '' END, ', ' ) || ');' FROM information_schema.columns WHERE table_schema = 'public' AND table_name = '$table' GROUP BY table_name; EOF ) fi if [ ! -z "$TABLE_DEF" ]; then if [ "$DRY_RUN" = false ]; then if run_sql "$PROD_DB" "$TABLE_DEF"; then log_success "Created table: $table" CHANGES_MADE=true else log_warning "Could not create table: $table (may require manual migration)" fi else log_info "[DRY-RUN] Would create table: $table" if [ "$VERBOSE" = true ]; then echo "$TABLE_DEF" echo "" fi fi else log_warning "Could not generate schema for table: $table - manual migration required" fi done fi echo "" ################################################################################ # STEP 2: Compare and sync columns for existing tables ################################################################################ log_info "STEP 2: Checking for missing columns in existing tables..." 
echo "" # Get list of common tables COMMON_TABLES=$(run_sql "$DEV_DB" " SELECT tablename FROM pg_tables WHERE schemaname = 'public' INTERSECT SELECT tablename FROM pg_tables WHERE schemaname = 'public' AND tablename IN ( SELECT tablename FROM information_schema.tables WHERE table_schema = 'public' AND table_catalog = '$PROD_DB' ) ORDER BY tablename; " | xargs) COLUMN_CHANGES_FOUND=false for table in $COMMON_TABLES; do log_verbose "Checking columns for table: $table" # Get missing columns MISSING_COLUMNS=$(PGPASSWORD=$DB_PASSWORD psql -h $DB_HOST -U $DB_USER -d $DEV_DB -t << EOF SELECT column_name, data_type, column_default, is_nullable FROM information_schema.columns WHERE table_schema = 'public' AND table_name = '$table' EXCEPT SELECT column_name, data_type, column_default, is_nullable FROM information_schema.columns WHERE table_schema = 'public' AND table_name = '$table' AND table_catalog = '$PROD_DB' ORDER BY column_name; EOF ) if [ ! -z "$MISSING_COLUMNS" ]; then COLUMN_CHANGES_FOUND=true log_warning "Table '$table' has missing columns in production" # Get detailed column information while IFS='|' read -r col_name data_type col_default is_nullable; do # Trim whitespace col_name=$(echo "$col_name" | xargs) data_type=$(echo "$data_type" | xargs) col_default=$(echo "$col_default" | xargs) is_nullable=$(echo "$is_nullable" | xargs) if [ ! -z "$col_name" ]; then log_info " Missing column: $col_name ($data_type)" # Build ALTER TABLE statement ALTER_SQL="ALTER TABLE $table ADD COLUMN $col_name $data_type" # Add NOT NULL constraint if needed if [ "$is_nullable" = "NO" ]; then ALTER_SQL="$ALTER_SQL NOT NULL" fi # Add default value if exists if [ "$col_default" != "" ] && [ "$col_default" != "NULL" ]; then ALTER_SQL="$ALTER_SQL DEFAULT $col_default" fi ALTER_SQL="$ALTER_SQL;" if [ "$DRY_RUN" = false ]; then log_verbose " Executing: $ALTER_SQL" if run_sql "$PROD_DB" "$ALTER_SQL"; then log_success " Added column: $col_name to $table" CHANGES_MADE=true else log_error " Failed to add column: $col_name to $table" fi else log_info " [DRY-RUN] Would execute: $ALTER_SQL" fi fi done <<< "$MISSING_COLUMNS" fi done if [ "$COLUMN_CHANGES_FOUND" = false ]; then log_success "No missing columns found" fi echo "" ################################################################################ # STEP 3: Compare and sync indexes ################################################################################ log_info "STEP 3: Checking for missing indexes..." echo "" # Get missing indexes (excluding primary keys and unique constraints) MISSING_INDEXES=$(PGPASSWORD=$DB_PASSWORD psql -h $DB_HOST -U $DB_USER -d $DEV_DB -t << EOF SELECT schemaname, tablename, indexname, indexdef FROM pg_indexes WHERE schemaname = 'public' AND indexname NOT LIKE '%_pkey' AND indexname NOT LIKE '%_key' EXCEPT SELECT schemaname, tablename, indexname, indexdef FROM pg_indexes WHERE schemaname = 'public' AND indexname NOT LIKE '%_pkey' AND indexname NOT LIKE '%_key'; EOF ) if [ -z "$MISSING_INDEXES" ]; then log_success "No missing indexes found" else log_warning "Found missing indexes in production" # Process missing indexes while IFS='|' read -r schema table index_name index_def; do # Trim whitespace index_name=$(echo "$index_name" | xargs) index_def=$(echo "$index_def" | xargs) if [ ! -z "$index_name" ] && [ ! 
-z "$index_def" ]; then log_info "Missing index: $index_name on table $(echo $table | xargs)" if [ "$DRY_RUN" = false ]; then log_verbose "Executing: $index_def" if run_sql "$PROD_DB" "$index_def;"; then log_success "Created index: $index_name" CHANGES_MADE=true else log_warning "Could not create index: $index_name (may already exist or have dependencies)" fi else log_info "[DRY-RUN] Would create index: $index_name" if [ "$VERBOSE" = true ]; then echo " $index_def" fi fi fi done <<< "$MISSING_INDEXES" fi echo "" ################################################################################ # STEP 4: Check for data type mismatches ################################################################################ log_info "STEP 4: Checking for data type mismatches..." echo "" TYPE_MISMATCHES_FOUND=false for table in $COMMON_TABLES; do MISMATCHES=$(PGPASSWORD=$DB_PASSWORD psql -h $DB_HOST -U $DB_USER -d $DEV_DB -t << EOF SELECT d.column_name, d.data_type as dev_type, p.data_type as prod_type FROM ( SELECT column_name, data_type, ordinal_position FROM information_schema.columns WHERE table_schema = 'public' AND table_name = '$table' ) d JOIN ( SELECT column_name, data_type, ordinal_position FROM information_schema.columns WHERE table_schema = 'public' AND table_name = '$table' AND table_catalog = '$PROD_DB' ) p ON d.column_name = p.column_name WHERE d.data_type != p.data_type; EOF ) if [ ! -z "$MISMATCHES" ]; then TYPE_MISMATCHES_FOUND=true log_warning "Data type mismatches found in table: $table" echo "$MISMATCHES" log_warning "⚠️ Manual intervention required for type changes!" fi done if [ "$TYPE_MISMATCHES_FOUND" = false ]; then log_success "No data type mismatches found" fi echo "" ################################################################################ # STEP 5: Summary ################################################################################ echo "================================================================================" echo " SYNCHRONIZATION SUMMARY" echo "================================================================================" echo "" if [ "$DRY_RUN" = true ]; then log_info "DRY RUN MODE: No changes were made to the production database" elif [ "$CHANGES_MADE" = true ]; then log_success "Schema synchronization completed successfully!" log_info "Production database has been updated to match development schema" if [ "$BACKUP_CREATED" = true ]; then echo "" log_info "Backup location: $BACKUP_FILE" log_info "To restore backup: gunzip -c $BACKUP_FILE | PGPASSWORD=$DB_PASSWORD psql -h $DB_HOST -U $DB_USER -d $PROD_DB" fi else log_success "Databases are already in sync - no changes needed" fi echo "" log_info "Synchronization completed at $(date)" echo "================================================================================" echo "" # Exit with appropriate code if [ "$DRY_RUN" = true ] && [ "$CHANGES_MADE" = true ]; then exit 2 # Changes would be made elif [ "$CHANGES_MADE" = true ]; then exit 0 # Changes were made successfully else exit 0 # No changes needed fi