Merge branch 'master' into feature-markdown-notes
This commit is contained in:
24
migrations/migration_list.txt
Normal file
24
migrations/migration_list.txt
Normal file
@@ -0,0 +1,24 @@
|
||||
# Database Migration Scripts - In Order of Execution
|
||||
|
||||
## Phase 1: SQLite Schema Updates (Run first)
|
||||
01_migrate_db.py - Update SQLite schema with all necessary columns and tables
|
||||
|
||||
## Phase 2: Data Migration (Run after SQLite updates)
|
||||
02_migrate_sqlite_to_postgres.py - Migrate data from updated SQLite to PostgreSQL
|
||||
|
||||
## Phase 3: PostgreSQL Schema Migrations (Run after data migration)
|
||||
03_add_dashboard_columns.py - Add missing columns to user_dashboard table
|
||||
04_add_user_preferences_columns.py - Add missing columns to user_preferences table
|
||||
05_fix_task_status_enum.py - Fix task status enum values in database
|
||||
06_add_archived_status.py - Add ARCHIVED status to task_status enum
|
||||
07_fix_company_work_config_columns.py - Fix company work config column names
|
||||
08_fix_work_region_enum.py - Fix work region enum values
|
||||
09_add_germany_to_workregion.py - Add GERMANY back to work_region enum
|
||||
10_add_company_settings_columns.py - Add missing columns to company_settings table
|
||||
|
||||
## Phase 4: Code Migrations (Run after all schema migrations)
|
||||
11_fix_company_work_config_usage.py - Update code references to CompanyWorkConfig fields
|
||||
12_fix_task_status_usage.py - Update code references to TaskStatus enum values
|
||||
13_fix_work_region_usage.py - Update code references to WorkRegion enum values
|
||||
14_fix_removed_fields.py - Handle removed fields in code
|
||||
15_repair_user_roles.py - Fix user roles from string to enum values
|
||||
79
migrations/old_migrations/00_migration_summary.py
Executable file
79
migrations/old_migrations/00_migration_summary.py
Executable file
@@ -0,0 +1,79 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Summary of all model migrations to be performed
|
||||
"""
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
def print_section(title, items):
|
||||
"""Print a formatted section"""
|
||||
print(f"\n{'='*60}")
|
||||
print(f"📌 {title}")
|
||||
print('='*60)
|
||||
for item in items:
|
||||
print(f" {item}")
|
||||
|
||||
def main():
|
||||
print("🔍 Model Migration Summary")
|
||||
print("="*60)
|
||||
print("\nThis will update your codebase to match the refactored models.")
|
||||
|
||||
# CompanyWorkConfig changes
|
||||
print_section("CompanyWorkConfig Field Changes", [
|
||||
"✓ work_hours_per_day → standard_hours_per_day",
|
||||
"✓ mandatory_break_minutes → break_duration_minutes",
|
||||
"✓ break_threshold_hours → break_after_hours",
|
||||
"✓ region → work_region",
|
||||
"✗ REMOVED: additional_break_minutes",
|
||||
"✗ REMOVED: additional_break_threshold_hours",
|
||||
"✗ REMOVED: region_name (use work_region.value)",
|
||||
"✗ REMOVED: created_by_id",
|
||||
"+ ADDED: standard_hours_per_week, overtime_enabled, overtime_rate, etc."
|
||||
])
|
||||
|
||||
# TaskStatus changes
|
||||
print_section("TaskStatus Enum Changes", [
|
||||
"✓ NOT_STARTED → TODO",
|
||||
"✓ COMPLETED → DONE",
|
||||
"✓ ON_HOLD → IN_REVIEW",
|
||||
"+ KEPT: ARCHIVED (separate from CANCELLED)"
|
||||
])
|
||||
|
||||
# WorkRegion changes
|
||||
print_section("WorkRegion Enum Changes", [
|
||||
"✓ UNITED_STATES → USA",
|
||||
"✓ UNITED_KINGDOM → UK",
|
||||
"✓ FRANCE → EU",
|
||||
"✓ EUROPEAN_UNION → EU",
|
||||
"✓ CUSTOM → OTHER",
|
||||
"! KEPT: GERMANY (specific labor laws)"
|
||||
])
|
||||
|
||||
# Files to be modified
|
||||
print_section("Files That Will Be Modified", [
|
||||
"Python files: app.py, routes/*.py",
|
||||
"Templates: admin_company.html, admin_work_policies.html, config.html",
|
||||
"JavaScript: static/js/*.js (for task status)",
|
||||
"Removed field references will be commented out"
|
||||
])
|
||||
|
||||
# Safety notes
|
||||
print_section("⚠️ Important Notes", [
|
||||
"BACKUP your code before running migrations",
|
||||
"Removed fields will be commented with # REMOVED:",
|
||||
"Review all changes after migration",
|
||||
"Test thoroughly, especially:",
|
||||
" - Company work policy configuration",
|
||||
" - Task status transitions",
|
||||
" - Regional preset selection",
|
||||
"Consider implementing audit logging for created_by tracking"
|
||||
])
|
||||
|
||||
print("\n" + "="*60)
|
||||
print("🎯 To run all migrations: python migrations/run_all_migrations.py")
|
||||
print("🎯 To run individually: python migrations/01_fix_company_work_config_usage.py")
|
||||
print("="*60)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
1897
migrations/old_migrations/01_migrate_db.py
Normal file
1897
migrations/old_migrations/01_migrate_db.py
Normal file
File diff suppressed because it is too large
Load Diff
408
migrations/old_migrations/02_migrate_sqlite_to_postgres.py
Normal file
408
migrations/old_migrations/02_migrate_sqlite_to_postgres.py
Normal file
@@ -0,0 +1,408 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
SQLite to PostgreSQL Migration Script for TimeTrack
|
||||
This script migrates data from SQLite to PostgreSQL database.
|
||||
"""
|
||||
|
||||
import sqlite3
|
||||
import psycopg2
|
||||
import os
|
||||
import sys
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from psycopg2.extras import RealDictCursor
|
||||
import json
|
||||
|
||||
# Add parent directory to path to import app
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format='%(asctime)s - %(levelname)s - %(message)s',
|
||||
handlers=[
|
||||
logging.FileHandler('migration.log'),
|
||||
logging.StreamHandler()
|
||||
]
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class SQLiteToPostgresMigration:
|
||||
def __init__(self, sqlite_path, postgres_url):
|
||||
self.sqlite_path = sqlite_path
|
||||
self.postgres_url = postgres_url
|
||||
self.sqlite_conn = None
|
||||
self.postgres_conn = None
|
||||
self.migration_stats = {}
|
||||
|
||||
def connect_databases(self):
|
||||
"""Connect to both SQLite and PostgreSQL databases"""
|
||||
try:
|
||||
# Connect to SQLite
|
||||
self.sqlite_conn = sqlite3.connect(self.sqlite_path)
|
||||
self.sqlite_conn.row_factory = sqlite3.Row
|
||||
logger.info(f"Connected to SQLite database: {self.sqlite_path}")
|
||||
|
||||
# Connect to PostgreSQL
|
||||
self.postgres_conn = psycopg2.connect(self.postgres_url)
|
||||
self.postgres_conn.autocommit = False
|
||||
logger.info("Connected to PostgreSQL database")
|
||||
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to connect to databases: {e}")
|
||||
return False
|
||||
|
||||
def close_connections(self):
|
||||
"""Close database connections"""
|
||||
if self.sqlite_conn:
|
||||
self.sqlite_conn.close()
|
||||
if self.postgres_conn:
|
||||
self.postgres_conn.close()
|
||||
|
||||
def backup_postgres(self):
|
||||
"""Create a backup of existing PostgreSQL data"""
|
||||
try:
|
||||
with self.postgres_conn.cursor() as cursor:
|
||||
# Check if tables exist and have data
|
||||
cursor.execute("""
|
||||
SELECT table_name FROM information_schema.tables
|
||||
WHERE table_schema = 'public'
|
||||
""")
|
||||
tables = cursor.fetchall()
|
||||
|
||||
if tables:
|
||||
backup_file = f"postgres_backup_{datetime.now().strftime('%Y%m%d_%H%M%S')}.sql"
|
||||
logger.info(f"Creating PostgreSQL backup: {backup_file}")
|
||||
|
||||
# Use pg_dump for backup
|
||||
os.system(f"pg_dump '{self.postgres_url}' > {backup_file}")
|
||||
logger.info(f"Backup created: {backup_file}")
|
||||
return backup_file
|
||||
else:
|
||||
logger.info("No existing PostgreSQL tables found, skipping backup")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to create backup: {e}")
|
||||
return None
|
||||
|
||||
def check_sqlite_database(self):
|
||||
"""Check if SQLite database exists and has data"""
|
||||
if not os.path.exists(self.sqlite_path):
|
||||
logger.error(f"SQLite database not found: {self.sqlite_path}")
|
||||
return False
|
||||
|
||||
try:
|
||||
cursor = self.sqlite_conn.cursor()
|
||||
cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
|
||||
tables = cursor.fetchall()
|
||||
|
||||
if not tables:
|
||||
logger.info("SQLite database is empty, nothing to migrate")
|
||||
return False
|
||||
|
||||
logger.info(f"Found {len(tables)} tables in SQLite database")
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Error checking SQLite database: {e}")
|
||||
return False
|
||||
|
||||
def create_postgres_tables(self, clear_existing=False):
|
||||
"""Create PostgreSQL tables using Flask-SQLAlchemy models"""
|
||||
try:
|
||||
# Import Flask app and create tables
|
||||
from app import app, db
|
||||
|
||||
with app.app_context():
|
||||
# Set the database URI to PostgreSQL
|
||||
app.config['SQLALCHEMY_DATABASE_URI'] = self.postgres_url
|
||||
|
||||
if clear_existing:
|
||||
logger.info("Clearing existing PostgreSQL data...")
|
||||
db.drop_all()
|
||||
logger.info("Dropped all existing tables")
|
||||
|
||||
# Create all tables
|
||||
db.create_all()
|
||||
logger.info("Created PostgreSQL tables")
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to create PostgreSQL tables: {e}")
|
||||
return False
|
||||
|
||||
def migrate_table_data(self, table_name, column_mapping=None):
|
||||
"""Migrate data from SQLite table to PostgreSQL"""
|
||||
try:
|
||||
sqlite_cursor = self.sqlite_conn.cursor()
|
||||
postgres_cursor = self.postgres_conn.cursor()
|
||||
|
||||
# Check if table exists in SQLite
|
||||
sqlite_cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?", (table_name,))
|
||||
if not sqlite_cursor.fetchone():
|
||||
logger.info(f"Table {table_name} does not exist in SQLite, skipping...")
|
||||
self.migration_stats[table_name] = 0
|
||||
return True
|
||||
|
||||
# Get data from SQLite
|
||||
sqlite_cursor.execute(f"SELECT * FROM {table_name}")
|
||||
rows = sqlite_cursor.fetchall()
|
||||
|
||||
if not rows:
|
||||
logger.info(f"No data found in table: {table_name}")
|
||||
self.migration_stats[table_name] = 0
|
||||
return True
|
||||
|
||||
# Get column names
|
||||
column_names = [description[0] for description in sqlite_cursor.description]
|
||||
|
||||
# Apply column mapping if provided
|
||||
if column_mapping:
|
||||
column_names = [column_mapping.get(col, col) for col in column_names]
|
||||
|
||||
# Prepare insert statement
|
||||
placeholders = ', '.join(['%s'] * len(column_names))
|
||||
columns = ', '.join([f'"{col}"' for col in column_names]) # Quote column names
|
||||
insert_sql = f'INSERT INTO "{table_name}" ({columns}) VALUES ({placeholders})' # Quote table name
|
||||
|
||||
# Convert rows to list of tuples
|
||||
data_rows = []
|
||||
for row in rows:
|
||||
data_row = []
|
||||
for i, value in enumerate(row):
|
||||
col_name = column_names[i]
|
||||
# Handle special data type conversions
|
||||
if value is None:
|
||||
data_row.append(None)
|
||||
elif isinstance(value, str) and value.startswith('{"') and value.endswith('}'):
|
||||
# Handle JSON strings
|
||||
data_row.append(value)
|
||||
elif (col_name.startswith('is_') or col_name.endswith('_enabled') or col_name in ['is_paused']) and isinstance(value, int):
|
||||
# Convert integer boolean to actual boolean for PostgreSQL
|
||||
data_row.append(bool(value))
|
||||
elif isinstance(value, str) and value == '':
|
||||
# Convert empty strings to None for PostgreSQL
|
||||
data_row.append(None)
|
||||
else:
|
||||
data_row.append(value)
|
||||
data_rows.append(tuple(data_row))
|
||||
|
||||
# Check if we should clear existing data first (for tables with unique constraints)
|
||||
if table_name in ['company', 'team', 'user']:
|
||||
postgres_cursor.execute(f'SELECT COUNT(*) FROM "{table_name}"')
|
||||
existing_count = postgres_cursor.fetchone()[0]
|
||||
if existing_count > 0:
|
||||
logger.warning(f"Table {table_name} already has {existing_count} rows. Skipping to avoid duplicates.")
|
||||
self.migration_stats[table_name] = 0
|
||||
return True
|
||||
|
||||
# Insert data in batches
|
||||
batch_size = 1000
|
||||
for i in range(0, len(data_rows), batch_size):
|
||||
batch = data_rows[i:i + batch_size]
|
||||
try:
|
||||
postgres_cursor.executemany(insert_sql, batch)
|
||||
self.postgres_conn.commit()
|
||||
except Exception as batch_error:
|
||||
logger.error(f"Error inserting batch {i//batch_size + 1} for table {table_name}: {batch_error}")
|
||||
# Try inserting rows one by one to identify problematic rows
|
||||
self.postgres_conn.rollback()
|
||||
for j, row in enumerate(batch):
|
||||
try:
|
||||
postgres_cursor.execute(insert_sql, row)
|
||||
self.postgres_conn.commit()
|
||||
except Exception as row_error:
|
||||
logger.error(f"Error inserting row {i + j} in table {table_name}: {row_error}")
|
||||
logger.error(f"Problematic row data: {row}")
|
||||
self.postgres_conn.rollback()
|
||||
|
||||
logger.info(f"Migrated {len(rows)} rows from table: {table_name}")
|
||||
self.migration_stats[table_name] = len(rows)
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to migrate table {table_name}: {e}")
|
||||
self.postgres_conn.rollback()
|
||||
return False
|
||||
|
||||
def update_sequences(self):
|
||||
"""Update PostgreSQL sequences after data migration"""
|
||||
try:
|
||||
with self.postgres_conn.cursor() as cursor:
|
||||
# Get all sequences - fix the query to properly extract sequence names
|
||||
cursor.execute("""
|
||||
SELECT
|
||||
pg_get_serial_sequence(table_name, column_name) as sequence_name,
|
||||
column_name,
|
||||
table_name
|
||||
FROM information_schema.columns
|
||||
WHERE column_default LIKE 'nextval%'
|
||||
AND table_schema = 'public'
|
||||
""")
|
||||
sequences = cursor.fetchall()
|
||||
|
||||
for seq_name, col_name, table_name in sequences:
|
||||
if seq_name is None:
|
||||
continue
|
||||
# Get the maximum value for each sequence
|
||||
cursor.execute(f'SELECT MAX("{col_name}") FROM "{table_name}"')
|
||||
max_val = cursor.fetchone()[0]
|
||||
|
||||
if max_val is not None:
|
||||
# Update sequence to start from max_val + 1 - don't quote sequence name from pg_get_serial_sequence
|
||||
cursor.execute(f'ALTER SEQUENCE {seq_name} RESTART WITH {max_val + 1}')
|
||||
logger.info(f"Updated sequence {seq_name} to start from {max_val + 1}")
|
||||
|
||||
self.postgres_conn.commit()
|
||||
logger.info("Updated PostgreSQL sequences")
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to update sequences: {e}")
|
||||
self.postgres_conn.rollback()
|
||||
return False
|
||||
|
||||
def migrate_all_data(self):
|
||||
"""Migrate all data from SQLite to PostgreSQL"""
|
||||
# Define table migration order (respecting foreign key constraints)
|
||||
migration_order = [
|
||||
'company',
|
||||
'team',
|
||||
'project_category',
|
||||
'user',
|
||||
'project',
|
||||
'task',
|
||||
'sub_task',
|
||||
'time_entry',
|
||||
'work_config',
|
||||
'company_work_config',
|
||||
'user_preferences',
|
||||
'system_settings'
|
||||
]
|
||||
|
||||
for table_name in migration_order:
|
||||
if not self.migrate_table_data(table_name):
|
||||
logger.error(f"Migration failed at table: {table_name}")
|
||||
return False
|
||||
|
||||
# Update sequences after all data is migrated
|
||||
if not self.update_sequences():
|
||||
logger.error("Failed to update sequences")
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def verify_migration(self):
|
||||
"""Verify that migration was successful"""
|
||||
try:
|
||||
sqlite_cursor = self.sqlite_conn.cursor()
|
||||
postgres_cursor = self.postgres_conn.cursor()
|
||||
|
||||
# Get table names from SQLite
|
||||
sqlite_cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
|
||||
sqlite_tables = [row[0] for row in sqlite_cursor.fetchall()]
|
||||
|
||||
verification_results = {}
|
||||
|
||||
for table_name in sqlite_tables:
|
||||
if table_name == 'sqlite_sequence':
|
||||
continue
|
||||
|
||||
# Count rows in SQLite
|
||||
sqlite_cursor.execute(f"SELECT COUNT(*) FROM {table_name}")
|
||||
sqlite_count = sqlite_cursor.fetchone()[0]
|
||||
|
||||
# Count rows in PostgreSQL
|
||||
postgres_cursor.execute(f'SELECT COUNT(*) FROM "{table_name}"')
|
||||
postgres_count = postgres_cursor.fetchone()[0]
|
||||
|
||||
verification_results[table_name] = {
|
||||
'sqlite_count': sqlite_count,
|
||||
'postgres_count': postgres_count,
|
||||
'match': sqlite_count == postgres_count
|
||||
}
|
||||
|
||||
if sqlite_count == postgres_count:
|
||||
logger.info(f"✓ Table {table_name}: {sqlite_count} rows migrated successfully")
|
||||
else:
|
||||
logger.error(f"✗ Table {table_name}: SQLite={sqlite_count}, PostgreSQL={postgres_count}")
|
||||
|
||||
return verification_results
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Verification failed: {e}")
|
||||
return None
|
||||
|
||||
def run_migration(self, clear_existing=False):
|
||||
"""Run the complete migration process"""
|
||||
logger.info("Starting SQLite to PostgreSQL migration...")
|
||||
|
||||
# Connect to databases
|
||||
if not self.connect_databases():
|
||||
return False
|
||||
|
||||
try:
|
||||
# Check SQLite database
|
||||
if not self.check_sqlite_database():
|
||||
return False
|
||||
|
||||
# Create backup
|
||||
backup_file = self.backup_postgres()
|
||||
|
||||
# Create PostgreSQL tables
|
||||
if not self.create_postgres_tables(clear_existing=clear_existing):
|
||||
return False
|
||||
|
||||
# Migrate data
|
||||
if not self.migrate_all_data():
|
||||
return False
|
||||
|
||||
# Verify migration
|
||||
verification = self.verify_migration()
|
||||
if verification:
|
||||
logger.info("Migration verification completed")
|
||||
for table, stats in verification.items():
|
||||
if not stats['match']:
|
||||
logger.error(f"Migration verification failed for table: {table}")
|
||||
return False
|
||||
|
||||
logger.info("Migration completed successfully!")
|
||||
logger.info(f"Migration statistics: {self.migration_stats}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Migration failed: {e}")
|
||||
return False
|
||||
finally:
|
||||
self.close_connections()
|
||||
|
||||
def main():
|
||||
"""Main migration function"""
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser(description='Migrate SQLite to PostgreSQL')
|
||||
parser.add_argument('--clear-existing', action='store_true',
|
||||
help='Clear existing PostgreSQL data before migration')
|
||||
parser.add_argument('--sqlite-path', default=os.environ.get('SQLITE_PATH', '/data/timetrack.db'),
|
||||
help='Path to SQLite database')
|
||||
args = parser.parse_args()
|
||||
|
||||
# Get database paths from environment variables
|
||||
sqlite_path = args.sqlite_path
|
||||
postgres_url = os.environ.get('DATABASE_URL')
|
||||
|
||||
if not postgres_url:
|
||||
logger.error("DATABASE_URL environment variable not set")
|
||||
return 1
|
||||
|
||||
# Check if SQLite database exists
|
||||
if not os.path.exists(sqlite_path):
|
||||
logger.info(f"SQLite database not found at {sqlite_path}, skipping migration")
|
||||
return 0
|
||||
|
||||
# Run migration
|
||||
migration = SQLiteToPostgresMigration(sqlite_path, postgres_url)
|
||||
success = migration.run_migration(clear_existing=args.clear_existing)
|
||||
|
||||
return 0 if success else 1
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
361
migrations/old_migrations/02_migrate_sqlite_to_postgres_fixed.py
Normal file
361
migrations/old_migrations/02_migrate_sqlite_to_postgres_fixed.py
Normal file
@@ -0,0 +1,361 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Fixed SQLite to PostgreSQL Migration Script for TimeTrack
|
||||
This script properly handles empty SQLite databases and column mapping issues.
|
||||
"""
|
||||
|
||||
import sqlite3
|
||||
import psycopg2
|
||||
import os
|
||||
import sys
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from psycopg2.extras import RealDictCursor
|
||||
import json
|
||||
|
||||
# Add parent directory to path to import app
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format='%(asctime)s - %(levelname)s - %(message)s',
|
||||
handlers=[
|
||||
logging.FileHandler('migration.log'),
|
||||
logging.StreamHandler()
|
||||
]
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class SQLiteToPostgresMigration:
|
||||
def __init__(self, sqlite_path, postgres_url):
|
||||
self.sqlite_path = sqlite_path
|
||||
self.postgres_url = postgres_url
|
||||
self.sqlite_conn = None
|
||||
self.postgres_conn = None
|
||||
self.migration_stats = {}
|
||||
|
||||
# Column mapping for SQLite to PostgreSQL
|
||||
self.column_mapping = {
|
||||
'project': {
|
||||
# Map SQLite columns to PostgreSQL columns
|
||||
# Ensure company_id is properly mapped
|
||||
'company_id': 'company_id',
|
||||
'user_id': 'company_id' # Map user_id to company_id if needed
|
||||
}
|
||||
}
|
||||
|
||||
def connect_databases(self):
|
||||
"""Connect to both SQLite and PostgreSQL databases"""
|
||||
try:
|
||||
# Connect to SQLite
|
||||
self.sqlite_conn = sqlite3.connect(self.sqlite_path)
|
||||
self.sqlite_conn.row_factory = sqlite3.Row
|
||||
logger.info(f"Connected to SQLite database: {self.sqlite_path}")
|
||||
|
||||
# Connect to PostgreSQL
|
||||
self.postgres_conn = psycopg2.connect(self.postgres_url)
|
||||
self.postgres_conn.autocommit = False
|
||||
logger.info("Connected to PostgreSQL database")
|
||||
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to connect to databases: {e}")
|
||||
return False
|
||||
|
||||
def close_connections(self):
|
||||
"""Close database connections"""
|
||||
if self.sqlite_conn:
|
||||
self.sqlite_conn.close()
|
||||
if self.postgres_conn:
|
||||
self.postgres_conn.close()
|
||||
|
||||
def check_sqlite_database(self):
|
||||
"""Check if SQLite database exists and has data"""
|
||||
if not os.path.exists(self.sqlite_path):
|
||||
logger.error(f"SQLite database not found: {self.sqlite_path}")
|
||||
return False
|
||||
|
||||
try:
|
||||
cursor = self.sqlite_conn.cursor()
|
||||
cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
|
||||
tables = cursor.fetchall()
|
||||
|
||||
if not tables:
|
||||
logger.info("SQLite database is empty, nothing to migrate")
|
||||
return False
|
||||
|
||||
logger.info(f"Found {len(tables)} tables in SQLite database")
|
||||
for table in tables:
|
||||
logger.info(f" - {table[0]}")
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Error checking SQLite database: {e}")
|
||||
return False
|
||||
|
||||
def clear_postgres_data(self):
|
||||
"""Clear existing data from PostgreSQL tables that will be migrated"""
|
||||
try:
|
||||
with self.postgres_conn.cursor() as cursor:
|
||||
# Tables to clear in reverse order of dependencies
|
||||
tables_to_clear = [
|
||||
'time_entry',
|
||||
'sub_task',
|
||||
'task',
|
||||
'project',
|
||||
'user',
|
||||
'team',
|
||||
'company',
|
||||
'work_config',
|
||||
'system_settings'
|
||||
]
|
||||
|
||||
for table in tables_to_clear:
|
||||
try:
|
||||
cursor.execute(f'DELETE FROM "{table}"')
|
||||
logger.info(f"Cleared table: {table}")
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not clear table {table}: {e}")
|
||||
self.postgres_conn.rollback()
|
||||
|
||||
self.postgres_conn.commit()
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to clear PostgreSQL data: {e}")
|
||||
self.postgres_conn.rollback()
|
||||
return False
|
||||
|
||||
def migrate_table_data(self, table_name):
|
||||
"""Migrate data from SQLite table to PostgreSQL"""
|
||||
try:
|
||||
sqlite_cursor = self.sqlite_conn.cursor()
|
||||
postgres_cursor = self.postgres_conn.cursor()
|
||||
|
||||
# Check if table exists in SQLite
|
||||
sqlite_cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?", (table_name,))
|
||||
if not sqlite_cursor.fetchone():
|
||||
logger.info(f"Table {table_name} does not exist in SQLite, skipping...")
|
||||
self.migration_stats[table_name] = 0
|
||||
return True
|
||||
|
||||
# Get data from SQLite
|
||||
sqlite_cursor.execute(f"SELECT * FROM {table_name}")
|
||||
rows = sqlite_cursor.fetchall()
|
||||
|
||||
if not rows:
|
||||
logger.info(f"No data found in table: {table_name}")
|
||||
self.migration_stats[table_name] = 0
|
||||
return True
|
||||
|
||||
# Get column names from SQLite
|
||||
column_names = [description[0] for description in sqlite_cursor.description]
|
||||
logger.info(f"SQLite columns for {table_name}: {column_names}")
|
||||
|
||||
# Get PostgreSQL column names
|
||||
postgres_cursor.execute(f"""
|
||||
SELECT column_name
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = %s
|
||||
ORDER BY ordinal_position
|
||||
""", (table_name,))
|
||||
pg_columns = [row[0] for row in postgres_cursor.fetchall()]
|
||||
logger.info(f"PostgreSQL columns for {table_name}: {pg_columns}")
|
||||
|
||||
# For project table, ensure company_id is properly handled
|
||||
if table_name == 'project':
|
||||
# Check if company_id exists in the data
|
||||
for i, row in enumerate(rows):
|
||||
row_dict = dict(zip(column_names, row))
|
||||
if 'company_id' not in row_dict or row_dict['company_id'] is None:
|
||||
# If user_id exists, use it as company_id
|
||||
if 'user_id' in row_dict and row_dict['user_id'] is not None:
|
||||
logger.info(f"Mapping user_id {row_dict['user_id']} to company_id for project {row_dict.get('id')}")
|
||||
# Update the row data
|
||||
row_list = list(row)
|
||||
if 'company_id' in column_names:
|
||||
company_id_idx = column_names.index('company_id')
|
||||
user_id_idx = column_names.index('user_id')
|
||||
row_list[company_id_idx] = row_list[user_id_idx]
|
||||
else:
|
||||
# Add company_id column
|
||||
column_names.append('company_id')
|
||||
user_id_idx = column_names.index('user_id')
|
||||
row_list.append(row[user_id_idx])
|
||||
rows[i] = tuple(row_list)
|
||||
|
||||
# Filter columns to only those that exist in PostgreSQL
|
||||
valid_columns = [col for col in column_names if col in pg_columns]
|
||||
column_indices = [column_names.index(col) for col in valid_columns]
|
||||
|
||||
# Prepare insert statement
|
||||
placeholders = ', '.join(['%s'] * len(valid_columns))
|
||||
columns = ', '.join([f'"{col}"' for col in valid_columns])
|
||||
insert_sql = f'INSERT INTO "{table_name}" ({columns}) VALUES ({placeholders})'
|
||||
|
||||
# Convert rows to list of tuples with only valid columns
|
||||
data_rows = []
|
||||
for row in rows:
|
||||
data_row = []
|
||||
for i in column_indices:
|
||||
value = row[i]
|
||||
col_name = valid_columns[column_indices.index(i)]
|
||||
# Handle special data type conversions
|
||||
if value is None:
|
||||
data_row.append(None)
|
||||
elif isinstance(value, str) and value.startswith('{"') and value.endswith('}'):
|
||||
# Handle JSON strings
|
||||
data_row.append(value)
|
||||
elif (col_name.startswith('is_') or col_name.endswith('_enabled') or col_name in ['is_paused']) and isinstance(value, int):
|
||||
# Convert integer boolean to actual boolean for PostgreSQL
|
||||
data_row.append(bool(value))
|
||||
elif isinstance(value, str) and value == '':
|
||||
# Convert empty strings to None for PostgreSQL
|
||||
data_row.append(None)
|
||||
else:
|
||||
data_row.append(value)
|
||||
data_rows.append(tuple(data_row))
|
||||
|
||||
# Insert data one by one to better handle errors
|
||||
successful_inserts = 0
|
||||
for i, row in enumerate(data_rows):
|
||||
try:
|
||||
postgres_cursor.execute(insert_sql, row)
|
||||
self.postgres_conn.commit()
|
||||
successful_inserts += 1
|
||||
except Exception as row_error:
|
||||
logger.error(f"Error inserting row {i} in table {table_name}: {row_error}")
|
||||
logger.error(f"Problematic row data: {row}")
|
||||
logger.error(f"Columns: {valid_columns}")
|
||||
self.postgres_conn.rollback()
|
||||
|
||||
logger.info(f"Migrated {successful_inserts}/{len(rows)} rows from table: {table_name}")
|
||||
self.migration_stats[table_name] = successful_inserts
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to migrate table {table_name}: {e}")
|
||||
self.postgres_conn.rollback()
|
||||
return False
|
||||
|
||||
def update_sequences(self):
|
||||
"""Update PostgreSQL sequences after data migration"""
|
||||
try:
|
||||
with self.postgres_conn.cursor() as cursor:
|
||||
# Get all sequences
|
||||
cursor.execute("""
|
||||
SELECT
|
||||
pg_get_serial_sequence(table_name, column_name) as sequence_name,
|
||||
column_name,
|
||||
table_name
|
||||
FROM information_schema.columns
|
||||
WHERE column_default LIKE 'nextval%'
|
||||
AND table_schema = 'public'
|
||||
""")
|
||||
sequences = cursor.fetchall()
|
||||
|
||||
for seq_name, col_name, table_name in sequences:
|
||||
if seq_name is None:
|
||||
continue
|
||||
# Get the maximum value for each sequence
|
||||
cursor.execute(f'SELECT MAX("{col_name}") FROM "{table_name}"')
|
||||
max_val = cursor.fetchone()[0]
|
||||
|
||||
if max_val is not None:
|
||||
# Update sequence to start from max_val + 1
|
||||
cursor.execute(f'ALTER SEQUENCE {seq_name} RESTART WITH {max_val + 1}')
|
||||
logger.info(f"Updated sequence {seq_name} to start from {max_val + 1}")
|
||||
|
||||
self.postgres_conn.commit()
|
||||
logger.info("Updated PostgreSQL sequences")
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to update sequences: {e}")
|
||||
self.postgres_conn.rollback()
|
||||
return False
|
||||
|
||||
def run_migration(self, clear_existing=False):
|
||||
"""Run the complete migration process"""
|
||||
logger.info("Starting SQLite to PostgreSQL migration...")
|
||||
|
||||
# Connect to databases
|
||||
if not self.connect_databases():
|
||||
return False
|
||||
|
||||
try:
|
||||
# Check SQLite database
|
||||
if not self.check_sqlite_database():
|
||||
logger.info("No data to migrate from SQLite")
|
||||
return True
|
||||
|
||||
# Clear existing PostgreSQL data if requested
|
||||
if clear_existing:
|
||||
if not self.clear_postgres_data():
|
||||
logger.warning("Failed to clear some PostgreSQL data, continuing anyway...")
|
||||
|
||||
# Define table migration order (respecting foreign key constraints)
|
||||
migration_order = [
|
||||
'company',
|
||||
'team',
|
||||
'project_category',
|
||||
'user',
|
||||
'project',
|
||||
'task',
|
||||
'sub_task',
|
||||
'time_entry',
|
||||
'work_config',
|
||||
'company_work_config',
|
||||
'user_preferences',
|
||||
'system_settings'
|
||||
]
|
||||
|
||||
# Migrate data
|
||||
for table_name in migration_order:
|
||||
if not self.migrate_table_data(table_name):
|
||||
logger.error(f"Migration failed at table: {table_name}")
|
||||
|
||||
# Update sequences after all data is migrated
|
||||
if not self.update_sequences():
|
||||
logger.error("Failed to update sequences")
|
||||
|
||||
logger.info("Migration completed!")
|
||||
logger.info(f"Migration statistics: {self.migration_stats}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Migration failed: {e}")
|
||||
return False
|
||||
finally:
|
||||
self.close_connections()
|
||||
|
||||
def main():
|
||||
"""Main migration function"""
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser(description='Migrate SQLite to PostgreSQL')
|
||||
parser.add_argument('--clear-existing', action='store_true',
|
||||
help='Clear existing PostgreSQL data before migration')
|
||||
parser.add_argument('--sqlite-path', default=os.environ.get('SQLITE_PATH', '/data/timetrack.db'),
|
||||
help='Path to SQLite database')
|
||||
args = parser.parse_args()
|
||||
|
||||
# Get database paths from environment variables
|
||||
sqlite_path = args.sqlite_path
|
||||
postgres_url = os.environ.get('DATABASE_URL')
|
||||
|
||||
if not postgres_url:
|
||||
logger.error("DATABASE_URL environment variable not set")
|
||||
return 1
|
||||
|
||||
# Check if SQLite database exists
|
||||
if not os.path.exists(sqlite_path):
|
||||
logger.info(f"SQLite database not found at {sqlite_path}, skipping migration")
|
||||
return 0
|
||||
|
||||
# Run migration
|
||||
migration = SQLiteToPostgresMigration(sqlite_path, postgres_url)
|
||||
success = migration.run_migration(clear_existing=args.clear_existing)
|
||||
|
||||
return 0 if success else 1
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
104
migrations/old_migrations/03_add_dashboard_columns.py
Normal file
104
migrations/old_migrations/03_add_dashboard_columns.py
Normal file
@@ -0,0 +1,104 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Add missing columns to user_dashboard table
|
||||
"""
|
||||
|
||||
import os
|
||||
import psycopg2
|
||||
from psycopg2 import sql
|
||||
from urllib.parse import urlparse
|
||||
|
||||
# Get database URL from environment
|
||||
DATABASE_URL = os.environ.get('DATABASE_URL', 'postgresql://timetrack:timetrack123@localhost:5432/timetrack')
|
||||
|
||||
def add_missing_columns():
|
||||
"""Add missing columns to user_dashboard table"""
|
||||
# Parse database URL
|
||||
parsed = urlparse(DATABASE_URL)
|
||||
|
||||
# Connect to database
|
||||
conn = psycopg2.connect(
|
||||
host=parsed.hostname,
|
||||
port=parsed.port or 5432,
|
||||
user=parsed.username,
|
||||
password=parsed.password,
|
||||
database=parsed.path[1:] # Remove leading slash
|
||||
)
|
||||
|
||||
try:
|
||||
with conn.cursor() as cur:
|
||||
# Check if columns exist
|
||||
cur.execute("""
|
||||
SELECT column_name
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'user_dashboard'
|
||||
AND column_name IN ('layout', 'is_locked', 'created_at', 'updated_at',
|
||||
'name', 'is_default', 'layout_config', 'grid_columns',
|
||||
'theme', 'auto_refresh')
|
||||
""")
|
||||
existing_columns = [row[0] for row in cur.fetchall()]
|
||||
|
||||
# Add missing columns
|
||||
if 'name' not in existing_columns:
|
||||
print("Adding 'name' column to user_dashboard table...")
|
||||
cur.execute("ALTER TABLE user_dashboard ADD COLUMN name VARCHAR(100) DEFAULT 'My Dashboard'")
|
||||
print("Added 'name' column")
|
||||
|
||||
if 'is_default' not in existing_columns:
|
||||
print("Adding 'is_default' column to user_dashboard table...")
|
||||
cur.execute("ALTER TABLE user_dashboard ADD COLUMN is_default BOOLEAN DEFAULT TRUE")
|
||||
print("Added 'is_default' column")
|
||||
|
||||
if 'layout_config' not in existing_columns:
|
||||
print("Adding 'layout_config' column to user_dashboard table...")
|
||||
cur.execute("ALTER TABLE user_dashboard ADD COLUMN layout_config TEXT")
|
||||
print("Added 'layout_config' column")
|
||||
|
||||
if 'grid_columns' not in existing_columns:
|
||||
print("Adding 'grid_columns' column to user_dashboard table...")
|
||||
cur.execute("ALTER TABLE user_dashboard ADD COLUMN grid_columns INTEGER DEFAULT 6")
|
||||
print("Added 'grid_columns' column")
|
||||
|
||||
if 'theme' not in existing_columns:
|
||||
print("Adding 'theme' column to user_dashboard table...")
|
||||
cur.execute("ALTER TABLE user_dashboard ADD COLUMN theme VARCHAR(20) DEFAULT 'light'")
|
||||
print("Added 'theme' column")
|
||||
|
||||
if 'auto_refresh' not in existing_columns:
|
||||
print("Adding 'auto_refresh' column to user_dashboard table...")
|
||||
cur.execute("ALTER TABLE user_dashboard ADD COLUMN auto_refresh INTEGER DEFAULT 300")
|
||||
print("Added 'auto_refresh' column")
|
||||
|
||||
if 'layout' not in existing_columns:
|
||||
print("Adding 'layout' column to user_dashboard table...")
|
||||
cur.execute("ALTER TABLE user_dashboard ADD COLUMN layout JSON")
|
||||
print("Added 'layout' column")
|
||||
|
||||
if 'is_locked' not in existing_columns:
|
||||
print("Adding 'is_locked' column to user_dashboard table...")
|
||||
cur.execute("ALTER TABLE user_dashboard ADD COLUMN is_locked BOOLEAN DEFAULT FALSE")
|
||||
print("Added 'is_locked' column")
|
||||
|
||||
if 'created_at' not in existing_columns:
|
||||
print("Adding 'created_at' column to user_dashboard table...")
|
||||
cur.execute("ALTER TABLE user_dashboard ADD COLUMN created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP")
|
||||
print("Added 'created_at' column")
|
||||
|
||||
if 'updated_at' not in existing_columns:
|
||||
print("Adding 'updated_at' column to user_dashboard table...")
|
||||
cur.execute("ALTER TABLE user_dashboard ADD COLUMN updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP")
|
||||
print("Added 'updated_at' column")
|
||||
|
||||
# Commit changes
|
||||
conn.commit()
|
||||
print("Dashboard columns migration completed successfully!")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error during migration: {e}")
|
||||
conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
conn.close()
|
||||
|
||||
if __name__ == "__main__":
|
||||
add_missing_columns()
|
||||
159
migrations/old_migrations/04_add_user_preferences_columns.py
Executable file
159
migrations/old_migrations/04_add_user_preferences_columns.py
Executable file
@@ -0,0 +1,159 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Add missing columns to user_preferences table
|
||||
"""
|
||||
|
||||
import os
|
||||
import psycopg2
|
||||
from psycopg2 import sql
|
||||
from urllib.parse import urlparse
|
||||
|
||||
# Get database URL from environment
|
||||
DATABASE_URL = os.environ.get('DATABASE_URL', 'postgresql://timetrack:timetrack123@localhost:5432/timetrack')
|
||||
|
||||
def add_missing_columns():
|
||||
"""Add missing columns to user_preferences table"""
|
||||
# Parse database URL
|
||||
parsed = urlparse(DATABASE_URL)
|
||||
|
||||
# Connect to database
|
||||
conn = psycopg2.connect(
|
||||
host=parsed.hostname,
|
||||
port=parsed.port or 5432,
|
||||
user=parsed.username,
|
||||
password=parsed.password,
|
||||
database=parsed.path[1:] # Remove leading slash
|
||||
)
|
||||
|
||||
try:
|
||||
with conn.cursor() as cur:
|
||||
# Check if table exists
|
||||
cur.execute("""
|
||||
SELECT EXISTS (
|
||||
SELECT FROM information_schema.tables
|
||||
WHERE table_name = 'user_preferences'
|
||||
)
|
||||
""")
|
||||
table_exists = cur.fetchone()[0]
|
||||
|
||||
if not table_exists:
|
||||
print("user_preferences table does not exist. Creating it...")
|
||||
cur.execute("""
|
||||
CREATE TABLE user_preferences (
|
||||
id SERIAL PRIMARY KEY,
|
||||
user_id INTEGER UNIQUE NOT NULL REFERENCES "user"(id),
|
||||
theme VARCHAR(20) DEFAULT 'light',
|
||||
language VARCHAR(10) DEFAULT 'en',
|
||||
timezone VARCHAR(50) DEFAULT 'UTC',
|
||||
date_format VARCHAR(20) DEFAULT 'YYYY-MM-DD',
|
||||
time_format VARCHAR(10) DEFAULT '24h',
|
||||
email_notifications BOOLEAN DEFAULT TRUE,
|
||||
email_daily_summary BOOLEAN DEFAULT FALSE,
|
||||
email_weekly_summary BOOLEAN DEFAULT TRUE,
|
||||
default_project_id INTEGER REFERENCES project(id),
|
||||
timer_reminder_enabled BOOLEAN DEFAULT TRUE,
|
||||
timer_reminder_interval INTEGER DEFAULT 60,
|
||||
dashboard_layout JSON,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
||||
)
|
||||
""")
|
||||
print("Created user_preferences table")
|
||||
else:
|
||||
# Check which columns exist
|
||||
cur.execute("""
|
||||
SELECT column_name
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'user_preferences'
|
||||
AND column_name IN ('theme', 'language', 'timezone', 'date_format',
|
||||
'time_format', 'email_notifications', 'email_daily_summary',
|
||||
'email_weekly_summary', 'default_project_id',
|
||||
'timer_reminder_enabled', 'timer_reminder_interval',
|
||||
'dashboard_layout', 'created_at', 'updated_at')
|
||||
""")
|
||||
existing_columns = [row[0] for row in cur.fetchall()]
|
||||
|
||||
# Add missing columns
|
||||
if 'theme' not in existing_columns:
|
||||
print("Adding 'theme' column to user_preferences table...")
|
||||
cur.execute("ALTER TABLE user_preferences ADD COLUMN theme VARCHAR(20) DEFAULT 'light'")
|
||||
print("Added 'theme' column")
|
||||
|
||||
if 'language' not in existing_columns:
|
||||
print("Adding 'language' column to user_preferences table...")
|
||||
cur.execute("ALTER TABLE user_preferences ADD COLUMN language VARCHAR(10) DEFAULT 'en'")
|
||||
print("Added 'language' column")
|
||||
|
||||
if 'timezone' not in existing_columns:
|
||||
print("Adding 'timezone' column to user_preferences table...")
|
||||
cur.execute("ALTER TABLE user_preferences ADD COLUMN timezone VARCHAR(50) DEFAULT 'UTC'")
|
||||
print("Added 'timezone' column")
|
||||
|
||||
if 'date_format' not in existing_columns:
|
||||
print("Adding 'date_format' column to user_preferences table...")
|
||||
cur.execute("ALTER TABLE user_preferences ADD COLUMN date_format VARCHAR(20) DEFAULT 'YYYY-MM-DD'")
|
||||
print("Added 'date_format' column")
|
||||
|
||||
if 'time_format' not in existing_columns:
|
||||
print("Adding 'time_format' column to user_preferences table...")
|
||||
cur.execute("ALTER TABLE user_preferences ADD COLUMN time_format VARCHAR(10) DEFAULT '24h'")
|
||||
print("Added 'time_format' column")
|
||||
|
||||
if 'email_notifications' not in existing_columns:
|
||||
print("Adding 'email_notifications' column to user_preferences table...")
|
||||
cur.execute("ALTER TABLE user_preferences ADD COLUMN email_notifications BOOLEAN DEFAULT TRUE")
|
||||
print("Added 'email_notifications' column")
|
||||
|
||||
if 'email_daily_summary' not in existing_columns:
|
||||
print("Adding 'email_daily_summary' column to user_preferences table...")
|
||||
cur.execute("ALTER TABLE user_preferences ADD COLUMN email_daily_summary BOOLEAN DEFAULT FALSE")
|
||||
print("Added 'email_daily_summary' column")
|
||||
|
||||
if 'email_weekly_summary' not in existing_columns:
|
||||
print("Adding 'email_weekly_summary' column to user_preferences table...")
|
||||
cur.execute("ALTER TABLE user_preferences ADD COLUMN email_weekly_summary BOOLEAN DEFAULT TRUE")
|
||||
print("Added 'email_weekly_summary' column")
|
||||
|
||||
if 'default_project_id' not in existing_columns:
|
||||
print("Adding 'default_project_id' column to user_preferences table...")
|
||||
cur.execute("ALTER TABLE user_preferences ADD COLUMN default_project_id INTEGER REFERENCES project(id)")
|
||||
print("Added 'default_project_id' column")
|
||||
|
||||
if 'timer_reminder_enabled' not in existing_columns:
|
||||
print("Adding 'timer_reminder_enabled' column to user_preferences table...")
|
||||
cur.execute("ALTER TABLE user_preferences ADD COLUMN timer_reminder_enabled BOOLEAN DEFAULT TRUE")
|
||||
print("Added 'timer_reminder_enabled' column")
|
||||
|
||||
if 'timer_reminder_interval' not in existing_columns:
|
||||
print("Adding 'timer_reminder_interval' column to user_preferences table...")
|
||||
cur.execute("ALTER TABLE user_preferences ADD COLUMN timer_reminder_interval INTEGER DEFAULT 60")
|
||||
print("Added 'timer_reminder_interval' column")
|
||||
|
||||
if 'dashboard_layout' not in existing_columns:
|
||||
print("Adding 'dashboard_layout' column to user_preferences table...")
|
||||
cur.execute("ALTER TABLE user_preferences ADD COLUMN dashboard_layout JSON")
|
||||
print("Added 'dashboard_layout' column")
|
||||
|
||||
if 'created_at' not in existing_columns:
|
||||
print("Adding 'created_at' column to user_preferences table...")
|
||||
cur.execute("ALTER TABLE user_preferences ADD COLUMN created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP")
|
||||
print("Added 'created_at' column")
|
||||
|
||||
if 'updated_at' not in existing_columns:
|
||||
print("Adding 'updated_at' column to user_preferences table...")
|
||||
cur.execute("ALTER TABLE user_preferences ADD COLUMN updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP")
|
||||
print("Added 'updated_at' column")
|
||||
|
||||
# Commit changes
|
||||
conn.commit()
|
||||
print("User preferences migration completed successfully!")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error during migration: {e}")
|
||||
conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
conn.close()
|
||||
|
||||
if __name__ == "__main__":
|
||||
add_missing_columns()
|
||||
244
migrations/old_migrations/05_fix_task_status_enum.py
Executable file
244
migrations/old_migrations/05_fix_task_status_enum.py
Executable file
@@ -0,0 +1,244 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Fix task status enum in the database to match Python enum
|
||||
"""
|
||||
|
||||
import os
|
||||
import psycopg2
|
||||
from psycopg2 import sql
|
||||
from urllib.parse import urlparse
|
||||
|
||||
# Get database URL from environment
|
||||
DATABASE_URL = os.environ.get('DATABASE_URL', 'postgresql://timetrack:timetrack123@localhost:5432/timetrack')
|
||||
|
||||
def fix_task_status_enum():
|
||||
"""Update task status enum in database"""
|
||||
# Parse database URL
|
||||
parsed = urlparse(DATABASE_URL)
|
||||
|
||||
# Connect to database
|
||||
conn = psycopg2.connect(
|
||||
host=parsed.hostname,
|
||||
port=parsed.port or 5432,
|
||||
user=parsed.username,
|
||||
password=parsed.password,
|
||||
database=parsed.path[1:] # Remove leading slash
|
||||
)
|
||||
|
||||
try:
|
||||
with conn.cursor() as cur:
|
||||
print("Starting task status enum migration...")
|
||||
|
||||
# First check if the enum already has the correct values
|
||||
cur.execute("""
|
||||
SELECT enumlabel
|
||||
FROM pg_enum
|
||||
WHERE enumtypid = (SELECT oid FROM pg_type WHERE typname = 'taskstatus')
|
||||
ORDER BY enumsortorder
|
||||
""")
|
||||
current_values = [row[0] for row in cur.fetchall()]
|
||||
print(f"Current enum values: {current_values}")
|
||||
|
||||
# Check if migration is needed
|
||||
expected_values = ['TODO', 'IN_PROGRESS', 'IN_REVIEW', 'DONE', 'CANCELLED']
|
||||
if all(val in current_values for val in expected_values):
|
||||
print("Task status enum already has correct values. Skipping migration.")
|
||||
return
|
||||
|
||||
# Check if task table exists and has a status column
|
||||
cur.execute("""
|
||||
SELECT column_name, data_type
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'task' AND column_name = 'status'
|
||||
""")
|
||||
if not cur.fetchone():
|
||||
print("No task table or status column found. Skipping migration.")
|
||||
return
|
||||
|
||||
# Check if temporary column already exists
|
||||
cur.execute("""
|
||||
SELECT column_name
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'task' AND column_name = 'status_temp'
|
||||
""")
|
||||
temp_exists = cur.fetchone() is not None
|
||||
|
||||
if not temp_exists:
|
||||
# First, we need to create a temporary column to hold the data
|
||||
print("1. Creating temporary column...")
|
||||
cur.execute("ALTER TABLE task ADD COLUMN status_temp VARCHAR(50)")
|
||||
cur.execute("ALTER TABLE sub_task ADD COLUMN status_temp VARCHAR(50)")
|
||||
else:
|
||||
print("1. Temporary column already exists...")
|
||||
|
||||
# Copy current status values to temp column with mapping
|
||||
print("2. Copying and mapping status values...")
|
||||
# First check what values actually exist in the database
|
||||
cur.execute("SELECT DISTINCT status::text FROM task WHERE status IS NOT NULL")
|
||||
existing_statuses = [row[0] for row in cur.fetchall()]
|
||||
print(f" Existing status values in task table: {existing_statuses}")
|
||||
|
||||
# If no statuses exist, skip the mapping
|
||||
if not existing_statuses:
|
||||
print(" No existing status values to migrate")
|
||||
else:
|
||||
# Build dynamic mapping based on what exists
|
||||
mapping_sql = "UPDATE task SET status_temp = CASE "
|
||||
has_cases = False
|
||||
if 'NOT_STARTED' in existing_statuses:
|
||||
mapping_sql += "WHEN status::text = 'NOT_STARTED' THEN 'TODO' "
|
||||
has_cases = True
|
||||
if 'TODO' in existing_statuses:
|
||||
mapping_sql += "WHEN status::text = 'TODO' THEN 'TODO' "
|
||||
has_cases = True
|
||||
if 'IN_PROGRESS' in existing_statuses:
|
||||
mapping_sql += "WHEN status::text = 'IN_PROGRESS' THEN 'IN_PROGRESS' "
|
||||
has_cases = True
|
||||
if 'ON_HOLD' in existing_statuses:
|
||||
mapping_sql += "WHEN status::text = 'ON_HOLD' THEN 'IN_REVIEW' "
|
||||
has_cases = True
|
||||
if 'IN_REVIEW' in existing_statuses:
|
||||
mapping_sql += "WHEN status::text = 'IN_REVIEW' THEN 'IN_REVIEW' "
|
||||
has_cases = True
|
||||
if 'COMPLETED' in existing_statuses:
|
||||
mapping_sql += "WHEN status::text = 'COMPLETED' THEN 'DONE' "
|
||||
has_cases = True
|
||||
if 'DONE' in existing_statuses:
|
||||
mapping_sql += "WHEN status::text = 'DONE' THEN 'DONE' "
|
||||
has_cases = True
|
||||
if 'CANCELLED' in existing_statuses:
|
||||
mapping_sql += "WHEN status::text = 'CANCELLED' THEN 'CANCELLED' "
|
||||
has_cases = True
|
||||
if 'ARCHIVED' in existing_statuses:
|
||||
mapping_sql += "WHEN status::text = 'ARCHIVED' THEN 'CANCELLED' "
|
||||
has_cases = True
|
||||
|
||||
if has_cases:
|
||||
mapping_sql += "ELSE status::text END WHERE status IS NOT NULL"
|
||||
cur.execute(mapping_sql)
|
||||
print(f" Updated {cur.rowcount} tasks")
|
||||
|
||||
# Check sub_task table
|
||||
cur.execute("""
|
||||
SELECT column_name
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'sub_task' AND column_name = 'status'
|
||||
""")
|
||||
if cur.fetchone():
|
||||
# Get existing subtask statuses
|
||||
cur.execute("SELECT DISTINCT status::text FROM sub_task WHERE status IS NOT NULL")
|
||||
existing_subtask_statuses = [row[0] for row in cur.fetchall()]
|
||||
print(f" Existing status values in sub_task table: {existing_subtask_statuses}")
|
||||
|
||||
# If no statuses exist, skip the mapping
|
||||
if not existing_subtask_statuses:
|
||||
print(" No existing subtask status values to migrate")
|
||||
else:
|
||||
# Build dynamic mapping for subtasks
|
||||
mapping_sql = "UPDATE sub_task SET status_temp = CASE "
|
||||
has_cases = False
|
||||
if 'NOT_STARTED' in existing_subtask_statuses:
|
||||
mapping_sql += "WHEN status::text = 'NOT_STARTED' THEN 'TODO' "
|
||||
has_cases = True
|
||||
if 'TODO' in existing_subtask_statuses:
|
||||
mapping_sql += "WHEN status::text = 'TODO' THEN 'TODO' "
|
||||
has_cases = True
|
||||
if 'IN_PROGRESS' in existing_subtask_statuses:
|
||||
mapping_sql += "WHEN status::text = 'IN_PROGRESS' THEN 'IN_PROGRESS' "
|
||||
has_cases = True
|
||||
if 'ON_HOLD' in existing_subtask_statuses:
|
||||
mapping_sql += "WHEN status::text = 'ON_HOLD' THEN 'IN_REVIEW' "
|
||||
has_cases = True
|
||||
if 'IN_REVIEW' in existing_subtask_statuses:
|
||||
mapping_sql += "WHEN status::text = 'IN_REVIEW' THEN 'IN_REVIEW' "
|
||||
has_cases = True
|
||||
if 'COMPLETED' in existing_subtask_statuses:
|
||||
mapping_sql += "WHEN status::text = 'COMPLETED' THEN 'DONE' "
|
||||
has_cases = True
|
||||
if 'DONE' in existing_subtask_statuses:
|
||||
mapping_sql += "WHEN status::text = 'DONE' THEN 'DONE' "
|
||||
has_cases = True
|
||||
if 'CANCELLED' in existing_subtask_statuses:
|
||||
mapping_sql += "WHEN status::text = 'CANCELLED' THEN 'CANCELLED' "
|
||||
has_cases = True
|
||||
if 'ARCHIVED' in existing_subtask_statuses:
|
||||
mapping_sql += "WHEN status::text = 'ARCHIVED' THEN 'CANCELLED' "
|
||||
has_cases = True
|
||||
|
||||
if has_cases:
|
||||
mapping_sql += "ELSE status::text END WHERE status IS NOT NULL"
|
||||
cur.execute(mapping_sql)
|
||||
print(f" Updated {cur.rowcount} subtasks")
|
||||
|
||||
# Drop the old status columns
|
||||
print("3. Dropping old status columns...")
|
||||
cur.execute("ALTER TABLE task DROP COLUMN status")
|
||||
cur.execute("ALTER TABLE sub_task DROP COLUMN status")
|
||||
|
||||
# Drop the old enum type
|
||||
print("4. Dropping old enum type...")
|
||||
cur.execute("DROP TYPE IF EXISTS taskstatus")
|
||||
|
||||
# Create new enum type with correct values
|
||||
print("5. Creating new enum type...")
|
||||
cur.execute("""
|
||||
CREATE TYPE taskstatus AS ENUM (
|
||||
'TODO',
|
||||
'IN_PROGRESS',
|
||||
'IN_REVIEW',
|
||||
'DONE',
|
||||
'CANCELLED'
|
||||
)
|
||||
""")
|
||||
|
||||
# Add new status columns with correct enum type
|
||||
print("6. Adding new status columns...")
|
||||
cur.execute("ALTER TABLE task ADD COLUMN status taskstatus")
|
||||
cur.execute("ALTER TABLE sub_task ADD COLUMN status taskstatus")
|
||||
|
||||
# Copy data from temp columns to new status columns
|
||||
print("7. Copying data to new columns...")
|
||||
cur.execute("UPDATE task SET status = status_temp::taskstatus")
|
||||
cur.execute("UPDATE sub_task SET status = status_temp::taskstatus")
|
||||
|
||||
# Drop temporary columns
|
||||
print("8. Dropping temporary columns...")
|
||||
cur.execute("ALTER TABLE task DROP COLUMN status_temp")
|
||||
cur.execute("ALTER TABLE sub_task DROP COLUMN status_temp")
|
||||
|
||||
# Add NOT NULL constraint
|
||||
print("9. Adding NOT NULL constraints...")
|
||||
cur.execute("ALTER TABLE task ALTER COLUMN status SET NOT NULL")
|
||||
cur.execute("ALTER TABLE sub_task ALTER COLUMN status SET NOT NULL")
|
||||
|
||||
# Set default value
|
||||
print("10. Setting default values...")
|
||||
cur.execute("ALTER TABLE task ALTER COLUMN status SET DEFAULT 'TODO'")
|
||||
cur.execute("ALTER TABLE sub_task ALTER COLUMN status SET DEFAULT 'TODO'")
|
||||
|
||||
# Commit changes
|
||||
conn.commit()
|
||||
print("\nTask status enum migration completed successfully!")
|
||||
|
||||
# Verify the new enum values
|
||||
print("\nVerifying new enum values:")
|
||||
cur.execute("""
|
||||
SELECT enumlabel
|
||||
FROM pg_enum
|
||||
WHERE enumtypid = (
|
||||
SELECT oid FROM pg_type WHERE typname = 'taskstatus'
|
||||
)
|
||||
ORDER BY enumsortorder
|
||||
""")
|
||||
for row in cur.fetchall():
|
||||
print(f" - {row[0]}")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error during migration: {e}")
|
||||
conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
conn.close()
|
||||
|
||||
if __name__ == "__main__":
|
||||
fix_task_status_enum()
|
||||
77
migrations/old_migrations/06_add_archived_status.py
Executable file
77
migrations/old_migrations/06_add_archived_status.py
Executable file
@@ -0,0 +1,77 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Add ARCHIVED status back to task status enum
|
||||
"""
|
||||
|
||||
import os
|
||||
import psycopg2
|
||||
from psycopg2 import sql
|
||||
from urllib.parse import urlparse
|
||||
|
||||
# Get database URL from environment
|
||||
DATABASE_URL = os.environ.get('DATABASE_URL', 'postgresql://timetrack:timetrack123@localhost:5432/timetrack')
|
||||
|
||||
def add_archived_status():
|
||||
"""Add ARCHIVED status to task status enum"""
|
||||
# Parse database URL
|
||||
parsed = urlparse(DATABASE_URL)
|
||||
|
||||
# Connect to database
|
||||
conn = psycopg2.connect(
|
||||
host=parsed.hostname,
|
||||
port=parsed.port or 5432,
|
||||
user=parsed.username,
|
||||
password=parsed.password,
|
||||
database=parsed.path[1:] # Remove leading slash
|
||||
)
|
||||
|
||||
try:
|
||||
with conn.cursor() as cur:
|
||||
print("Adding ARCHIVED status to taskstatus enum...")
|
||||
|
||||
# Check if ARCHIVED already exists
|
||||
cur.execute("""
|
||||
SELECT EXISTS (
|
||||
SELECT 1 FROM pg_enum
|
||||
WHERE enumtypid = (SELECT oid FROM pg_type WHERE typname = 'taskstatus')
|
||||
AND enumlabel = 'ARCHIVED'
|
||||
)
|
||||
""")
|
||||
|
||||
if cur.fetchone()[0]:
|
||||
print("ARCHIVED status already exists in enum")
|
||||
return
|
||||
|
||||
# Add ARCHIVED to the enum
|
||||
cur.execute("""
|
||||
ALTER TYPE taskstatus ADD VALUE IF NOT EXISTS 'ARCHIVED' AFTER 'CANCELLED'
|
||||
""")
|
||||
|
||||
print("Successfully added ARCHIVED status to enum")
|
||||
|
||||
# Verify the enum values
|
||||
print("\nCurrent taskstatus enum values:")
|
||||
cur.execute("""
|
||||
SELECT enumlabel
|
||||
FROM pg_enum
|
||||
WHERE enumtypid = (
|
||||
SELECT oid FROM pg_type WHERE typname = 'taskstatus'
|
||||
)
|
||||
ORDER BY enumsortorder
|
||||
""")
|
||||
for row in cur.fetchall():
|
||||
print(f" - {row[0]}")
|
||||
|
||||
# Commit changes
|
||||
conn.commit()
|
||||
print("\nMigration completed successfully!")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error during migration: {e}")
|
||||
conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
conn.close()
|
||||
|
||||
if __name__ == "__main__":
|
||||
add_archived_status()
|
||||
141
migrations/old_migrations/07_fix_company_work_config_columns.py
Executable file
141
migrations/old_migrations/07_fix_company_work_config_columns.py
Executable file
@@ -0,0 +1,141 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Fix company_work_config table columns to match model definition
|
||||
"""
|
||||
|
||||
import os
|
||||
import psycopg2
|
||||
from psycopg2 import sql
|
||||
from urllib.parse import urlparse
|
||||
|
||||
# Get database URL from environment
|
||||
DATABASE_URL = os.environ.get('DATABASE_URL', 'postgresql://timetrack:timetrack123@localhost:5432/timetrack')
|
||||
|
||||
def fix_company_work_config_columns():
|
||||
"""Rename and add columns to match the new model definition"""
|
||||
# Parse database URL
|
||||
parsed = urlparse(DATABASE_URL)
|
||||
|
||||
# Connect to database
|
||||
conn = psycopg2.connect(
|
||||
host=parsed.hostname,
|
||||
port=parsed.port or 5432,
|
||||
user=parsed.username,
|
||||
password=parsed.password,
|
||||
database=parsed.path[1:] # Remove leading slash
|
||||
)
|
||||
|
||||
try:
|
||||
with conn.cursor() as cur:
|
||||
# Check which columns exist
|
||||
cur.execute("""
|
||||
SELECT column_name
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'company_work_config'
|
||||
""")
|
||||
existing_columns = [row[0] for row in cur.fetchall()]
|
||||
print(f"Existing columns: {existing_columns}")
|
||||
|
||||
# Rename columns if they exist with old names
|
||||
if 'work_hours_per_day' in existing_columns and 'standard_hours_per_day' not in existing_columns:
|
||||
print("Renaming work_hours_per_day to standard_hours_per_day...")
|
||||
cur.execute("ALTER TABLE company_work_config RENAME COLUMN work_hours_per_day TO standard_hours_per_day")
|
||||
|
||||
# Add missing columns
|
||||
if 'standard_hours_per_day' not in existing_columns and 'work_hours_per_day' not in existing_columns:
|
||||
print("Adding standard_hours_per_day column...")
|
||||
cur.execute("ALTER TABLE company_work_config ADD COLUMN standard_hours_per_day FLOAT DEFAULT 8.0")
|
||||
|
||||
if 'standard_hours_per_week' not in existing_columns:
|
||||
print("Adding standard_hours_per_week column...")
|
||||
cur.execute("ALTER TABLE company_work_config ADD COLUMN standard_hours_per_week FLOAT DEFAULT 40.0")
|
||||
|
||||
# Rename region to work_region if needed
|
||||
if 'region' in existing_columns and 'work_region' not in existing_columns:
|
||||
print("Renaming region to work_region...")
|
||||
cur.execute("ALTER TABLE company_work_config RENAME COLUMN region TO work_region")
|
||||
elif 'work_region' not in existing_columns:
|
||||
print("Adding work_region column...")
|
||||
cur.execute("ALTER TABLE company_work_config ADD COLUMN work_region VARCHAR(50) DEFAULT 'OTHER'")
|
||||
|
||||
# Add new columns that don't exist
|
||||
if 'overtime_enabled' not in existing_columns:
|
||||
print("Adding overtime_enabled column...")
|
||||
cur.execute("ALTER TABLE company_work_config ADD COLUMN overtime_enabled BOOLEAN DEFAULT TRUE")
|
||||
|
||||
if 'overtime_rate' not in existing_columns:
|
||||
print("Adding overtime_rate column...")
|
||||
cur.execute("ALTER TABLE company_work_config ADD COLUMN overtime_rate FLOAT DEFAULT 1.5")
|
||||
|
||||
if 'double_time_enabled' not in existing_columns:
|
||||
print("Adding double_time_enabled column...")
|
||||
cur.execute("ALTER TABLE company_work_config ADD COLUMN double_time_enabled BOOLEAN DEFAULT FALSE")
|
||||
|
||||
if 'double_time_threshold' not in existing_columns:
|
||||
print("Adding double_time_threshold column...")
|
||||
cur.execute("ALTER TABLE company_work_config ADD COLUMN double_time_threshold FLOAT DEFAULT 12.0")
|
||||
|
||||
if 'double_time_rate' not in existing_columns:
|
||||
print("Adding double_time_rate column...")
|
||||
cur.execute("ALTER TABLE company_work_config ADD COLUMN double_time_rate FLOAT DEFAULT 2.0")
|
||||
|
||||
if 'require_breaks' not in existing_columns:
|
||||
print("Adding require_breaks column...")
|
||||
cur.execute("ALTER TABLE company_work_config ADD COLUMN require_breaks BOOLEAN DEFAULT TRUE")
|
||||
|
||||
if 'break_duration_minutes' not in existing_columns:
|
||||
# Rename mandatory_break_minutes if it exists
|
||||
if 'mandatory_break_minutes' in existing_columns:
|
||||
print("Renaming mandatory_break_minutes to break_duration_minutes...")
|
||||
cur.execute("ALTER TABLE company_work_config RENAME COLUMN mandatory_break_minutes TO break_duration_minutes")
|
||||
else:
|
||||
print("Adding break_duration_minutes column...")
|
||||
cur.execute("ALTER TABLE company_work_config ADD COLUMN break_duration_minutes INTEGER DEFAULT 30")
|
||||
|
||||
if 'break_after_hours' not in existing_columns:
|
||||
# Rename break_threshold_hours if it exists
|
||||
if 'break_threshold_hours' in existing_columns:
|
||||
print("Renaming break_threshold_hours to break_after_hours...")
|
||||
cur.execute("ALTER TABLE company_work_config RENAME COLUMN break_threshold_hours TO break_after_hours")
|
||||
else:
|
||||
print("Adding break_after_hours column...")
|
||||
cur.execute("ALTER TABLE company_work_config ADD COLUMN break_after_hours FLOAT DEFAULT 6.0")
|
||||
|
||||
if 'weekly_overtime_threshold' not in existing_columns:
|
||||
print("Adding weekly_overtime_threshold column...")
|
||||
cur.execute("ALTER TABLE company_work_config ADD COLUMN weekly_overtime_threshold FLOAT DEFAULT 40.0")
|
||||
|
||||
if 'weekly_overtime_rate' not in existing_columns:
|
||||
print("Adding weekly_overtime_rate column...")
|
||||
cur.execute("ALTER TABLE company_work_config ADD COLUMN weekly_overtime_rate FLOAT DEFAULT 1.5")
|
||||
|
||||
# Drop columns that are no longer needed
|
||||
if 'region_name' in existing_columns:
|
||||
print("Dropping region_name column...")
|
||||
cur.execute("ALTER TABLE company_work_config DROP COLUMN region_name")
|
||||
|
||||
if 'additional_break_minutes' in existing_columns:
|
||||
print("Dropping additional_break_minutes column...")
|
||||
cur.execute("ALTER TABLE company_work_config DROP COLUMN additional_break_minutes")
|
||||
|
||||
if 'additional_break_threshold_hours' in existing_columns:
|
||||
print("Dropping additional_break_threshold_hours column...")
|
||||
cur.execute("ALTER TABLE company_work_config DROP COLUMN additional_break_threshold_hours")
|
||||
|
||||
if 'created_by_id' in existing_columns:
|
||||
print("Dropping created_by_id column...")
|
||||
cur.execute("ALTER TABLE company_work_config DROP COLUMN created_by_id")
|
||||
|
||||
# Commit changes
|
||||
conn.commit()
|
||||
print("\nCompany work config migration completed successfully!")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error during migration: {e}")
|
||||
conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
conn.close()
|
||||
|
||||
if __name__ == "__main__":
|
||||
fix_company_work_config_columns()
|
||||
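
A quick verification sketch, not part of the script, can confirm the table now matches the model; it assumes the same cursor `cur` and the column set this migration adds or renames:

EXPECTED = {
    'standard_hours_per_day', 'standard_hours_per_week', 'work_region',
    'overtime_enabled', 'overtime_rate', 'double_time_enabled',
    'double_time_threshold', 'double_time_rate', 'require_breaks',
    'break_duration_minutes', 'break_after_hours',
    'weekly_overtime_threshold', 'weekly_overtime_rate',
}
cur.execute("""
    SELECT column_name FROM information_schema.columns
    WHERE table_name = 'company_work_config'
""")
missing = EXPECTED - {row[0] for row in cur.fetchall()}
print("Missing columns:", missing or "none")
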
145
migrations/old_migrations/08_fix_work_region_enum.py
Executable file
@@ -0,0 +1,145 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Fix work region enum values in the database
|
||||
"""
|
||||
|
||||
import os
|
||||
import psycopg2
|
||||
from psycopg2 import sql
|
||||
from urllib.parse import urlparse
|
||||
|
||||
# Get database URL from environment
|
||||
DATABASE_URL = os.environ.get('DATABASE_URL', 'postgresql://timetrack:timetrack123@localhost:5432/timetrack')
|
||||
|
||||
def fix_work_region_enum():
|
||||
"""Update work region enum values in database"""
|
||||
# Parse database URL
|
||||
parsed = urlparse(DATABASE_URL)
|
||||
|
||||
# Connect to database
|
||||
conn = psycopg2.connect(
|
||||
host=parsed.hostname,
|
||||
port=parsed.port or 5432,
|
||||
user=parsed.username,
|
||||
password=parsed.password,
|
||||
database=parsed.path[1:] # Remove leading slash
|
||||
)
|
||||
|
||||
try:
|
||||
with conn.cursor() as cur:
|
||||
print("Starting work region enum migration...")
|
||||
|
||||
# First check if work_region column is using enum type
|
||||
cur.execute("""
|
||||
SELECT data_type
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'company_work_config'
|
||||
AND column_name = 'work_region'
|
||||
""")
|
||||
data_type = cur.fetchone()
|
||||
|
||||
if data_type and data_type[0] == 'USER-DEFINED':
|
||||
# It's an enum, we need to update it
|
||||
print("work_region is an enum type, migrating...")
|
||||
|
||||
# Create temporary column
|
||||
print("1. Creating temporary column...")
|
||||
cur.execute("ALTER TABLE company_work_config ADD COLUMN work_region_temp VARCHAR(50)")
|
||||
|
||||
# Copy and map values
|
||||
print("2. Copying and mapping values...")
|
||||
cur.execute("""
|
||||
UPDATE company_work_config SET work_region_temp = CASE
|
||||
WHEN work_region::text = 'GERMANY' THEN 'EU'
|
||||
WHEN work_region::text = 'DE' THEN 'EU'
|
||||
WHEN work_region::text = 'UNITED_STATES' THEN 'USA'
|
||||
WHEN work_region::text = 'US' THEN 'USA'
|
||||
WHEN work_region::text = 'UNITED_KINGDOM' THEN 'UK'
|
||||
WHEN work_region::text = 'GB' THEN 'UK'
|
||||
WHEN work_region::text = 'FRANCE' THEN 'EU'
|
||||
WHEN work_region::text = 'FR' THEN 'EU'
|
||||
WHEN work_region::text = 'EUROPEAN_UNION' THEN 'EU'
|
||||
WHEN work_region::text = 'CUSTOM' THEN 'OTHER'
|
||||
ELSE COALESCE(work_region::text, 'OTHER')
|
||||
END
|
||||
""")
|
||||
print(f" Updated {cur.rowcount} rows")
|
||||
|
||||
# Drop old column
|
||||
print("3. Dropping old work_region column...")
|
||||
cur.execute("ALTER TABLE company_work_config DROP COLUMN work_region")
|
||||
|
||||
# Check if enum type exists and drop it
|
||||
cur.execute("""
|
||||
SELECT EXISTS (
|
||||
SELECT 1 FROM pg_type WHERE typname = 'workregion'
|
||||
)
|
||||
""")
|
||||
if cur.fetchone()[0]:
|
||||
print("4. Dropping old workregion enum type...")
|
||||
cur.execute("DROP TYPE IF EXISTS workregion CASCADE")
|
||||
|
||||
# Create new enum type
|
||||
print("5. Creating new workregion enum type...")
|
||||
cur.execute("""
|
||||
CREATE TYPE workregion AS ENUM (
|
||||
'USA',
|
||||
'CANADA',
|
||||
'UK',
|
||||
'EU',
|
||||
'AUSTRALIA',
|
||||
'OTHER'
|
||||
)
|
||||
""")
|
||||
|
||||
# Add new column with enum type
|
||||
print("6. Adding new work_region column...")
|
||||
cur.execute("ALTER TABLE company_work_config ADD COLUMN work_region workregion DEFAULT 'OTHER'")
|
||||
|
||||
# Copy data back
|
||||
print("7. Copying data to new column...")
|
||||
cur.execute("UPDATE company_work_config SET work_region = work_region_temp::workregion")
|
||||
|
||||
# Drop temporary column
|
||||
print("8. Dropping temporary column...")
|
||||
cur.execute("ALTER TABLE company_work_config DROP COLUMN work_region_temp")
|
||||
|
||||
else:
|
||||
# It's already a varchar, just update the values
|
||||
print("work_region is already a varchar, updating values...")
|
||||
cur.execute("""
|
||||
UPDATE company_work_config SET work_region = CASE
|
||||
WHEN work_region = 'GERMANY' THEN 'EU'
|
||||
WHEN work_region = 'DE' THEN 'EU'
|
||||
WHEN work_region = 'UNITED_STATES' THEN 'USA'
|
||||
WHEN work_region = 'US' THEN 'USA'
|
||||
WHEN work_region = 'UNITED_KINGDOM' THEN 'UK'
|
||||
WHEN work_region = 'GB' THEN 'UK'
|
||||
WHEN work_region = 'FRANCE' THEN 'EU'
|
||||
WHEN work_region = 'FR' THEN 'EU'
|
||||
WHEN work_region = 'EUROPEAN_UNION' THEN 'EU'
|
||||
WHEN work_region = 'CUSTOM' THEN 'OTHER'
|
||||
ELSE COALESCE(work_region, 'OTHER')
|
||||
END
|
||||
""")
|
||||
print(f"Updated {cur.rowcount} rows")
|
||||
|
||||
# Commit changes
|
||||
conn.commit()
|
||||
print("\nWork region enum migration completed successfully!")
|
||||
|
||||
# Verify the results
|
||||
print("\nCurrent work_region values in database:")
|
||||
cur.execute("SELECT DISTINCT work_region FROM company_work_config ORDER BY work_region")
|
||||
for row in cur.fetchall():
|
||||
print(f" - {row[0]}")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error during migration: {e}")
|
||||
conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
conn.close()
|
||||
|
||||
if __name__ == "__main__":
|
||||
fix_work_region_enum()
|
||||
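
The rebuild above casts work_region_temp back into the new enum in step 7; if any row still holds an unmapped label, that cast fails mid-migration. A small guard, assuming the same cursor `cur`, could run between steps 6 and 7:

ALLOWED = {'USA', 'CANADA', 'UK', 'EU', 'AUSTRALIA', 'OTHER'}

cur.execute("SELECT DISTINCT work_region_temp FROM company_work_config")
unexpected = {row[0] for row in cur.fetchall()} - ALLOWED
if unexpected:
    raise ValueError(f"Unmapped work_region values would fail the enum cast: {unexpected}")
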
78
migrations/old_migrations/09_add_germany_to_workregion.py
Executable file
@@ -0,0 +1,78 @@
#!/usr/bin/env python3
"""
Add GERMANY back to work region enum
"""

import os
import psycopg2
from psycopg2 import sql
from urllib.parse import urlparse

# Get database URL from environment
DATABASE_URL = os.environ.get('DATABASE_URL', 'postgresql://timetrack:timetrack123@localhost:5432/timetrack')

def add_germany_to_workregion():
    """Add GERMANY to work region enum"""
    # Parse database URL
    parsed = urlparse(DATABASE_URL)

    # Connect to database
    conn = psycopg2.connect(
        host=parsed.hostname,
        port=parsed.port or 5432,
        user=parsed.username,
        password=parsed.password,
        database=parsed.path[1:]  # Remove leading slash
    )

    try:
        with conn.cursor() as cur:
            print("Adding GERMANY to workregion enum...")

            # Check if GERMANY already exists
            cur.execute("""
                SELECT EXISTS (
                    SELECT 1 FROM pg_enum
                    WHERE enumtypid = (SELECT oid FROM pg_type WHERE typname = 'workregion')
                    AND enumlabel = 'GERMANY'
                )
            """)

            if cur.fetchone()[0]:
                print("GERMANY already exists in enum")
                return

            # Add GERMANY to the enum after UK
            cur.execute("""
                ALTER TYPE workregion ADD VALUE IF NOT EXISTS 'GERMANY' AFTER 'UK'
            """)

            print("Successfully added GERMANY to enum")

            # Update any EU records that should be Germany based on other criteria
            # For now, we'll leave existing EU records as is, but new records can choose Germany

            # Verify the enum values
            print("\nCurrent workregion enum values:")
            cur.execute("""
                SELECT enumlabel
                FROM pg_enum
                WHERE enumtypid = (SELECT oid FROM pg_type WHERE typname = 'workregion')
                ORDER BY enumsortorder
            """)
            for row in cur.fetchall():
                print(f" - {row[0]}")

            # Commit changes
            conn.commit()
            print("\nMigration completed successfully!")

    except Exception as e:
        print(f"Error during migration: {e}")
        conn.rollback()
        raise
    finally:
        conn.close()


if __name__ == "__main__":
    add_germany_to_workregion()
108
migrations/old_migrations/10_add_company_settings_columns.py
Executable file
@@ -0,0 +1,108 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Add missing columns to company_settings table
|
||||
"""
|
||||
|
||||
import os
|
||||
import psycopg2
|
||||
from psycopg2 import sql
|
||||
from urllib.parse import urlparse
|
||||
|
||||
# Get database URL from environment
|
||||
DATABASE_URL = os.environ.get('DATABASE_URL', 'postgresql://timetrack:timetrack123@localhost:5432/timetrack')
|
||||
|
||||
def add_missing_columns():
|
||||
"""Add missing columns to company_settings table"""
|
||||
# Parse database URL
|
||||
parsed = urlparse(DATABASE_URL)
|
||||
|
||||
# Connect to database
|
||||
conn = psycopg2.connect(
|
||||
host=parsed.hostname,
|
||||
port=parsed.port or 5432,
|
||||
user=parsed.username,
|
||||
password=parsed.password,
|
||||
database=parsed.path[1:] # Remove leading slash
|
||||
)
|
||||
|
||||
try:
|
||||
with conn.cursor() as cur:
|
||||
# Check if table exists
|
||||
cur.execute("""
|
||||
SELECT EXISTS (
|
||||
SELECT FROM information_schema.tables
|
||||
WHERE table_name = 'company_settings'
|
||||
)
|
||||
""")
|
||||
table_exists = cur.fetchone()[0]
|
||||
|
||||
if not table_exists:
|
||||
print("company_settings table does not exist. Creating it...")
|
||||
cur.execute("""
|
||||
CREATE TABLE company_settings (
|
||||
id SERIAL PRIMARY KEY,
|
||||
company_id INTEGER UNIQUE NOT NULL REFERENCES company(id),
|
||||
work_week_start INTEGER DEFAULT 1,
|
||||
work_days VARCHAR(20) DEFAULT '1,2,3,4,5',
|
||||
allow_overlapping_entries BOOLEAN DEFAULT FALSE,
|
||||
require_project_for_time_entry BOOLEAN DEFAULT TRUE,
|
||||
allow_future_entries BOOLEAN DEFAULT FALSE,
|
||||
max_hours_per_entry FLOAT DEFAULT 24.0,
|
||||
enable_tasks BOOLEAN DEFAULT TRUE,
|
||||
enable_sprints BOOLEAN DEFAULT FALSE,
|
||||
enable_client_access BOOLEAN DEFAULT FALSE,
|
||||
notify_on_overtime BOOLEAN DEFAULT TRUE,
|
||||
overtime_threshold_daily FLOAT DEFAULT 8.0,
|
||||
overtime_threshold_weekly FLOAT DEFAULT 40.0,
|
||||
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
||||
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
||||
)
|
||||
""")
|
||||
print("Created company_settings table")
|
||||
else:
|
||||
# Check which columns exist
|
||||
cur.execute("""
|
||||
SELECT column_name
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'company_settings'
|
||||
""")
|
||||
existing_columns = [row[0] for row in cur.fetchall()]
|
||||
print(f"Existing columns: {existing_columns}")
|
||||
|
||||
# Add missing columns
|
||||
columns_to_add = {
|
||||
'work_week_start': 'INTEGER DEFAULT 1',
|
||||
'work_days': "VARCHAR(20) DEFAULT '1,2,3,4,5'",
|
||||
'allow_overlapping_entries': 'BOOLEAN DEFAULT FALSE',
|
||||
'require_project_for_time_entry': 'BOOLEAN DEFAULT TRUE',
|
||||
'allow_future_entries': 'BOOLEAN DEFAULT FALSE',
|
||||
'max_hours_per_entry': 'FLOAT DEFAULT 24.0',
|
||||
'enable_tasks': 'BOOLEAN DEFAULT TRUE',
|
||||
'enable_sprints': 'BOOLEAN DEFAULT FALSE',
|
||||
'enable_client_access': 'BOOLEAN DEFAULT FALSE',
|
||||
'notify_on_overtime': 'BOOLEAN DEFAULT TRUE',
|
||||
'overtime_threshold_daily': 'FLOAT DEFAULT 8.0',
|
||||
'overtime_threshold_weekly': 'FLOAT DEFAULT 40.0',
|
||||
'created_at': 'TIMESTAMP DEFAULT CURRENT_TIMESTAMP',
|
||||
'updated_at': 'TIMESTAMP DEFAULT CURRENT_TIMESTAMP'
|
||||
}
|
||||
|
||||
for column, definition in columns_to_add.items():
|
||||
if column not in existing_columns:
|
||||
print(f"Adding {column} column...")
|
||||
cur.execute(f"ALTER TABLE company_settings ADD COLUMN {column} {definition}")
|
||||
print(f"Added {column} column")
|
||||
|
||||
# Commit changes
|
||||
conn.commit()
|
||||
print("\nCompany settings migration completed successfully!")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error during migration: {e}")
|
||||
conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
conn.close()
|
||||
|
||||
if __name__ == "__main__":
|
||||
add_missing_columns()
|
||||
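
The loop above interpolates column names into ALTER TABLE with an f-string, which is safe only because columns_to_add is hard-coded. A variant using psycopg2's sql module, sketched here as an alternative rather than the project's actual style, quotes identifiers explicitly; it assumes the same `cur`, `columns_to_add`, and `existing_columns`:

from psycopg2 import sql

for column, definition in columns_to_add.items():
    if column in existing_columns:
        continue
    cur.execute(
        sql.SQL("ALTER TABLE {table} ADD COLUMN {column} {definition}").format(
            table=sql.Identifier('company_settings'),
            column=sql.Identifier(column),
            definition=sql.SQL(definition),  # definitions are trusted, hard-coded fragments
        )
    )
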
188
migrations/old_migrations/11_fix_company_work_config_usage.py
Executable file
@@ -0,0 +1,188 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Fix CompanyWorkConfig field usage throughout the codebase
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
# Define old to new field mappings
|
||||
FIELD_MAPPINGS = {
|
||||
'work_hours_per_day': 'standard_hours_per_day',
|
||||
'mandatory_break_minutes': 'break_duration_minutes',
|
||||
'break_threshold_hours': 'break_after_hours',
|
||||
'region': 'work_region',
|
||||
}
|
||||
|
||||
# Fields that were removed
|
||||
REMOVED_FIELDS = [
|
||||
'additional_break_minutes',
|
||||
'additional_break_threshold_hours',
|
||||
'region_name',
|
||||
'created_by_id'
|
||||
]
|
||||
|
||||
def update_python_files():
|
||||
"""Update Python files with new field names"""
|
||||
python_files = [
|
||||
'app.py',
|
||||
'routes/company.py',
|
||||
]
|
||||
|
||||
for filepath in python_files:
|
||||
if not os.path.exists(filepath):
|
||||
print(f"Skipping {filepath} - file not found")
|
||||
continue
|
||||
|
||||
print(f"Processing {filepath}...")
|
||||
|
||||
with open(filepath, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
original_content = content
|
||||
|
||||
# Update field references
|
||||
for old_field, new_field in FIELD_MAPPINGS.items():
|
||||
# Update attribute access: .old_field -> .new_field
|
||||
content = re.sub(
|
||||
rf'\.{old_field}\b',
|
||||
f'.{new_field}',
|
||||
content
|
||||
)
|
||||
|
||||
# Update dictionary access: ['old_field'] -> ['new_field']
|
||||
content = re.sub(
|
||||
rf'\[[\'"]{old_field}[\'"]\]',
|
||||
f"['{new_field}']",
|
||||
content
|
||||
)
|
||||
|
||||
# Update keyword arguments: old_field= -> new_field=
|
||||
content = re.sub(
|
||||
rf'\b{old_field}=',
|
||||
f'{new_field}=',
|
||||
content
|
||||
)
|
||||
|
||||
# Handle special cases for app.py
|
||||
if filepath == 'app.py':
|
||||
# Update WorkRegion.GERMANY references where appropriate
|
||||
            # Anchor to the end of the line so the appended note cannot comment out trailing code
            content = re.sub(
                r'WorkRegion\.GERMANY(\s*)$',
                r'WorkRegion.GERMANY  # Note: Germany has specific labor laws\1',
                content,
                flags=re.MULTILINE
            )
|
||||
|
||||
# Handle removed fields - comment them out with explanation
|
||||
for removed_field in ['additional_break_minutes', 'additional_break_threshold_hours']:
|
||||
content = re.sub(
|
||||
rf'^(\s*)(.*{removed_field}.*)$',
|
||||
r'\1# REMOVED: \2 # This field no longer exists in the model',
|
||||
content,
|
||||
flags=re.MULTILINE
|
||||
)
|
||||
|
||||
# Handle region_name specially in routes/company.py
|
||||
if filepath == 'routes/company.py':
|
||||
# Remove region_name assignments
|
||||
content = re.sub(
|
||||
r"work_config\.region_name = .*\n",
|
||||
"# region_name removed - using work_region enum value instead\n",
|
||||
content
|
||||
)
|
||||
|
||||
# Fix WorkRegion.CUSTOM -> WorkRegion.OTHER
|
||||
content = re.sub(
|
||||
r'WorkRegion\.CUSTOM',
|
||||
'WorkRegion.OTHER',
|
||||
content
|
||||
)
|
||||
|
||||
if content != original_content:
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(content)
|
||||
print(f" ✓ Updated {filepath}")
|
||||
else:
|
||||
print(f" - No changes needed in {filepath}")
|
||||
|
||||
def update_template_files():
|
||||
"""Update template files with new field names"""
|
||||
template_files = [
|
||||
'templates/admin_company.html',
|
||||
'templates/admin_work_policies.html',
|
||||
'templates/config.html',
|
||||
]
|
||||
|
||||
for filepath in template_files:
|
||||
if not os.path.exists(filepath):
|
||||
print(f"Skipping {filepath} - file not found")
|
||||
continue
|
||||
|
||||
print(f"Processing {filepath}...")
|
||||
|
||||
with open(filepath, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
original_content = content
|
||||
|
||||
# Update field references in templates
|
||||
for old_field, new_field in FIELD_MAPPINGS.items():
|
||||
# Update Jinja2 variable access: {{ obj.old_field }} -> {{ obj.new_field }}
|
||||
content = re.sub(
|
||||
r'(\{\{[^}]*\.)' + re.escape(old_field) + r'(\s*\}\})',
|
||||
r'\1' + new_field + r'\2',
|
||||
content
|
||||
)
|
||||
|
||||
# Update form field names and IDs
|
||||
content = re.sub(
|
||||
rf'(name|id)=[\'"]{old_field}[\'"]',
|
||||
rf'\1="{new_field}"',
|
||||
content
|
||||
)
|
||||
|
||||
# Handle region_name in templates
|
||||
if 'region_name' in content:
|
||||
# Replace region_name with work_region.value
|
||||
content = re.sub(
|
||||
r'(\{\{[^}]*\.)region_name(\s*\}\})',
|
||||
r'\1work_region.value\2',
|
||||
content
|
||||
)
|
||||
|
||||
# Handle removed fields in admin_company.html
|
||||
if filepath == 'templates/admin_company.html' and 'additional_break' in content:
|
||||
# Remove entire config-item divs for removed fields
|
||||
content = re.sub(
|
||||
r'<div class="config-item">.*?additional_break.*?</div>\s*',
|
||||
'',
|
||||
content,
|
||||
flags=re.DOTALL
|
||||
)
|
||||
|
||||
if content != original_content:
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(content)
|
||||
print(f" ✓ Updated {filepath}")
|
||||
else:
|
||||
print(f" - No changes needed in {filepath}")
|
||||
|
||||
def main():
|
||||
print("=== Fixing CompanyWorkConfig Field Usage ===\n")
|
||||
|
||||
print("1. Updating Python files...")
|
||||
update_python_files()
|
||||
|
||||
print("\n2. Updating template files...")
|
||||
update_template_files()
|
||||
|
||||
print("\n✅ CompanyWorkConfig migration complete!")
|
||||
print("\nNote: Some fields have been removed from the model:")
|
||||
print(" - additional_break_minutes")
|
||||
print(" - additional_break_threshold_hours")
|
||||
print(" - region_name (use work_region.value instead)")
|
||||
print(" - created_by_id")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
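
Because the renames above are plain regex substitutions, a dry-run check afterwards is useful. A hypothetical helper, reusing FIELD_MAPPINGS from this script, that only reports remaining old names without rewriting anything:

import re
from pathlib import Path

def report_leftovers(paths, old_fields):
    """Print every location that still mentions an old CompanyWorkConfig field name."""
    for path in paths:
        text = Path(path).read_text()
        for field in old_fields:
            for match in re.finditer(rf'\b{field}\b', text):
                line_no = text.count('\n', 0, match.start()) + 1
                print(f"{path}:{line_no}: {field}")

report_leftovers(['app.py', 'routes/company.py'], FIELD_MAPPINGS.keys())
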
172
migrations/old_migrations/12_fix_task_status_usage.py
Executable file
@@ -0,0 +1,172 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Fix TaskStatus enum usage throughout the codebase
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
# Define old to new status mappings
|
||||
STATUS_MAPPINGS = {
|
||||
'NOT_STARTED': 'TODO',
|
||||
'COMPLETED': 'DONE',
|
||||
'ON_HOLD': 'IN_REVIEW',
|
||||
}
|
||||
|
||||
def update_python_files():
|
||||
"""Update Python files with new TaskStatus values"""
|
||||
# Find all Python files that might use TaskStatus
|
||||
python_files = []
|
||||
|
||||
# Add specific known files
|
||||
known_files = ['app.py', 'routes/tasks.py', 'routes/tasks_api.py', 'routes/sprints.py', 'routes/sprints_api.py']
|
||||
python_files.extend([f for f in known_files if os.path.exists(f)])
|
||||
|
||||
# Search for more Python files in routes/
|
||||
if os.path.exists('routes'):
|
||||
python_files.extend([str(p) for p in Path('routes').glob('*.py')])
|
||||
|
||||
# Remove duplicates
|
||||
python_files = list(set(python_files))
|
||||
|
||||
for filepath in python_files:
|
||||
print(f"Processing {filepath}...")
|
||||
|
||||
with open(filepath, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
original_content = content
|
||||
|
||||
# Update TaskStatus enum references
|
||||
for old_status, new_status in STATUS_MAPPINGS.items():
|
||||
# Update enum access: TaskStatus.OLD_STATUS -> TaskStatus.NEW_STATUS
|
||||
content = re.sub(
|
||||
rf'TaskStatus\.{old_status}\b',
|
||||
f'TaskStatus.{new_status}',
|
||||
content
|
||||
)
|
||||
|
||||
# Update string comparisons: == 'OLD_STATUS' -> == 'NEW_STATUS'
|
||||
content = re.sub(
|
||||
rf"['\"]({old_status})['\"]",
|
||||
f"'{new_status}'",
|
||||
content
|
||||
)
|
||||
|
||||
if content != original_content:
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(content)
|
||||
print(f" ✓ Updated {filepath}")
|
||||
else:
|
||||
print(f" - No changes needed in {filepath}")
|
||||
|
||||
def update_javascript_files():
|
||||
"""Update JavaScript files with new TaskStatus values"""
|
||||
js_files = []
|
||||
|
||||
# Find all JS files
|
||||
if os.path.exists('static/js'):
|
||||
js_files.extend([str(p) for p in Path('static/js').glob('*.js')])
|
||||
|
||||
for filepath in js_files:
|
||||
print(f"Processing {filepath}...")
|
||||
|
||||
with open(filepath, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
original_content = content
|
||||
|
||||
# Update status values in JavaScript
|
||||
for old_status, new_status in STATUS_MAPPINGS.items():
|
||||
# Update string literals
|
||||
content = re.sub(
|
||||
rf"['\"]({old_status})['\"]",
|
||||
f"'{new_status}'",
|
||||
content
|
||||
)
|
||||
|
||||
# Update in case statements or object keys
|
||||
content = re.sub(
|
||||
rf'\b{old_status}\b:',
|
||||
f'{new_status}:',
|
||||
content
|
||||
)
|
||||
|
||||
if content != original_content:
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(content)
|
||||
print(f" ✓ Updated {filepath}")
|
||||
else:
|
||||
print(f" - No changes needed in {filepath}")
|
||||
|
||||
def update_template_files():
|
||||
"""Update template files with new TaskStatus values"""
|
||||
template_files = []
|
||||
|
||||
# Find all template files that might have task status
|
||||
if os.path.exists('templates'):
|
||||
template_files.extend([str(p) for p in Path('templates').glob('*.html')])
|
||||
|
||||
for filepath in template_files:
|
||||
# Skip if file doesn't contain task-related content
|
||||
with open(filepath, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
if 'task' not in content.lower() and 'status' not in content.lower():
|
||||
continue
|
||||
|
||||
print(f"Processing {filepath}...")
|
||||
|
||||
original_content = content
|
||||
|
||||
# Update status values in templates
|
||||
for old_status, new_status in STATUS_MAPPINGS.items():
|
||||
# Update in option values: value="OLD_STATUS" -> value="NEW_STATUS"
|
||||
content = re.sub(
|
||||
rf'value=[\'"]{old_status}[\'"]',
|
||||
f'value="{new_status}"',
|
||||
content
|
||||
)
|
||||
|
||||
# Update display text (be more careful here)
|
||||
if old_status == 'NOT_STARTED':
|
||||
content = re.sub(r'>Not Started<', '>To Do<', content)
|
||||
elif old_status == 'COMPLETED':
|
||||
content = re.sub(r'>Completed<', '>Done<', content)
|
||||
elif old_status == 'ON_HOLD':
|
||||
content = re.sub(r'>On Hold<', '>In Review<', content)
|
||||
|
||||
# Update in JavaScript within templates
|
||||
content = re.sub(
|
||||
rf"['\"]({old_status})['\"]",
|
||||
f"'{new_status}'",
|
||||
content
|
||||
)
|
||||
|
||||
if content != original_content:
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(content)
|
||||
print(f" ✓ Updated {filepath}")
|
||||
else:
|
||||
print(f" - No changes needed in {filepath}")
|
||||
|
||||
def main():
|
||||
print("=== Fixing TaskStatus Enum Usage ===\n")
|
||||
|
||||
print("1. Updating Python files...")
|
||||
update_python_files()
|
||||
|
||||
print("\n2. Updating JavaScript files...")
|
||||
update_javascript_files()
|
||||
|
||||
print("\n3. Updating template files...")
|
||||
update_template_files()
|
||||
|
||||
print("\n✅ TaskStatus migration complete!")
|
||||
print("\nStatus mappings applied:")
|
||||
for old, new in STATUS_MAPPINGS.items():
|
||||
print(f" - {old} → {new}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
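
Alongside the code rewrite, it helps to confirm the data itself no longer uses the old labels. A short check, assuming a psycopg2 cursor `cur` against the same database and a task table with a status column:

cur.execute("SELECT status, COUNT(*) FROM task GROUP BY status ORDER BY status")
for status, count in cur.fetchall():
    print(f"{status}: {count}")  # should list only the new labels (e.g. TODO, IN_REVIEW, DONE, ARCHIVED)
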
154
migrations/old_migrations/13_fix_work_region_usage.py
Executable file
@@ -0,0 +1,154 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Fix WorkRegion enum usage throughout the codebase
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
# Define old to new region mappings
|
||||
REGION_MAPPINGS = {
|
||||
'UNITED_STATES': 'USA',
|
||||
'UNITED_KINGDOM': 'UK',
|
||||
'FRANCE': 'EU',
|
||||
'EUROPEAN_UNION': 'EU',
|
||||
'CUSTOM': 'OTHER',
|
||||
}
|
||||
|
||||
# Note: GERMANY is kept as is - it has specific labor laws
|
||||
|
||||
def update_python_files():
|
||||
"""Update Python files with new WorkRegion values"""
|
||||
python_files = []
|
||||
|
||||
# Add known files
|
||||
known_files = ['app.py', 'routes/company.py', 'routes/system_admin.py']
|
||||
python_files.extend([f for f in known_files if os.path.exists(f)])
|
||||
|
||||
# Search for more Python files
|
||||
if os.path.exists('routes'):
|
||||
python_files.extend([str(p) for p in Path('routes').glob('*.py')])
|
||||
|
||||
# Remove duplicates
|
||||
python_files = list(set(python_files))
|
||||
|
||||
for filepath in python_files:
|
||||
with open(filepath, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
# Skip if no WorkRegion references
|
||||
if 'WorkRegion' not in content:
|
||||
continue
|
||||
|
||||
print(f"Processing {filepath}...")
|
||||
|
||||
original_content = content
|
||||
|
||||
# Update WorkRegion enum references
|
||||
for old_region, new_region in REGION_MAPPINGS.items():
|
||||
# Update enum access: WorkRegion.OLD_REGION -> WorkRegion.NEW_REGION
|
||||
content = re.sub(
|
||||
rf'WorkRegion\.{old_region}\b',
|
||||
f'WorkRegion.{new_region}',
|
||||
content
|
||||
)
|
||||
|
||||
# Update string comparisons
|
||||
content = re.sub(
|
||||
rf"['\"]({old_region})['\"]",
|
||||
f"'{new_region}'",
|
||||
content
|
||||
)
|
||||
|
||||
# Add comments for GERMANY usage to note it has specific laws
|
||||
if 'WorkRegion.GERMANY' in content and '# Note:' not in content:
|
||||
            content = re.sub(
                r'(WorkRegion\.GERMANY)(\s*)$',
                r'\1  # Germany has specific labor laws beyond EU\2',
                content,
                count=1,  # Only comment the first occurrence
                flags=re.MULTILINE  # anchor to the line end so code after the match is not commented out
            )
|
||||
|
||||
if content != original_content:
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(content)
|
||||
print(f" ✓ Updated {filepath}")
|
||||
else:
|
||||
print(f" - No changes needed in {filepath}")
|
||||
|
||||
def update_template_files():
|
||||
"""Update template files with new WorkRegion values"""
|
||||
template_files = []
|
||||
|
||||
# Find relevant templates
|
||||
if os.path.exists('templates'):
|
||||
for template in Path('templates').glob('*.html'):
|
||||
with open(template, 'r') as f:
|
||||
if 'region' in f.read().lower():
|
||||
template_files.append(str(template))
|
||||
|
||||
for filepath in template_files:
|
||||
print(f"Processing {filepath}...")
|
||||
|
||||
with open(filepath, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
original_content = content
|
||||
|
||||
# Update region values
|
||||
for old_region, new_region in REGION_MAPPINGS.items():
|
||||
# Update in option values
|
||||
content = re.sub(
|
||||
rf'value=[\'"]{old_region}[\'"]',
|
||||
f'value="{new_region}"',
|
||||
content
|
||||
)
|
||||
|
||||
# Update display names
|
||||
display_mappings = {
|
||||
'UNITED_STATES': 'United States',
|
||||
'United States': 'United States',
|
||||
'UNITED_KINGDOM': 'United Kingdom',
|
||||
'United Kingdom': 'United Kingdom',
|
||||
'FRANCE': 'European Union',
|
||||
'France': 'European Union',
|
||||
'EUROPEAN_UNION': 'European Union',
|
||||
'European Union': 'European Union',
|
||||
'CUSTOM': 'Other',
|
||||
'Custom': 'Other'
|
||||
}
|
||||
|
||||
for old_display, new_display in display_mappings.items():
|
||||
if old_display in ['France', 'FRANCE']:
|
||||
# France is now part of EU
|
||||
content = re.sub(
|
||||
rf'>{old_display}<',
|
||||
f'>{new_display}<',
|
||||
content
|
||||
)
|
||||
|
||||
if content != original_content:
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(content)
|
||||
print(f" ✓ Updated {filepath}")
|
||||
else:
|
||||
print(f" - No changes needed in {filepath}")
|
||||
|
||||
def main():
|
||||
print("=== Fixing WorkRegion Enum Usage ===\n")
|
||||
|
||||
print("1. Updating Python files...")
|
||||
update_python_files()
|
||||
|
||||
print("\n2. Updating template files...")
|
||||
update_template_files()
|
||||
|
||||
print("\n✅ WorkRegion migration complete!")
|
||||
print("\nRegion mappings applied:")
|
||||
for old, new in REGION_MAPPINGS.items():
|
||||
print(f" - {old} → {new}")
|
||||
print("\nNote: GERMANY remains as a separate option due to specific labor laws")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
227
migrations/old_migrations/14_fix_removed_fields.py
Executable file
@@ -0,0 +1,227 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Fix references to removed fields throughout the codebase
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
# Fields that were removed from various models
|
||||
REMOVED_FIELDS = {
|
||||
'created_by_id': {
|
||||
'models': ['Task', 'Project', 'Sprint', 'Announcement', 'CompanyWorkConfig'],
|
||||
'replacement': 'None', # or could track via audit log
|
||||
'comment': 'Field removed - consider using audit log for creator tracking'
|
||||
},
|
||||
'region_name': {
|
||||
'models': ['CompanyWorkConfig'],
|
||||
'replacement': 'work_region.value',
|
||||
'comment': 'Use work_region enum value instead'
|
||||
},
|
||||
'additional_break_minutes': {
|
||||
'models': ['CompanyWorkConfig'],
|
||||
'replacement': 'None',
|
||||
'comment': 'Field removed - simplified break configuration'
|
||||
},
|
||||
'additional_break_threshold_hours': {
|
||||
'models': ['CompanyWorkConfig'],
|
||||
'replacement': 'None',
|
||||
'comment': 'Field removed - simplified break configuration'
|
||||
}
|
||||
}
|
||||
|
||||
def update_python_files():
|
||||
"""Update Python files to handle removed fields"""
|
||||
python_files = []
|
||||
|
||||
# Get all Python files
|
||||
for root, dirs, files in os.walk('.'):
|
||||
# Skip virtual environments and cache
|
||||
if 'venv' in root or '__pycache__' in root or '.git' in root:
|
||||
continue
|
||||
for file in files:
|
||||
if file.endswith('.py'):
|
||||
python_files.append(os.path.join(root, file))
|
||||
|
||||
for filepath in python_files:
|
||||
# Skip migration scripts
|
||||
if 'migrations/' in filepath:
|
||||
continue
|
||||
|
||||
with open(filepath, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
original_content = content
|
||||
modified = False
|
||||
|
||||
for field, info in REMOVED_FIELDS.items():
|
||||
if field not in content:
|
||||
continue
|
||||
|
||||
print(f"Processing {filepath} for {field}...")
|
||||
|
||||
# Handle different patterns
|
||||
if field == 'created_by_id':
|
||||
# Comment out lines that assign created_by_id
|
||||
content = re.sub(
|
||||
rf'^(\s*)([^#\n]*created_by_id\s*=\s*[^,\n]+,?)(.*)$',
|
||||
rf'\1# REMOVED: \2 # {info["comment"]}\3',
|
||||
content,
|
||||
flags=re.MULTILINE
|
||||
)
|
||||
|
||||
# Remove from query filters
|
||||
content = re.sub(
|
||||
rf'\.filter_by\(created_by_id=[^)]+\)',
|
||||
'.filter_by() # REMOVED: created_by_id filter',
|
||||
content
|
||||
)
|
||||
|
||||
# Remove from dictionary accesses
|
||||
content = re.sub(
|
||||
rf"['\"]created_by_id['\"]\s*:\s*[^,}}]+[,}}]",
|
||||
'# "created_by_id" removed from model',
|
||||
content
|
||||
)
|
||||
|
||||
elif field == 'region_name':
|
||||
# Replace with work_region.value
|
||||
content = re.sub(
|
||||
rf'\.region_name\b',
|
||||
'.work_region.value',
|
||||
content
|
||||
)
|
||||
content = re.sub(
|
||||
rf"\['region_name'\]",
|
||||
"['work_region'].value",
|
||||
content
|
||||
)
|
||||
|
||||
elif field in ['additional_break_minutes', 'additional_break_threshold_hours']:
|
||||
# Comment out references
|
||||
content = re.sub(
|
||||
rf'^(\s*)([^#\n]*{field}[^#\n]*)$',
|
||||
rf'\1# REMOVED: \2 # {info["comment"]}',
|
||||
content,
|
||||
flags=re.MULTILINE
|
||||
)
|
||||
|
||||
if content != original_content:
|
||||
modified = True
|
||||
|
||||
if modified:
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(content)
|
||||
print(f" ✓ Updated {filepath}")
|
||||
|
||||
def update_template_files():
|
||||
"""Update template files to handle removed fields"""
|
||||
template_files = []
|
||||
|
||||
if os.path.exists('templates'):
|
||||
template_files = [str(p) for p in Path('templates').glob('*.html')]
|
||||
|
||||
for filepath in template_files:
|
||||
with open(filepath, 'r') as f:
|
||||
content = f.read()
|
||||
|
||||
original_content = content
|
||||
modified = False
|
||||
|
||||
for field, info in REMOVED_FIELDS.items():
|
||||
if field not in content:
|
||||
continue
|
||||
|
||||
print(f"Processing {filepath} for {field}...")
|
||||
|
||||
if field == 'created_by_id':
|
||||
# Remove or comment out created_by references in templates
|
||||
# Match {{...created_by_id...}} patterns
|
||||
pattern = r'\{\{[^}]*\.created_by_id[^}]*\}\}'
|
||||
content = re.sub(
|
||||
pattern,
|
||||
'<!-- REMOVED: created_by_id no longer available -->',
|
||||
content
|
||||
)
|
||||
|
||||
elif field == 'region_name':
|
||||
# Replace with work_region.value
|
||||
# Match {{...region_name...}} and replace region_name with work_region.value
|
||||
pattern = r'(\{\{[^}]*\.)region_name([^}]*\}\})'
|
||||
content = re.sub(
|
||||
pattern,
|
||||
r'\1work_region.value\2',
|
||||
content
|
||||
)
|
||||
|
||||
elif field in ['additional_break_minutes', 'additional_break_threshold_hours']:
|
||||
# Remove entire form groups for these fields
|
||||
pattern = r'<div[^>]*>(?:[^<]|<(?!/div))*' + re.escape(field) + r'.*?</div>\s*'
|
||||
content = re.sub(
|
||||
pattern,
|
||||
f'<!-- REMOVED: {field} no longer in model -->\n',
|
||||
content,
|
||||
flags=re.DOTALL
|
||||
)
|
||||
|
||||
if content != original_content:
|
||||
modified = True
|
||||
|
||||
if modified:
|
||||
with open(filepath, 'w') as f:
|
||||
f.write(content)
|
||||
print(f" ✓ Updated {filepath}")
|
||||
|
||||
def create_audit_log_migration():
|
||||
"""Create a migration to add audit fields if needed"""
|
||||
migration_content = '''#!/usr/bin/env python3
|
||||
"""
|
||||
Add audit log fields to replace removed created_by_id
|
||||
"""
|
||||
|
||||
# This is a template for adding audit logging if needed
|
||||
# to replace the removed created_by_id functionality
|
||||
|
||||
def add_audit_fields():
|
||||
"""
|
||||
Consider adding these fields to models that lost created_by_id:
|
||||
- created_by_username (store username instead of ID)
|
||||
- created_at (if not already present)
|
||||
- updated_by_username
|
||||
- updated_at
|
||||
|
||||
Or implement a separate audit log table
|
||||
"""
|
||||
pass
|
||||
|
||||
if __name__ == "__main__":
|
||||
print("Consider implementing audit logging to track who created/modified records")
|
||||
'''
|
||||
|
||||
with open('migrations/05_add_audit_fields_template.py', 'w') as f:
|
||||
f.write(migration_content)
|
||||
print("\n✓ Created template for audit field migration")
|
||||
|
||||
def main():
|
||||
print("=== Fixing References to Removed Fields ===\n")
|
||||
|
||||
print("1. Updating Python files...")
|
||||
update_python_files()
|
||||
|
||||
print("\n2. Updating template files...")
|
||||
update_template_files()
|
||||
|
||||
print("\n3. Creating audit field migration template...")
|
||||
create_audit_log_migration()
|
||||
|
||||
print("\n✅ Removed fields migration complete!")
|
||||
print("\nFields handled:")
|
||||
for field, info in REMOVED_FIELDS.items():
|
||||
print(f" - {field}: {info['comment']}")
|
||||
|
||||
print("\n⚠️ Important: Review commented-out code and decide on appropriate replacements")
|
||||
print(" Consider implementing audit logging for creator tracking")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
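
The script recommends audit logging as a replacement for created_by_id but only writes a template. A minimal sketch of such a table, assuming the project's SQLAlchemy `db` object and not any model that actually exists in this codebase:

from datetime import datetime

class AuditLog(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    table_name = db.Column(db.String(64), nullable=False)
    record_id = db.Column(db.Integer, nullable=False)
    action = db.Column(db.String(16), nullable=False)      # 'create', 'update', 'delete'
    username = db.Column(db.String(80), nullable=False)
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
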
67
migrations/old_migrations/15_repair_user_roles.py
Normal file
@@ -0,0 +1,67 @@
#!/usr/bin/env python3
"""
Repair user roles from string to enum values
"""

import os
import sys
import logging

# Add parent directory to path to import app
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

try:
    from app import app, db
    from models import User, Role
except Exception as e:
    print(f"Error importing modules: {e}")
    print("This migration requires Flask app context. Skipping...")
    sys.exit(0)

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def repair_user_roles():
    with app.app_context():
        logger.info("Starting user role repair...")

        # Map string role values to enum values
        role_mapping = {
            'Team Member': Role.TEAM_MEMBER,
            'TEAM_MEMBER': Role.TEAM_MEMBER,
            'Team Leader': Role.TEAM_LEADER,
            'TEAM_LEADER': Role.TEAM_LEADER,
            'Supervisor': Role.SUPERVISOR,
            'SUPERVISOR': Role.SUPERVISOR,
            'Administrator': Role.ADMIN,
            'ADMIN': Role.ADMIN
        }

        users = User.query.all()
        fixed_count = 0

        for user in users:
            original_role = user.role

            # Fix role if it's a string or None
            if isinstance(user.role, str):
                user.role = role_mapping.get(user.role, Role.TEAM_MEMBER)
                fixed_count += 1
            elif user.role is None:
                user.role = Role.TEAM_MEMBER
                fixed_count += 1

        if fixed_count > 0:
            db.session.commit()
            logger.info(f"Fixed roles for {fixed_count} users")
        else:
            logger.info("No role fixes needed")

        logger.info("Role repair completed")

if __name__ == "__main__":
    try:
        repair_user_roles()
    except Exception as e:
        logger.error(f"Migration failed: {e}")
        sys.exit(1)
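
A read-only pre-check, assuming the same app context and models, shows how many rows the repair would touch before anything is committed:

with app.app_context():
    needs_fix = [u.username for u in User.query.all()
                 if isinstance(u.role, str) or u.role is None]
    print(f"{len(needs_fix)} users would be repaired: {needs_fix[:10]}")
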
65
migrations/old_migrations/19_add_company_invitations.py
Normal file
@@ -0,0 +1,65 @@
#!/usr/bin/env python3
"""
Add company invitations table for email-based registration
"""

import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from flask import Flask
from models import db
from sqlalchemy import text
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def migrate():
    """Add company_invitation table"""
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:////data/timetrack.db')
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

    db.init_app(app)

    with app.app_context():
        try:
            # Create company_invitation table
            create_table_sql = text("""
                CREATE TABLE IF NOT EXISTS company_invitation (
                    id SERIAL PRIMARY KEY,
                    company_id INTEGER NOT NULL REFERENCES company(id),
                    email VARCHAR(120) NOT NULL,
                    token VARCHAR(64) UNIQUE NOT NULL,
                    role VARCHAR(50) DEFAULT 'Team Member',
                    invited_by_id INTEGER NOT NULL REFERENCES "user"(id),
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    expires_at TIMESTAMP NOT NULL,
                    accepted BOOLEAN DEFAULT FALSE,
                    accepted_at TIMESTAMP,
                    accepted_by_user_id INTEGER REFERENCES "user"(id)
                );
            """)

            db.session.execute(create_table_sql)

            # Create indexes for better performance
            db.session.execute(text("CREATE INDEX IF NOT EXISTS idx_invitation_token ON company_invitation(token);"))
            db.session.execute(text("CREATE INDEX IF NOT EXISTS idx_invitation_email ON company_invitation(email);"))
            db.session.execute(text("CREATE INDEX IF NOT EXISTS idx_invitation_company ON company_invitation(company_id);"))
            db.session.execute(text("CREATE INDEX IF NOT EXISTS idx_invitation_expires ON company_invitation(expires_at);"))

            db.session.commit()
            logger.info("Successfully created company_invitation table")

            return True

        except Exception as e:
            logger.error(f"Error creating company_invitation table: {str(e)}")
            db.session.rollback()
            return False

if __name__ == '__main__':
    success = migrate()
    sys.exit(0 if success else 1)
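
For context, a hypothetical sketch of how a row in the new table might be created; the real invitation endpoint is not part of this diff, so the token and expiry policy shown here are assumptions, and the sketch relies on the same `db` session and app context as the script above:

import secrets
from datetime import datetime, timedelta
from sqlalchemy import text

token = secrets.token_urlsafe(32)  # about 43 characters, fits the VARCHAR(64) token column
expires_at = datetime.utcnow() + timedelta(days=7)  # assumed expiry window

db.session.execute(text("""
    INSERT INTO company_invitation (company_id, email, token, role, invited_by_id, expires_at)
    VALUES (:company_id, :email, :token, :role, :invited_by_id, :expires_at)
"""), {
    'company_id': 1, 'email': 'new.hire@example.com', 'token': token,
    'role': 'Team Member', 'invited_by_id': 1, 'expires_at': expires_at,
})
db.session.commit()
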
94
migrations/old_migrations/20_add_company_updated_at.py
Executable file
@@ -0,0 +1,94 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Add updated_at column to company table
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import logging
|
||||
from datetime import datetime
|
||||
|
||||
# Add parent directory to path to import app
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from app import app, db
|
||||
from sqlalchemy import text
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
def run_migration():
|
||||
"""Add updated_at column to company table"""
|
||||
with app.app_context():
|
||||
try:
|
||||
# Check if we're using PostgreSQL or SQLite
|
||||
database_url = app.config['SQLALCHEMY_DATABASE_URI']
|
||||
is_postgres = 'postgresql://' in database_url or 'postgres://' in database_url
|
||||
|
||||
if is_postgres:
|
||||
# PostgreSQL migration
|
||||
logger.info("Running PostgreSQL migration to add updated_at to company table...")
|
||||
|
||||
# Check if column exists
|
||||
result = db.session.execute(text("""
|
||||
SELECT column_name
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'company' AND column_name = 'updated_at'
|
||||
"""))
|
||||
|
||||
if not result.fetchone():
|
||||
logger.info("Adding updated_at column to company table...")
|
||||
db.session.execute(text("""
|
||||
ALTER TABLE company
|
||||
ADD COLUMN updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
||||
"""))
|
||||
|
||||
# Update existing rows to have updated_at = created_at
|
||||
db.session.execute(text("""
|
||||
UPDATE company
|
||||
SET updated_at = created_at
|
||||
WHERE updated_at IS NULL
|
||||
"""))
|
||||
|
||||
db.session.commit()
|
||||
logger.info("Successfully added updated_at column to company table")
|
||||
else:
|
||||
logger.info("updated_at column already exists in company table")
|
||||
else:
|
||||
# SQLite migration
|
||||
logger.info("Running SQLite migration to add updated_at to company table...")
|
||||
|
||||
# For SQLite, we need to check differently
|
||||
result = db.session.execute(text("PRAGMA table_info(company)"))
|
||||
columns = [row[1] for row in result.fetchall()]
|
||||
|
||||
if 'updated_at' not in columns:
|
||||
logger.info("Adding updated_at column to company table...")
|
||||
db.session.execute(text("""
|
||||
ALTER TABLE company
|
||||
ADD COLUMN updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
||||
"""))
|
||||
|
||||
# Update existing rows to have updated_at = created_at
|
||||
db.session.execute(text("""
|
||||
UPDATE company
|
||||
SET updated_at = created_at
|
||||
WHERE updated_at IS NULL
|
||||
"""))
|
||||
|
||||
db.session.commit()
|
||||
logger.info("Successfully added updated_at column to company table")
|
||||
else:
|
||||
logger.info("updated_at column already exists in company table")
|
||||
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Migration failed: {e}")
|
||||
db.session.rollback()
|
||||
return False
|
||||
|
||||
if __name__ == "__main__":
|
||||
success = run_migration()
|
||||
sys.exit(0 if success else 1)
|
||||
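
The column added above gets a default but nothing refreshes it on UPDATE. If automatic maintenance is wanted, one option is a trigger; this is a sketch for the PostgreSQL branch only (PostgreSQL 11 or newer), not something this migration creates:

db.session.execute(text("""
    CREATE OR REPLACE FUNCTION set_company_updated_at() RETURNS trigger AS $$
    BEGIN
        NEW.updated_at = CURRENT_TIMESTAMP;
        RETURN NEW;
    END;
    $$ LANGUAGE plpgsql
"""))
db.session.execute(text("DROP TRIGGER IF EXISTS company_set_updated_at ON company"))
db.session.execute(text("""
    CREATE TRIGGER company_set_updated_at
        BEFORE UPDATE ON company
        FOR EACH ROW EXECUTE FUNCTION set_company_updated_at()
"""))
db.session.commit()
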
138
migrations/old_migrations/run_all_db_migrations.py
Executable file
@@ -0,0 +1,138 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Master database migration runner
|
||||
Runs all database schema migrations in the correct order
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import json
|
||||
from datetime import datetime
|
||||
|
||||
# Migration state file
|
||||
MIGRATION_STATE_FILE = '/data/db_migrations_state.json'
|
||||
|
||||
# List of database schema migrations in order
|
||||
DB_MIGRATIONS = [
|
||||
'01_migrate_db.py', # SQLite schema updates (must run before data migration)
|
||||
'20_add_company_updated_at.py', # Add updated_at column BEFORE data migration
|
||||
'02_migrate_sqlite_to_postgres_fixed.py', # Fixed SQLite to PostgreSQL data migration
|
||||
'03_add_dashboard_columns.py',
|
||||
'04_add_user_preferences_columns.py',
|
||||
'05_fix_task_status_enum.py',
|
||||
'06_add_archived_status.py',
|
||||
'07_fix_company_work_config_columns.py',
|
||||
'08_fix_work_region_enum.py',
|
||||
'09_add_germany_to_workregion.py',
|
||||
'10_add_company_settings_columns.py',
|
||||
'19_add_company_invitations.py'
|
||||
]
|
||||
|
||||
def load_migration_state():
|
||||
"""Load the migration state from file"""
|
||||
if os.path.exists(MIGRATION_STATE_FILE):
|
||||
try:
|
||||
with open(MIGRATION_STATE_FILE, 'r') as f:
|
||||
return json.load(f)
|
||||
        except (OSError, json.JSONDecodeError):
|
||||
return {}
|
||||
return {}
|
||||
|
||||
def save_migration_state(state):
|
||||
"""Save the migration state to file"""
|
||||
os.makedirs(os.path.dirname(MIGRATION_STATE_FILE), exist_ok=True)
|
||||
with open(MIGRATION_STATE_FILE, 'w') as f:
|
||||
json.dump(state, f, indent=2)
|
||||
|
||||
def run_migration(migration_file):
|
||||
"""Run a single migration script"""
|
||||
script_path = os.path.join(os.path.dirname(__file__), migration_file)
|
||||
|
||||
if not os.path.exists(script_path):
|
||||
print(f"⚠️ Migration {migration_file} not found, skipping...")
|
||||
return False
|
||||
|
||||
print(f"\n🔄 Running migration: {migration_file}")
|
||||
|
||||
try:
|
||||
# Run the migration script
|
||||
result = subprocess.run(
|
||||
[sys.executable, script_path],
|
||||
capture_output=True,
|
||||
text=True
|
||||
)
|
||||
|
||||
if result.returncode == 0:
|
||||
print(f"✅ {migration_file} completed successfully")
|
||||
if result.stdout:
|
||||
print(result.stdout)
|
||||
return True
|
||||
else:
|
||||
print(f"❌ {migration_file} failed with return code {result.returncode}")
|
||||
if result.stderr:
|
||||
print(f"Error output: {result.stderr}")
|
||||
if result.stdout:
|
||||
print(f"Standard output: {result.stdout}")
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Error running {migration_file}: {e}")
|
||||
return False
|
||||
|
||||
def main():
|
||||
"""Run all database migrations"""
|
||||
print("=== Database Schema Migrations ===")
|
||||
print(f"Running {len(DB_MIGRATIONS)} migrations...")
|
||||
|
||||
# Load migration state
|
||||
state = load_migration_state()
|
||||
|
||||
success_count = 0
|
||||
failed_count = 0
|
||||
skipped_count = 0
|
||||
|
||||
for migration in DB_MIGRATIONS:
|
||||
# Check if migration has already been run successfully
|
||||
if state.get(migration, {}).get('status') == 'success':
|
||||
print(f"\n⏭️ Skipping {migration} (already completed)")
|
||||
skipped_count += 1
|
||||
continue
|
||||
|
||||
# Run the migration
|
||||
success = run_migration(migration)
|
||||
|
||||
# Update state
|
||||
state[migration] = {
|
||||
'status': 'success' if success else 'failed',
|
||||
'timestamp': datetime.now().isoformat(),
|
||||
'attempts': state.get(migration, {}).get('attempts', 0) + 1
|
||||
}
|
||||
|
||||
if success:
|
||||
success_count += 1
|
||||
else:
|
||||
failed_count += 1
|
||||
# Don't stop on failure, continue with other migrations
|
||||
print(f"⚠️ Continuing despite failure in {migration}")
|
||||
|
||||
# Save state after each migration
|
||||
save_migration_state(state)
|
||||
|
||||
# Summary
|
||||
print("\n" + "="*50)
|
||||
print("Database Migration Summary:")
|
||||
print(f"✅ Successful: {success_count}")
|
||||
print(f"❌ Failed: {failed_count}")
|
||||
print(f"⏭️ Skipped: {skipped_count}")
|
||||
print(f"📊 Total: {len(DB_MIGRATIONS)}")
|
||||
|
||||
if failed_count > 0:
|
||||
print("\n⚠️ Some migrations failed. Check the logs above for details.")
|
||||
return 1
|
||||
else:
|
||||
print("\n✨ All database migrations completed successfully!")
|
||||
return 0
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
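
For reference, the state file written to /data/db_migrations_state.json holds one entry per script; the values below are invented purely to illustrate the shape:

example_state = {
    "01_migrate_db.py": {"status": "success", "timestamp": "2024-01-01T08:00:00", "attempts": 1},
    "08_fix_work_region_enum.py": {"status": "failed", "timestamp": "2024-01-01T08:00:09", "attempts": 2},
}
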
166
migrations/old_migrations/run_code_migrations.py
Executable file
@@ -0,0 +1,166 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Run code migrations during startup - updates code to match model changes
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
import hashlib
|
||||
import json
|
||||
from datetime import datetime
|
||||
|
||||
MIGRATION_STATE_FILE = '/data/code_migrations_state.json'
|
||||
|
||||
def get_migration_hash(script_path):
|
||||
"""Get hash of migration script to detect changes"""
|
||||
with open(script_path, 'rb') as f:
|
||||
return hashlib.md5(f.read()).hexdigest()
|
||||
|
||||
def load_migration_state():
|
||||
"""Load state of previously run migrations"""
|
||||
if os.path.exists(MIGRATION_STATE_FILE):
|
||||
try:
|
||||
with open(MIGRATION_STATE_FILE, 'r') as f:
|
||||
return json.load(f)
|
||||
        except (OSError, json.JSONDecodeError):
|
||||
return {}
|
||||
return {}
|
||||
|
||||
def save_migration_state(state):
|
||||
"""Save migration state"""
|
||||
os.makedirs(os.path.dirname(MIGRATION_STATE_FILE), exist_ok=True)
|
||||
with open(MIGRATION_STATE_FILE, 'w') as f:
|
||||
json.dump(state, f, indent=2)
|
||||
|
||||
def should_run_migration(script_path, state):
|
||||
"""Check if migration should run based on state"""
|
||||
script_name = os.path.basename(script_path)
|
||||
current_hash = get_migration_hash(script_path)
|
||||
|
||||
if script_name not in state:
|
||||
return True
|
||||
|
||||
# Re-run if script has changed
|
||||
if state[script_name].get('hash') != current_hash:
|
||||
return True
|
||||
|
||||
# Skip if already run successfully
|
||||
if state[script_name].get('status') == 'success':
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def run_migration(script_path, state):
|
||||
"""Run a single migration script"""
|
||||
script_name = os.path.basename(script_path)
|
||||
print(f"\n{'='*60}")
|
||||
print(f"Running code migration: {script_name}")
|
||||
print('='*60)
|
||||
|
||||
try:
|
||||
result = subprocess.run(
|
||||
[sys.executable, script_path],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
check=True,
|
||||
timeout=300 # 5 minute timeout
|
||||
)
|
||||
|
||||
print(result.stdout)
|
||||
if result.stderr:
|
||||
print("Warnings:", result.stderr)
|
||||
|
||||
# Update state
|
||||
state[script_name] = {
|
||||
'hash': get_migration_hash(script_path),
|
||||
'status': 'success',
|
||||
'last_run': str(datetime.now()),
|
||||
'output': result.stdout[-1000:] if result.stdout else '' # Last 1000 chars
|
||||
}
|
||||
save_migration_state(state)
|
||||
return True
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"❌ Error running {script_name}:")
|
||||
print(e.stdout)
|
||||
print(e.stderr)
|
||||
|
||||
# Update state with failure
|
||||
state[script_name] = {
|
||||
'hash': get_migration_hash(script_path),
|
||||
'status': 'failed',
|
||||
'last_run': str(datetime.now()),
|
||||
'error': str(e)
|
||||
}
|
||||
save_migration_state(state)
|
||||
return False
|
||||
except subprocess.TimeoutExpired:
|
||||
print(f"❌ Migration {script_name} timed out!")
|
||||
state[script_name] = {
|
||||
'hash': get_migration_hash(script_path),
|
||||
'status': 'timeout',
|
||||
'last_run': str(datetime.now())
|
||||
}
|
||||
save_migration_state(state)
|
||||
return False
|
||||
|
||||
def main():
|
||||
"""Run all code migrations that need to be run"""
|
||||
|
||||
print("🔄 Checking for code migrations...")
|
||||
|
||||
# Get migration state
|
||||
state = load_migration_state()
|
||||
|
||||
# Get all migration scripts
|
||||
migrations_dir = Path(__file__).parent
|
||||
migration_scripts = sorted([
|
||||
str(p) for p in migrations_dir.glob('*.py')
|
||||
if p.name.startswith(('11_', '12_', '13_', '14_', '15_'))
|
||||
and 'template' not in p.name.lower()
|
||||
])
|
||||
|
||||
if not migration_scripts:
|
||||
print("No code migration scripts found.")
|
||||
return 0
|
||||
|
||||
# Check which migrations need to run
|
||||
to_run = []
|
||||
for script in migration_scripts:
|
||||
if should_run_migration(script, state):
|
||||
to_run.append(script)
|
||||
|
||||
if not to_run:
|
||||
print("✅ All code migrations are up to date.")
|
||||
return 0
|
||||
|
||||
print(f"\n📋 Found {len(to_run)} code migrations to run:")
|
||||
for script in to_run:
|
||||
print(f" - {Path(script).name}")
|
||||
|
||||
# Run migrations
|
||||
failed = []
|
||||
for script in to_run:
|
||||
if not run_migration(script, state):
|
||||
failed.append(script)
|
||||
# Continue with other migrations even if one fails
|
||||
print(f"\n⚠️ Migration {Path(script).name} failed, continuing with others...")
|
||||
|
||||
# Summary
|
||||
print("\n" + "="*60)
|
||||
if failed:
|
||||
print(f"⚠️ {len(failed)} code migrations failed:")
|
||||
for script in failed:
|
||||
print(f" - {Path(script).name}")
|
||||
print("\nThe application may not work correctly.")
|
||||
print("Check the logs and fix the issues.")
|
||||
# Don't exit with error - let the app start anyway
|
||||
return 0
|
||||
else:
|
||||
print("✅ All code migrations completed successfully!")
|
||||
return 0
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
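Note that this runner keys its decision on a content hash rather than only a status flag, so editing a migration script makes it eligible to run again. A minimal sketch of that rule, with a hypothetical script name and hash values:

import hashlib

# Hypothetical stored state for one code-migration script (values are invented).
state = {'12_example_code_migration.py': {'hash': 'old-md5-digest', 'status': 'success'}}

# MD5 of the script as it exists on disk now (here hashed from a stand-in string).
current_hash = hashlib.md5(b'updated script body').hexdigest()

# Even though the last run succeeded, a changed hash forces a re-run.
needs_rerun = state['12_example_code_migration.py']['hash'] != current_hash
print(needs_rerun)  # True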
327
migrations/postgres_only_migration.py
Executable file
327
migrations/postgres_only_migration.py
Executable file
@@ -0,0 +1,327 @@
#!/usr/bin/env python3
"""
PostgreSQL-only migration script for TimeTrack
Applies all schema changes from commit 4214e88 onward
"""

import os
import sys
import psycopg2
from psycopg2.extras import RealDictCursor
import logging
from datetime import datetime

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


class PostgresMigration:
    def __init__(self, database_url):
        self.database_url = database_url
        self.conn = None

    def connect(self):
        """Connect to PostgreSQL database"""
        try:
            self.conn = psycopg2.connect(self.database_url)
            self.conn.autocommit = False
            logger.info("Connected to PostgreSQL database")
            return True
        except Exception as e:
            logger.error(f"Failed to connect to database: {e}")
            return False

    def close(self):
        """Close database connection"""
        if self.conn:
            self.conn.close()

    def execute_migration(self, name, sql_statements):
        """Execute a migration with proper error handling"""
        logger.info(f"Running migration: {name}")
        cursor = self.conn.cursor()

        try:
            for statement in sql_statements:
                if statement.strip():
                    cursor.execute(statement)
            self.conn.commit()
            logger.info(f"✓ {name} completed successfully")
            return True
        except Exception as e:
            self.conn.rollback()
            logger.error(f"✗ {name} failed: {e}")
            return False
        finally:
            cursor.close()

    def check_column_exists(self, table_name, column_name):
        """Check if a column exists in a table"""
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT EXISTS (
                SELECT 1 FROM information_schema.columns
                WHERE table_name = %s AND column_name = %s
            )
        """, (table_name, column_name))
        exists = cursor.fetchone()[0]
        cursor.close()
        return exists

    def check_table_exists(self, table_name):
        """Check if a table exists"""
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT EXISTS (
                SELECT 1 FROM information_schema.tables
                WHERE table_name = %s
            )
        """, (table_name,))
        exists = cursor.fetchone()[0]
        cursor.close()
        return exists

    def check_enum_value_exists(self, enum_name, value):
        """Check if an enum value exists"""
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT EXISTS (
                SELECT 1 FROM pg_enum
                WHERE enumlabel = %s
                AND enumtypid = (SELECT oid FROM pg_type WHERE typname = %s)
            )
        """, (value, enum_name))
        exists = cursor.fetchone()[0]
        cursor.close()
        return exists

    def run_all_migrations(self):
        """Run all migrations in order"""
        if not self.connect():
            return False

        success = True

        # 1. Add company.updated_at
        if not self.check_column_exists('company', 'updated_at'):
            success &= self.execute_migration("Add company.updated_at", [
                """
                ALTER TABLE company
                ADD COLUMN updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP;
                """,
                """
                UPDATE company SET updated_at = created_at WHERE updated_at IS NULL;
                """
            ])

        # 2. Add user columns for 2FA and avatar
        if not self.check_column_exists('user', 'two_factor_enabled'):
            success &= self.execute_migration("Add user 2FA and avatar columns", [
                """
                ALTER TABLE "user"
                ADD COLUMN two_factor_enabled BOOLEAN DEFAULT FALSE,
                ADD COLUMN two_factor_secret VARCHAR(32),
                ADD COLUMN avatar_url VARCHAR(255);
                """
            ])

        # 3. Create company_invitation table
        if not self.check_table_exists('company_invitation'):
            success &= self.execute_migration("Create company_invitation table", [
                """
                CREATE TABLE company_invitation (
                    id SERIAL PRIMARY KEY,
                    company_id INTEGER NOT NULL REFERENCES company(id),
                    email VARCHAR(255) NOT NULL,
                    role VARCHAR(50) NOT NULL,
                    token VARCHAR(255) UNIQUE NOT NULL,
                    invited_by_id INTEGER REFERENCES "user"(id),
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    expires_at TIMESTAMP NOT NULL,
                    used_at TIMESTAMP,
                    used_by_id INTEGER REFERENCES "user"(id)
                );
                """,
                """
                CREATE INDEX idx_invitation_token ON company_invitation(token);
                """,
                """
                CREATE INDEX idx_invitation_company ON company_invitation(company_id);
                """,
                """
                CREATE INDEX idx_invitation_email ON company_invitation(email);
                """
            ])

        # 4. Add user_preferences columns
        if self.check_table_exists('user_preferences'):
            columns_to_add = [
                ('theme', 'VARCHAR(20) DEFAULT \'light\''),
                ('language', 'VARCHAR(10) DEFAULT \'en\''),
                ('timezone', 'VARCHAR(50) DEFAULT \'UTC\''),
                ('date_format', 'VARCHAR(20) DEFAULT \'YYYY-MM-DD\''),
                ('time_format', 'VARCHAR(10) DEFAULT \'24h\''),
                ('week_start', 'INTEGER DEFAULT 1'),
                ('show_weekends', 'BOOLEAN DEFAULT TRUE'),
                ('compact_mode', 'BOOLEAN DEFAULT FALSE'),
                ('email_notifications', 'BOOLEAN DEFAULT TRUE'),
                ('push_notifications', 'BOOLEAN DEFAULT FALSE'),
                ('task_reminders', 'BOOLEAN DEFAULT TRUE'),
                ('daily_summary', 'BOOLEAN DEFAULT FALSE'),
                ('weekly_report', 'BOOLEAN DEFAULT TRUE'),
                ('mention_notifications', 'BOOLEAN DEFAULT TRUE'),
                ('task_assigned_notifications', 'BOOLEAN DEFAULT TRUE'),
                ('task_completed_notifications', 'BOOLEAN DEFAULT FALSE'),
                ('sound_enabled', 'BOOLEAN DEFAULT TRUE'),
                ('keyboard_shortcuts', 'BOOLEAN DEFAULT TRUE'),
                ('auto_start_timer', 'BOOLEAN DEFAULT FALSE'),
                ('idle_time_detection', 'BOOLEAN DEFAULT TRUE'),
                ('pomodoro_enabled', 'BOOLEAN DEFAULT FALSE'),
                ('pomodoro_duration', 'INTEGER DEFAULT 25'),
                ('pomodoro_break', 'INTEGER DEFAULT 5')
            ]

            for col_name, col_def in columns_to_add:
                if not self.check_column_exists('user_preferences', col_name):
                    success &= self.execute_migration(f"Add user_preferences.{col_name}", [
                        f'ALTER TABLE user_preferences ADD COLUMN {col_name} {col_def};'
                    ])

        # 5. Add user_dashboard columns
        if self.check_table_exists('user_dashboard'):
            if not self.check_column_exists('user_dashboard', 'layout'):
                success &= self.execute_migration("Add user_dashboard layout columns", [
                    """
                    ALTER TABLE user_dashboard
                    ADD COLUMN layout JSON DEFAULT '{}',
                    ADD COLUMN is_locked BOOLEAN DEFAULT FALSE;
                    """
                ])

        # 6. Add company_work_config columns
        if self.check_table_exists('company_work_config'):
            columns_to_add = [
                ('standard_hours_per_day', 'FLOAT DEFAULT 8.0'),
                ('standard_hours_per_week', 'FLOAT DEFAULT 40.0'),
                ('overtime_rate', 'FLOAT DEFAULT 1.5'),
                ('double_time_enabled', 'BOOLEAN DEFAULT FALSE'),
                ('double_time_threshold', 'FLOAT DEFAULT 12.0'),
                ('double_time_rate', 'FLOAT DEFAULT 2.0'),
                ('weekly_overtime_threshold', 'FLOAT DEFAULT 40.0'),
                ('weekly_overtime_rate', 'FLOAT DEFAULT 1.5'),
                ('created_at', 'TIMESTAMP DEFAULT CURRENT_TIMESTAMP'),
                ('updated_at', 'TIMESTAMP DEFAULT CURRENT_TIMESTAMP')
            ]

            for col_name, col_def in columns_to_add:
                if not self.check_column_exists('company_work_config', col_name):
                    success &= self.execute_migration(f"Add company_work_config.{col_name}", [
                        f'ALTER TABLE company_work_config ADD COLUMN {col_name} {col_def};'
                    ])

        # 7. Add company_settings columns
        if self.check_table_exists('company_settings'):
            columns_to_add = [
                ('work_week_start', 'INTEGER DEFAULT 1'),
                ('work_days', 'VARCHAR(20) DEFAULT \'1,2,3,4,5\''),
                ('time_tracking_mode', 'VARCHAR(20) DEFAULT \'flexible\''),
                ('allow_manual_time', 'BOOLEAN DEFAULT TRUE'),
                ('require_project_selection', 'BOOLEAN DEFAULT TRUE'),
                ('allow_future_entries', 'BOOLEAN DEFAULT FALSE'),
                ('max_hours_per_entry', 'FLOAT DEFAULT 24.0'),
                ('min_hours_per_entry', 'FLOAT DEFAULT 0.0'),
                ('round_time_to', 'INTEGER DEFAULT 1'),
                ('auto_break_deduction', 'BOOLEAN DEFAULT FALSE'),
                ('allow_overlapping_entries', 'BOOLEAN DEFAULT FALSE'),
                ('require_daily_notes', 'BOOLEAN DEFAULT FALSE'),
                ('enable_tasks', 'BOOLEAN DEFAULT TRUE'),
                ('enable_projects', 'BOOLEAN DEFAULT TRUE'),
                ('enable_teams', 'BOOLEAN DEFAULT TRUE'),
                ('enable_reports', 'BOOLEAN DEFAULT TRUE'),
                ('enable_invoicing', 'BOOLEAN DEFAULT FALSE'),
                ('enable_client_access', 'BOOLEAN DEFAULT FALSE'),
                ('default_currency', 'VARCHAR(3) DEFAULT \'USD\''),
                ('created_at', 'TIMESTAMP DEFAULT CURRENT_TIMESTAMP'),
                ('updated_at', 'TIMESTAMP DEFAULT CURRENT_TIMESTAMP')
            ]

            for col_name, col_def in columns_to_add:
                if not self.check_column_exists('company_settings', col_name):
                    success &= self.execute_migration(f"Add company_settings.{col_name}", [
                        f'ALTER TABLE company_settings ADD COLUMN {col_name} {col_def};'
                    ])

        # 8. Add dashboard_widget columns
        if self.check_table_exists('dashboard_widget'):
            if not self.check_column_exists('dashboard_widget', 'config'):
                success &= self.execute_migration("Add dashboard_widget config columns", [
                    """
                    ALTER TABLE dashboard_widget
                    ADD COLUMN config JSON DEFAULT '{}',
                    ADD COLUMN is_visible BOOLEAN DEFAULT TRUE;
                    """
                ])

        # 9. Update WorkRegion enum
        if not self.check_enum_value_exists('workregion', 'GERMANY'):
            success &= self.execute_migration("Add GERMANY to WorkRegion enum", [
                """
                ALTER TYPE workregion ADD VALUE IF NOT EXISTS 'GERMANY';
                """
            ])

        # 10. Update TaskStatus enum
        if not self.check_enum_value_exists('taskstatus', 'ARCHIVED'):
            success &= self.execute_migration("Add ARCHIVED to TaskStatus enum", [
                """
                ALTER TYPE taskstatus ADD VALUE IF NOT EXISTS 'ARCHIVED';
                """
            ])

        # 11. Update WidgetType enum
        widget_types_to_add = [
            'REVENUE_CHART', 'EXPENSE_CHART', 'PROFIT_CHART', 'CASH_FLOW',
            'INVOICE_STATUS', 'CLIENT_LIST', 'PROJECT_BUDGET', 'TEAM_CAPACITY',
            'SPRINT_BURNDOWN', 'VELOCITY_CHART', 'BACKLOG_STATUS', 'RELEASE_TIMELINE',
            'CODE_COMMITS', 'BUILD_STATUS', 'DEPLOYMENT_HISTORY', 'ERROR_RATE',
            'SYSTEM_HEALTH', 'USER_ACTIVITY', 'SECURITY_ALERTS', 'AUDIT_LOG'
        ]

        for widget_type in widget_types_to_add:
            if not self.check_enum_value_exists('widgettype', widget_type):
                success &= self.execute_migration(f"Add {widget_type} to WidgetType enum", [
                    f"ALTER TYPE widgettype ADD VALUE IF NOT EXISTS '{widget_type}';"
                ])

        self.close()

        if success:
            logger.info("\n✅ All migrations completed successfully!")
        else:
            logger.error("\n❌ Some migrations failed. Check the logs above.")

        return success


def main():
    """Main migration function"""
    # Get database URL from environment
    database_url = os.environ.get('DATABASE_URL')

    if not database_url:
        logger.error("DATABASE_URL environment variable not set")
        return 1

    # Run migrations
    migration = PostgresMigration(database_url)
    success = migration.run_all_migrations()

    return 0 if success else 1


if __name__ == "__main__":
    sys.exit(main())
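Every numbered step above uses the same idempotent pattern: probe the catalog with a check_* helper, then apply DDL only when the object is missing. A hedged sketch of how one more step could be slotted into run_all_migrations() before self.close(); the table and column names here are invented for illustration and are not part of this commit:

        # 12. Add a hypothetical column (example only, not part of this script)
        if not self.check_column_exists('example_table', 'example_column'):
            success &= self.execute_migration("Add example_table.example_column", [
                "ALTER TABLE example_table ADD COLUMN example_column FLOAT DEFAULT 0.0;"
            ])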
131
migrations/run_postgres_migrations.py
Executable file
131
migrations/run_postgres_migrations.py
Executable file
@@ -0,0 +1,131 @@
#!/usr/bin/env python3
"""
PostgreSQL-only migration runner
Manages migration state and runs migrations in order
"""

import os
import sys
import json
import subprocess
from datetime import datetime
from pathlib import Path

# Migration state file
MIGRATION_STATE_FILE = '/data/postgres_migrations_state.json'

# List of PostgreSQL migrations in order
POSTGRES_MIGRATIONS = [
    'postgres_only_migration.py',  # Main migration from commit 4214e88 onward
]


def load_migration_state():
    """Load the migration state from file"""
    if os.path.exists(MIGRATION_STATE_FILE):
        try:
            with open(MIGRATION_STATE_FILE, 'r') as f:
                return json.load(f)
        except Exception:  # corrupt or unreadable state file - start fresh
            return {}
    return {}


def save_migration_state(state):
    """Save the migration state to file"""
    os.makedirs(os.path.dirname(MIGRATION_STATE_FILE), exist_ok=True)
    with open(MIGRATION_STATE_FILE, 'w') as f:
        json.dump(state, f, indent=2)


def run_migration(migration_file):
    """Run a single migration script"""
    script_path = os.path.join(os.path.dirname(__file__), migration_file)

    if not os.path.exists(script_path):
        print(f"⚠️ Migration {migration_file} not found, skipping...")
        return False

    print(f"\n🔄 Running migration: {migration_file}")

    try:
        # Run the migration script
        result = subprocess.run(
            [sys.executable, script_path],
            capture_output=True,
            text=True
        )

        if result.returncode == 0:
            print(f"✅ {migration_file} completed successfully")
            if result.stdout:
                print(result.stdout)
            return True
        else:
            print(f"❌ {migration_file} failed with return code {result.returncode}")
            if result.stderr:
                print(f"Error output: {result.stderr}")
            if result.stdout:
                print(f"Standard output: {result.stdout}")
            return False

    except Exception as e:
        print(f"❌ Error running {migration_file}: {e}")
        return False


def main():
    """Run all PostgreSQL migrations"""
    print("=== PostgreSQL Database Migrations ===")
    print(f"Running {len(POSTGRES_MIGRATIONS)} migrations...")

    # Load migration state
    state = load_migration_state()

    success_count = 0
    failed_count = 0
    skipped_count = 0

    for migration in POSTGRES_MIGRATIONS:
        # Check if migration has already been run successfully
        if state.get(migration, {}).get('status') == 'success':
            print(f"\n⏭️ Skipping {migration} (already completed)")
            skipped_count += 1
            continue

        # Run the migration
        success = run_migration(migration)

        # Update state
        state[migration] = {
            'status': 'success' if success else 'failed',
            'timestamp': datetime.now().isoformat(),
            'attempts': state.get(migration, {}).get('attempts', 0) + 1
        }

        if success:
            success_count += 1
        else:
            failed_count += 1

        # Save state after each migration
        save_migration_state(state)

    # Summary
    print("\n" + "="*50)
    print("PostgreSQL Migration Summary:")
    print(f"✅ Successful: {success_count}")
    print(f"❌ Failed: {failed_count}")
    print(f"⏭️ Skipped: {skipped_count}")
    print(f"📊 Total: {len(POSTGRES_MIGRATIONS)}")

    if failed_count > 0:
        print("\n⚠️ Some migrations failed. Check the logs above for details.")
        return 1
    else:
        print("\n✨ All PostgreSQL migrations completed successfully!")
        return 0


if __name__ == "__main__":
    sys.exit(main())
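Because this runner exits with a non-zero status when any migration fails, a startup wrapper can gate application boot on it. A minimal sketch of such a hook, assuming the script lives at migrations/run_postgres_migrations.py and DATABASE_URL is already set in the environment; this wrapper itself is not part of the commit:

import subprocess
import sys

# Run the PostgreSQL migrations first and refuse to boot the app if they failed.
rc = subprocess.call([sys.executable, "migrations/run_postgres_migrations.py"])
if rc != 0:
    sys.exit("Database migrations failed; not starting the application.")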