"""
US Stock Analysis Error Logger Module
Centralized error logging for US SEC EDGAR Analysis System
Logs to both file (/tmp/us_stock_errors.log) and SQLite database
"""

import logging
import sqlite3
import traceback
import os
from datetime import datetime, timedelta
from logging.handlers import RotatingFileHandler
from typing import Optional, Dict, Any

# Configuration
LOG_FILE = '/tmp/us_stock_errors.log'  # rotating file target for the handler below
CRON_LOG_FILE = '/tmp/us_stock_cron.log'  # NOTE(review): not referenced in this module — confirm external use
DB_PATH = '/home/mcmarketshost/public_html/us_stock_analysis.db'  # SQLite DB with us_error_log / us_cron_log tables
MAX_LOG_SIZE = 5 * 1024 * 1024  # 5 MB
BACKUP_COUNT = 3  # Keep 3 backup files

# Create logger
# Shared by every helper in this module. The logger itself is set to DEBUG
# so each handler can apply its own, stricter threshold.
logger = logging.getLogger('us_stock_analysis')
logger.setLevel(logging.DEBUG)

# File handler with rotation
# Rolls over at MAX_LOG_SIZE, keeping BACKUP_COUNT rotated backups.
file_handler = RotatingFileHandler(
    LOG_FILE,
    maxBytes=MAX_LOG_SIZE,
    backupCount=BACKUP_COUNT
)
file_handler.setLevel(logging.DEBUG)

# Console handler for critical errors
# Only ERROR and above reach stderr; everything else stays file-only.
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.ERROR)

# Formatter
formatter = logging.Formatter(
    '%(asctime)s | %(levelname)s | %(name)s | %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)

# Add handlers
# Guard against duplicate handlers when this module is imported more than
# once (e.g. under different import paths), which would double every line.
if not logger.handlers:
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)


def _get_db_conn():
    """Open a connection to the analysis database.

    Rows are returned as sqlite3.Row so callers can access columns by name.
    """
    connection = sqlite3.connect(DB_PATH)
    connection.row_factory = sqlite3.Row
    return connection


def log_to_db(level, source, message, ticker=None, cik=None, error_type=None,
              traceback_info=None, request_url=None, response_code=None, extra_data=None):
    """Insert one row into the us_error_log table.

    Long text fields are truncated to keep rows bounded: message to 2000,
    traceback to 5000, URL to 500, extra_data to 2000 characters.

    Returns:
        True on success, False on any failure. Failures are logged to the
        file logger and never raised, so logging cannot crash the caller.
    """
    try:
        conn = _get_db_conn()
        try:
            cursor = conn.cursor()
            cursor.execute('''
                INSERT INTO us_error_log
                (timestamp, level, source, ticker, cik, error_type, message,
                 traceback_info, request_url, response_code, extra_data)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            ''', (
                datetime.now().isoformat(),
                level,
                source,
                ticker,
                cik,
                error_type,
                message[:2000] if message else None,
                traceback_info[:5000] if traceback_info else None,
                request_url[:500] if request_url else None,
                response_code,
                extra_data[:2000] if extra_data else None
            ))
            conn.commit()
        finally:
            # Always release the connection — the previous version leaked
            # it whenever the INSERT or commit raised.
            conn.close()
        return True
    except Exception as e:
        logger.error("Failed to log to database: {}".format(str(e)))
        return False


def log_error(source, message, ticker=None, cik=None, error_type=None, exception=None, extra_data=None):
    """Log an error to both the file logger and the database.

    Args:
        source: subsystem name (e.g. 'cron', 'claude').
        message: human-readable description of the failure.
        ticker: optional ticker symbol for filtering.
        cik: optional SEC CIK identifier.
        error_type: stored error category; defaults to the exception's
            class name when an exception is supplied.
        exception: the caught exception, if any; its traceback is stored.
        extra_data: optional free-form context string (truncated in DB).
    """
    tb_info = None
    if exception:
        # Format the traceback from the exception object itself.
        # traceback.format_exc() only works while still inside the
        # ``except`` block; if the caller passes the exception along after
        # leaving it, format_exc() returns the useless "NoneType: None".
        tb_info = ''.join(traceback.format_exception(
            type(exception), exception, exception.__traceback__))
        error_type = error_type or type(exception).__name__

    # Log to file
    log_msg = "{} | {} | {}".format(source, ticker or 'N/A', message)
    logger.error(log_msg)

    # Log to database
    log_to_db(
        level='ERROR',
        source=source,
        message=message,
        ticker=ticker,
        cik=cik,
        error_type=error_type,
        traceback_info=tb_info,
        extra_data=extra_data
    )


def log_warning(source, message, ticker=None, cik=None, extra_data=None):
    """Record a warning in both the rotating log file and the database."""
    logger.warning("{} | {} | {}".format(source, ticker or 'N/A', message))
    log_to_db(
        level='WARNING',
        source=source,
        message=message,
        ticker=ticker,
        cik=cik,
        extra_data=extra_data,
    )


def log_info(source, message, ticker=None, cik=None, extra_data=None):
    """Record an informational message in the log file and the database."""
    logger.info("{} | {} | {}".format(source, ticker or 'N/A', message))
    log_to_db(
        level='INFO',
        source=source,
        message=message,
        ticker=ticker,
        cik=cik,
        extra_data=extra_data,
    )


def log_debug(source, message, ticker=None):
    """Write a debug message to the log file only (never the database)."""
    logger.debug("{} | {} | {}".format(source, ticker or 'N/A', message))


def log_api_error(source, url, response_code, message, ticker=None, cik=None):
    """Record a failed HTTP call, keeping the URL and status code.

    Stored in the database with error_type 'APIError'.
    """
    line = "{} | {} | HTTP {} | {} | {}".format(
        source, ticker or 'N/A', response_code, url, message)
    logger.error(line)

    log_to_db(
        level='ERROR',
        source=source,
        message=message,
        ticker=ticker,
        cik=cik,
        error_type='APIError',
        request_url=url,
        response_code=response_code,
    )


def log_claude_error(ticker, error_message, prompt_snippet=None):
    """Record a Claude API failure, keeping up to 500 chars of the prompt."""
    logger.error("claude | {} | {}".format(ticker, error_message))

    snippet = prompt_snippet[:500] if prompt_snippet else None
    log_to_db(
        level='ERROR',
        source='claude',
        message=error_message,
        ticker=ticker,
        error_type='ClaudeError',
        extra_data=snippet,
    )


def log_cron_start(run_id, ticker):
    """Record the start of a cron run in the us_cron_log table.

    Inserts a 'running' row keyed by run_id; log_cron_end() later updates
    the same row.

    Returns:
        True if the row was written, False on any failure (logged, not
        raised, so a broken DB never aborts the cron job itself).
    """
    try:
        conn = _get_db_conn()
        try:
            cursor = conn.cursor()
            cursor.execute('''
                INSERT INTO us_cron_log (run_id, started_at, status, ticker_processed)
                VALUES (?, ?, 'running', ?)
            ''', (run_id, datetime.now().isoformat(), ticker))
            conn.commit()
        finally:
            # Close even when the INSERT fails — previously the connection
            # leaked on the exception path.
            conn.close()

        log_info('cron', 'Started processing {}'.format(ticker), ticker=ticker)
        return True
    except Exception as e:
        logger.error("Failed to log cron start: {}".format(str(e)))
        return False


def log_cron_end(run_id, success=True, error_message=None):
    """Mark a cron run (created by log_cron_start) as finished.

    Args:
        run_id: identifier of the row inserted by log_cron_start().
        success: whether the run completed normally.
        error_message: failure detail stored when success is False.

    Returns:
        True if the update was issued, False on any failure (logged, not
        raised).
    """
    try:
        conn = _get_db_conn()
        try:
            cursor = conn.cursor()
            cursor.execute('''
                UPDATE us_cron_log
                SET ended_at = ?, status = ?, success = ?, failed = ?, error_message = ?
                WHERE run_id = ?
            ''', (
                datetime.now().isoformat(),
                'completed' if success else 'failed',
                1 if success else 0,
                0 if success else 1,
                error_message,
                run_id
            ))
            conn.commit()
        finally:
            # Close even if the UPDATE fails — previously the connection
            # leaked on the exception path.
            conn.close()
        return True
    except Exception as e:
        logger.error("Failed to log cron end: {}".format(str(e)))
        return False


def get_recent_errors(limit=100, level=None, source=None, hours=24):
    """Fetch recent rows from us_error_log, newest first.

    Args:
        limit: maximum number of rows returned.
        level: optional exact level filter ('ERROR', 'WARNING', ...).
        source: optional exact source filter.
        hours: look-back window from now.

    Returns:
        A list of dicts (one per row), or [] on any failure.
    """
    try:
        conn = _get_db_conn()
        try:
            cursor = conn.cursor()

            since = (datetime.now() - timedelta(hours=hours)).isoformat()

            # Filters are appended as parameterized clauses — never
            # interpolate user values into the SQL string.
            query = "SELECT * FROM us_error_log WHERE timestamp > ?"
            params = [since]

            if level:
                query += " AND level = ?"
                params.append(level)
            if source:
                query += " AND source = ?"
                params.append(source)

            query += " ORDER BY timestamp DESC LIMIT ?"
            params.append(limit)

            cursor.execute(query, params)
            rows = cursor.fetchall()
        finally:
            # Close on every path — previously the connection leaked when
            # the query raised.
            conn.close()

        return [dict(row) for row in rows]
    except Exception as e:
        logger.error("Failed to get recent errors: {}".format(str(e)))
        return []


def get_error_stats(hours=24):
    """Aggregate error counts from us_error_log over a look-back window.

    Args:
        hours: look-back window from now.

    Returns:
        A dict with keys 'total', 'by_level', 'by_source', 'by_type' and
        'period_hours', or {} on any failure.
    """
    try:
        conn = _get_db_conn()
        try:
            cursor = conn.cursor()

            since = (datetime.now() - timedelta(hours=hours)).isoformat()

            # Count by level
            cursor.execute('''
                SELECT level, COUNT(*) as count
                FROM us_error_log
                WHERE timestamp > ?
                GROUP BY level
            ''', (since,))
            by_level = {row['level']: row['count'] for row in cursor.fetchall()}

            # Count by source
            cursor.execute('''
                SELECT source, COUNT(*) as count
                FROM us_error_log
                WHERE timestamp > ?
                GROUP BY source
            ''', (since,))
            by_source = {row['source']: row['count'] for row in cursor.fetchall()}

            # Count by error type (NULL types are excluded)
            cursor.execute('''
                SELECT error_type, COUNT(*) as count
                FROM us_error_log
                WHERE timestamp > ? AND error_type IS NOT NULL
                GROUP BY error_type
            ''', (since,))
            by_type = {row['error_type']: row['count'] for row in cursor.fetchall()}

            # Total count
            cursor.execute('''
                SELECT COUNT(*) as total FROM us_error_log WHERE timestamp > ?
            ''', (since,))
            total = cursor.fetchone()['total']
        finally:
            # Close on every path — previously the connection leaked when
            # any of the queries raised.
            conn.close()

        return {
            'total': total,
            'by_level': by_level,
            'by_source': by_source,
            'by_type': by_type,
            'period_hours': hours
        }
    except Exception as e:
        logger.error("Failed to get error stats: {}".format(str(e)))
        return {}


def cleanup_old_logs(days=30):
    """Delete us_error_log and us_cron_log rows older than *days* days.

    Args:
        days: retention window; rows with timestamps before now - days
            are removed.

    Returns:
        A dict {'deleted_errors': n, 'deleted_cron': m}, or {} on failure.
    """
    try:
        conn = _get_db_conn()
        try:
            cursor = conn.cursor()

            cutoff = (datetime.now() - timedelta(days=days)).isoformat()

            cursor.execute('DELETE FROM us_error_log WHERE timestamp < ?', (cutoff,))
            deleted_errors = cursor.rowcount

            cursor.execute('DELETE FROM us_cron_log WHERE started_at < ?', (cutoff,))
            deleted_cron = cursor.rowcount

            conn.commit()
        finally:
            # Close on every path — previously the connection leaked when
            # a DELETE raised.
            conn.close()

        log_info('cleanup', 'Deleted {} error logs and {} cron logs older than {} days'.format(
            deleted_errors, deleted_cron, days))

        return {'deleted_errors': deleted_errors, 'deleted_cron': deleted_cron}
    except Exception as e:
        logger.error("Failed to cleanup old logs: {}".format(str(e)))
        return {}


# Manual smoke test: run this module directly to verify the logger wiring.
if __name__ == '__main__':
    for banner_line in (
        "US Stock Analysis Error Logger",
        "Log file: {}".format(LOG_FILE),
        "Database: {}".format(DB_PATH),
    ):
        print(banner_line)

    # Write one INFO row, then read back the most recent errors.
    log_info('test', 'Logger initialized successfully')
    print("\nRecent errors:")
    for entry in get_recent_errors(limit=5):
        print("  {} | {} | {}".format(entry['timestamp'], entry['level'], entry['message'][:50]))
