"""
Comprehensive Data Processing Module

This module provides utilities for processing, transforming, and analyzing
large datasets with a focus on performance and scalability.
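
A minimal usage sketch (illustrative only; every name below is defined in this
module):

    >>> from datetime import datetime
    >>> processor = DataProcessor(batch_size=100)
    >>> processor.add_data_point(
    ...     DataPoint(id=1, timestamp=datetime.now(), value=3.5, tags=['sample'])
    ... )
    >>> processor.calculate_statistics()['count']
    1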
"""

import json
import csv
from typing import Any, Dict, List, Tuple
from dataclasses import dataclass, field
from datetime import datetime
import logging

logger = logging.getLogger(__name__)


@dataclass
class DataPoint:
    """Represents a single data point with metadata."""
    id: int
    timestamp: datetime
    value: float
    tags: List[str] = field(default_factory=list)
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Convert data point to dictionary."""
        return {
            'id': self.id,
            'timestamp': self.timestamp.isoformat(),
            'value': self.value,
            'tags': self.tags,
            'metadata': self.metadata
        }


class DataProcessor:
    """Main processor for handling data transformations."""

    def __init__(self, batch_size: int = 1000):
        """Initialize processor with batch size."""
        self.batch_size = batch_size
        self.data: List[DataPoint] = []
        self.processed_count = 0

    def add_data_point(self, point: DataPoint) -> None:
        """Add a single data point to the processor."""
        self.data.append(point)
        logger.debug(f"Added data point {point.id}")

    def add_batch(self, points: List[DataPoint]) -> None:
        """Add multiple data points at once."""
        self.data.extend(points)
        logger.info(f"Added batch of {len(points)} points")

    def filter_by_value(self, min_val: float, max_val: float) -> List[DataPoint]:
        """Filter data points by value range."""
        filtered = [p for p in self.data if min_val <= p.value <= max_val]
        logger.debug(f"Filtered to {len(filtered)} points")
        return filtered

    def filter_by_tag(self, tag: str) -> List[DataPoint]:
        """Filter data points by tag."""
        filtered = [p for p in self.data if tag in p.tags]
        return filtered

    def sort_by_timestamp(self, reverse: bool = False) -> List[DataPoint]:
        """Sort data points by timestamp."""
        return sorted(self.data, key=lambda p: p.timestamp, reverse=reverse)

    def sort_by_value(self, reverse: bool = False) -> List[DataPoint]:
        """Sort data points by value."""
        return sorted(self.data, key=lambda p: p.value, reverse=reverse)

    def calculate_statistics(self) -> Dict[str, float]:
        """Calculate basic statistics for values."""
        if not self.data:
            return {}
        values = [p.value for p in self.data]
        return {
            'mean': sum(values) / len(values),
            'min': min(values),
            'max': max(values),
            'count': len(values),
            'sum': sum(values)
        }

    def group_by_tag(self) -> Dict[str, List[DataPoint]]:
        """Group data points by tag; a point with several tags appears in each group."""
        groups: Dict[str, List[DataPoint]] = {}
        for point in self.data:
            for tag in point.tags:
                groups.setdefault(tag, []).append(point)
        return groups

    def export_to_json(self, filename: str) -> None:
        """Export data to JSON file."""
        data_dicts = [p.to_dict() for p in self.data]
        with open(filename, 'w') as f:
            json.dump(data_dicts, f, indent=2)
        logger.info(f"Exported {len(self.data)} points to {filename}")

    def export_to_csv(self, filename: str) -> None:
        """Export data to CSV file."""
        with open(filename, 'w', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=['id', 'timestamp', 'value', 'tags'])
            writer.writeheader()
            for point in self.data:
                writer.writerow({
                    'id': point.id,
                    # ISO 8601 keeps the CSV consistent with the JSON export.
                    'timestamp': point.timestamp.isoformat(),
                    'value': point.value,
                    'tags': ','.join(point.tags)
                })
        logger.info(f"Exported {len(self.data)} points to {filename}")


class AdvancedAnalyzer:
    """Advanced analysis operations on processed data."""

    def __init__(self, processor: DataProcessor):
        """Initialize analyzer with a processor instance."""
        self.processor = processor

    def find_outliers(self, std_dev_threshold: float = 2.0) -> List[DataPoint]:
        """Identify data points whose value deviates from the mean by more than
        std_dev_threshold (population) standard deviations."""
        values = [p.value for p in self.processor.data]
        if not values:
            return []

        mean = self.processor.calculate_statistics()['mean']
        variance = sum((v - mean) ** 2 for v in values) / len(values)
        std_dev = variance ** 0.5

        outliers = [p for p in self.processor.data
                    if abs(p.value - mean) > std_dev_threshold * std_dev]
        return outliers

    def detect_trends(self, window_size: int = 10) -> List[Tuple[int, str]]:
        """Detect monotonic uptrends or downtrends over sliding windows.

        Returns (start_index, 'uptrend' | 'downtrend') tuples, where the index
        refers to the timestamp-sorted data.
        """
        sorted_data = self.processor.sort_by_timestamp()
        trends = []

        # Slide a window of window_size points; +1 so the final window is included.
        for i in range(len(sorted_data) - window_size + 1):
            window = sorted_data[i:i + window_size]
            values = [p.value for p in window]

            if all(values[j] <= values[j + 1] for j in range(len(values) - 1)):
                trends.append((i, 'uptrend'))
            elif all(values[j] >= values[j + 1] for j in range(len(values) - 1)):
                trends.append((i, 'downtrend'))

        return trends

    def correlate_tags(self) -> Dict[Tuple[str, str], float]:
        """Calculate pairwise tag correlation as the Jaccard similarity of the
        sets of data-point ids carrying each tag."""
        groups = self.processor.group_by_tag()
        correlations = {}
        tags = list(groups.keys())

        for i, tag1 in enumerate(tags):
            for tag2 in tags[i + 1:]:
                # DataPoint is an unhashable dataclass, so compare by id
                # rather than putting the points themselves into sets.
                ids1 = {p.id for p in groups[tag1]}
                ids2 = {p.id for p in groups[tag2]}
                shared = len(ids1 & ids2)
                total = len(ids1 | ids2)
                if total > 0:
                    correlations[(tag1, tag2)] = shared / total

        return correlations


def process_data_pipeline(input_file: str, output_file: str) -> DataProcessor:
    """Main pipeline for processing data from input to output."""
    processor = DataProcessor()

    try:
        with open(input_file, 'r') as f:
            data = json.load(f)

        for item in data:
            point = DataPoint(
                id=item['id'],
                timestamp=datetime.fromisoformat(item['timestamp']),
                value=item['value'],
                tags=item.get('tags', []),
                metadata=item.get('metadata', {})
            )
            processor.add_data_point(point)

        processor.export_to_json(output_file)
        logger.info("Pipeline completed successfully")

    except Exception as e:
        logger.error(f"Pipeline error: {e}")
        raise

    return processor
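
# Illustrative invocation of the pipeline; the file paths are placeholders,
# not files shipped with this module:
#
#     processor = process_data_pipeline('input.json', 'output.json')
#     print(processor.calculate_statistics())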


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    processor = DataProcessor(batch_size=500)

    sample_points = [
        DataPoint(
            id=i,
            timestamp=datetime.now(),
            value=float(i * 1.5),
            tags=['sample', 'test'] if i % 2 == 0 else ['production'],
            metadata={'source': 'test', 'batch': i // 100}
        )
        for i in range(1000)
    ]

    processor.add_batch(sample_points)

    stats = processor.calculate_statistics()
    print(f"Statistics: {stats}")

    analyzer = AdvancedAnalyzer(processor)
    outliers = analyzer.find_outliers()
    print(f"Found {len(outliers)} outliers")
