import datetime
import time
import random
import logging
import os

class PCDROrchestrator:
    """
    The PCDR (Probe, Comprehend, Document, Repeat) Orchestrator
    autonomously manages daily operational cycles.

    Each cycle: discovers (mock) targets, probes them for simulated health
    metrics, comprehends the results into a summary with anomalies and
    recommendations, and documents everything through the standard
    ``logging`` machinery (console + per-run log file).
    """

    def __init__(self, log_dir="pcdr_logs", log_file_prefix="pcdr_report"):
        """
        Initializes the PCDR Orchestrator.

        Args:
            log_dir (str): Directory to store documentation logs.
            log_file_prefix (str): Prefix for documentation log filenames.
        """
        self.log_dir = log_dir
        self.log_file_prefix = log_file_prefix
        self._setup_logging()
        self.cycle_count = 0
        # Seed inventory of mock targets; _discover_targets may grow this.
        self.known_targets = {
            "service_web_api": {"type": "API", "endpoint": "https://api.example.com"},
            "database_main": {"type": "DB", "endpoint": "tcp://db.example.com:5432"},
            "message_queue_prod": {"type": "Queue", "endpoint": "amqp://mq.example.com"},
        }
        # Tracks all targets ever discovered to simulate growth
        self.discovered_targets_history = set(self.known_targets.keys())

        self.logger.info("PCDR Orchestrator initialized.")

    def _setup_logging(self):
        """
        Sets up the logger for documentation.

        Creates the log directory if needed and attaches a console handler
        plus a timestamped file handler.

        NOTE(review): the logger name is global ("PCDROrchestrator"), so a
        second orchestrator instance in the same process will reuse the first
        instance's handlers (and log file) because of the ``handlers`` guard
        below — confirm whether multiple instances are a supported use case.
        """
        os.makedirs(self.log_dir, exist_ok=True)
        log_filename = f"{self.log_file_prefix}_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
        log_filepath = os.path.join(self.log_dir, log_filename)

        self.logger = logging.getLogger("PCDROrchestrator")
        self.logger.setLevel(logging.INFO)

        # Prevent adding multiple handlers if called more than once
        if not self.logger.handlers:
            formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

            # Console handler for real-time feedback
            ch = logging.StreamHandler()
            ch.setLevel(logging.INFO)
            ch.setFormatter(formatter)
            self.logger.addHandler(ch)

            # File handler for permanent documentation
            fh = logging.FileHandler(log_filepath)
            fh.setLevel(logging.INFO)
            fh.setFormatter(formatter)
            self.logger.addHandler(fh)
            self.logger.info(f"Documentation will be saved to: {log_filepath}")

    def _discover_targets(self):
        """
        Simulates auto-discovery of targets.

        Every third cycle a new mock target is "discovered", added to
        ``known_targets``, and recorded in ``discovered_targets_history``
        so it is never re-announced.

        Returns:
            list[str]: Names of all targets to probe this cycle.
        """
        current_targets = list(self.known_targets.keys())

        # Simulate discovering a new target every 3 cycles
        if self.cycle_count % 3 == 0 and self.cycle_count > 0:
            new_target_name = f"service_new_discovery_{self.cycle_count // 3}"
            if new_target_name not in self.discovered_targets_history:
                new_target_data = {
                    "type": random.choice(["API", "DB", "Cache", "Microservice"]),
                    "endpoint": f"http://{new_target_name}.example.com:8080"
                }
                self.known_targets[new_target_name] = new_target_data
                self.discovered_targets_history.add(new_target_name)
                self.logger.info(f"Discovered new target: {new_target_name} ({new_target_data['type']})")
                current_targets.append(new_target_name)

        self.logger.debug(f"Targets discovered for probing: {current_targets}")
        return current_targets

    def _probe_target(self, target_name):
        """
        Simulates probing a single target.

        Args:
            target_name (str): Key into ``known_targets``.

        Returns:
            dict: Probe record with target/type/endpoint/status/timestamp and
            a ``metrics`` dict whose values are None when not applicable
            (e.g. the target is unreachable, or the metric does not fit the
            target type).
        """
        target_info = self.known_targets.get(target_name, {"type": "Unknown", "endpoint": "N/A"})

        status_options = ["healthy", "degraded", "unreachable"]
        # Make 'unreachable' less common
        status = random.choices(status_options, weights=[0.8, 0.15, 0.05], k=1)[0]

        mock_data = {
            "latency_ms": random.randint(10, 500) if status != "unreachable" else None,
            "error_rate_percent": round(random.uniform(0, 5), 2) if status != "unreachable" else None,
            "cpu_util_percent": random.randint(10, 90) if target_info["type"] in ["API", "Microservice"] and status != "unreachable" else None,
            "db_connections": random.randint(5, 100) if target_info["type"] == "DB" and status != "unreachable" else None,
        }

        probe_result = {
            "target": target_name,
            "type": target_info["type"],
            "endpoint": target_info["endpoint"],
            "status": status,
            "timestamp": datetime.datetime.now().isoformat(),
            "metrics": mock_data
        }
        self.logger.debug(f"Probed {target_name}: {status}")
        return probe_result

    def _comprehend_findings(self, probe_results):
        """
        Simulates comprehending the probe results to identify issues and insights.

        Handles the "error" pseudo-status produced by run_cycle's exception
        handler; such entries carry no "metrics" key, so metric access is
        guarded throughout.

        Args:
            probe_results (list[dict]): Records from _probe_target, possibly
                mixed with ``{"target", "status": "error", "error"}`` entries.

        Returns:
            dict: Cycle number, timestamp, summary counters/averages,
            anomalies, and recommendations.
        """
        summary = {
            "total_targets_probed": len(probe_results),
            "healthy_count": 0,
            "degraded_count": 0,
            "unreachable_count": 0,
            "error_count": 0,  # probes that raised an exception in run_cycle
            "average_latency_ms": 0,
            "average_error_rate_percent": 0,
        }
        anomalies = []
        recommendations = []

        total_latency = 0
        total_error_rate = 0
        healthy_probes_count = 0

        for result in probe_results:
            # Tolerate unexpected statuses instead of raising KeyError
            # (the original code crashed on any status outside the summary keys).
            status_key = f"{result['status']}_count"
            summary[status_key] = summary.get(status_key, 0) + 1

            # Error entries from run_cycle have no "metrics" key.
            metrics = result.get("metrics") or {}

            if result["status"] == "healthy":
                healthy_probes_count += 1
                if metrics.get("latency_ms") is not None:
                    total_latency += metrics["latency_ms"]
                if metrics.get("error_rate_percent") is not None:
                    total_error_rate += metrics["error_rate_percent"]
            elif result["status"] == "degraded":
                anomalies.append(f"Target '{result['target']}' is DEGRADED. Metrics: {metrics}")
                recommendations.append(f"Investigate '{result['target']}' for performance issues.")
            elif result["status"] == "unreachable":
                anomalies.append(f"Target '{result['target']}' is UNREACHABLE. Immediate attention required.")
                recommendations.append(f"Urgent: Check network/service status for '{result['target']}'.")
            elif result["status"] == "error":
                # Probe itself failed (exception caught in run_cycle).
                anomalies.append(f"Target '{result['target']}' probe FAILED: {result.get('error', 'unknown error')}")
                recommendations.append(f"Check probe configuration/connectivity for '{result['target']}'.")

            if metrics.get("error_rate_percent") is not None and metrics["error_rate_percent"] > 2:
                anomalies.append(f"Target '{result['target']}' has high error rate: {metrics['error_rate_percent']}%")
                recommendations.append(f"Review recent deployments or traffic patterns for '{result['target']}'.")

        # Averages are computed over healthy probes only.
        if healthy_probes_count > 0:
            summary["average_latency_ms"] = round(total_latency / healthy_probes_count, 2)
            summary["average_error_rate_percent"] = round(total_error_rate / healthy_probes_count, 2)

        comprehension_output = {
            "cycle": self.cycle_count,
            "timestamp": datetime.datetime.now().isoformat(),
            "summary": summary,
            "anomalies_detected": anomalies if anomalies else ["No significant anomalies detected."],
            "recommendations": recommendations if recommendations else ["System operating normally. No specific recommendations."],
            "raw_probe_results_count": len(probe_results) # For brevity, not including all raw results in final doc
        }
        self.logger.debug(f"Comprehension complete for cycle {self.cycle_count}")
        return comprehension_output

    def _document_findings(self, comprehension_results):
        """
        Documents the comprehension results using the configured logger.
        This fulfills the auto-documentation requirement.

        Args:
            comprehension_results (dict): Output of _comprehend_findings.
        """
        self.logger.info("\n--- PCDR Cycle Documentation ---")
        self.logger.info(f"Cycle: {comprehension_results['cycle']}")
        self.logger.info(f"Timestamp: {comprehension_results['timestamp']}")
        self.logger.info("Summary:")
        for key, value in comprehension_results["summary"].items():
            self.logger.info(f"  {key}: {value}")
        self.logger.info("Anomalies Detected:")
        for anomaly in comprehension_results["anomalies_detected"]:
            self.logger.info(f"  - {anomaly}")
        self.logger.info("Recommendations:")
        for recommendation in comprehension_results["recommendations"]:
            self.logger.info(f"  - {recommendation}")
        self.logger.info("--- End of Documentation ---\n")

    def run_cycle(self):
        """
        Executes a single full PCDR cycle: Probe, Comprehend, Document.

        Returns:
            dict: The comprehension output for this cycle (for external
            use/testing).
        """
        self.cycle_count += 1
        self.logger.info(f"\n--- Starting PCDR Cycle {self.cycle_count} ---")

        # 1. Probe: Auto-discover targets and probe them
        targets_to_probe = self._discover_targets()
        all_probe_results = []
        for target in targets_to_probe:
            try:
                result = self._probe_target(target)
                all_probe_results.append(result)
            except Exception as e:
                # Record the failure as an "error" pseudo-probe so the
                # comprehension phase can still account for it.
                self.logger.error(f"Error probing target {target}: {e}")
                all_probe_results.append({"target": target, "status": "error", "error": str(e)})

        # 2. Comprehend: Analyze the findings
        comprehension = self._comprehend_findings(all_probe_results)

        # 3. Document: Save the insights
        self._document_findings(comprehension)

        self.logger.info(f"--- PCDR Cycle {self.cycle_count} Completed ---\n")
        return comprehension # Return for potential external use/testing

    def start(self, num_cycles=5, interval_seconds=5):
        """
        Starts the autonomous PCDR loop.

        Args:
            num_cycles (int): The number of cycles to run.
            interval_seconds (int): The delay between cycles in seconds.
        """
        self.logger.info(f"PCDR Orchestrator starting autonomous loop for {num_cycles} cycles, with {interval_seconds}s interval.")
        for i in range(num_cycles):
            self.run_cycle()
            if i < num_cycles - 1: # Don't sleep after the last cycle
                self.logger.info(f"Waiting {interval_seconds} seconds before next cycle...")
                time.sleep(interval_seconds)
        self.logger.info("PCDR Orchestrator autonomous loop finished.")

# Example of how to run this
if __name__ == "__main__":
    # If this script is re-run without a full interpreter reset (e.g. in an
    # interactive session), the shared "PCDROrchestrator" logger keeps its
    # old handlers, producing duplicate log entries. Clear them first if so:
    # for handler in logging.getLogger("PCDROrchestrator").handlers[:]:
    #     logging.getLogger("PCDROrchestrator").removeHandler(handler)
    #     handler.close()

    # Create a temporary directory for logs for demonstration.
    # exist_ok=True avoids the check-then-create race of the
    # os.path.exists() + os.makedirs() pattern.
    temp_log_dir = "temp_pcdr_logs"
    os.makedirs(temp_log_dir, exist_ok=True)

    orchestrator = PCDROrchestrator(log_dir=temp_log_dir)
    orchestrator.start(num_cycles=3, interval_seconds=2) # Run 3 cycles, 2 seconds apart

    # Optional: Clean up temporary log directory after demonstration
    # import shutil
    # print(f"\nReview logs in '{temp_log_dir}' before cleanup.")
    # # time.sleep(5) # Give time to review
    # shutil.rmtree(temp_log_dir)
    # print(f"Cleaned up '{temp_log_dir}'")
