import collections
from typing import List, Dict, Any

class ConvergenceEngine:
    """
    The ConvergenceEngine facilitates multi-model consensus on complex decisions
    for Queen AIVA, ensuring robust and unified intelligence.

    Each participating model submits a decision plus a confidence score. The
    engine tallies agreement per decision, filters by a minimum-agreement
    threshold, and resolves ties first by average confidence and then
    deterministically by first-encountered order (dict insertion order
    combined with Python's stable sort).
    """

    def __init__(self, min_agreement_threshold: int = 3):
        """
        Initializes the ConvergenceEngine with a specified minimum agreement threshold.

        Args:
            min_agreement_threshold (int): The minimum number of models that must
                                           agree on a decision for it to be considered
                                           a potential consensus candidate.

        Raises:
            ValueError: If min_agreement_threshold is less than 1.
        """
        if min_agreement_threshold < 1:
            raise ValueError("Minimum agreement threshold must be at least 1.")
        self.min_agreement_threshold = min_agreement_threshold

    def achieve_consensus(self, model_decisions: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Analyzes outputs from multiple models to achieve consensus on a complex decision.

        Args:
            model_decisions (List[Dict[str, Any]]): A list of dictionaries, where each
                dictionary represents a model's output and MUST contain:
                - 'model_id' (str): Unique identifier for the model.
                - 'decision' (Any): The decision proposed by the model (must be hashable).
                - 'confidence' (float): A confidence score (0.0 to 1.0) for the decision.
                Entries missing 'model_id' or 'decision' are skipped; non-numeric or
                out-of-range confidences are treated as 0.0.

        Returns:
            Dict[str, Any]: A comprehensive report detailing the consensus status,
                            the best decision (if found), agreement statistics,
                            disagreement analysis, and tie-breaking details.
        """
        if not model_decisions:
            return {
                "consensus_achieved": False,
                "best_decision": None,
                "agreement_count": 0,
                "average_confidence": 0.0,
                "supporting_models": [],
                "disagreement_analysis": {
                    "dissenting_decisions": [],
                    "total_dissenting_models": 0
                },
                "tie_broken": False,
                "tie_details": "No model decisions provided."
            }

        decision_tally = self._tally_decisions(model_decisions)

        # Consensus candidates are decisions backed by at least the threshold
        # number of models.
        consensus_candidates = [
            self._summarize(decision, data)
            for decision, data in decision_tally.items()
            if data["count"] >= self.min_agreement_threshold
        ]

        best, consensus_achieved, tie_broken, tie_details = self._select_best(consensus_candidates)
        dissenting_decisions, total_dissenting_models = self._analyze_dissent(decision_tally, best)

        return {
            "consensus_achieved": consensus_achieved,
            "best_decision": best["decision"] if best else None,
            "agreement_count": best["count"] if best else 0,
            "average_confidence": best["average_confidence"] if best else 0.0,
            "supporting_models": best["supporting_models"] if best else [],
            "disagreement_analysis": {
                "dissenting_decisions": dissenting_decisions,
                "total_dissenting_models": total_dissenting_models
            },
            "tie_broken": tie_broken,
            "tie_details": tie_details
        }

    @staticmethod
    def _tally_decisions(model_decisions: List[Dict[str, Any]]) -> Dict[Any, Dict[str, Any]]:
        """Group model outputs by decision, accumulating counts, confidence, and model ids."""
        tally: Dict[Any, Dict[str, Any]] = collections.defaultdict(lambda: {
            "count": 0,
            "total_confidence": 0.0,
            "model_ids": []
        })
        for model_output in model_decisions:
            model_id = model_output.get('model_id')
            decision = model_output.get('decision')
            confidence = model_output.get('confidence', 0.0)  # Default to 0 if not provided

            if model_id is None or decision is None:
                # Skip malformed entries; a production system would log a warning here.
                continue
            if not isinstance(confidence, (int, float)) or not (0.0 <= confidence <= 1.0):
                confidence = 0.0  # Sanitize non-numeric or out-of-range confidence

            entry = tally[decision]
            entry["count"] += 1
            entry["total_confidence"] += confidence
            entry["model_ids"].append(model_id)
        return tally

    @staticmethod
    def _summarize(decision: Any, data: Dict[str, Any]) -> Dict[str, Any]:
        """Build the per-decision summary dict shared by candidate and dissent reporting."""
        count = data["count"]
        return {
            "decision": decision,
            "count": count,
            "average_confidence": data["total_confidence"] / count if count > 0 else 0.0,
            "supporting_models": sorted(data["model_ids"])
        }

    @staticmethod
    def _select_best(candidates: List[Dict[str, Any]]):
        """
        Pick the winning candidate and describe how any ties were resolved.

        Returns:
            Tuple of (best_candidate_or_None, consensus_achieved, tie_broken, tie_details).
        """
        if not candidates:
            return None, False, False, "No consensus candidates met the minimum agreement threshold."

        # Rank by agreement count, then by average confidence (both descending).
        # Python's sort is stable, so a complete tie resolves deterministically
        # in favor of the first-encountered decision.
        ranked = sorted(candidates, key=lambda c: (c["count"], c["average_confidence"]), reverse=True)
        best = ranked[0]
        top_count = best["count"]

        count_tied = [c for c in ranked if c["count"] == top_count]
        if len(count_tied) == 1:
            return best, True, False, f"Clear consensus with {top_count} agreements."

        # Ties in count fall through to average confidence; check whether even
        # that failed to separate the leaders.
        fully_tied = [c for c in count_tied
                      if c["average_confidence"] == best["average_confidence"]]
        if len(fully_tied) > 1:
            return best, True, True, (
                f"Tie for {top_count} agreements broken by highest average confidence."
                " If still tied, selected deterministically (e.g., first encountered)."
            )
        return best, True, True, f"Tie for {top_count} agreements broken by highest average confidence."

    def _analyze_dissent(self, decision_tally: Dict[Any, Dict[str, Any]], best):
        """
        Summarize every decision that differs from the winner. When no consensus
        was reached (best is None), every decision counts as dissenting.

        Returns:
            Tuple of (dissenting_decision_summaries, total_dissenting_model_count).
        """
        dissenting_decisions = []
        total_dissenting_models = 0
        for decision, data in decision_tally.items():
            if best is None or decision != best["decision"]:
                dissenting_decisions.append(self._summarize(decision, data))
                total_dissenting_models += data["count"]

        # Sort for consistent output: strongest dissent first.
        dissenting_decisions.sort(key=lambda d: (d["count"], d["average_confidence"]), reverse=True)
        return dissenting_decisions, total_dissenting_models

# Demonstration and smoke tests for the ConvergenceEngine.
if __name__ == "__main__":
    demo_engine = ConvergenceEngine(min_agreement_threshold=3)

    print("\n--- Scenario 1: Clear Consensus (3 models agree) ---")
    report = demo_engine.achieve_consensus([
        {"model_id": "M1", "decision": "Option A", "confidence": 0.8},
        {"model_id": "M2", "decision": "Option B", "confidence": 0.7},
        {"model_id": "M3", "decision": "Option A", "confidence": 0.9},
        {"model_id": "M4", "decision": "Option C", "confidence": 0.6},
        {"model_id": "M5", "decision": "Option A", "confidence": 0.85},
    ])
    print(report)
    assert report["consensus_achieved"] is True
    assert report["best_decision"] == "Option A"
    assert report["agreement_count"] == 3
    assert "Clear consensus" in report["tie_details"]
    assert len(report["disagreement_analysis"]["dissenting_decisions"]) == 2

    print("\n--- Scenario 2: Tie-breaking by Confidence (4 models, 2 for A, 2 for B) ---")
    report = demo_engine.achieve_consensus([
        {"model_id": "M1", "decision": "Option A", "confidence": 0.9},
        {"model_id": "M2", "decision": "Option B", "confidence": 0.85},
        {"model_id": "M3", "decision": "Option A", "confidence": 0.7},
        {"model_id": "M4", "decision": "Option B", "confidence": 0.95},
        {"model_id": "M5", "decision": "Option C", "confidence": 0.99},  # lone dissenter with high confidence
        {"model_id": "M6", "decision": "Option B", "confidence": 0.92},  # pushes Option B to the consensus count
    ])
    print(report)
    assert report["consensus_achieved"] is True
    assert report["best_decision"] == "Option B"
    assert report["agreement_count"] == 3
    assert report["tie_broken"] is False  # Option B holds the top count outright
    assert "Clear consensus" in report["tie_details"]
    assert len(report["disagreement_analysis"]["dissenting_decisions"]) == 2

    print("\n--- Scenario 3: No Consensus (less than 3 models agree) ---")
    report = demo_engine.achieve_consensus([
        {"model_id": "M1", "decision": "Option A", "confidence": 0.8},
        {"model_id": "M2", "decision": "Option B", "confidence": 0.7},
        {"model_id": "M3", "decision": "Option C", "confidence": 0.9},
    ])
    print(report)
    assert report["consensus_achieved"] is False
    assert report["best_decision"] is None
    assert report["agreement_count"] == 0
    assert "No consensus candidates" in report["tie_details"]
    assert len(report["disagreement_analysis"]["dissenting_decisions"]) == 3  # every decision dissents

    print("\n--- Scenario 4: Tie in both count and average confidence (needs deterministic tie-break) ---")
    # Option X and Option Y each collect 3 votes with identical average
    # confidence, (0.9 + 0.8 + 0.85) / 3 = 0.85 — a perfect tie that the
    # engine must break deterministically via its stable ordering.
    report = demo_engine.achieve_consensus([
        {"model_id": "M1", "decision": "Option X", "confidence": 0.9},
        {"model_id": "M2", "decision": "Option Y", "confidence": 0.9},
        {"model_id": "M3", "decision": "Option X", "confidence": 0.8},
        {"model_id": "M4", "decision": "Option Y", "confidence": 0.8},
        {"model_id": "M5", "decision": "Option X", "confidence": 0.85},
        {"model_id": "M6", "decision": "Option Y", "confidence": 0.85},
    ])
    print(report)
    assert report["consensus_achieved"] is True
    assert report["best_decision"] in ["Option X", "Option Y"]  # consistent pick from the tied pair
    assert report["agreement_count"] == 3
    assert report["tie_broken"] is True
    assert "If still tied, selected deterministically" in report["tie_details"]

    print("\n--- Scenario 5: Empty input ---")
    report = demo_engine.achieve_consensus([])
    print(report)
    assert report["consensus_achieved"] is False
    assert report["best_decision"] is None
    assert report["agreement_count"] == 0
    assert "No model decisions provided." in report["tie_details"]

    print("\n--- Scenario 6: Single model, below threshold ---")
    report = demo_engine.achieve_consensus([
        {"model_id": "M1", "decision": "Option A", "confidence": 0.8},
    ])
    print(report)
    assert report["consensus_achieved"] is False
    assert report["best_decision"] is None
    assert report["agreement_count"] == 0
    assert "No consensus candidates" in report["tie_details"]
    assert len(report["disagreement_analysis"]["dissenting_decisions"]) == 1