# /mnt/e/genesis-system/AIVA/qwen-unified/qwen_client.py

import requests
import json

class QwenClient:
    """Minimal HTTP client for the Qwen text-generation API.

    Wraps the ``/completions`` and ``/chat/completions`` endpoints and
    supports both buffered and streamed responses.

    Bug fixed relative to the previous revision: ``generate_text`` and
    ``chat`` each contained a ``yield`` statement, which turned the whole
    function into a generator — so even non-streaming calls returned a
    generator object and the ``return`` value was lost in ``StopIteration``.
    Streaming is now delegated to a private helper generator, so the
    non-streaming path returns a plain ``str`` as documented.
    """

    def __init__(self, api_key, base_url='https://api.elest.io/v1/qwen', timeout=60):
        """Initialize the client.

        Args:
            api_key (str): Bearer token used to authenticate every request.
            base_url (str): Root URL of the Qwen API, without a trailing slash.
            timeout (float): Per-request timeout in seconds. Prevents a hung
                connection from blocking the caller forever (the previous
                revision sent requests with no timeout at all).
        """
        self.api_key = api_key
        self.base_url = base_url
        self.timeout = timeout
        self.headers = {
            'Authorization': f'Bearer {self.api_key}',
            'Content-Type': 'application/json'
        }

    @staticmethod
    def _iter_stream(response):
        """Yield each non-empty decoded chunk from a streaming HTTP response."""
        for chunk in response.iter_content(chunk_size=None, decode_unicode=True):
            if chunk:  # skip keep-alive / empty chunks
                yield chunk

    def _post(self, path, payload, stream):
        """POST ``payload`` as JSON to ``base_url + path``; return the response.

        Shared by ``generate_text`` and ``chat`` so request construction and
        error handling live in one place.

        Raises:
            Exception: If the request fails at the transport level or the
                server answers with a 4xx/5xx status. The generic type is
                kept deliberately — existing callers catch ``Exception``.
        """
        endpoint = f'{self.base_url}{path}'
        try:
            response = requests.post(
                endpoint,
                headers=self.headers,
                json=payload,  # requests serializes and sets the body for us
                stream=stream,
                timeout=self.timeout,
            )
            response.raise_for_status()  # surface 4xx/5xx as exceptions
            return response
        except requests.exceptions.RequestException as e:
            # Chain the cause so the original traceback stays visible.
            raise Exception(f'Error communicating with Qwen API: {e}') from e

    def generate_text(self, prompt, model='qwen-turbo', temperature=0.7, top_p=0.9, max_tokens=2048, stream=False):
        """Generates text from a given prompt using the Qwen API.

        Args:
            prompt (str): The prompt to feed the model.
            model (str): The Qwen model to use (default: qwen-turbo).
            temperature (float): Controls randomness (default: 0.7).
            top_p (float): Controls diversity (default: 0.9).
            max_tokens (int): Maximum number of tokens in the response (default: 2048).
            stream (bool): Whether to stream the response (default: False).

        Returns:
            str: The generated text, when ``stream`` is False.
            Iterator[str]: An iterator of text chunks, when ``stream`` is True.

        Raises:
            Exception: On any transport error or non-2xx response.
        """
        payload = {
            'model': model,
            'prompt': prompt,
            'temperature': temperature,
            'top_p': top_p,
            'max_tokens': max_tokens,
            'stream': stream,
        }
        response = self._post('/completions', payload, stream)
        if stream:
            # Return (not yield) the generator so this function itself
            # stays a normal function on the non-streaming path.
            return self._iter_stream(response)
        return response.json()['choices'][0]['text']

    def chat(self, messages, model='qwen-turbo', temperature=0.7, top_p=0.9, max_tokens=2048, stream=False):
        """Engages in a chat conversation with the Qwen API.

        Args:
            messages (list): A list of message dictionaries, where each dictionary has 'role' and 'content' keys.
                             e.g., [{'role': 'user', 'content': 'Hello'}, {'role': 'assistant', 'content': 'Hi'}]
            model (str): The Qwen model to use (default: qwen-turbo).
            temperature (float): Controls randomness (default: 0.7).
            top_p (float): Controls diversity (default: 0.9).
            max_tokens (int): Maximum number of tokens in the response (default: 2048).
            stream (bool): Whether to stream the response (default: False).

        Returns:
            str: The assistant's response, when ``stream`` is False.
            Iterator[str]: An iterator of text chunks, when ``stream`` is True.

        Raises:
            Exception: On any transport error or non-2xx response.
        """
        payload = {
            'model': model,
            'messages': messages,
            'temperature': temperature,
            'top_p': top_p,
            'max_tokens': max_tokens,
            'stream': stream,
        }
        response = self._post('/chat/completions', payload, stream)
        if stream:
            return self._iter_stream(response)
        return response.json()['choices'][0]['message']['content']


if __name__ == '__main__':
    # Demo driver: exercises text generation, chat, and streaming chat.
    # Substitute a real key before running against the live API.
    api_key = 'YOUR_API_KEY'
    client = QwenClient(api_key)

    # --- Demo 1: single-prompt text generation ---
    try:
        poem = client.generate_text('Write a short poem about the stars.')
        print(f'Generated Text: {poem}')
    except Exception as e:
        print(f'Error: {e}')

    # --- Demo 2: multi-turn chat ---
    conversation = [
        {'role': 'user', 'content': 'Hello, who are you?'},
        {'role': 'assistant', 'content': 'I am a large language model.'},
        {'role': 'user', 'content': 'What can you do?'},
    ]
    try:
        reply = client.chat(conversation)
        print(f'Chat Response: {reply}')
    except Exception as e:
        print(f'Error: {e}')

    # --- Demo 3: streaming chat, printed chunk by chunk ---
    story_request = [
        {'role': 'user', 'content': 'Tell me a long story about a brave knight.'},
    ]
    try:
        for piece in client.chat(story_request, stream=True):
            print(piece, end='', flush=True)
        print()
    except Exception as e:
        print(f'Error: {e}')
