Smart Conversation Management

Effective conversation management is crucial for maintaining context, optimizing token usage, and providing seamless multi-turn interactions with AI models.

Conversation History Management

from openai import OpenAI

class ConversationManager:
    def __init__(self, api_key, base_url, system_prompt=None):
        self.client = OpenAI(api_key=api_key, base_url=base_url)
        self.messages = []
        
        if system_prompt:
            self.messages.append({
                "role": "system", 
                "content": system_prompt
            })
    
    def add_user_message(self, content):
        """Add user message"""
        self.messages.append({
            "role": "user",
            "content": content
        })
    
    def get_ai_response(self, model="gpt-3.5-turbo", **kwargs):
        """Get AI response and add to conversation history"""
        response = self.client.chat.completions.create(
            model=model,
            messages=self.messages,
            **kwargs
        )
        
        ai_message = response.choices[0].message.content
        self.messages.append({
            "role": "assistant",
            "content": ai_message
        })
        
        return ai_message
    
    def clear_history(self, keep_system=True):
        """Clear conversation history"""
        if keep_system and self.messages and self.messages[0]["role"] == "system":
            self.messages = [self.messages[0]]
        else:
            self.messages = []
    
    def get_token_count(self):
        """Estimate token usage"""
        total_chars = sum(len(msg["content"]) for msg in self.messages)
        return total_chars // 4  # Rough estimate, 1 token ≈ 4 characters

# Usage example
conv = ConversationManager(
    api_key="your-api-key",
    base_url="https://ai.machinefi.com/v1",
    system_prompt="You are a Python programming assistant"
)

# Multi-turn conversation
conv.add_user_message("How to read CSV files?")
response1 = conv.get_ai_response()
print(f"AI: {response1}")

conv.add_user_message("What if the CSV file is very large?")
response2 = conv.get_ai_response()
print(f"AI: {response2}")

print(f"Current estimated token usage: {conv.get_token_count()}")

Context Window Management
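
Every model has a fixed context window, so a conversation that grows without bound will eventually be rejected or silently truncated. A simple strategy is to trim the oldest non-system messages before each request once the estimated token count exceeds a budget. The sketch below builds on the ConversationManager defined above; the TrimmingConversationManager name, the max_tokens=3000 default, and the drop-oldest strategy are illustrative assumptions rather than fixed requirements.

class TrimmingConversationManager(ConversationManager):
    """ConversationManager variant that keeps history within a token budget (illustrative subclass)."""

    def __init__(self, api_key, base_url, system_prompt=None, max_tokens=3000):
        super().__init__(api_key, base_url, system_prompt)
        # Assumed budget; set it well below the context window of the model you actually use
        self.max_tokens = max_tokens

    def trim_history(self):
        """Drop the oldest non-system messages until the estimate fits the budget"""
        while self.get_token_count() > self.max_tokens:
            # Keep the system prompt at index 0 (if present) and the most recent message
            oldest = 1 if self.messages and self.messages[0]["role"] == "system" else 0
            if len(self.messages) <= oldest + 1:
                break  # Only the system prompt and/or the latest message remain
            self.messages.pop(oldest)

    def get_ai_response(self, model="gpt-3.5-turbo", **kwargs):
        """Trim before each request so the payload stays within the budget"""
        self.trim_history()
        return super().get_ai_response(model=model, **kwargs)

Dropping from the oldest end preserves the system prompt and the most recent exchanges, which usually carry the context the model needs for its next reply.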

Advanced Conversation Features

Message Filtering and Summarization
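
Trimming discards information outright. An alternative is to compress older turns: ask the model to summarize them and replace them with a single summary message, keeping only the most recent exchanges verbatim. The helper below is a minimal sketch that works with the ConversationManager above; the summarize_history name, the keep_recent=4 default, and the summarization prompt are assumptions you would tune for your application.

def summarize_history(conv, model="gpt-3.5-turbo", keep_recent=4):
    """Compress older turns into one summary message, keeping the system prompt
    and the most recent `keep_recent` messages verbatim (assumed strategy)"""
    has_system = bool(conv.messages) and conv.messages[0]["role"] == "system"
    start = 1 if has_system else 0
    cutoff = len(conv.messages) - keep_recent
    if cutoff <= start:
        return  # Not enough old history to be worth summarizing

    # Flatten the older turns into plain text for the summarization request
    old_messages = conv.messages[start:cutoff]
    transcript = "\n".join(f"{m['role']}: {m['content']}" for m in old_messages)

    # Ask the model itself to condense the older turns
    summary_response = conv.client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": "Summarize the following conversation in a few "
                                          "sentences, preserving key facts and decisions."},
            {"role": "user", "content": transcript},
        ],
    )
    summary = summary_response.choices[0].message.content

    # Rebuild the history: system prompt + summary + the most recent messages
    conv.messages = (
        conv.messages[:start]
        + [{"role": "assistant", "content": f"Summary of earlier conversation: {summary}"}]
        + conv.messages[cutoff:]
    )

A simple policy is to call summarize_history(conv) whenever get_token_count() starts approaching your budget; the history stays short while the key facts from earlier in the session are retained.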

Best Practices

  1. System Prompts: Always use clear, specific system prompts to set context and behavior

  2. Token Management: Monitor and manage token usage to avoid hitting model limits

  3. History Preservation: Keep important context while trimming less relevant messages

  4. Error Recovery: Implement fallback strategies when conversations exceed limits

  5. State Persistence: Consider saving conversation state for session recovery (a minimal sketch follows this list)

  6. Memory Optimization: Use efficient data structures for large conversation histories
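
For point 5, persisting the message list is usually enough to restore a session later. The helpers below are a minimal sketch that serializes the history of the ConversationManager above to a JSON file; the save_conversation and load_conversation names and the file format are illustrative assumptions.

import json

def save_conversation(conv, path):
    """Write the message history to a JSON file (assumed persistence format)"""
    with open(path, "w", encoding="utf-8") as f:
        json.dump(conv.messages, f, ensure_ascii=False, indent=2)

def load_conversation(conv, path):
    """Restore a previously saved message history into an existing manager"""
    with open(path, "r", encoding="utf-8") as f:
        conv.messages = json.load(f)

# Save at the end of a session, reload before resuming it
save_conversation(conv, "conversation_state.json")
load_conversation(conv, "conversation_state.json")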