diff --git a/ldai/client.py b/ldai/client.py
index e8b3c1f..8854b4b 100644
--- a/ldai/client.py
+++ b/ldai/client.py
@@ -125,6 +125,81 @@ def to_dict(self) -> dict:
         }
 
 
+@dataclass(frozen=True)
+class LDAIAgent:
+    """
+    Represents an AI agent configuration with instructions and model settings.
+
+    An agent is similar to an AIConfig but focuses on instructions rather than messages,
+    making it suitable for AI assistant/agent use cases.
+    """
+    enabled: Optional[bool] = None
+    model: Optional[ModelConfig] = None
+    provider: Optional[ProviderConfig] = None
+    instructions: Optional[str] = None
+    tracker: Optional[LDAIConfigTracker] = None
+
+    def to_dict(self) -> Dict[str, Any]:
+        """
+        Render the given agent as a dictionary object.
+        """
+        result: Dict[str, Any] = {
+            '_ldMeta': {
+                'enabled': self.enabled or False,
+            },
+            'model': self.model.to_dict() if self.model else None,
+            'provider': self.provider.to_dict() if self.provider else None,
+        }
+        if self.instructions is not None:
+            result['instructions'] = self.instructions
+        return result
+
+
+@dataclass(frozen=True)
+class LDAIAgentDefaults:
+    """
+    Default values for AI agent configurations.
+
+    Similar to LDAIAgent but without a tracker and with an optional enabled field,
+    used as fallback values when agent configurations are not available.
+    """
+    enabled: Optional[bool] = None
+    model: Optional[ModelConfig] = None
+    provider: Optional[ProviderConfig] = None
+    instructions: Optional[str] = None
+
+    def to_dict(self) -> Dict[str, Any]:
+        """
+        Render the given agent defaults as a dictionary object.
+        """
+        result: Dict[str, Any] = {
+            '_ldMeta': {
+                'enabled': self.enabled or False,
+            },
+            'model': self.model.to_dict() if self.model else None,
+            'provider': self.provider.to_dict() if self.provider else None,
+        }
+        if self.instructions is not None:
+            result['instructions'] = self.instructions
+        return result
+
+
+@dataclass
+class LDAIAgentConfig:
+    """
+    Configuration for an individual agent in batch requests.
+
+    Combines an agent key with its specific default configuration and variables.
+    """
+    key: str
+    default_value: LDAIAgentDefaults
+    variables: Optional[Dict[str, Any]] = None
+
+
+# Type alias for multiple agents
+LDAIAgents = Dict[str, LDAIAgent]
+
+
 class LDAIClient:
     """The LaunchDarkly AI SDK client object."""
 
@@ -147,13 +222,144 @@ def config(
         :param variables: Additional variables for the model configuration.
         :return: The value of the model configuration along with a tracker used for gathering metrics.
         """
-        variation = self._client.variation(key, context, default_value.to_dict())
+        model, provider, messages, instructions, tracker, enabled = self.__evaluate(key, context, default_value.to_dict(), variables)
+
+        config = AIConfig(
+            enabled=bool(enabled),
+            model=model,
+            messages=messages,
+            provider=provider,
+        )
+
+        return config, tracker
+
+    def agent(
+        self,
+        config: LDAIAgentConfig,
+        context: Context,
+    ) -> LDAIAgent:
+        """
+        Retrieve a single AI Config agent.
+
+        This method retrieves a single agent configuration with instructions
+        dynamically interpolated using the provided variables and context data.
+
+        Example::
+
+            agent = client.agent(LDAIAgentConfig(
+                key='research_agent',
+                default_value=LDAIAgentDefaults(
+                    enabled=True,
+                    model=ModelConfig('gpt-4'),
+                    instructions="You are a research assistant specializing in {{topic}}."
+                ),
+                variables={'topic': 'climate change'}
+            ), context)
+
+            if agent.enabled:
+                research_result = agent.instructions  # Interpolated instructions
+                agent.tracker.track_success()
+
+        :param config: The agent configuration to use.
+        :param context: The context to evaluate the agent configuration in.
+        :return: Configured LDAIAgent instance.
+        """
+        # Track single agent usage
+        self._client.track(
+            "$ld:ai:agent:function:single",
+            context,
+            config.key,
+            1
+        )
+
+        return self.__evaluate_agent(config.key, context, config.default_value, config.variables)
+
+    def agents(
+        self,
+        agent_configs: List[LDAIAgentConfig],
+        context: Context,
+    ) -> LDAIAgents:
+        """
+        Retrieve multiple AI agent configurations.
+
+        This method allows you to retrieve multiple agent configurations in a single call,
+        with each agent having its own default configuration and variables for instruction
+        interpolation.
+
+        Example::
+
+            agents = client.agents([
+                LDAIAgentConfig(
+                    key='research_agent',
+                    default_value=LDAIAgentDefaults(
+                        enabled=True,
+                        instructions='You are a research assistant.'
+                    ),
+                    variables={'topic': 'climate change'}
+                ),
+                LDAIAgentConfig(
+                    key='writing_agent',
+                    default_value=LDAIAgentDefaults(
+                        enabled=True,
+                        instructions='You are a writing assistant.'
+                    ),
+                    variables={'style': 'academic'}
+                )
+            ], context)
+
+            research_result = agents["research_agent"].instructions
+            agents["research_agent"].tracker.track_success()
+
+        :param agent_configs: List of agent configurations to retrieve.
+        :param context: The context to evaluate the agent configurations in.
+        :return: Dictionary mapping agent keys to their LDAIAgent configurations.
+        """
+        # Track multiple agents usage
+        agent_count = len(agent_configs)
+        self._client.track(
+            "$ld:ai:agent:function:multiple",
+            context,
+            agent_count,
+            agent_count
+        )
+
+        result: LDAIAgents = {}
+
+        for config in agent_configs:
+            agent = self.__evaluate_agent(
+                config.key,
+                context,
+                config.default_value,
+                config.variables
+            )
+            result[config.key] = agent
+
+        return result
+
+    def __evaluate(
+        self,
+        key: str,
+        context: Context,
+        default_dict: Dict[str, Any],
+        variables: Optional[Dict[str, Any]] = None,
+    ) -> Tuple[Optional[ModelConfig], Optional[ProviderConfig], Optional[List[LDMessage]], Optional[str], LDAIConfigTracker, bool]:
+        """
+        Internal method to evaluate a configuration and extract components.
+
+        :param key: The configuration key.
+        :param context: The evaluation context.
+        :param default_dict: Default configuration as a dictionary.
+        :param variables: Variables for interpolation.
+        :return: Tuple of (model, provider, messages, instructions, tracker, enabled).
+ """ + variation = self._client.variation(key, context, default_dict) all_variables = {} if variables: all_variables.update(variables) all_variables['ldctx'] = context.to_dict() + # Extract messages messages = None if 'messages' in variation and isinstance(variation['messages'], list) and all( isinstance(entry, dict) for entry in variation['messages'] @@ -168,11 +374,18 @@ def config( for entry in variation['messages'] ] + # Extract instructions + instructions = None + if 'instructions' in variation and isinstance(variation['instructions'], str): + instructions = self.__interpolate_template(variation['instructions'], all_variables) + + # Extract provider config provider_config = None if 'provider' in variation and isinstance(variation['provider'], dict): provider = variation['provider'] provider_config = ProviderConfig(provider.get('name', '')) + # Extract model config model = None if 'model' in variation and isinstance(variation['model'], dict): parameters = variation['model'].get('parameters', None) @@ -183,6 +396,7 @@ def config( custom=custom ) + # Create tracker tracker = LDAIConfigTracker( self._client, variation.get('_ldMeta', {}).get('variationKey', ''), @@ -192,21 +406,46 @@ def config( ) enabled = variation.get('_ldMeta', {}).get('enabled', False) - config = AIConfig( - enabled=bool(enabled), - model=model, - messages=messages, - provider=provider_config, + + return model, provider_config, messages, instructions, tracker, enabled + + def __evaluate_agent( + self, + key: str, + context: Context, + default_value: LDAIAgentDefaults, + variables: Optional[Dict[str, Any]] = None, + ) -> LDAIAgent: + """ + Internal method to evaluate an agent configuration. + + :param key: The agent configuration key. + :param context: The evaluation context. + :param default_value: Default agent values. + :param variables: Variables for interpolation. + :return: Configured LDAIAgent instance. + """ + model, provider, messages, instructions, tracker, enabled = self.__evaluate( + key, context, default_value.to_dict(), variables ) - return config, tracker + # For agents, prioritize instructions over messages + final_instructions = instructions if instructions is not None else default_value.instructions + + return LDAIAgent( + enabled=bool(enabled) if enabled is not None else default_value.enabled, + model=model or default_value.model, + provider=provider or default_value.provider, + instructions=final_instructions, + tracker=tracker, + ) def __interpolate_template(self, template: str, variables: Dict[str, Any]) -> str: """ - Interpolate the template with the given variables. + Interpolate the template with the given variables using Mustache format. - :template: The template string. - :variables: The variables to interpolate into the template. + :param template: The template string. + :param variables: The variables to interpolate into the template. :return: The interpolated string. 
""" return chevron.render(template, variables) diff --git a/ldai/testing/test_agents.py b/ldai/testing/test_agents.py new file mode 100644 index 0000000..b2e80c0 --- /dev/null +++ b/ldai/testing/test_agents.py @@ -0,0 +1,342 @@ +import pytest +from ldclient import Config, Context, LDClient +from ldclient.integrations.test_data import TestData + +from ldai.client import (LDAIAgentConfig, LDAIAgentDefaults, LDAIClient, + ModelConfig, ProviderConfig) + + +@pytest.fixture +def td() -> TestData: + td = TestData.data_source() + + # Single agent with instructions + td.update( + td.flag('customer-support-agent') + .variations( + { + 'model': {'name': 'gpt-4', 'parameters': {'temperature': 0.3, 'maxTokens': 2048}}, + 'provider': {'name': 'openai'}, + 'instructions': 'You are a helpful customer support agent for {{company_name}}. Always be polite and professional.', + '_ldMeta': {'enabled': True, 'variationKey': 'agent-v1', 'version': 1, 'mode': 'agent'}, + } + ) + .variation_for_all(0) + ) + + # Agent with context interpolation + td.update( + td.flag('personalized-agent') + .variations( + { + 'model': {'name': 'claude-3', 'parameters': {'temperature': 0.5}}, + 'instructions': 'Hello {{ldctx.name}}! I am your personal assistant. Your user key is {{ldctx.key}}.', + '_ldMeta': {'enabled': True, 'variationKey': 'personal-v1', 'version': 2, 'mode': 'agent'}, + } + ) + .variation_for_all(0) + ) + + # Agent with multi-context interpolation + td.update( + td.flag('multi-context-agent') + .variations( + { + 'model': {'name': 'gpt-3.5-turbo'}, + 'instructions': 'Welcome {{ldctx.user.name}} from {{ldctx.org.name}}! Your organization tier is {{ldctx.org.tier}}.', + '_ldMeta': {'enabled': True, 'variationKey': 'multi-v1', 'version': 1, 'mode': 'agent'}, + } + ) + .variation_for_all(0) + ) + + # Disabled agent + td.update( + td.flag('disabled-agent') + .variations( + { + 'model': {'name': 'gpt-4'}, + 'instructions': 'This agent is disabled.', + '_ldMeta': {'enabled': False, 'variationKey': 'disabled-v1', 'version': 1, 'mode': 'agent'}, + } + ) + .variation_for_all(0) + ) + + # Agent with minimal metadata + td.update( + td.flag('minimal-agent') + .variations( + { + 'instructions': 'Minimal agent configuration.', + '_ldMeta': {'enabled': True}, + } + ) + .variation_for_all(0) + ) + + # Sales assistant agent + td.update( + td.flag('sales-assistant') + .variations( + { + 'model': {'name': 'gpt-4', 'parameters': {'temperature': 0.7}}, + 'provider': {'name': 'openai'}, + 'instructions': 'You are a sales assistant for {{company_name}}. Help customers find the right products.', + '_ldMeta': {'enabled': True, 'variationKey': 'sales-v1', 'version': 1, 'mode': 'agent'}, + } + ) + .variation_for_all(0) + ) + + # Research agent for testing single agent method + td.update( + td.flag('research-agent') + .variations( + { + 'model': {'name': 'gpt-4', 'parameters': {'temperature': 0.2, 'maxTokens': 3000}}, + 'provider': {'name': 'openai'}, + 'instructions': 'You are a research assistant specializing in {{topic}}. 
Your expertise level should match {{ldctx.expertise}}.', + '_ldMeta': {'enabled': True, 'variationKey': 'research-v1', 'version': 1, 'mode': 'agent'}, + } + ) + .variation_for_all(0) + ) + + return td + + +@pytest.fixture +def client(td: TestData) -> LDClient: + config = Config('sdk-key', update_processor_class=td, send_events=False) + return LDClient(config=config) + + +@pytest.fixture +def ldai_client(client: LDClient) -> LDAIClient: + return LDAIClient(client) + + +def test_single_agent_method(ldai_client: LDAIClient): + """Test the single agent() method functionality.""" + context = Context.builder('user-key').set('expertise', 'advanced').build() + config = LDAIAgentConfig( + key='research-agent', + default_value=LDAIAgentDefaults( + enabled=False, + model=ModelConfig('fallback-model'), + instructions="Default instructions" + ), + variables={'topic': 'quantum computing'} + ) + + agent = ldai_client.agent(config, context) + + assert agent.enabled is True + assert agent.model is not None + assert agent.model.name == 'gpt-4' + assert agent.model.get_parameter('temperature') == 0.2 + assert agent.model.get_parameter('maxTokens') == 3000 + assert agent.provider is not None + assert agent.provider.name == 'openai' + assert agent.instructions == 'You are a research assistant specializing in quantum computing. Your expertise level should match advanced.' + assert agent.tracker is not None + + +def test_single_agent_with_defaults(ldai_client: LDAIClient): + """Test single agent method with non-existent flag using defaults.""" + context = Context.create('user-key') + config = LDAIAgentConfig( + key='non-existent-agent', + default_value=LDAIAgentDefaults( + enabled=True, + model=ModelConfig('default-model', parameters={'temp': 0.8}), + provider=ProviderConfig('default-provider'), + instructions="You are a default assistant for {{task}}." + ), + variables={'task': 'general assistance'} + ) + + agent = ldai_client.agent(config, context) + + assert agent.enabled is True + assert agent.model is not None and agent.model.name == 'default-model' + assert agent.model is not None and agent.model.get_parameter('temp') == 0.8 + assert agent.provider is not None and agent.provider.name == 'default-provider' + assert agent.instructions == "You are a default assistant for general assistance." 
+    assert agent.tracker is not None
+
+
+def test_agents_method_with_configs(ldai_client: LDAIClient):
+    """Test the new agents() method with LDAIAgentConfig objects."""
+    context = Context.create('user-key')
+
+    agent_configs = [
+        LDAIAgentConfig(
+            key='customer-support-agent',
+            default_value=LDAIAgentDefaults(
+                enabled=False,
+                model=ModelConfig('fallback-model'),
+                instructions="Default support"
+            ),
+            variables={'company_name': 'Acme Corp'}
+        ),
+        LDAIAgentConfig(
+            key='sales-assistant',
+            default_value=LDAIAgentDefaults(
+                enabled=False,
+                model=ModelConfig('fallback-model'),
+                instructions="Default sales"
+            ),
+            variables={'company_name': 'Acme Corp'}
+        )
+    ]
+
+    agents = ldai_client.agents(agent_configs, context)
+
+    assert len(agents) == 2
+    assert 'customer-support-agent' in agents
+    assert 'sales-assistant' in agents
+
+    support_agent = agents['customer-support-agent']
+    assert support_agent.enabled is True
+    assert support_agent.instructions is not None and 'Acme Corp' in support_agent.instructions
+
+    sales_agent = agents['sales-assistant']
+    assert sales_agent.enabled is True
+    assert sales_agent.instructions is not None and 'Acme Corp' in sales_agent.instructions
+    assert sales_agent.model is not None and sales_agent.model.get_parameter('temperature') == 0.7
+
+
+def test_agents_method_different_variables_per_agent(ldai_client: LDAIClient):
+    """Test agents method with different variables for each agent."""
+    context = Context.builder('user-key').name('Alice').build()
+
+    agent_configs = [
+        LDAIAgentConfig(
+            key='personalized-agent',
+            default_value=LDAIAgentDefaults(
+                enabled=True,
+                instructions="Default personal"
+            ),
+            variables={}  # Will use context only
+        ),
+        LDAIAgentConfig(
+            key='customer-support-agent',
+            default_value=LDAIAgentDefaults(
+                enabled=True,
+                instructions="Default support"
+            ),
+            variables={'company_name': 'TechStart Inc'}
+        )
+    ]
+
+    agents = ldai_client.agents(agent_configs, context)
+
+    personal_agent = agents['personalized-agent']
+    assert personal_agent.instructions == 'Hello Alice! I am your personal assistant. Your user key is user-key.'
+
+    support_agent = agents['customer-support-agent']
+    assert support_agent.instructions == 'You are a helpful customer support agent for TechStart Inc. Always be polite and professional.'
+
+
+def test_agents_with_multi_context_interpolation(ldai_client: LDAIClient):
+    """Test agents method with multi-context interpolation."""
+    user_context = Context.builder('user-key').name('Alice').build()
+    org_context = Context.builder('org-key').kind('org').name('LaunchDarkly').set('tier', 'Enterprise').build()
+    context = Context.multi_builder().add(user_context).add(org_context).build()
+
+    agent_configs = [
+        LDAIAgentConfig(
+            key='multi-context-agent',
+            default_value=LDAIAgentDefaults(
+                enabled=True,
+                instructions="Default multi-context"
+            ),
+            variables={}
+        )
+    ]
+
+    agents = ldai_client.agents(agent_configs, context)
+
+    agent = agents['multi-context-agent']
+    assert agent.instructions == 'Welcome Alice from LaunchDarkly! Your organization tier is Enterprise.'
+
+
+def test_disabled_agent_single_method(ldai_client: LDAIClient):
+    """Test that disabled agents are properly handled in single agent method."""
+    context = Context.create('user-key')
+    config = LDAIAgentConfig(
+        key='disabled-agent',
+        default_value=LDAIAgentDefaults(enabled=False),
+        variables={}
+    )
+
+    agent = ldai_client.agent(config, context)
+
+    assert agent.enabled is False
+    assert agent.tracker is not None
+
+
+def test_disabled_agent_multiple_method(ldai_client: LDAIClient):
+    """Test that disabled agents are properly handled in multiple agents method."""
+    context = Context.create('user-key')
+
+    agent_configs = [
+        LDAIAgentConfig(
+            key='disabled-agent',
+            default_value=LDAIAgentDefaults(enabled=False),
+            variables={}
+        )
+    ]
+
+    agents = ldai_client.agents(agent_configs, context)
+
+    assert len(agents) == 1
+    assert agents['disabled-agent'].enabled is False
+
+
+def test_agent_with_missing_metadata(ldai_client: LDAIClient):
+    """Test agent handling when metadata is minimal or missing."""
+    context = Context.create('user-key')
+    config = LDAIAgentConfig(
+        key='minimal-agent',
+        default_value=LDAIAgentDefaults(
+            enabled=False,
+            model=ModelConfig('default-model'),
+            instructions="Default instructions"
+        )
+    )
+
+    agent = ldai_client.agent(config, context)
+
+    assert agent.enabled is True  # From flag
+    assert agent.instructions == 'Minimal agent configuration.'
+    assert agent.model == config.default_value.model  # Falls back to default
+    assert agent.tracker is not None
+
+
+def test_agent_config_dataclass():
+    """Test the LDAIAgentConfig dataclass functionality."""
+    config = LDAIAgentConfig(
+        key='test-agent',
+        default_value=LDAIAgentDefaults(
+            enabled=True,
+            instructions="Test instructions"
+        ),
+        variables={'key': 'value'}
+    )
+
+    assert config.key == 'test-agent'
+    assert config.default_value.enabled is True
+    assert config.default_value.instructions == "Test instructions"
+    assert config.variables == {'key': 'value'}
+
+    # Test with no variables
+    config_no_vars = LDAIAgentConfig(
+        key='test-agent-2',
+        default_value=LDAIAgentDefaults(enabled=False)
+    )
+
+    assert config_no_vars.key == 'test-agent-2'
+    assert config_no_vars.variables is None
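Usage note (not part of the diff): a minimal sketch of how the agent API added above could be wired up from application code, assuming an already configured LaunchDarkly Python SDK; the SDK key, flag key, and variables below are placeholders::

    import ldclient
    from ldclient import Config, Context
    from ldai.client import LDAIAgentConfig, LDAIAgentDefaults, LDAIClient, ModelConfig

    ldclient.set_config(Config('sdk-key'))  # placeholder SDK key
    ai_client = LDAIClient(ldclient.get())

    context = Context.builder('user-key').set('expertise', 'advanced').build()

    # Single agent: instructions are Mustache-interpolated with the supplied variables.
    agent = ai_client.agent(LDAIAgentConfig(
        key='research-agent',  # placeholder flag key
        default_value=LDAIAgentDefaults(
            enabled=False,
            model=ModelConfig('gpt-4'),
            instructions='You are a research assistant specializing in {{topic}}.'
        ),
        variables={'topic': 'climate change'}
    ), context)

    if agent.enabled:
        print(agent.instructions)  # interpolated instructions
        agent.tracker.track_success()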