pub trait LlmProvider: Send + Sync {
// Required method
fn call<'life0, 'life1, 'async_trait>(
&'life0 self,
prompt: &'life1 str,
) -> Pin<Box<dyn Future<Output = Result<String, AppError>> + Send + 'async_trait>>
where 'life0: 'async_trait,
'life1: 'async_trait,
Self: 'async_trait;
// Provided methods
fn call_with_usage<'life0, 'life1, 'async_trait>(
&'life0 self,
prompt: &'life1 str,
) -> Pin<Box<dyn Future<Output = Result<LlmCallResponse, AppError>> + Send + 'async_trait>>
where 'life0: 'async_trait,
'life1: 'async_trait,
Self: 'async_trait { ... }
fn call_with_tools<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
prompt: &'life1 str,
_tools: Option<&'life2 [Value]>,
) -> Pin<Box<dyn Future<Output = Result<LlmCallResponse, AppError>> + Send + 'async_trait>>
where 'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
Self: 'async_trait { ... }
}

Expand description
Unified LLM interface (async)
Required Methods§
Source

fn call<'life0, 'life1, 'async_trait>(
    &'life0 self,
    prompt: &'life1 str,
) -> Pin<Box<dyn Future<Output = Result<String, AppError>> + Send + 'async_trait>>
where
    'life0: 'async_trait,
    'life1: 'async_trait,
    Self: 'async_trait,
Call the LLM with a prompt and return the generated text. For backward compatibility, this returns just the text.
Provided Methods§
Source

fn call_with_usage<'life0, 'life1, 'async_trait>(
    &'life0 self,
    prompt: &'life1 str,
) -> Pin<Box<dyn Future<Output = Result<LlmCallResponse, AppError>> + Send + 'async_trait>>
where
    'life0: 'async_trait,
    'life1: 'async_trait,
    Self: 'async_trait,
Call the LLM with a prompt and return the response with token usage.

The default implementation delegates to `call` and reports unknown token usage.
Source

fn call_with_tools<'life0, 'life1, 'life2, 'async_trait>(
    &'life0 self,
    prompt: &'life1 str,
    _tools: Option<&'life2 [Value]>,
) -> Pin<Box<dyn Future<Output = Result<LlmCallResponse, AppError>> + Send + 'async_trait>>
where
    'life0: 'async_trait,
    'life1: 'async_trait,
    'life2: 'async_trait,
    Self: 'async_trait,
Call the LLM with a prompt and optional tool schemas.

When `tools` is `Some`, providers that support native tool/function calling
will include the schemas in the API request body, enabling the model to
return structured tool call responses.

The default implementation ignores the `tools` parameter and delegates to
`Self::call_with_usage`. Providers should override this to pass tools
natively.