Skip to content

Embedding

Embedding

any_llm.api.embedding(model, inputs, *, provider=None, api_key=None, api_base=None, client_args=None, **kwargs)

Create an embedding.

Parameters:

Name Type Description Default
model str

Model identifier. Recommended: Use with separate provider parameter (e.g., model='gpt-4', provider='openai'). Alternative: Combined format 'provider:model' (e.g., 'openai:gpt-4'). Legacy format 'provider/model' is also supported but deprecated.

required
provider str | LLMProvider | None

Recommended: Provider name to use for the request (e.g., 'openai', 'mistral'). When provided, the model parameter should contain only the model name.

None
inputs str | list[str]

The input text to embed

required
api_key str | None

API key for the provider

None
api_base str | None

Base URL for the provider API

None
client_args dict[str, Any] | None

Additional provider-specific arguments that will be passed to the provider's client instantiation.

None
**kwargs Any

Additional provider-specific arguments that will be passed to the provider's API call.

{}

Returns:

Type Description
CreateEmbeddingResponse

The embedding of the input text

Source code in src/any_llm/api.py
def embedding(
    model: str,
    inputs: str | list[str],
    *,
    provider: str | LLMProvider | None = None,
    api_key: str | None = None,
    api_base: str | None = None,
    client_args: dict[str, Any] | None = None,
    **kwargs: Any,
) -> CreateEmbeddingResponse:
    """Create an embedding.

    Args:
        model: Model identifier. **Recommended**: Use with separate `provider` parameter (e.g., model='gpt-4', provider='openai').
            **Alternative**: Combined format 'provider:model' (e.g., 'openai:gpt-4').
            Legacy format 'provider/model' is also supported but deprecated.
        provider: **Recommended**: Provider name to use for the request (e.g., 'openai', 'mistral').
            When provided, the model parameter should contain only the model name.
        inputs: The input text to embed
        api_key: API key for the provider
        api_base: Base URL for the provider API
        client_args: Additional provider-specific arguments that will be passed to the provider's client instantiation.
        **kwargs: Additional provider-specific arguments that will be passed to the provider's API call.

    Returns:
        The embedding of the input text

    """
    # Resolve the provider: either it was given explicitly, or it is
    # embedded in the model string (e.g. 'openai:gpt-4').
    if provider is not None:
        provider_key = LLMProvider.from_string(provider)
        model_name = model
    else:
        provider_key, model_name = AnyLLM.split_model_provider(model)

    extra_client_args = client_args if client_args is not None else {}
    llm = AnyLLM.create(provider_key, api_key=api_key, api_base=api_base, **extra_client_args)
    return llm._embedding(model_name, inputs, **kwargs)

any_llm.api.aembedding(model, inputs, *, provider=None, api_key=None, api_base=None, client_args=None, **kwargs) async

Create an embedding asynchronously.

Parameters:

Name Type Description Default
model str

Model identifier in format 'provider/model' (e.g., 'openai/text-embedding-3-small'). If provider is provided, we assume that the model does not contain the provider name. Otherwise, we assume that the model contains the provider name, like 'openai/gpt-4o'.

required
provider str | LLMProvider | None

Recommended: Provider name to use for the request (e.g., 'openai', 'mistral'). When provided, the model parameter should contain only the model name.

None
inputs str | list[str]

The input text to embed

required
api_key str | None

API key for the provider

None
api_base str | None

Base URL for the provider API

None
client_args dict[str, Any] | None

Additional provider-specific arguments that will be passed to the provider's client instantiation.

None
**kwargs Any

Additional provider-specific arguments that will be passed to the provider's API call.

{}

Returns:

Type Description
CreateEmbeddingResponse

The embedding of the input text

Source code in src/any_llm/api.py
async def aembedding(
    model: str,
    inputs: str | list[str],
    *,
    provider: str | LLMProvider | None = None,
    api_key: str | None = None,
    api_base: str | None = None,
    client_args: dict[str, Any] | None = None,
    **kwargs: Any,
) -> CreateEmbeddingResponse:
    """Create an embedding asynchronously.

    Args:
        model: Model identifier. **Recommended**: Use with separate `provider` parameter (e.g., model='text-embedding-3-small', provider='openai').
            **Alternative**: Combined format 'provider:model' (e.g., 'openai:text-embedding-3-small').
            Legacy format 'provider/model' is also supported but deprecated.
        provider: **Recommended**: Provider name to use for the request (e.g., 'openai', 'mistral').
            When provided, the model parameter should contain only the model name.
        inputs: The input text to embed
        api_key: API key for the provider
        api_base: Base URL for the provider API
        client_args: Additional provider-specific arguments that will be passed to the provider's client instantiation.
        **kwargs: Additional provider-specific arguments that will be passed to the provider's API call.

    Returns:
        The embedding of the input text

    """
    # Resolve the provider either from the explicit argument or from the
    # combined model string (e.g. 'openai:gpt-4o').
    if provider is None:
        provider_key, model_name = AnyLLM.split_model_provider(model)
    else:
        provider_key = LLMProvider.from_string(provider)
        model_name = model

    # `**client_args or {}` binds as `**(client_args or {})`: a None
    # client_args contributes no extra client kwargs.
    llm = AnyLLM.create(provider_key, api_key=api_key, api_base=api_base, **client_args or {})
    return await llm._aembedding(model_name, inputs, **kwargs)