Skip to content

Embedding

Embedding

any_llm.embedding(model, inputs, *, provider=None, api_key=None, api_base=None, **kwargs)

Create an embedding.

Parameters:

Name Type Description Default
model str

Model identifier. Recommended: Use with separate provider parameter (e.g., model='gpt-4', provider='openai'). Alternative: Combined format 'provider:model' (e.g., 'openai:gpt-4'). Legacy format 'provider/model' is also supported but deprecated.

required
provider str | ProviderName | None

Recommended: Provider name to use for the request (e.g., 'openai', 'mistral'). When provided, the model parameter should contain only the model name.

None
inputs str | list[str]

The input text to embed

required
api_key str | None

API key for the provider

None
api_base str | None

Base URL for the provider API

None
**kwargs Any

Additional provider-specific parameters

{}

Returns:

Type Description
CreateEmbeddingResponse

The embedding of the input text

Source code in src/any_llm/api.py
def embedding(
    model: str,
    inputs: str | list[str],
    *,
    provider: str | ProviderName | None = None,
    api_key: str | None = None,
    api_base: str | None = None,
    **kwargs: Any,
) -> CreateEmbeddingResponse:
    """Create an embedding for the given input text.

    Args:
        model: Model identifier. **Recommended**: Use with separate `provider` parameter (e.g., model='gpt-4', provider='openai').
            **Alternative**: Combined format 'provider:model' (e.g., 'openai:gpt-4').
            Legacy format 'provider/model' is also supported but deprecated.
        provider: **Recommended**: Provider name to use for the request (e.g., 'openai', 'mistral').
            When provided, the model parameter should contain only the model name.
        inputs: The input text to embed
        api_key: API key for the provider
        api_base: Base URL for the provider API
        **kwargs: Additional provider-specific parameters

    Returns:
        The embedding of the input text

    """
    # Resolve the provider: either taken from the explicit argument or
    # parsed out of a combined "provider:model" / "provider/model" string.
    if provider is not None:
        provider_key = ProviderName.from_string(provider)
        model_name = model
    else:
        provider_key, model_name = ProviderFactory.split_model_provider(model)

    # Only include credentials/endpoint that were actually supplied, so
    # ApiConfig falls back to its own defaults for the rest.
    config_kwargs: dict[str, str] = {}
    if api_key:
        config_kwargs["api_key"] = str(api_key)
    if api_base:
        config_kwargs["api_base"] = str(api_base)

    provider_impl = ProviderFactory.create_provider(provider_key, ApiConfig(**config_kwargs))
    return provider_impl.embedding(model_name, inputs, **kwargs)

any_llm.aembedding(model, inputs, *, provider=None, api_key=None, api_base=None, **kwargs) async

Create an embedding asynchronously.

Parameters:

Name Type Description Default
model str

Model identifier. Recommended: Use with separate provider parameter (e.g., model='text-embedding-3-small', provider='openai'). Alternative: Combined format 'provider:model' (e.g., 'openai:text-embedding-3-small'). Legacy format 'provider/model' is also supported but deprecated.

required
provider str | ProviderName | None

Recommended: Provider name to use for the request (e.g., 'openai', 'mistral'). When provided, the model parameter should contain only the model name.

None
inputs str | list[str]

The input text to embed

required
api_key str | None

API key for the provider

None
api_base str | None

Base URL for the provider API

None
**kwargs Any

Additional provider-specific parameters

{}

Returns:

Type Description
CreateEmbeddingResponse

The embedding of the input text

Source code in src/any_llm/api.py
async def aembedding(
    model: str,
    inputs: str | list[str],
    *,
    provider: str | ProviderName | None = None,
    api_key: str | None = None,
    api_base: str | None = None,
    **kwargs: Any,
) -> CreateEmbeddingResponse:
    """Create an embedding asynchronously.

    Args:
        model: Model identifier. **Recommended**: Use with separate `provider` parameter (e.g., model='text-embedding-3-small', provider='openai').
            **Alternative**: Combined format 'provider:model' (e.g., 'openai:text-embedding-3-small').
            Legacy format 'provider/model' is also supported but deprecated.
        provider: **Recommended**: Provider name to use for the request (e.g., 'openai', 'mistral').
            When provided, the model parameter should contain only the model name.
        inputs: The input text to embed
        api_key: API key for the provider
        api_base: Base URL for the provider API
        **kwargs: Additional provider-specific parameters

    Returns:
        The embedding of the input text

    """
    # Resolve the provider: parse it out of a combined model string, or use
    # the explicit argument and treat `model` as the bare model name.
    if provider is None:
        provider_key, model_name = ProviderFactory.split_model_provider(model)
    else:
        provider_key = ProviderName.from_string(provider)
        model_name = model

    # Only forward credentials/endpoint that were actually supplied, so
    # ApiConfig falls back to its own defaults for the rest.
    config: dict[str, str] = {}
    if api_key:
        config["api_key"] = str(api_key)
    if api_base:
        config["api_base"] = str(api_base)
    api_config = ApiConfig(**config)

    provider_instance = ProviderFactory.create_provider(provider_key, api_config)

    # Delegate to the provider's async embedding implementation.
    return await provider_instance.aembedding(model_name, inputs, **kwargs)