Batch

Experimental API

The Batch API is experimental and subject to breaking changes in future versions. Use with caution in production environments.

The Batch API allows you to process multiple requests asynchronously at a lower cost than sending the same requests individually.

File Path Interface

The any-llm batch API requires you to pass a path to a local JSONL file containing your batch requests. The provider implementation automatically handles uploading and file management as needed.

Different providers handle batch processing differently:

  • OpenAI: Requires uploading a file first, then creating a batch with the file ID
  • Anthropic (future): Expects file content passed directly in the request
  • Other providers: May have their own unique requirements

By accepting a local file path, any-llm abstracts these provider differences and handles the implementation details automatically.
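
For illustration, here is a minimal sketch that prepares a request file. It assumes the OpenAI-style batch line format (one JSON object per line with custom_id, method, url, and body fields); the model name and prompts are placeholders.

import json

# Each line is one request; custom_id lets you match results back to inputs.
requests = [
    {
        "custom_id": f"request-{i}",
        "method": "POST",
        "url": "/v1/chat/completions",
        "body": {
            "model": "gpt-4o-mini",  # placeholder model name
            "messages": [{"role": "user", "content": prompt}],
        },
    }
    for i, prompt in enumerate(["Hello!", "Summarize batching in one sentence."])
]

with open("batch_requests.jsonl", "w") as f:
    for request in requests:
        f.write(json.dumps(request) + "\n")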

any_llm.api.create_batch(provider, input_file_path, endpoint, *, completion_window='24h', metadata=None, api_key=None, api_base=None, client_args=None, **kwargs)

Create a batch job.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| provider | str \| LLMProvider | Provider name to use for the request (e.g., 'openai', 'mistral') | required |
| input_file_path | str | Path to a local file containing batch requests in JSONL format | required |
| endpoint | str | The endpoint to be used for all requests (e.g., '/v1/chat/completions') | required |
| completion_window | str | The time frame within which the batch should be processed | '24h' |
| metadata | dict[str, str] \| None | Optional custom metadata for the batch | None |
| api_key | str \| None | API key for the provider | None |
| api_base | str \| None | Base URL for the provider API | None |
| client_args | dict[str, Any] \| None | Additional provider-specific arguments for client instantiation | None |
| **kwargs | Any | Additional provider-specific arguments | {} |

Returns:

| Type | Description |
| --- | --- |
| Batch | The created batch object |

Source code in src/any_llm/api.py
@experimental(BATCH_API_EXPERIMENTAL_MESSAGE)
def create_batch(
    provider: str | LLMProvider,
    input_file_path: str,
    endpoint: str,
    *,
    completion_window: str = "24h",
    metadata: dict[str, str] | None = None,
    api_key: str | None = None,
    api_base: str | None = None,
    client_args: dict[str, Any] | None = None,
    **kwargs: Any,
) -> Batch:
    """Create a batch job.

    Args:
        provider: Provider name to use for the request (e.g., 'openai', 'mistral')
        input_file_path: Path to a local file containing batch requests in JSONL format.
        endpoint: The endpoint to be used for all requests (e.g., '/v1/chat/completions')
        completion_window: The time frame within which the batch should be processed (default: '24h')
        metadata: Optional custom metadata for the batch
        api_key: API key for the provider
        api_base: Base URL for the provider API
        client_args: Additional provider-specific arguments for client instantiation
        **kwargs: Additional provider-specific arguments

    Returns:
        The created batch object

    """
    llm = AnyLLM.create(LLMProvider.from_string(provider), api_key=api_key, api_base=api_base, **client_args or {})
    return llm.create_batch(
        input_file_path=input_file_path,
        endpoint=endpoint,
        completion_window=completion_window,
        metadata=metadata,
        **kwargs,
    )
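
A minimal usage sketch, assuming the request file prepared above and that provider credentials are available (for example via the provider's usual API key environment variable); the metadata values are illustrative:

from any_llm.api import create_batch

batch = create_batch(
    "openai",
    input_file_path="batch_requests.jsonl",
    endpoint="/v1/chat/completions",
    metadata={"project": "nightly-eval"},  # illustrative metadata
)
# Assumes the returned Batch exposes OpenAI-style fields such as id and status.
print(batch.id, batch.status)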

any_llm.api.acreate_batch(provider, input_file_path, endpoint, *, completion_window='24h', metadata=None, api_key=None, api_base=None, client_args=None, **kwargs) async

Create a batch job asynchronously.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| provider | str \| LLMProvider | Provider name to use for the request (e.g., 'openai', 'mistral') | required |
| input_file_path | str | Path to a local file containing batch requests in JSONL format | required |
| endpoint | str | The endpoint to be used for all requests (e.g., '/v1/chat/completions') | required |
| completion_window | str | The time frame within which the batch should be processed | '24h' |
| metadata | dict[str, str] \| None | Optional custom metadata for the batch | None |
| api_key | str \| None | API key for the provider | None |
| api_base | str \| None | Base URL for the provider API | None |
| client_args | dict[str, Any] \| None | Additional provider-specific arguments for client instantiation | None |
| **kwargs | Any | Additional provider-specific arguments | {} |

Returns:

| Type | Description |
| --- | --- |
| Batch | The created batch object |

Source code in src/any_llm/api.py
@experimental(BATCH_API_EXPERIMENTAL_MESSAGE)
async def acreate_batch(
    provider: str | LLMProvider,
    input_file_path: str,
    endpoint: str,
    *,
    completion_window: str = "24h",
    metadata: dict[str, str] | None = None,
    api_key: str | None = None,
    api_base: str | None = None,
    client_args: dict[str, Any] | None = None,
    **kwargs: Any,
) -> Batch:
    """Create a batch job asynchronously.

    Args:
        provider: Provider name to use for the request (e.g., 'openai', 'mistral')
        input_file_path: Path to a local file containing batch requests in JSONL format.
        endpoint: The endpoint to be used for all requests (e.g., '/v1/chat/completions')
        completion_window: The time frame within which the batch should be processed (default: '24h')
        metadata: Optional custom metadata for the batch
        api_key: API key for the provider
        api_base: Base URL for the provider API
        client_args: Additional provider-specific arguments for client instantiation
        **kwargs: Additional provider-specific arguments

    Returns:
        The created batch object

    """
    llm = AnyLLM.create(LLMProvider.from_string(provider), api_key=api_key, api_base=api_base, **client_args or {})
    return await llm.acreate_batch(
        input_file_path=input_file_path,
        endpoint=endpoint,
        completion_window=completion_window,
        metadata=metadata,
        **kwargs,
    )
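
The asynchronous variant follows the same call shape; a sketch under the same assumptions:

import asyncio

from any_llm.api import acreate_batch

async def main() -> None:
    batch = await acreate_batch(
        "openai",
        input_file_path="batch_requests.jsonl",
        endpoint="/v1/chat/completions",
    )
    print(batch.id)  # assumes an OpenAI-style Batch object with an id field

asyncio.run(main())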

any_llm.api.retrieve_batch(provider, batch_id, *, api_key=None, api_base=None, client_args=None, **kwargs)

Retrieve a batch job.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| provider | str \| LLMProvider | Provider name to use for the request (e.g., 'openai', 'mistral') | required |
| batch_id | str | The ID of the batch to retrieve | required |
| api_key | str \| None | API key for the provider | None |
| api_base | str \| None | Base URL for the provider API | None |
| client_args | dict[str, Any] \| None | Additional provider-specific arguments for client instantiation | None |
| **kwargs | Any | Additional provider-specific arguments | {} |

Returns:

| Type | Description |
| --- | --- |
| Batch | The batch object |

Source code in src/any_llm/api.py
@experimental(BATCH_API_EXPERIMENTAL_MESSAGE)
def retrieve_batch(
    provider: str | LLMProvider,
    batch_id: str,
    *,
    api_key: str | None = None,
    api_base: str | None = None,
    client_args: dict[str, Any] | None = None,
    **kwargs: Any,
) -> Batch:
    """Retrieve a batch job.

    Args:
        provider: Provider name to use for the request (e.g., 'openai', 'mistral')
        batch_id: The ID of the batch to retrieve
        api_key: API key for the provider
        api_base: Base URL for the provider API
        client_args: Additional provider-specific arguments for client instantiation
        **kwargs: Additional provider-specific arguments

    Returns:
        The batch object

    """
    llm = AnyLLM.create(LLMProvider.from_string(provider), api_key=api_key, api_base=api_base, **client_args or {})
    return llm.retrieve_batch(batch_id, **kwargs)
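
A polling sketch: the batch ID is a placeholder, and the terminal status values assume the OpenAI batch lifecycle.

import time

from any_llm.api import retrieve_batch

while True:
    batch = retrieve_batch("openai", batch_id="batch_abc123")  # placeholder ID
    if batch.status in ("completed", "failed", "expired", "cancelled"):
        break
    time.sleep(60)  # batches can take minutes to hours, so poll sparingly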

any_llm.api.aretrieve_batch(provider, batch_id, *, api_key=None, api_base=None, client_args=None, **kwargs) async

Retrieve a batch job asynchronously.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| provider | str \| LLMProvider | Provider name to use for the request (e.g., 'openai', 'mistral') | required |
| batch_id | str | The ID of the batch to retrieve | required |
| api_key | str \| None | API key for the provider | None |
| api_base | str \| None | Base URL for the provider API | None |
| client_args | dict[str, Any] \| None | Additional provider-specific arguments for client instantiation | None |
| **kwargs | Any | Additional provider-specific arguments | {} |

Returns:

| Type | Description |
| --- | --- |
| Batch | The batch object |

Source code in src/any_llm/api.py
@experimental(BATCH_API_EXPERIMENTAL_MESSAGE)
async def aretrieve_batch(
    provider: str | LLMProvider,
    batch_id: str,
    *,
    api_key: str | None = None,
    api_base: str | None = None,
    client_args: dict[str, Any] | None = None,
    **kwargs: Any,
) -> Batch:
    """Retrieve a batch job asynchronously.

    Args:
        provider: Provider name to use for the request (e.g., 'openai', 'mistral')
        batch_id: The ID of the batch to retrieve
        api_key: API key for the provider
        api_base: Base URL for the provider API
        client_args: Additional provider-specific arguments for client instantiation
        **kwargs: Additional provider-specific arguments

    Returns:
        The batch object

    """
    llm = AnyLLM.create(LLMProvider.from_string(provider), api_key=api_key, api_base=api_base, **client_args or {})
    return await llm.aretrieve_batch(batch_id, **kwargs)
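
The same polling loop without blocking the event loop; the status values again assume the OpenAI batch lifecycle.

import asyncio

from any_llm.api import aretrieve_batch

async def wait_for_batch(batch_id: str, poll_seconds: float = 60.0):
    while True:
        batch = await aretrieve_batch("openai", batch_id=batch_id)
        if batch.status in ("completed", "failed", "expired", "cancelled"):
            return batch
        await asyncio.sleep(poll_seconds)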

any_llm.api.cancel_batch(provider, batch_id, *, api_key=None, api_base=None, client_args=None, **kwargs)

Cancel a batch job.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| provider | str \| LLMProvider | Provider name to use for the request (e.g., 'openai', 'mistral') | required |
| batch_id | str | The ID of the batch to cancel | required |
| api_key | str \| None | API key for the provider | None |
| api_base | str \| None | Base URL for the provider API | None |
| client_args | dict[str, Any] \| None | Additional provider-specific arguments for client instantiation | None |
| **kwargs | Any | Additional provider-specific arguments | {} |

Returns:

| Type | Description |
| --- | --- |
| Batch | The cancelled batch object |

Source code in src/any_llm/api.py
@experimental(BATCH_API_EXPERIMENTAL_MESSAGE)
def cancel_batch(
    provider: str | LLMProvider,
    batch_id: str,
    *,
    api_key: str | None = None,
    api_base: str | None = None,
    client_args: dict[str, Any] | None = None,
    **kwargs: Any,
) -> Batch:
    """Cancel a batch job.

    Args:
        provider: Provider name to use for the request (e.g., 'openai', 'mistral')
        batch_id: The ID of the batch to cancel
        api_key: API key for the provider
        api_base: Base URL for the provider API
        client_args: Additional provider-specific arguments for client instantiation
        **kwargs: Additional provider-specific arguments

    Returns:
        The cancelled batch object

    """
    llm = AnyLLM.create(LLMProvider.from_string(provider), api_key=api_key, api_base=api_base, **client_args or {})
    return llm.cancel_batch(batch_id, **kwargs)
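
A cancellation sketch; the batch ID is a placeholder, and cancellation may not be immediate, so the returned status could be an intermediate value such as 'cancelling' (an assumption based on the OpenAI batch lifecycle).

from any_llm.api import cancel_batch

batch = cancel_batch("openai", batch_id="batch_abc123")  # placeholder ID
print(batch.status)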

any_llm.api.acancel_batch(provider, batch_id, *, api_key=None, api_base=None, client_args=None, **kwargs) async

Cancel a batch job asynchronously.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| provider | str \| LLMProvider | Provider name to use for the request (e.g., 'openai', 'mistral') | required |
| batch_id | str | The ID of the batch to cancel | required |
| api_key | str \| None | API key for the provider | None |
| api_base | str \| None | Base URL for the provider API | None |
| client_args | dict[str, Any] \| None | Additional provider-specific arguments for client instantiation | None |
| **kwargs | Any | Additional provider-specific arguments | {} |

Returns:

| Type | Description |
| --- | --- |
| Batch | The cancelled batch object |

Source code in src/any_llm/api.py
@experimental(BATCH_API_EXPERIMENTAL_MESSAGE)
async def acancel_batch(
    provider: str | LLMProvider,
    batch_id: str,
    *,
    api_key: str | None = None,
    api_base: str | None = None,
    client_args: dict[str, Any] | None = None,
    **kwargs: Any,
) -> Batch:
    """Cancel a batch job asynchronously.

    Args:
        provider: Provider name to use for the request (e.g., 'openai', 'mistral')
        batch_id: The ID of the batch to cancel
        api_key: API key for the provider
        api_base: Base URL for the provider API
        client_args: Additional provider-specific arguments for client instantiation
        **kwargs: Additional provider-specific arguments

    Returns:
        The cancelled batch object

    """
    llm = AnyLLM.create(LLMProvider.from_string(provider), api_key=api_key, api_base=api_base, **client_args or {})
    return await llm.acancel_batch(batch_id, **kwargs)

any_llm.api.list_batches(provider, *, after=None, limit=None, api_key=None, api_base=None, client_args=None, **kwargs)

List batch jobs.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| provider | str \| LLMProvider | Provider name to use for the request (e.g., 'openai', 'mistral') | required |
| after | str \| None | A cursor for pagination. Returns batches after this batch ID. | None |
| limit | int \| None | Maximum number of batches to return (default: 20) | None |
| api_key | str \| None | API key for the provider | None |
| api_base | str \| None | Base URL for the provider API | None |
| client_args | dict[str, Any] \| None | Additional provider-specific arguments for client instantiation | None |
| **kwargs | Any | Additional provider-specific arguments | {} |

Returns:

| Type | Description |
| --- | --- |
| Sequence[Batch] | A list of batch objects |

Source code in src/any_llm/api.py
@experimental(BATCH_API_EXPERIMENTAL_MESSAGE)
def list_batches(
    provider: str | LLMProvider,
    *,
    after: str | None = None,
    limit: int | None = None,
    api_key: str | None = None,
    api_base: str | None = None,
    client_args: dict[str, Any] | None = None,
    **kwargs: Any,
) -> Sequence[Batch]:
    """List batch jobs.

    Args:
        provider: Provider name to use for the request (e.g., 'openai', 'mistral')
        after: A cursor for pagination. Returns batches after this batch ID.
        limit: Maximum number of batches to return (default: 20)
        api_key: API key for the provider
        api_base: Base URL for the provider API
        client_args: Additional provider-specific arguments for client instantiation
        **kwargs: Additional provider-specific arguments

    Returns:
        A list of batch objects

    """
    llm = AnyLLM.create(LLMProvider.from_string(provider), api_key=api_key, api_base=api_base, **client_args or {})
    return llm.list_batches(after=after, limit=limit, **kwargs)
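
A pagination sketch using the after cursor; it assumes each Batch exposes an id field and that passing the last ID of one page as the cursor yields the next page.

from any_llm.api import list_batches

cursor = None
while True:
    page = list_batches("openai", after=cursor, limit=20)
    for batch in page:
        print(batch.id, batch.status)
    if len(page) < 20:  # a short page means there is nothing left to fetch
        break
    cursor = page[-1].id  # continue after the last batch seen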

any_llm.api.alist_batches(provider, *, after=None, limit=None, api_key=None, api_base=None, client_args=None, **kwargs) async

List batch jobs asynchronously.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| provider | str \| LLMProvider | Provider name to use for the request (e.g., 'openai', 'mistral') | required |
| after | str \| None | A cursor for pagination. Returns batches after this batch ID. | None |
| limit | int \| None | Maximum number of batches to return (default: 20) | None |
| api_key | str \| None | API key for the provider | None |
| api_base | str \| None | Base URL for the provider API | None |
| client_args | dict[str, Any] \| None | Additional provider-specific arguments for client instantiation | None |
| **kwargs | Any | Additional provider-specific arguments | {} |

Returns:

| Type | Description |
| --- | --- |
| Sequence[Batch] | A list of batch objects |

Source code in src/any_llm/api.py
@experimental(BATCH_API_EXPERIMENTAL_MESSAGE)
async def alist_batches(
    provider: str | LLMProvider,
    *,
    after: str | None = None,
    limit: int | None = None,
    api_key: str | None = None,
    api_base: str | None = None,
    client_args: dict[str, Any] | None = None,
    **kwargs: Any,
) -> Sequence[Batch]:
    """List batch jobs asynchronously.

    Args:
        provider: Provider name to use for the request (e.g., 'openai', 'mistral')
        after: A cursor for pagination. Returns batches after this batch ID.
        limit: Maximum number of batches to return (default: 20)
        api_key: API key for the provider
        api_base: Base URL for the provider API
        client_args: Additional provider-specific arguments for client instantiation
        **kwargs: Additional provider-specific arguments

    Returns:
        A list of batch objects

    """
    llm = AnyLLM.create(LLMProvider.from_string(provider), api_key=api_key, api_base=api_base, **client_args or {})
    return await llm.alist_batches(after=after, limit=limit, **kwargs)
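
The asynchronous equivalent, here just fetching the most recent batches under the same assumptions about the Batch fields:

import asyncio

from any_llm.api import alist_batches

async def main() -> None:
    batches = await alist_batches("openai", limit=10)
    for batch in batches:
        print(batch.id, batch.status)

asyncio.run(main())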