Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/huggingface_hub/inference/_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,7 @@ class InferenceClient:
Note: for better compatibility with OpenAI's client, `model` has been aliased as `base_url`. Those 2
arguments are mutually exclusive. If a URL is passed as `model` or `base_url` for chat completion, the `(/v1)/chat/completions` suffix path will be appended to the URL.
provider (`str`, *optional*):
Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"clarifai"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"ovhcloud"`, `"publicai"`, `"replicate"`, `"sambanova"`, `"scaleway"`, `"together"`, `"wavespeed"` or `"zai-org"`.
Name of the provider to use for inference. Can be `"alphaneural"`, `"black-forest-labs"`, `"cerebras"`, `"clarifai"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"ovhcloud"`, `"publicai"`, `"replicate"`, `"sambanova"`, `"scaleway"`, `"together"`, `"wavespeed"` or `"zai-org"`.
Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
If model is a URL or `base_url` is passed, then `provider` is not used.
token (`str`, *optional*):
Expand Down
2 changes: 1 addition & 1 deletion src/huggingface_hub/inference/_generated/_async_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ class AsyncInferenceClient:
Note: for better compatibility with OpenAI's client, `model` has been aliased as `base_url`. Those 2
arguments are mutually exclusive. If a URL is passed as `model` or `base_url` for chat completion, the `(/v1)/chat/completions` suffix path will be appended to the URL.
provider (`str`, *optional*):
Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"clarifai"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"ovhcloud"`, `"publicai"`, `"replicate"`, `"sambanova"`, `"scaleway"`, `"together"`, `"wavespeed"` or `"zai-org"`.
Name of the provider to use for inference. Can be `"alphaneural"`, `"black-forest-labs"`, `"cerebras"`, `"clarifai"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"ovhcloud"`, `"publicai"`, `"replicate"`, `"sambanova"`, `"scaleway"`, `"together"`, `"wavespeed"` or `"zai-org"`.
Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
If model is a URL or `base_url` is passed, then `provider` is not used.
token (`str`, *optional*):
Expand Down
9 changes: 9 additions & 0 deletions src/huggingface_hub/inference/_providers/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,9 @@
from typing import Literal, Optional, Union

from huggingface_hub.inference._providers.alphaneural import (
AlphaneuralConversationalTask,
AlphaneuralTextGenerationTask,
)
from huggingface_hub.inference._providers.featherless_ai import (
FeatherlessConversationalTask,
FeatherlessTextGenerationTask,
Expand Down Expand Up @@ -63,6 +67,7 @@


PROVIDER_T = Literal[
"alphaneural",
"black-forest-labs",
"cerebras",
"clarifai",
Expand Down Expand Up @@ -92,6 +97,10 @@
CONVERSATIONAL_AUTO_ROUTER = AutoRouterConversationalTask()

PROVIDERS: dict[PROVIDER_T, dict[str, TaskProviderHelper]] = {
"alphaneural": {
"conversational": AlphaneuralConversationalTask(),
"text-generation": AlphaneuralTextGenerationTask(),
},
"black-forest-labs": {
"text-to-image": BlackForestLabsTextToImageTask(),
},
Expand Down
1 change: 1 addition & 0 deletions src/huggingface_hub/inference/_providers/_common.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
# provider_id="Qwen2.5-Coder-32B-Instruct",
# task="conversational",
# status="live")
"alphaneural": {},
"cerebras": {},
"cohere": {},
"clarifai": {},
Expand Down
30 changes: 30 additions & 0 deletions src/huggingface_hub/inference/_providers/alphaneural.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
from typing import Any, Optional, Union

from huggingface_hub.inference._common import RequestParameters, _as_dict

from ._common import BaseConversationalTask, BaseTextGenerationTask


# Registry key for this provider (matches the "alphaneural" entries in PROVIDERS and the docstrings).
_PROVIDER = "alphaneural"
# Base URL all AlphaNeural inference requests are routed to.
_BASE_URL = "https://proxy.alfnrl.io"


class AlphaneuralConversationalTask(BaseConversationalTask):
    """Chat-completion (conversational) task helper for the AlphaNeural provider.

    Relies entirely on the base class behavior; only pins the provider
    name and base URL for this provider.
    """

    def __init__(self):
        super().__init__(base_url=_BASE_URL, provider=_PROVIDER)


class AlphaneuralTextGenerationTask(BaseTextGenerationTask):
    """Text-generation task helper for the AlphaNeural provider.

    Inherits request building from the base class and only adapts the
    provider's OpenAI-style completion response to the HuggingFace
    ``text_generation`` output shape.
    """

    def __init__(self):
        super().__init__(base_url=_BASE_URL, provider=_PROVIDER)

    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
        """Map an OpenAI-format completion response to HuggingFace format.

        Takes the first entry of ``choices`` and exposes its ``text`` as
        ``generated_text``, forwarding ``finish_reason`` and ``seed`` (when
        present) under ``details``.
        """
        first_choice = _as_dict(response)["choices"][0]
        details = {
            "finish_reason": first_choice.get("finish_reason"),
            "seed": first_choice.get("seed"),
        }
        return {"generated_text": first_choice["text"], "details": details}
4 changes: 4 additions & 0 deletions tests/test_inference_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,10 @@

# Avoid calling APIs in VCRed tests
_RECOMMENDED_MODELS_FOR_VCR = {
"alphaneural": {
"conversational": "qwen/qwen3",
"text-generation": "qwen/qwen3",
},
"black-forest-labs": {
"text-to-image": "black-forest-labs/FLUX.1-dev",
},
Expand Down
1 change: 1 addition & 0 deletions tests/test_inference_providers.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
filter_none,
recursive_merge,
)

from huggingface_hub.inference._providers.black_forest_labs import BlackForestLabsTextToImageTask
from huggingface_hub.inference._providers.clarifai import ClarifaiConversationalTask
from huggingface_hub.inference._providers.cohere import CohereConversationalTask
Expand Down