diff --git a/docs/inference-providers/_toctree.yml b/docs/inference-providers/_toctree.yml
index c288e25f20..abcfd26f23 100644
--- a/docs/inference-providers/_toctree.yml
+++ b/docs/inference-providers/_toctree.yml
@@ -120,6 +120,8 @@
title: Novita
- local: providers/nscale
title: Nscale
+ - local: providers/nvidia
+ title: NVIDIA
- local: providers/ovhcloud
title: OVHcloud AI Endpoints
- local: providers/publicai
diff --git a/docs/inference-providers/index.md b/docs/inference-providers/index.md
index 8feac7d0c3..911f0d932c 100644
--- a/docs/inference-providers/index.md
+++ b/docs/inference-providers/index.md
@@ -23,6 +23,7 @@ Our platform integrates with leading AI infrastructure providers, giving you acc
| [Hyperbolic](./providers/hyperbolic) | ✅ | ✅ | | | | |
| [Novita](./providers/novita) | ✅ | ✅ | | | ✅ | |
| [Nscale](./providers/nscale) | ✅ | ✅ | | ✅ | | |
+| [NVIDIA](./providers/nvidia) | ✅ | | | | | |
| [OVHcloud AI Endpoints](./providers/ovhcloud)| ✅ | ✅ | | | | |
| [Public AI](./providers/publicai) | ✅ | | | | | |
| [Replicate](./providers/replicate) | | | | ✅ | ✅ | ✅ |
diff --git a/docs/inference-providers/providers/nvidia.md b/docs/inference-providers/providers/nvidia.md
new file mode 100644
index 0000000000..9112797e08
--- /dev/null
+++ b/docs/inference-providers/providers/nvidia.md
@@ -0,0 +1,50 @@
+
+
+# NVIDIA
+
+> [!TIP]
+> All supported NVIDIA models can be found [here](https://huggingface.co/models?inference_provider=nvidia&sort=trending)
+
+
+
+
+
+NVIDIA provides high-performance AI inference infrastructure through NVIDIA NIM microservices and accelerated computing platforms, with access to production-ready models for enterprise and developer workloads.
+
+For details on pricing, usage policies, and data handling, please refer to NVIDIA's official API documentation and terms.
+
+## Resources
+- **Website**: https://www.nvidia.com/
+- **API Catalog**: https://build.nvidia.com/
+- **Documentation**: https://docs.api.nvidia.com/
+
+## Supported tasks
+
+
diff --git a/scripts/inference-providers/scripts/generate.ts b/scripts/inference-providers/scripts/generate.ts
index 21607f41f4..e98c250a00 100644
--- a/scripts/inference-providers/scripts/generate.ts
+++ b/scripts/inference-providers/scripts/generate.ts
@@ -46,6 +46,7 @@ const PROVIDERS_URLS: Record<string, string> = {
//nebius: "https://nebius.com/",
novita: "https://novita.ai/",
nscale: "https://www.nscale.com/",
+ nvidia: "https://www.nvidia.com/",
ovhcloud: "https://www.ovhcloud.com/",
publicai: "https://publicai.co/",
replicate: "https://replicate.com/",
diff --git a/scripts/inference-providers/templates/providers/nvidia.handlebars b/scripts/inference-providers/templates/providers/nvidia.handlebars
new file mode 100644
index 0000000000..2c24efabe0
--- /dev/null
+++ b/scripts/inference-providers/templates/providers/nvidia.handlebars
@@ -0,0 +1,19 @@
+# NVIDIA
+
+> [!TIP]
+> All supported NVIDIA models can be found [here](https://huggingface.co/models?inference_provider=nvidia&sort=trending)
+
+{{{logoSection}}}
+
+{{{followUsSection}}}
+
+NVIDIA provides high-performance AI inference infrastructure through NVIDIA NIM microservices and accelerated computing platforms, with access to production-ready models for enterprise and developer workloads.
+
+For details on pricing, usage policies, and data handling, please refer to NVIDIA's official API documentation and terms.
+
+## Resources
+- **Website**: https://www.nvidia.com/
+- **API Catalog**: https://build.nvidia.com/
+- **Documentation**: https://docs.api.nvidia.com/
+
+{{{tasksSection}}}