diff --git a/docs/onboarding/llm-analytics/anthropic.tsx b/docs/onboarding/llm-analytics/anthropic.tsx
index a7006309715e..61e8f1f56d22 100644
--- a/docs/onboarding/llm-analytics/anthropic.tsx
+++ b/docs/onboarding/llm-analytics/anthropic.tsx
@@ -9,44 +9,26 @@ export const getAnthropicSteps = (ctx: OnboardingComponentsContext): StepDefinit
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-anthropic) and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-anthropic)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-anthropic)
+ and [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-anthropic)
+ examples.
+
+
-
- >
- ),
- },
- {
- title: 'Install the Anthropic SDK',
- badge: 'required',
- content: (
- <>
- Install the Anthropic SDK. The PostHog SDK instruments your LLM calls by wrapping the Anthropic
- client. The PostHog SDK **does not** proxy your calls.
+ Install the OpenTelemetry SDK, the Anthropic instrumentation, and the Anthropic SDK.
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
>
),
},
{
- title: 'Initialize PostHog and the Anthropic wrapper',
+ title: 'Set up OpenTelemetry tracing',
badge: 'required',
content: (
<>
- Initialize PostHog with your project token and host from [your project
- settings](https://app.posthog.com/settings/project), then pass it to our Anthropic wrapper.
+ Configure OpenTelemetry to auto-instrument Anthropic SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- client = Anthropic(
- api_key="sk-ant-api...", # Replace with your Anthropic API key
- posthog_client=posthog # This is an optional parameter. If it is not provided, a default client will be used.
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ AnthropicInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { Anthropic } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
-
- const phClient = new PostHog(
- '',
- { host: '' }
- )
-
- const client = new Anthropic({
- apiKey: 'sk-ant-api...', // Replace with your Anthropic API key
- posthog: phClient
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { AnthropicInstrumentation } from '@traceloop/instrumentation-anthropic'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '<ph_project_api_key>',
+ host: '<ph_client_api_host>',
+ })
+ ),
+ ],
+ instrumentations: [new AnthropicInstrumentation()],
})
+ sdk.start()
`,
},
]}
/>
-
-
-
- **Note:** This also works with the `AsyncAnthropic` client as well as `AnthropicBedrock`,
- `AnthropicVertex`, and the async versions of those.
-
-
>
),
},
{
- title: 'Call Anthropic LLMs',
+ title: 'Call Anthropic',
badge: 'required',
content: (
<>
- Now, when you use the Anthropic SDK to call LLMs, PostHog automatically captures an
- `$ai_generation` event. You can enrich the event with additional data such as the trace ID,
- distinct ID, custom properties, groups, and privacy mode options.
+ Now, when you use the Anthropic SDK to call LLMs, PostHog automatically captures
+ `$ai_generation` events via the OpenTelemetry instrumentation.
- {dedent`
- **Notes:**
- - This also works when message streams are used (e.g. \`stream=True\` or \`client.messages.stream(...)\`).
- - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request.
+ **Note:** This also works with the `AsyncAnthropic` client as well as `AnthropicBedrock`,
+ `AnthropicVertex`, and the async versions of those.
+
+
- See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
- `}
+
+
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
diff --git a/docs/onboarding/llm-analytics/autogen.tsx b/docs/onboarding/llm-analytics/autogen.tsx
index 304777a151d2..8c4364efa9b5 100644
--- a/docs/onboarding/llm-analytics/autogen.tsx
+++ b/docs/onboarding/llm-analytics/autogen.tsx
@@ -3,94 +3,75 @@ import { OnboardingComponentsContext, createInstallation } from 'scenes/onboardi
import { StepDefinition } from '../steps'
export const getAutoGenSteps = (ctx: OnboardingComponentsContext): StepDefinition[] => {
- const { CodeBlock, CalloutBox, Markdown, dedent, snippets } = ctx
+ const { CodeBlock, CalloutBox, Markdown, Blockquote, dedent, snippets } = ctx
const NotableGenerationProperties = snippets?.NotableGenerationProperties
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK. The AutoGen integration uses
- PostHog's OpenAI wrapper since AutoGen uses OpenAI under the hood.
-
+
+
+ See the complete [Python
+ example](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-autogen)
+ on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Python
+ wrapper
+ example](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-autogen).
+
+
-
- >
- ),
- },
- {
- title: 'Install AutoGen',
- badge: 'required',
- content: (
- <>
-
- Install AutoGen with the OpenAI extension. PostHog instruments your LLM calls by wrapping the
- OpenAI client that AutoGen uses internally.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and AutoGen.
>
),
},
{
- title: 'Initialize PostHog and AutoGen',
+ title: 'Set up OpenTelemetry tracing',
badge: 'required',
content: (
<>
- Initialize PostHog with your project token and host from [your project
- settings](https://app.posthog.com/settings/project), then create a PostHog OpenAI wrapper and
- pass it to AutoGen's `OpenAIChatCompletionClient`.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- openai_client = OpenAI(
- api_key="your_openai_api_key",
- posthog_client=posthog,
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
- model_client = OpenAIChatCompletionClient(
- model="gpt-4o",
- openai_client=openai_client,
- )
+ OpenAIInstrumentor().instrument()
`}
/>
-
-
-
- AutoGen's `OpenAIChatCompletionClient` accepts a custom OpenAI client via the
- `openai_client` parameter. PostHog's `OpenAI` wrapper is a proper subclass of
- `openai.OpenAI`, so it works directly. PostHog captures `$ai_generation` events
- automatically without proxying your calls.
-
-
>
),
},
@@ -101,12 +82,20 @@ export const getAutoGenSteps = (ctx: OnboardingComponentsContext): StepDefinitio
<>
Use AutoGen as normal. PostHog automatically captures an `$ai_generation` event for each LLM
- call made through the wrapped OpenAI client.
+ call made through the OpenAI SDK that AutoGen uses internally.
+
+
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
+
+
+
{dedent`
You can expect captured \`$ai_generation\` events to have the following properties:
diff --git a/docs/onboarding/llm-analytics/azure-openai.tsx b/docs/onboarding/llm-analytics/azure-openai.tsx
index 44cff9902d53..a0cb8cf6723f 100644
--- a/docs/onboarding/llm-analytics/azure-openai.tsx
+++ b/docs/onboarding/llm-analytics/azure-openai.tsx
@@ -9,11 +9,26 @@ export const getAzureOpenAISteps = (ctx: OnboardingComponentsContext): StepDefin
return [
{
- title: 'Install the SDKs',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
- Setting up analytics starts with installing the PostHog and OpenAI SDKs.
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-azure-openai)
+ and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-azure-openai)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-azure-openai)
+ and [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-azure-openai)
+ examples.
+
+
+
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK.
- We call Azure OpenAI through PostHog's AzureOpenAI wrapper to capture all the details of the
- call. Initialize PostHog with your PostHog project token and host from [your project
- settings](https://app.posthog.com/settings/project), then pass the PostHog client along with
- your Azure OpenAI config (the API key, API version, and endpoint) to our AzureOpenAI wrapper.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- client = AzureOpenAI(
- api_key="",
- api_version="2024-10-21",
- azure_endpoint="https://.openai.azure.com",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ OpenAIInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { AzureOpenAI } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const client = new AzureOpenAI({
- apiKey: '',
- apiVersion: '2024-10-21',
- endpoint: 'https://.openai.azure.com',
- posthog: phClient,
- });
-
- // ... your code here ...
-
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent
- phClient.shutdown()
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '<ph_project_api_key>',
+ host: '<ph_client_api_host>',
+ })
+ ),
+ ],
+ instrumentations: [new OpenAIInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
/>
-
-
- **Note:** This also works with the `AsyncAzureOpenAI` client.
-
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
>
),
},
@@ -120,9 +131,8 @@ export const getAzureOpenAISteps = (ctx: OnboardingComponentsContext): StepDefin
content: (
<>
- Now, when you call Azure OpenAI, PostHog automatically captures an `$ai_generation` event. You
- can also capture or modify additional properties with the distinct ID, trace ID, properties,
- groups, and privacy mode parameters.
+ Now, when you call Azure OpenAI, PostHog automatically captures `$ai_generation` events via the
+ OpenTelemetry instrumentation.
",
- messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }],
- posthogDistinctId: "user_123", // optional
- posthogTraceId: "trace_123", // optional
- posthogProperties: { conversation_id: "abc123", paid: true }, // optional
- posthogGroups: { company: "company_id_in_your_db" }, // optional
- posthogPrivacyMode: false // optional
- });
-
- console.log(completion.choices[0].message.content)
+ import { AzureOpenAI } from 'openai'
+
+ const client = new AzureOpenAI({
+ apiKey: '<azure_openai_api_key>',
+ apiVersion: '2024-10-21',
+ endpoint: 'https://<your-resource-name>.openai.azure.com',
+ })
+
+ const response = await client.chat.completions.create({
+ model: '<your_deployment_name>',
+ messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }],
+ })
+
+ console.log(response.choices[0].message.content)
`,
},
]}
@@ -168,13 +184,9 @@ export const getAzureOpenAISteps = (ctx: OnboardingComponentsContext): StepDefin
- {dedent`
- **Notes:**
- - This works with responses where \`stream=True\`.
- - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request.
-
- See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
- `}
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
diff --git a/docs/onboarding/llm-analytics/cerebras.tsx b/docs/onboarding/llm-analytics/cerebras.tsx
index 24e595c20c32..8d15f140c62b 100644
--- a/docs/onboarding/llm-analytics/cerebras.tsx
+++ b/docs/onboarding/llm-analytics/cerebras.tsx
@@ -9,45 +9,25 @@ export const getCerebrasSteps = (ctx: OnboardingComponentsContext): StepDefiniti
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-cerebras) and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-cerebras)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-cerebras)
+ and [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-cerebras)
+ examples.
+
+
-
- >
- ),
- },
- {
- title: 'Install the OpenAI SDK',
- badge: 'required',
- content: (
- <>
-
- Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI
- client. The PostHog SDK **does not** proxy your calls.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK.
- We call Cerebras through the OpenAI client and generate a response. We'll use PostHog's OpenAI
- provider to capture all the details of the call. Initialize PostHog with your PostHog project
- token and host from [your project settings](https://app.posthog.com/settings/project), then pass
- the PostHog client along with the Cerebras config (the base URL and API key) to our OpenAI
- wrapper.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- client = OpenAI(
- base_url="https://api.cerebras.ai/v1",
- api_key="",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ OpenAIInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { OpenAI } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const openai = new OpenAI({
- baseURL: 'https://api.cerebras.ai/v1',
- apiKey: '',
- posthog: phClient,
- });
-
- // ... your code here ...
-
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent
- phClient.shutdown()
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '<ph_project_api_key>',
+ host: '<ph_client_api_host>',
+ })
+ ),
+ ],
+ instrumentations: [new OpenAIInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
/>
-
-
- **Note:** This also works with the `AsyncOpenAI` client.
-
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
>
),
},
@@ -153,9 +130,8 @@ export const getCerebrasSteps = (ctx: OnboardingComponentsContext): StepDefiniti
content: (
<>
- Now, when you call Cerebras with the OpenAI SDK, PostHog automatically captures an
- `$ai_generation` event. You can also capture or modify additional properties with the distinct
- ID, trace ID, properties, groups, and privacy mode parameters.
+ Now, when you use the OpenAI SDK to call Cerebras, PostHog automatically captures
+ `$ai_generation` events via the OpenTelemetry instrumentation.
',
+ })
+
+ const response = await client.chat.completions.create({
+ model: 'llama-3.3-70b',
+ max_completion_tokens: 1024,
+ messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }],
+ })
+
+ console.log(response.choices[0].message.content)
`,
},
]}
@@ -202,12 +184,7 @@ export const getCerebrasSteps = (ctx: OnboardingComponentsContext): StepDefiniti
{dedent`
- **Notes:**
- - We also support the old \`chat.completions\` API.
- - This works with responses where \`stream=True\`.
- - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request.
-
- See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
+ **Note:** If you want to capture LLM events anonymously, omit the \`posthog.distinct_id\` resource attribute. See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
`}
diff --git a/docs/onboarding/llm-analytics/cohere.tsx b/docs/onboarding/llm-analytics/cohere.tsx
index 29ba9e973bea..ea11c3a1f5f9 100644
--- a/docs/onboarding/llm-analytics/cohere.tsx
+++ b/docs/onboarding/llm-analytics/cohere.tsx
@@ -9,45 +9,25 @@ export const getCohereSteps = (ctx: OnboardingComponentsContext): StepDefinition
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-cohere) and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-cohere)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-cohere) and
+ [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-cohere)
+ examples.
+
+
-
- >
- ),
- },
- {
- title: 'Install the OpenAI SDK',
- badge: 'required',
- content: (
- <>
-
- Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI
- client. The PostHog SDK **does not** proxy your calls.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK.
- We call Cohere through the OpenAI-compatible API and generate a response. We'll use PostHog's
- OpenAI provider to capture all the details of the call. Initialize PostHog with your PostHog
- project token and host from [your project settings](https://app.posthog.com/settings/project),
- then pass the PostHog client along with the Cohere config (the base URL and API key) to our
- OpenAI wrapper.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- client = OpenAI(
- base_url="https://api.cohere.ai/compatibility/v1",
- api_key="",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ OpenAIInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { OpenAI } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const openai = new OpenAI({
- baseURL: 'https://api.cohere.ai/compatibility/v1',
- apiKey: '',
- posthog: phClient,
- });
-
- // ... your code here ...
-
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent
- phClient.shutdown()
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '<ph_project_api_key>',
+ host: '<ph_client_api_host>',
+ })
+ ),
+ ],
+ instrumentations: [new OpenAIInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
/>
-
-
- **Note:** This also works with the `AsyncOpenAI` client.
-
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
>
),
},
@@ -153,9 +130,8 @@ export const getCohereSteps = (ctx: OnboardingComponentsContext): StepDefinition
content: (
<>
- Now, when you call Cohere with the OpenAI SDK, PostHog automatically captures an
- `$ai_generation` event. You can also capture or modify additional properties with the distinct
- ID, trace ID, properties, groups, and privacy mode parameters.
+ Now, when you use the OpenAI SDK to call Cohere, PostHog automatically captures `$ai_generation`
+ events via the OpenTelemetry instrumentation.
',
+ })
+
+ const response = await client.chat.completions.create({
+ model: 'command-a-03-2025',
+ max_completion_tokens: 1024,
+ messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }],
+ })
+
+ console.log(response.choices[0].message.content)
`,
},
]}
@@ -201,14 +183,9 @@ export const getCohereSteps = (ctx: OnboardingComponentsContext): StepDefinition
- {dedent`
- **Notes:**
- - We also support the old \`chat.completions\` API.
- - This works with responses where \`stream=True\`.
- - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request.
-
- See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
- `}
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
diff --git a/docs/onboarding/llm-analytics/deepseek.tsx b/docs/onboarding/llm-analytics/deepseek.tsx
index d7f8d3af0aba..809b5fe52923 100644
--- a/docs/onboarding/llm-analytics/deepseek.tsx
+++ b/docs/onboarding/llm-analytics/deepseek.tsx
@@ -9,45 +9,25 @@ export const getDeepSeekSteps = (ctx: OnboardingComponentsContext): StepDefiniti
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-deepseek) and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-deepseek)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-deepseek)
+ and [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-deepseek)
+ examples.
+
+
-
- >
- ),
- },
- {
- title: 'Install the OpenAI SDK',
- badge: 'required',
- content: (
- <>
-
- Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI
- client. The PostHog SDK **does not** proxy your calls.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK.
- We call DeepSeek through the OpenAI client and generate a response. We'll use PostHog's OpenAI
- provider to capture all the details of the call. Initialize PostHog with your PostHog project
- token and host from [your project settings](https://app.posthog.com/settings/project), then pass
- the PostHog client along with the DeepSeek config (the base URL and API key) to our OpenAI
- wrapper.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- client = OpenAI(
- base_url="https://api.deepseek.com",
- api_key="",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ OpenAIInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { OpenAI } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const openai = new OpenAI({
- baseURL: 'https://api.deepseek.com',
- apiKey: '',
- posthog: phClient,
- });
-
- // ... your code here ...
-
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent
- phClient.shutdown()
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '<ph_project_api_key>',
+ host: '<ph_client_api_host>',
+ })
+ ),
+ ],
+ instrumentations: [new OpenAIInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
/>
-
-
- **Note:** This also works with the `AsyncOpenAI` client.
-
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
>
),
},
@@ -153,9 +130,8 @@ export const getDeepSeekSteps = (ctx: OnboardingComponentsContext): StepDefiniti
content: (
<>
- Now, when you call DeepSeek with the OpenAI SDK, PostHog automatically captures an
- `$ai_generation` event. You can also capture or modify additional properties with the distinct
- ID, trace ID, properties, groups, and privacy mode parameters.
+ Now, when you use the OpenAI SDK to call DeepSeek, PostHog automatically captures
+ `$ai_generation` events via the OpenTelemetry instrumentation.
',
+ })
+
+ const response = await client.chat.completions.create({
+ model: 'deepseek-chat',
+ max_completion_tokens: 1024,
+ messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }],
+ })
+
+ console.log(response.choices[0].message.content)
`,
},
]}
@@ -201,14 +183,9 @@ export const getDeepSeekSteps = (ctx: OnboardingComponentsContext): StepDefiniti
- {dedent`
- **Notes:**
- - We also support the old \`chat.completions\` API.
- - This works with responses where \`stream=True\`.
- - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request.
-
- See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
- `}
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
diff --git a/docs/onboarding/llm-analytics/fireworks-ai.tsx b/docs/onboarding/llm-analytics/fireworks-ai.tsx
index 4c68e36cfd95..55c24443a9ec 100644
--- a/docs/onboarding/llm-analytics/fireworks-ai.tsx
+++ b/docs/onboarding/llm-analytics/fireworks-ai.tsx
@@ -9,45 +9,26 @@ export const getFireworksAISteps = (ctx: OnboardingComponentsContext): StepDefin
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-fireworks-ai)
+ and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-fireworks-ai)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-fireworks-ai)
+ and [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-fireworks-ai)
+ examples.
+
+
-
- >
- ),
- },
- {
- title: 'Install the OpenAI SDK',
- badge: 'required',
- content: (
- <>
-
- Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI
- client. The PostHog SDK **does not** proxy your calls.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK.
- We call Fireworks AI through the OpenAI client and generate a response. We'll use PostHog's
- OpenAI provider to capture all the details of the call. Initialize PostHog with your PostHog
- project token and host from [your project settings](https://app.posthog.com/settings/project),
- then pass the PostHog client along with the Fireworks AI config (the base URL and API key) to
- our OpenAI wrapper.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- client = OpenAI(
- base_url="https://api.fireworks.ai/inference/v1",
- api_key="",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ OpenAIInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { OpenAI } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const openai = new OpenAI({
- baseURL: 'https://api.fireworks.ai/inference/v1',
- apiKey: '',
- posthog: phClient,
- });
-
- // ... your code here ...
-
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent
- phClient.shutdown()
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '',
+ host: '',
+ })
+ ),
+ ],
+ instrumentations: [new OpenAIInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
/>
-
-
- **Note:** This also works with the `AsyncOpenAI` client.
-
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
>
),
},
@@ -153,9 +131,8 @@ export const getFireworksAISteps = (ctx: OnboardingComponentsContext): StepDefin
content: (
<>
- Now, when you call Fireworks AI with the OpenAI SDK, PostHog automatically captures an
- `$ai_generation` event. You can also capture or modify additional properties with the distinct
- ID, trace ID, properties, groups, and privacy mode parameters.
+ Now, when you use the OpenAI SDK to call Fireworks AI, PostHog automatically captures
+ `$ai_generation` events via the OpenTelemetry instrumentation.
',
+ })
+
+ const response = await client.chat.completions.create({
+ model: 'accounts/fireworks/models/llama-v3p3-70b-instruct',
+ max_completion_tokens: 1024,
+ messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }],
+ })
+
+ console.log(response.choices[0].message.content)
`,
},
]}
@@ -201,14 +184,9 @@ export const getFireworksAISteps = (ctx: OnboardingComponentsContext): StepDefin
- {dedent`
- **Notes:**
- - We also support the old \`chat.completions\` API.
- - This works with responses where \`stream=True\`.
- - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request.
-
- See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
- `}
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
diff --git a/docs/onboarding/llm-analytics/groq.tsx b/docs/onboarding/llm-analytics/groq.tsx
index 43cf630c8512..f5a7865c71eb 100644
--- a/docs/onboarding/llm-analytics/groq.tsx
+++ b/docs/onboarding/llm-analytics/groq.tsx
@@ -9,45 +9,25 @@ export const getGroqSteps = (ctx: OnboardingComponentsContext): StepDefinition[]
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-groq) and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-groq)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-groq) and
+ [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-groq)
+ examples.
+
+
-
- >
- ),
- },
- {
- title: 'Install the OpenAI SDK',
- badge: 'required',
- content: (
- <>
-
- Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI
- client. The PostHog SDK **does not** proxy your calls.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK.
- We call Groq through the OpenAI client and generate a response. We'll use PostHog's OpenAI
- provider to capture all the details of the call. Initialize PostHog with your PostHog project
- token and host from [your project settings](https://app.posthog.com/settings/project), then pass
- the PostHog client along with the Groq config (the base URL and API key) to our OpenAI wrapper.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- client = OpenAI(
- base_url="https://api.groq.com/openai/v1",
- api_key="",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ OpenAIInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { OpenAI } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const openai = new OpenAI({
- baseURL: 'https://api.groq.com/openai/v1',
- apiKey: '',
- posthog: phClient,
- });
-
- // ... your code here ...
-
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent
- phClient.shutdown()
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '',
+ host: '',
+ })
+ ),
+ ],
+ instrumentations: [new OpenAIInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
/>
-
-
- **Note:** This also works with the `AsyncOpenAI` client.
-
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
>
),
},
@@ -152,9 +130,8 @@ export const getGroqSteps = (ctx: OnboardingComponentsContext): StepDefinition[]
content: (
<>
- Now, when you call Groq with the OpenAI SDK, PostHog automatically captures an `$ai_generation`
- event. You can also capture or modify additional properties with the distinct ID, trace ID,
- properties, groups, and privacy mode parameters.
+ Now, when you use the OpenAI SDK to call Groq, PostHog automatically captures `$ai_generation`
+ events via the OpenTelemetry instrumentation.
',
+ })
+
+ const response = await client.chat.completions.create({
+ model: 'llama-3.3-70b-versatile',
+ max_completion_tokens: 1024,
+ messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }],
+ })
+
+ console.log(response.choices[0].message.content)
`,
},
]}
@@ -200,14 +183,9 @@ export const getGroqSteps = (ctx: OnboardingComponentsContext): StepDefinition[]
- {dedent`
- **Notes:**
- - We also support the old \`chat.completions\` API.
- - This works with responses where \`stream=True\`.
- - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request.
-
- See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
- `}
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
diff --git a/docs/onboarding/llm-analytics/helicone.tsx b/docs/onboarding/llm-analytics/helicone.tsx
index 65e9cf3e016c..c020c21f1c28 100644
--- a/docs/onboarding/llm-analytics/helicone.tsx
+++ b/docs/onboarding/llm-analytics/helicone.tsx
@@ -9,10 +9,24 @@ export const getHeliconeSteps = (ctx: OnboardingComponentsContext): StepDefiniti
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-helicone) and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-helicone)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-helicone)
+ and [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-helicone)
+ examples.
+
+
+
Helicone is an open-source AI gateway that provides access to 100+ LLM providers through an
@@ -21,41 +35,7 @@ export const getHeliconeSteps = (ctx: OnboardingComponentsContext): StepDefiniti
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
-
-
- >
- ),
- },
- {
- title: 'Install the OpenAI SDK',
- badge: 'required',
- content: (
- <>
-
- Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI
- client. The PostHog SDK **does not** proxy your calls.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK.
- We call Helicone through the OpenAI client and generate a response. We'll use PostHog's OpenAI
- provider to capture all the details of the call. Initialize PostHog with your PostHog project
- token and host from [your project settings](https://app.posthog.com/settings/project), then pass
- the PostHog client along with the Helicone config (the base URL and API key) to our OpenAI
- wrapper.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- client = OpenAI(
- base_url="https://ai-gateway.helicone.ai/",
- api_key="",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ OpenAIInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { OpenAI } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const openai = new OpenAI({
- baseURL: 'https://ai-gateway.helicone.ai/',
- apiKey: '',
- posthog: phClient,
- });
-
- // ... your code here ...
-
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent
- phClient.shutdown()
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '',
+ host: '',
+ })
+ ),
+ ],
+ instrumentations: [new OpenAIInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
/>
-
-
- **Note:** This also works with the `AsyncOpenAI` client.
-
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
>
),
},
@@ -161,9 +138,8 @@ export const getHeliconeSteps = (ctx: OnboardingComponentsContext): StepDefiniti
content: (
<>
- Now, when you call Helicone with the OpenAI SDK, PostHog automatically captures an
- `$ai_generation` event. You can also capture or modify additional properties with the distinct
- ID, trace ID, properties, groups, and privacy mode parameters.
+ Now, when you use the OpenAI SDK to call Helicone, PostHog automatically captures
+ `$ai_generation` events via the OpenTelemetry instrumentation.
',
+ })
+
+ const response = await client.chat.completions.create({
+ model: 'gpt-5-mini',
+ messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }],
+ })
+
+ console.log(response.choices[0].message.content)
`,
},
]}
@@ -209,14 +189,9 @@ export const getHeliconeSteps = (ctx: OnboardingComponentsContext): StepDefiniti
- {dedent`
- **Notes:**
- - We also support the old \`chat.completions\` API.
- - This works with responses where \`stream=True\`.
- - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request.
-
- See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
- `}
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
diff --git a/docs/onboarding/llm-analytics/hugging-face.tsx b/docs/onboarding/llm-analytics/hugging-face.tsx
index 6eb0146b3c24..583d78337744 100644
--- a/docs/onboarding/llm-analytics/hugging-face.tsx
+++ b/docs/onboarding/llm-analytics/hugging-face.tsx
@@ -9,45 +9,26 @@ export const getHuggingFaceSteps = (ctx: OnboardingComponentsContext): StepDefin
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-hugging-face)
+ and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-hugging-face)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-hugging-face)
+ and [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-hugging-face)
+ examples.
+
+
-
- >
- ),
- },
- {
- title: 'Install the OpenAI SDK',
- badge: 'required',
- content: (
- <>
-
- Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI
- client. The PostHog SDK **does not** proxy your calls.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK.
- We call Hugging Face Inference API through the OpenAI-compatible endpoint and generate a
- response. We'll use PostHog's OpenAI provider to capture all the details of the call. Initialize
- PostHog with your PostHog project token and host from [your project
- settings](https://app.posthog.com/settings/project), then pass the PostHog client along with the
- Hugging Face config (the base URL and API key) to our OpenAI wrapper.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- client = OpenAI(
- base_url="https://router.huggingface.co/v1/",
- api_key="",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ OpenAIInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { OpenAI } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const openai = new OpenAI({
- baseURL: 'https://router.huggingface.co/v1/',
- apiKey: '',
- posthog: phClient,
- });
-
- // ... your code here ...
-
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent
- phClient.shutdown()
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '',
+ host: '',
+ })
+ ),
+ ],
+ instrumentations: [new OpenAIInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
/>
-
-
- **Note:** This also works with the `AsyncOpenAI` client.
-
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
>
),
},
@@ -153,9 +131,8 @@ export const getHuggingFaceSteps = (ctx: OnboardingComponentsContext): StepDefin
content: (
<>
- Now, when you call Hugging Face with the OpenAI SDK, PostHog automatically captures an
- `$ai_generation` event. You can also capture or modify additional properties with the distinct
- ID, trace ID, properties, groups, and privacy mode parameters.
+ Now, when you use the OpenAI SDK to call Hugging Face, PostHog automatically captures
+ `$ai_generation` events via the OpenTelemetry instrumentation.
',
+ })
+
+ const response = await client.chat.completions.create({
+ model: 'meta-llama/Llama-3.3-70B-Instruct',
+ max_completion_tokens: 1024,
+ messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }],
+ })
+
+ console.log(response.choices[0].message.content)
`,
},
]}
@@ -202,12 +185,7 @@ export const getHuggingFaceSteps = (ctx: OnboardingComponentsContext): StepDefin
{dedent`
- **Notes:**
- - We also support the old \`chat.completions\` API.
- - This works with responses where \`stream=True\`.
- - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request.
-
- See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
+ **Note:** If you want to capture LLM events anonymously, omit the \`posthog.distinct_id\` resource attribute. See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
`}
diff --git a/docs/onboarding/llm-analytics/instructor.tsx b/docs/onboarding/llm-analytics/instructor.tsx
index 0930a1fc0a60..bf06ff413ac9 100644
--- a/docs/onboarding/llm-analytics/instructor.tsx
+++ b/docs/onboarding/llm-analytics/instructor.tsx
@@ -3,51 +3,32 @@ import { OnboardingComponentsContext, createInstallation } from 'scenes/onboardi
import { StepDefinition } from '../steps'
export const getInstructorSteps = (ctx: OnboardingComponentsContext): StepDefinition[] => {
- const { CodeBlock, CalloutBox, Markdown, dedent, snippets } = ctx
+ const { CodeBlock, CalloutBox, Markdown, Blockquote, dedent, snippets } = ctx
const NotableGenerationProperties = snippets?.NotableGenerationProperties
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-instructor)
+ and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-instructor)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-instructor)
+ and [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-instructor)
+ examples.
+
+
-
- >
- ),
- },
- {
- title: 'Install Instructor and OpenAI SDKs',
- badge: 'required',
- content: (
- <>
-
- Install Instructor and the OpenAI SDK. PostHog instruments your LLM calls by wrapping the OpenAI
- client, which Instructor uses under the hood.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and Instructor.
- Initialize PostHog with your project token and host from [your project
- settings](https://app.posthog.com/settings/project), then create a PostHog OpenAI wrapper and
- pass it to Instructor.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- openai_client = OpenAI(
- api_key="your_openai_api_key",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
- client = instructor.from_openai(openai_client)
+ OpenAIInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import Instructor from '@instructor-ai/instructor'
- import { OpenAI } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
- import { z } from 'zod'
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const openai = new OpenAI({
- apiKey: 'your_openai_api_key',
- posthog: phClient,
- });
-
- const client = Instructor({ client: openai, mode: 'TOOLS' })
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '',
+ host: '',
+ })
+ ),
+ ],
+ instrumentations: [new OpenAIInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
/>
-
-
-
- PostHog's `OpenAI` wrapper is a proper subclass of `openai.OpenAI`, so it works directly
- with `instructor.from_openai()`. PostHog captures `$ai_generation` events automatically
- without proxying your calls.
-
-
>
),
},
{
- title: 'Use Instructor with structured outputs',
+ title: 'Extract structured data',
badge: 'required',
content: (
<>
- Now use Instructor to extract structured data from LLM responses. PostHog automatically captures
- an `$ai_generation` event for each call.
+ Use Instructor to extract structured data from LLM responses. PostHog automatically captures an
+ `$ai_generation` event for each call made through the OpenAI SDK that Instructor wraps.
+
+
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
+
+
+
{dedent`
You can expect captured \`$ai_generation\` events to have the following properties:
diff --git a/docs/onboarding/llm-analytics/langchain.tsx b/docs/onboarding/llm-analytics/langchain.tsx
index 56a3125cd28c..c77081b708c0 100644
--- a/docs/onboarding/llm-analytics/langchain.tsx
+++ b/docs/onboarding/llm-analytics/langchain.tsx
@@ -9,44 +9,26 @@ export const getLangChainSteps = (ctx: OnboardingComponentsContext): StepDefinit
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-langchain) and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-langchain)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-langchain)
+ and [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-langchain)
+ examples.
+
+
-
- >
- ),
- },
- {
- title: 'Install LangChain and OpenAI SDKs',
- badge: 'required',
- content: (
- <>
- Install LangChain. The PostHog SDK instruments your LLM calls by wrapping LangChain. The PostHog
- SDK **does not** proxy your calls.
+ Install the OpenTelemetry SDK, the LangChain instrumentation, and LangChain with OpenAI.
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
>
),
},
{
- title: 'Initialize PostHog and LangChain',
+ title: 'Set up OpenTelemetry tracing',
badge: 'required',
content: (
<>
- Initialize PostHog with your project token and host from [your project
- settings](https://app.posthog.com/settings/project), then pass it to the LangChain
- `CallbackHandler` wrapper. Optionally, you can provide a user distinct ID, trace ID, PostHog
- properties, [groups](https://posthog.com/docs/product-analytics/group-analytics), and privacy
- mode.
+ Configure OpenTelemetry to auto-instrument LangChain calls and export traces to PostHog. PostHog
+ converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.langchain import LangchainInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- callback_handler = CallbackHandler(
- client=posthog, # This is an optional parameter. If it is not provided, a default client will be used.
- distinct_id="user_123", # optional
- trace_id="trace_456", # optional
- properties={"conversation_id": "abc123"}, # optional
- groups={"company": "company_id_in_your_db"}, # optional
- privacy_mode=False # optional
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ LangchainInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { PostHog } from 'posthog-node';
- import { LangChainCallbackHandler } from '@posthog/ai';
- import { ChatOpenAI } from '@langchain/openai';
- import { ChatPromptTemplate } from '@langchain/core/prompts';
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const callbackHandler = new LangChainCallbackHandler({
- client: phClient,
- distinctId: 'user_123', // optional
- traceId: 'trace_456', // optional
- properties: { conversationId: 'abc123' }, // optional
- groups: { company: 'company_id_in_your_db' }, // optional
- privacyMode: false, // optional
- debug: false // optional - when true, logs all events to console
- });
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { LangChainInstrumentation } from '@traceloop/instrumentation-langchain'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '',
+ host: '',
+ })
+ ),
+ ],
+ instrumentations: [new LangChainInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
/>
-
-
-
- **Note:** If you want to capture LLM events anonymously, **don't** pass a distinct ID to the
- `CallbackHandler`. See our docs on [anonymous vs identified
- events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
-
-
>
),
},
@@ -163,8 +132,8 @@ export const getLangChainSteps = (ctx: OnboardingComponentsContext): StepDefinit
content: (
<>
- When you invoke your chain, pass the `callback_handler` in the `config` as part of your
- `callbacks`:
+ Use LangChain as normal. The OpenTelemetry instrumentation automatically captures
+ `$ai_generation` events for each LLM call — no callback handlers needed.
+
+
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
+
+
+
PostHog automatically captures an `$ai_generation` event along with these properties:
diff --git a/docs/onboarding/llm-analytics/langgraph.tsx b/docs/onboarding/llm-analytics/langgraph.tsx
index a39cef507875..4af98aaa0d0c 100644
--- a/docs/onboarding/llm-analytics/langgraph.tsx
+++ b/docs/onboarding/llm-analytics/langgraph.tsx
@@ -3,50 +3,32 @@ import { OnboardingComponentsContext, createInstallation } from 'scenes/onboardi
import { StepDefinition } from '../steps'
export const getLangGraphSteps = (ctx: OnboardingComponentsContext): StepDefinition[] => {
- const { CodeBlock, CalloutBox, Markdown, dedent, snippets } = ctx
+ const { CodeBlock, CalloutBox, Markdown, Blockquote, dedent, snippets } = ctx
const NotableGenerationProperties = snippets?.NotableGenerationProperties
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-langgraph) and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-langgraph)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-langgraph)
+ and [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-langgraph)
+ examples.
+
+
-
- >
- ),
- },
- {
- title: 'Install LangGraph',
- badge: 'required',
- content: (
- <>
- Install LangGraph and LangChain. PostHog instruments your LLM calls through LangChain-compatible
- callback handlers that LangGraph supports.
+ Install the OpenTelemetry SDK, the LangChain instrumentation, and LangGraph with OpenAI.
- Initialize PostHog with your project token and host from [your project
- settings](https://app.posthog.com/settings/project), then create a LangChain `CallbackHandler`.
+ Configure OpenTelemetry to auto-instrument LangChain calls and export traces to PostHog.
+ LangGraph is built on LangChain, so the same instrumentation captures all LLM calls. PostHog
+ converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.langchain import LangchainInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- callback_handler = CallbackHandler(
- client=posthog,
- distinct_id="user_123", # optional
- trace_id="trace_456", # optional
- properties={"conversation_id": "abc123"}, # optional
- groups={"company": "company_id_in_your_db"}, # optional
- privacy_mode=False # optional
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ LangchainInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { PostHog } from 'posthog-node';
- import { LangChainCallbackHandler } from '@posthog/ai';
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const callbackHandler = new LangChainCallbackHandler({
- client: phClient,
- distinctId: 'user_123', // optional
- traceId: 'trace_456', // optional
- properties: { conversationId: 'abc123' }, // optional
- groups: { company: 'company_id_in_your_db' }, // optional
- privacyMode: false, // optional
- });
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { LangChainInstrumentation } from '@traceloop/instrumentation-langchain'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '',
+ host: '',
+ })
+ ),
+ ],
+ instrumentations: [new LangChainInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
/>
-
-
-
- LangGraph is built on LangChain, so it supports LangChain-compatible callback handlers.
- PostHog's `CallbackHandler` captures `$ai_generation` events and trace hierarchy
- automatically without proxying your calls.
-
-
>
),
},
@@ -145,8 +133,8 @@ export const getLangGraphSteps = (ctx: OnboardingComponentsContext): StepDefinit
content: (
<>
- Pass the `callback_handler` in the `config` when invoking your LangGraph graph. PostHog
- automatically captures generation events for each LLM call.
+ Use LangGraph as normal. The OpenTelemetry instrumentation automatically captures
+ `$ai_generation` events for each LLM call — no callback handlers needed.
+
+
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
+
+
+
{dedent`
PostHog automatically captures \`$ai_generation\` events and creates a trace hierarchy based on how LangGraph components are nested. You can expect captured events to have the following properties:
diff --git a/docs/onboarding/llm-analytics/llamaindex.tsx b/docs/onboarding/llm-analytics/llamaindex.tsx
index dbfe6c219c36..c2f97ff1de86 100644
--- a/docs/onboarding/llm-analytics/llamaindex.tsx
+++ b/docs/onboarding/llm-analytics/llamaindex.tsx
@@ -3,94 +3,77 @@ import { OnboardingComponentsContext, createInstallation } from 'scenes/onboardi
import { StepDefinition } from '../steps'
export const getLlamaIndexSteps = (ctx: OnboardingComponentsContext): StepDefinition[] => {
- const { CodeBlock, CalloutBox, Markdown, dedent, snippets } = ctx
+ const { CodeBlock, CalloutBox, Markdown, Blockquote, dedent, snippets } = ctx
const NotableGenerationProperties = snippets?.NotableGenerationProperties
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK. The LlamaIndex integration uses
- PostHog's OpenAI wrapper.
-
+
+
+ See the complete [Python
+ example](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-llamaindex)
+ on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Python
+ wrapper
+ example](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-llamaindex).
+
+
-
- >
- ),
- },
- {
- title: 'Install LlamaIndex',
- badge: 'required',
- content: (
- <>
- Install LlamaIndex with the OpenAI integration. PostHog instruments your LLM calls by wrapping
- the OpenAI client that LlamaIndex uses.
+ Install LlamaIndex, OpenAI, and the OpenTelemetry SDK with the LlamaIndex instrumentation.
>
),
},
{
- title: 'Initialize PostHog and LlamaIndex',
+ title: 'Set up OpenTelemetry tracing',
badge: 'required',
content: (
<>
- Initialize PostHog with your project token and host from [your project
- settings](https://app.posthog.com/settings/project), then create a PostHog OpenAI wrapper and
- pass it to LlamaIndex's `OpenAI` LLM class.
+ Configure OpenTelemetry to auto-instrument LlamaIndex calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.llamaindex import LlamaIndexInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- openai_client = OpenAI(
- api_key="your_openai_api_key",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
- llm = LlamaOpenAI(
- model="gpt-5-mini",
- api_key="your_openai_api_key",
- )
- llm._client = openai_client
+ LlamaIndexInstrumentor().instrument()
`}
/>
-
-
-
- PostHog's `OpenAI` wrapper is a proper subclass of `openai.OpenAI`, so it can replace the
- internal client used by LlamaIndex's OpenAI LLM. PostHog captures `$ai_generation` events
- automatically without proxying your calls. **Note:** This approach accesses an internal
- attribute (`_client`) which may change in future LlamaIndex versions. Check for updates if
- you encounter issues after upgrading LlamaIndex.
-
-
>
),
},
@@ -100,15 +83,18 @@ export const getLlamaIndexSteps = (ctx: OnboardingComponentsContext): StepDefini
content: (
<>
- Use LlamaIndex as normal. PostHog automatically captures an `$ai_generation` event for each LLM
- call made through the wrapped client.
+ Use LlamaIndex as normal. The OpenTelemetry instrumentation automatically captures
+ `$ai_generation` events for each LLM call.
+
+
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
+
+
+
{dedent`
You can expect captured \`$ai_generation\` events to have the following properties:
diff --git a/docs/onboarding/llm-analytics/manual.tsx b/docs/onboarding/llm-analytics/manual.tsx
index 7636d439414f..f34322177351 100644
--- a/docs/onboarding/llm-analytics/manual.tsx
+++ b/docs/onboarding/llm-analytics/manual.tsx
@@ -298,13 +298,16 @@ export const getManualSteps = (ctx: OnboardingComponentsContext): StepDefinition
))}
-
+ >
+ ),
+ },
+ {
+ title: 'Event properties',
+ content: (
+ <>
- {dedent`
- ### Event Properties
-
- Each event type has specific properties. See the tabs below for detailed property documentation for each event type.
- `}
+ Each event type has specific properties. See the tabs below for detailed property documentation
+ for each event type.
diff --git a/docs/onboarding/llm-analytics/mastra.tsx b/docs/onboarding/llm-analytics/mastra.tsx
index f6d9e8b4f11e..2a7b5f1ae414 100644
--- a/docs/onboarding/llm-analytics/mastra.tsx
+++ b/docs/onboarding/llm-analytics/mastra.tsx
@@ -9,126 +9,122 @@ export const getMastraSteps = (ctx: OnboardingComponentsContext): StepDefinition
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
- Setting up analytics starts with installing the PostHog SDK.
+
+
+ See the complete [Node.js
+ example](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-mastra) on
+ GitHub. If you're using the PostHog SDK wrapper instead, see the [Node.js wrapper
+ example](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-mastra).
+
+
-
- >
- ),
- },
- {
- title: 'Install Mastra',
- badge: 'required',
- content: (
- <>
- Install Mastra and a model provider SDK. Mastra uses the Vercel AI SDK under the hood, so you
- can use any Vercel AI-compatible model provider.
+ Install Mastra with the official `@mastra/posthog` exporter. Mastra's observability system sends
+ traces to PostHog as `$ai_generation` events automatically.
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
>
),
},
{
- title: 'Initialize PostHog and wrap your model',
+ title: 'Configure Mastra with the PostHog exporter',
badge: 'required',
content: (
<>
- Initialize PostHog with your project token and host from [your project
- settings](https://app.posthog.com/settings/project), then use `withTracing` from `@posthog/ai`
- to wrap the model you pass to your Mastra agent.
+ Initialize Mastra with an `Observability` config that uses the `PosthogExporter`. Pass your
+ PostHog project token and host from [your project
+ settings](https://app.posthog.com/settings/project).
',
- { host: '' }
- );
-
- const openaiClient = createOpenAI({
- apiKey: 'your_openai_api_key',
- compatibility: 'strict'
- });
-
- const agent = new Agent({
- name: "my-agent",
- instructions: "You are a helpful assistant.",
- model: withTracing(openaiClient("gpt-4o"), phClient, {
- posthogDistinctId: "user_123", // optional
- posthogTraceId: "trace_123", // optional
- posthogProperties: { conversationId: "abc123" }, // optional
- posthogPrivacyMode: false, // optional
- posthogGroups: { company: "companyIdInYourDb" }, // optional
+ import { Mastra } from '@mastra/core'
+ import { Agent } from '@mastra/core/agent'
+ import { Observability } from '@mastra/observability'
+ import { PosthogExporter } from '@mastra/posthog'
+
+ const weatherAgent = new Agent({
+ id: 'weather-agent',
+ name: 'Weather Agent',
+ instructions: 'You are a helpful assistant with access to weather data.',
+ model: { id: 'openai/gpt-4o-mini' },
+ })
+
+ const mastra = new Mastra({
+ agents: { weatherAgent },
+ observability: new Observability({
+ configs: {
+ posthog: {
+ serviceName: 'my-app',
+ exporters: [
+ new PosthogExporter({
+ apiKey: '',
+ host: '',
+ defaultDistinctId: 'user_123', // fallback if no userId in metadata
+ }),
+ ],
+ },
+ },
}),
- });
+ })
`}
/>
-
-
- You can enrich LLM events with additional data by passing parameters such as the trace ID,
- distinct ID, custom properties, groups, and privacy mode options.
-
>
),
},
{
- title: 'Use your Mastra agent',
+ title: 'Run your agent',
badge: 'required',
content: (
<>
- Now, when your Mastra agent makes LLM calls, PostHog automatically captures an `$ai_generation`
- event for each one.
+ Use Mastra as normal. The `PosthogExporter` automatically captures `$ai_generation` events for
+ each LLM call, including token usage, cost, latency, and the full conversation.
+
+
+
+ Pass `tracingOptions.metadata` to `generate()` to attach per-request metadata. The `userId`
+ field maps to PostHog's distinct ID, `sessionId` maps to `$ai_session_id`, and any other keys
+ are passed through as custom event properties.
- **Note:** If you want to capture LLM events anonymously, **don't** pass a distinct ID to the
- request. See our docs on [anonymous vs identified
- events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
+ **Note:** If you want to capture LLM events anonymously, omit `userId` from
+ `tracingOptions.metadata` and don't set `defaultDistinctId`. See our docs on [anonymous vs
+ identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn
+ more.
diff --git a/docs/onboarding/llm-analytics/mirascope.tsx b/docs/onboarding/llm-analytics/mirascope.tsx
index 701c382990d8..ee79679d3782 100644
--- a/docs/onboarding/llm-analytics/mirascope.tsx
+++ b/docs/onboarding/llm-analytics/mirascope.tsx
@@ -3,113 +3,110 @@ import { OnboardingComponentsContext, createInstallation } from 'scenes/onboardi
import { StepDefinition } from '../steps'
export const getMirascopeSteps = (ctx: OnboardingComponentsContext): StepDefinition[] => {
- const { CodeBlock, CalloutBox, Markdown, dedent, snippets } = ctx
+ const { CodeBlock, CalloutBox, Markdown, Blockquote, dedent, snippets } = ctx
const NotableGenerationProperties = snippets?.NotableGenerationProperties
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK. The Mirascope integration uses
- PostHog's OpenAI wrapper since Mirascope supports passing a custom OpenAI client.
-
+
+
+ See the complete [Python
+ example](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-mirascope)
+ on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Python
+ wrapper
+ example](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-mirascope).
+
+
-
- >
- ),
- },
- {
- title: 'Install Mirascope',
- badge: 'required',
- content: (
- <>
-
- Install Mirascope with OpenAI support. PostHog instruments your LLM calls by wrapping the OpenAI
- client that Mirascope uses under the hood.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and Mirascope.
>
),
},
{
- title: 'Initialize PostHog and Mirascope',
+ title: 'Set up OpenTelemetry tracing',
badge: 'required',
content: (
<>
- Initialize PostHog with your project token and host from [your project
- settings](https://app.posthog.com/settings/project), then create a PostHog OpenAI wrapper and
- pass it to Mirascope's `@call` decorator via the `client` parameter.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- openai_client = OpenAI(
- api_key="your_openai_api_key",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ OpenAIInstrumentor().instrument()
`}
/>
-
-
-
- Mirascope's `@call` decorator accepts a `client` parameter for passing a custom OpenAI
- client. PostHog's `OpenAI` wrapper is a proper subclass of `openai.OpenAI`, so it works
- directly. PostHog captures `$ai_generation` events automatically without proxying your
- calls.
-
-
>
),
},
{
- title: 'Make your first call',
+ title: 'Call your LLMs',
badge: 'required',
content: (
<>
- Use Mirascope as normal, passing the wrapped client to the call decorator. PostHog automatically
- captures an `$ai_generation` event for each LLM call.
+ Use Mirascope as normal. PostHog automatically captures an `$ai_generation` event for each LLM
+ call made through the OpenAI SDK that Mirascope uses internally.
+
+
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
+
+
+
{dedent`
You can expect captured \`$ai_generation\` events to have the following properties:
diff --git a/docs/onboarding/llm-analytics/mistral.tsx b/docs/onboarding/llm-analytics/mistral.tsx
index d83bb88637af..cd07f4c281fe 100644
--- a/docs/onboarding/llm-analytics/mistral.tsx
+++ b/docs/onboarding/llm-analytics/mistral.tsx
@@ -9,45 +9,25 @@ export const getMistralSteps = (ctx: OnboardingComponentsContext): StepDefinitio
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-mistral) and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-mistral)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-mistral)
+ and [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-mistral)
+ examples.
+
+
-
- >
- ),
- },
- {
- title: 'Install the OpenAI SDK',
- badge: 'required',
- content: (
- <>
-
- Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI
- client. The PostHog SDK **does not** proxy your calls.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK.
- We call Mistral through the OpenAI client and generate a response. We'll use PostHog's OpenAI
- provider to capture all the details of the call. Initialize PostHog with your PostHog project
- token and host from [your project settings](https://app.posthog.com/settings/project), then pass
- the PostHog client along with the Mistral config (the base URL and API key) to our OpenAI
- wrapper.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- client = OpenAI(
- base_url="https://api.mistral.ai/v1",
- api_key="",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ OpenAIInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { OpenAI } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const openai = new OpenAI({
- baseURL: 'https://api.mistral.ai/v1',
- apiKey: '',
- posthog: phClient,
- });
-
- // ... your code here ...
-
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent
- phClient.shutdown()
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '',
+ host: '',
+ })
+ ),
+ ],
+ instrumentations: [new OpenAIInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
/>
-
-
- **Note:** This also works with the `AsyncOpenAI` client.
-
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
>
),
},
@@ -153,9 +130,8 @@ export const getMistralSteps = (ctx: OnboardingComponentsContext): StepDefinitio
content: (
<>
- Now, when you call Mistral with the OpenAI SDK, PostHog automatically captures an
- `$ai_generation` event. You can also capture or modify additional properties with the distinct
- ID, trace ID, properties, groups, and privacy mode parameters.
+ Now, when you use the OpenAI SDK to call Mistral, PostHog automatically captures
+ `$ai_generation` events via the OpenTelemetry instrumentation.
',
+ })
+
+ const response = await client.chat.completions.create({
+ model: 'mistral-large-latest',
+ max_completion_tokens: 1024,
+ messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }],
+ })
+
+ console.log(response.choices[0].message.content)
`,
},
]}
@@ -201,14 +183,9 @@ export const getMistralSteps = (ctx: OnboardingComponentsContext): StepDefinitio
- {dedent`
- **Notes:**
- - We also support the old \`chat.completions\` API.
- - This works with responses where \`stream=True\`.
- - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request.
-
- See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
- `}
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
diff --git a/docs/onboarding/llm-analytics/ollama.tsx b/docs/onboarding/llm-analytics/ollama.tsx
index d943490e6605..86bd2cd5f104 100644
--- a/docs/onboarding/llm-analytics/ollama.tsx
+++ b/docs/onboarding/llm-analytics/ollama.tsx
@@ -9,21 +9,25 @@ export const getOllamaSteps = (ctx: OnboardingComponentsContext): StepDefinition
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
+
- **Note:** Make sure Ollama is running locally before making API calls. You can start it with
- `ollama serve`.
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-ollama) and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-ollama)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-ollama) and
+ [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-ollama)
+ examples.
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK.
- Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI
- client. The PostHog SDK **does not** proxy your calls.
-
-
-
- >
- ),
- },
- {
- title: 'Initialize PostHog and OpenAI client',
- badge: 'required',
- content: (
- <>
-
- We call Ollama through the OpenAI client and generate a response. We'll use PostHog's OpenAI
- provider to capture all the details of the call. Initialize PostHog with your PostHog project
- token and host from [your project settings](https://app.posthog.com/settings/project), then pass
- the PostHog client along with the Ollama config (the base URL) to our OpenAI wrapper.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- client = OpenAI(
- base_url="http://localhost:11434/v1",
- api_key="ollama",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ OpenAIInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { OpenAI } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const openai = new OpenAI({
- baseURL: 'http://localhost:11434/v1',
- apiKey: 'ollama',
- posthog: phClient,
- });
-
- // ... your code here ...
-
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent
- phClient.shutdown()
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '<ph_project_api_key>',
+ host: '<ph_client_api_host>',
+ })
+ ),
+ ],
+ instrumentations: [new OpenAIInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
/>
-
-
- **Note:** This also works with the `AsyncOpenAI` client.
-
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
>
),
},
@@ -159,9 +130,8 @@ export const getOllamaSteps = (ctx: OnboardingComponentsContext): StepDefinition
content: (
<>
- Now, when you call Ollama with the OpenAI SDK, PostHog automatically captures an
- `$ai_generation` event. You can also capture or modify additional properties with the distinct
- ID, trace ID, properties, groups, and privacy mode parameters.
+ Now, when you use the OpenAI SDK to call Ollama, PostHog automatically captures `$ai_generation`
+ events via the OpenTelemetry instrumentation.
{dedent`
- **Notes:**
- - We also support the old \`chat.completions\` API.
- - This works with responses where \`stream=True\`.
- - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request.
-
- See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
+ **Note:** If you want to capture LLM events anonymously, omit the \`posthog.distinct_id\` resource attribute. See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
`}
diff --git a/docs/onboarding/llm-analytics/openai.tsx b/docs/onboarding/llm-analytics/openai.tsx
index b80b5a6959aa..5a2ee45ea190 100644
--- a/docs/onboarding/llm-analytics/openai.tsx
+++ b/docs/onboarding/llm-analytics/openai.tsx
@@ -9,45 +9,25 @@ export const getOpenAISteps = (ctx: OnboardingComponentsContext): StepDefinition
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-openai) and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-openai)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-openai) and
+ [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-openai)
+ examples.
+
+
-
- >
- ),
- },
- {
- title: 'Install the OpenAI SDK',
- badge: 'required',
- content: (
- <>
-
- Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI
- client. The PostHog SDK **does not** proxy your calls.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK.
- Initialize PostHog with your project token and host from [your project
- settings](https://app.posthog.com/settings/project), then pass it to our OpenAI wrapper.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- client = OpenAI(
- api_key="your_openai_api_key",
- posthog_client=posthog # This is an optional parameter. If it is not provided, a default client will be used.
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ OpenAIInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { OpenAI } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const openai = new OpenAI({
- apiKey: 'your_openai_api_key',
- posthog: phClient,
- });
-
- // ... your code here ...
-
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent
- phClient.shutdown()
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '<ph_project_api_key>',
+ host: '<ph_client_api_host>',
+ })
+ ),
+ ],
+ instrumentations: [new OpenAIInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
/>
-
-
- **Note:** This also works with the `AsyncOpenAI` client.
-
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
>
),
},
@@ -148,9 +130,8 @@ export const getOpenAISteps = (ctx: OnboardingComponentsContext): StepDefinition
content: (
<>
- Now, when you use the OpenAI SDK to call LLMs, PostHog automatically captures an
- `$ai_generation` event. You can enrich the event with additional data such as the trace ID,
- distinct ID, custom properties, groups, and privacy mode options.
+ Now, when you use the OpenAI SDK to call OpenAI, PostHog automatically captures `$ai_generation`
+ events via the OpenTelemetry instrumentation.
- {dedent`
- **Notes:**
- - We also support the old \`chat.completions\` API.
- - This works with responses where \`stream=True\`.
- - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request.
-
- See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
- `}
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
@@ -223,23 +201,33 @@ export const getOpenAISteps = (ctx: OnboardingComponentsContext): StepDefinition
content: (
<>
- PostHog can also capture embedding generations as `$ai_embedding` events. Just make sure to use
- the same `posthog.ai.openai` client to do so:
+ PostHog can also capture embedding generations as `$ai_embedding` events. The OpenTelemetry
+ instrumentation automatically captures these when you use the embeddings API:
>
),
diff --git a/docs/onboarding/llm-analytics/openrouter.tsx b/docs/onboarding/llm-analytics/openrouter.tsx
index cd883119b766..dab91e919c58 100644
--- a/docs/onboarding/llm-analytics/openrouter.tsx
+++ b/docs/onboarding/llm-analytics/openrouter.tsx
@@ -9,23 +9,26 @@ export const getOpenRouterSteps = (ctx: OnboardingComponentsContext): StepDefini
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
+
- OpenRouter also offers a native [Broadcast
- feature](https://openrouter.ai/docs/guides/features/broadcast/posthog) that can
- automatically send LLM analytics data to PostHog without requiring SDK instrumentation. This
- is a simpler option if you don't need the additional customization that our SDK provides.
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-openrouter)
+ and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-openrouter)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-openrouter)
+ and [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-openrouter)
+ examples.
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK.
- Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI
- client. The PostHog SDK **does not** proxy your calls.
-
-
-
- >
- ),
- },
- {
- title: 'Initialize PostHog and OpenAI client',
- badge: 'required',
- content: (
- <>
-
- We call OpenRouter through the OpenAI client and generate a response. We'll use PostHog's OpenAI
- provider to capture all the details of the call. Initialize PostHog with your PostHog project
- token and host from [your project settings](https://app.posthog.com/settings/project), then pass
- the PostHog client along with the OpenRouter config (the base URL and API key) to our OpenAI
- wrapper.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- client = OpenAI(
- base_url="https://openrouter.ai/api/v1",
- api_key="",
- posthog_client=posthog # This is an optional parameter. If it is not provided, a default client will be used.
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ OpenAIInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { OpenAI } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const openai = new OpenAI({
- baseURL: 'https://openrouter.ai/api/v1',
- apiKey: '',
- posthog: phClient,
- });
-
- // ... your code here ...
-
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent
- phClient.shutdown()
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '<ph_project_api_key>',
+ host: '<ph_client_api_host>',
+ })
+ ),
+ ],
+ instrumentations: [new OpenAIInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
/>
-
-
- **Note:** This also works with the `AsyncOpenAI` client.
-
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
>
),
},
@@ -162,9 +131,8 @@ export const getOpenRouterSteps = (ctx: OnboardingComponentsContext): StepDefini
content: (
<>
- Now, when you call OpenRouter with the OpenAI SDK, PostHog automatically captures an
- `$ai_generation` event. You can also capture or modify additional properties with the distinct
- ID, trace ID, properties, groups, and privacy mode parameters.
+ Now, when you use the OpenAI SDK to call OpenRouter, PostHog automatically captures
+ `$ai_generation` events via the OpenTelemetry instrumentation.
',
+ })
+
+ const response = await client.chat.completions.create({
+ model: 'gpt-5-mini',
+ max_completion_tokens: 1024,
+ messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }],
+ })
+
+ console.log(response.choices[0].message.content)
`,
},
]}
@@ -211,12 +185,7 @@ export const getOpenRouterSteps = (ctx: OnboardingComponentsContext): StepDefini
{dedent`
- **Notes:**
- - We also support the old \`chat.completions\` API.
- - This works with responses where \`stream=True\`.
- - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request.
-
- See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
+ **Note:** If you want to capture LLM events anonymously, omit the \`posthog.distinct_id\` resource attribute. See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
`}
diff --git a/docs/onboarding/llm-analytics/perplexity.tsx b/docs/onboarding/llm-analytics/perplexity.tsx
index 84279dec69a4..c3c58ffc3d6a 100644
--- a/docs/onboarding/llm-analytics/perplexity.tsx
+++ b/docs/onboarding/llm-analytics/perplexity.tsx
@@ -9,45 +9,26 @@ export const getPerplexitySteps = (ctx: OnboardingComponentsContext): StepDefini
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-perplexity)
+ and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-perplexity)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-perplexity)
+ and [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-perplexity)
+ examples.
+
+
-
- >
- ),
- },
- {
- title: 'Install the OpenAI SDK',
- badge: 'required',
- content: (
- <>
-
- Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI
- client. The PostHog SDK **does not** proxy your calls.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK.
- We call Perplexity through the OpenAI client and generate a response. We'll use PostHog's OpenAI
- provider to capture all the details of the call. Initialize PostHog with your PostHog project
- token and host from [your project settings](https://app.posthog.com/settings/project), then pass
- the PostHog client along with the Perplexity config (the base URL and API key) to our OpenAI
- wrapper.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+ endpoint="/i/v0/ai/otel",
+ headers={"Authorization": "Bearer "},
)
- client = OpenAI(
- base_url="https://api.perplexity.ai",
- api_key="",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ OpenAIInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { OpenAI } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const openai = new OpenAI({
- baseURL: 'https://api.perplexity.ai',
- apiKey: '',
- posthog: phClient,
- });
-
- // ... your code here ...
-
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent
- phClient.shutdown()
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '<ph_project_api_key>',
+ host: '<ph_client_api_host>',
+ })
+ ),
+ ],
+ instrumentations: [new OpenAIInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
/>
-
-
- **Note:** This also works with the `AsyncOpenAI` client.
-
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
>
),
},
@@ -153,9 +131,8 @@ export const getPerplexitySteps = (ctx: OnboardingComponentsContext): StepDefini
content: (
<>
- Now, when you call Perplexity with the OpenAI SDK, PostHog automatically captures an
- `$ai_generation` event. You can also capture or modify additional properties with the distinct
- ID, trace ID, properties, groups, and privacy mode parameters.
+ Now, when you use the OpenAI SDK to call Perplexity, PostHog automatically captures
+ `$ai_generation` events via the OpenTelemetry instrumentation.
',
+ })
+
+ const response = await client.chat.completions.create({
+ model: 'sonar',
+ max_completion_tokens: 1024,
+ messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }],
+ })
+
+ console.log(response.choices[0].message.content)
`,
},
]}
@@ -202,12 +185,7 @@ export const getPerplexitySteps = (ctx: OnboardingComponentsContext): StepDefini
{dedent`
- **Notes:**
- - We also support the old \`chat.completions\` API.
- - This works with responses where \`stream=True\`.
- - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request.
-
- See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
+ **Note:** If you want to capture LLM events anonymously, omit the \`posthog.distinct_id\` resource attribute. See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
`}
diff --git a/docs/onboarding/llm-analytics/portkey.tsx b/docs/onboarding/llm-analytics/portkey.tsx
index 00209a6f178a..f7e6f50bed66 100644
--- a/docs/onboarding/llm-analytics/portkey.tsx
+++ b/docs/onboarding/llm-analytics/portkey.tsx
@@ -9,10 +9,24 @@ export const getPortkeySteps = (ctx: OnboardingComponentsContext): StepDefinitio
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-portkey) and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-portkey)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-portkey)
+ and [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-portkey)
+ examples.
+
+
+
Portkey acts as an AI gateway that routes requests to 250+ LLM providers. The model string
@@ -21,10 +35,7 @@ export const getPortkeySteps = (ctx: OnboardingComponentsContext): StepDefinitio
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK.
- Install the OpenAI and Portkey SDKs. The PostHog SDK instruments your LLM calls by wrapping the
- OpenAI client. The PostHog SDK **does not** proxy your calls.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
"},
+ )
+
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ OpenAIInstrumentor().instrument()
`,
},
{
- language: 'bash',
+ language: 'typescript',
file: 'Node',
code: dedent`
- npm install openai portkey-ai
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+ apiKey: '<ph_project_api_key>',
+ host: '<ph_client_api_host>',
+ })
+ ),
+ ],
+ instrumentations: [new OpenAIInstrumentation()],
+ })
+ sdk.start()
`,
},
]}
@@ -79,14 +133,13 @@ export const getPortkeySteps = (ctx: OnboardingComponentsContext): StepDefinitio
),
},
{
- title: 'Initialize PostHog and Portkey-routed client',
+ title: 'Call Portkey',
badge: 'required',
content: (
<>
- Initialize PostHog with your project token and host from [your project
- settings](https://app.posthog.com/settings/project), then pass it along with the Portkey gateway
- URL and your Portkey API key to our OpenAI wrapper.
+ Now, when you call Portkey with the OpenAI SDK, PostHog automatically captures `$ai_generation`
+ events via the OpenTelemetry instrumentation.
",
- host=""
- )
-
- client = OpenAI(
+ client = openai.OpenAI(
base_url=PORTKEY_GATEWAY_URL,
api_key="",
- posthog_client=posthog
)
- `,
- },
- {
- language: 'typescript',
- file: 'Node',
- code: dedent`
- import { OpenAI } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
- import { PORTKEY_GATEWAY_URL } from 'portkey-ai'
-
- const phClient = new PostHog(
- '',
- { host: '' }
- );
-
- const openai = new OpenAI({
- baseURL: PORTKEY_GATEWAY_URL,
- apiKey: '',
- posthog: phClient,
- });
-
- // ... your code here ...
-
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent
- phClient.shutdown()
- `,
- },
- ]}
- />
-
-
- **Note:** This also works with the `AsyncOpenAI` client.
-
-
-
- These SDKs **do not** proxy your calls. They only fire off an async call to PostHog in the
- background to send the data. You can also use LLM analytics with other SDKs or our API, but
- you will need to capture the data in the right format. See the schema in the [manual capture
- section](https://posthog.com/docs/llm-analytics/installation/manual-capture) for more
- details.
-
-
- >
- ),
- },
- {
- title: 'Call Portkey',
- badge: 'required',
- content: (
- <>
-
- Now, when you call Portkey with the OpenAI SDK, PostHog automatically captures an
- `$ai_generation` event. You can also capture or modify additional properties with the distinct
- ID, trace ID, properties, groups, and privacy mode parameters.
-
-
- /gpt-5-mini",
- messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }],
- posthogDistinctId: "user_123", // optional
- posthogTraceId: "trace_123", // optional
- posthogProperties: { conversation_id: "abc123", paid: true }, // optional
- posthogGroups: { company: "company_id_in_your_db" }, // optional
- posthogPrivacyMode: false // optional
- });
-
- console.log(completion.choices[0].message.content)
+ import OpenAI from 'openai'
+ import { PORTKEY_GATEWAY_URL } from 'portkey-ai'
+
+ const client = new OpenAI({
+ baseURL: PORTKEY_GATEWAY_URL,
+ apiKey: '',
+ })
+
+ const response = await client.chat.completions.create({
+ model: '@<integration_name>/gpt-5-mini',
+ messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }],
+ })
+
+ console.log(response.choices[0].message.content)
`,
},
]}
@@ -209,14 +191,9 @@ export const getPortkeySteps = (ctx: OnboardingComponentsContext): StepDefinitio
- {dedent`
- **Notes:**
- - This works with responses where \`stream=True\`.
- - If you want to capture LLM events anonymously, **don't** pass a distinct ID to the request.
- - The \`@\` prefix is the name you chose when setting up the integration in your [Portkey dashboard](https://app.portkey.ai/).
-
- See our docs on [anonymous vs identified events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
- `}
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
diff --git a/docs/onboarding/llm-analytics/pydantic-ai.tsx b/docs/onboarding/llm-analytics/pydantic-ai.tsx
index 0affe67681f5..da1e0666f092 100644
--- a/docs/onboarding/llm-analytics/pydantic-ai.tsx
+++ b/docs/onboarding/llm-analytics/pydantic-ai.tsx
@@ -3,95 +3,77 @@ import { OnboardingComponentsContext, createInstallation } from 'scenes/onboardi
import { StepDefinition } from '../steps'
export const getPydanticAISteps = (ctx: OnboardingComponentsContext): StepDefinition[] => {
- const { CodeBlock, CalloutBox, Markdown, dedent, snippets } = ctx
+ const { CodeBlock, CalloutBox, Markdown, Blockquote, dedent, snippets } = ctx
const NotableGenerationProperties = snippets?.NotableGenerationProperties
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK. The Pydantic AI integration uses
- PostHog's OpenAI wrapper.
-
+
+
+ See the complete [Python
+ example](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-pydantic-ai)
+ on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Python
+ wrapper
+ example](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-pydantic-ai).
+
+
-
- >
- ),
- },
- {
- title: 'Install Pydantic AI',
- badge: 'required',
- content: (
- <>
-
- Install Pydantic AI with OpenAI support. PostHog instruments your LLM calls by wrapping the
- OpenAI client that Pydantic AI uses.
-
+ Install the OpenTelemetry SDK and Pydantic AI.
>
),
},
{
- title: 'Initialize PostHog and Pydantic AI',
+ title: 'Set up OpenTelemetry tracing',
badge: 'required',
content: (
<>
- Initialize PostHog with your project token and host from [your project
- settings](https://app.posthog.com/settings/project), then create a PostHog `AsyncOpenAI`
- wrapper, pass it to an `OpenAIProvider`, and use that with Pydantic AI's `OpenAIChatModel`.
+ Configure OpenTelemetry to export traces to PostHog and enable Pydantic AI's built-in OTel
+ instrumentation. PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
- )
- openai_client = AsyncOpenAI(
- api_key="your_openai_api_key",
- posthog_client=posthog
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+                            endpoint="<ph_client_api_host>/i/v0/ai/otel",
+                            headers={"Authorization": "Bearer <ph_project_api_key>"},
)
- provider = OpenAIProvider(openai_client=openai_client)
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
- model = OpenAIChatModel(
- "gpt-5-mini",
- provider=provider
- )
+ # Enable automatic OTel instrumentation for all Pydantic AI agents
+ Agent.instrument_all()
`}
/>
-
-
-
- PostHog's `AsyncOpenAI` wrapper is a proper subclass of `openai.AsyncOpenAI`, so it works
- directly as the client for Pydantic AI's `OpenAIProvider`. PostHog captures `$ai_generation`
- events automatically without proxying your calls.
-
-
>
),
},
@@ -101,27 +83,32 @@ export const getPydanticAISteps = (ctx: OnboardingComponentsContext): StepDefini
content: (
<>
- Create a Pydantic AI agent with the model and run it. PostHog automatically captures an
- `$ai_generation` event for each LLM call.
+ Create a Pydantic AI agent and run it. PostHog automatically captures an `$ai_generation` event
+ for each LLM call via the OTel instrumentation.
+
+
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
+
+
+
{dedent`
You can expect captured \`$ai_generation\` events to have the following properties:
diff --git a/docs/onboarding/llm-analytics/semantic-kernel.tsx b/docs/onboarding/llm-analytics/semantic-kernel.tsx
index 1388015ba892..cccee595933c 100644
--- a/docs/onboarding/llm-analytics/semantic-kernel.tsx
+++ b/docs/onboarding/llm-analytics/semantic-kernel.tsx
@@ -3,121 +3,118 @@ import { OnboardingComponentsContext, createInstallation } from 'scenes/onboardi
import { StepDefinition } from '../steps'
export const getSemanticKernelSteps = (ctx: OnboardingComponentsContext): StepDefinition[] => {
- const { CodeBlock, CalloutBox, Markdown, dedent, snippets } = ctx
+ const { CodeBlock, CalloutBox, Markdown, Blockquote, dedent, snippets } = ctx
const NotableGenerationProperties = snippets?.NotableGenerationProperties
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK. The Semantic Kernel integration
- uses PostHog's OpenAI wrapper.
-
+
+
+ See the complete [Python
+ example](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-semantic-kernel)
+ on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Python
+ wrapper
+ example](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-semantic-kernel).
+
+
-
- >
- ),
- },
- {
- title: 'Install Semantic Kernel',
- badge: 'required',
- content: (
- <>
-
- Install Semantic Kernel with OpenAI support. PostHog instruments your LLM calls by wrapping the
- OpenAI client that Semantic Kernel uses under the hood.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and Semantic Kernel.
>
),
},
{
- title: 'Initialize PostHog and Semantic Kernel',
+ title: 'Set up OpenTelemetry tracing',
badge: 'required',
content: (
<>
- Initialize PostHog with your project token and host from [your project
- settings](https://app.posthog.com/settings/project), then create a PostHog `AsyncOpenAI` wrapper
- and pass it to Semantic Kernel's `OpenAIChatCompletion` service.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+                            endpoint="<ph_client_api_host>/i/v0/ai/otel",
+                            headers={"Authorization": "Bearer <ph_project_api_key>"},
)
- openai_client = AsyncOpenAI(
- api_key="your_openai_api_key",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
- kernel = Kernel()
- kernel.add_service(
- OpenAIChatCompletion(
- ai_model_id="gpt-5-mini",
- async_client=openai_client,
- )
- )
+ OpenAIInstrumentor().instrument()
`}
/>
-
-
-
- PostHog's `AsyncOpenAI` wrapper is a proper subclass of `openai.AsyncOpenAI`, so it works
- directly as the `async_client` parameter in Semantic Kernel's `OpenAIChatCompletion`.
- PostHog captures `$ai_generation` events automatically without proxying your calls.
-
-
>
),
},
{
- title: 'Run your kernel function',
+ title: 'Run your kernel',
badge: 'required',
content: (
<>
Use Semantic Kernel as normal. PostHog automatically captures an `$ai_generation` event for each
- LLM call made through the wrapped client.
+ LLM call made through the OpenAI SDK that Semantic Kernel uses internally.
+
+
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
+
+
+
{dedent`
You can expect captured \`$ai_generation\` events to have the following properties:
diff --git a/docs/onboarding/llm-analytics/smolagents.tsx b/docs/onboarding/llm-analytics/smolagents.tsx
index 7710fe7e1aef..1bebaa8c940f 100644
--- a/docs/onboarding/llm-analytics/smolagents.tsx
+++ b/docs/onboarding/llm-analytics/smolagents.tsx
@@ -3,91 +3,75 @@ import { OnboardingComponentsContext, createInstallation } from 'scenes/onboardi
import { StepDefinition } from '../steps'
export const getSmolagentsSteps = (ctx: OnboardingComponentsContext): StepDefinition[] => {
- const { CodeBlock, CalloutBox, Markdown, dedent, snippets } = ctx
+ const { CodeBlock, CalloutBox, Markdown, Blockquote, dedent, snippets } = ctx
const NotableGenerationProperties = snippets?.NotableGenerationProperties
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK. The smolagents integration uses
- PostHog's OpenAI wrapper.
-
+
+
+ See the complete [Python
+ example](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-smolagents)
+ on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see the [Python
+ wrapper
+ example](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-smolagents).
+
+
-
- >
- ),
- },
- {
- title: 'Install smolagents and OpenAI',
- badge: 'required',
- content: (
- <>
-
- Install smolagents and the OpenAI SDK. PostHog instruments your LLM calls by wrapping the OpenAI
- client, which you can pass to smolagents' `OpenAIServerModel`.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and smolagents.
>
),
},
{
- title: 'Initialize PostHog and smolagents',
+ title: 'Set up OpenTelemetry tracing',
badge: 'required',
content: (
<>
- Initialize PostHog with your project token and host from [your project
- settings](https://app.posthog.com/settings/project), then create a PostHog OpenAI wrapper and
- pass it to smolagents' `OpenAIServerModel`.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+                            endpoint="<ph_client_api_host>/i/v0/ai/otel",
+                            headers={"Authorization": "Bearer <ph_project_api_key>"},
)
- openai_client = OpenAI(
- api_key="your_openai_api_key",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
- model = OpenAIServerModel(
- model_id="gpt-5-mini",
- client=openai_client,
- )
+ OpenAIInstrumentor().instrument()
`}
/>
-
-
-
- PostHog's `OpenAI` wrapper is a drop-in replacement for `openai.OpenAI`. By passing it as
- the `client` to `OpenAIServerModel`, all LLM calls made by smolagents are automatically
- captured as `$ai_generation` events.
-
-
>
),
},
@@ -98,25 +82,31 @@ export const getSmolagentsSteps = (ctx: OnboardingComponentsContext): StepDefini
<>
Use smolagents as normal. PostHog automatically captures an `$ai_generation` event for each LLM
- call made through the wrapped OpenAI client.
+ call made through the OpenAI SDK that smolagents uses internally.
+
+
+ **Note:** If you want to capture LLM events anonymously, omit the `posthog.distinct_id`
+ resource attribute. See our docs on [anonymous vs identified
+ events](https://posthog.com/docs/data/anonymous-vs-identified-events) to learn more.
+
+
+
{dedent`
You can expect captured \`$ai_generation\` events to have the following properties:
diff --git a/docs/onboarding/llm-analytics/together-ai.tsx b/docs/onboarding/llm-analytics/together-ai.tsx
index 8a2fb8b74377..684473633c37 100644
--- a/docs/onboarding/llm-analytics/together-ai.tsx
+++ b/docs/onboarding/llm-analytics/together-ai.tsx
@@ -9,45 +9,26 @@ export const getTogetherAISteps = (ctx: OnboardingComponentsContext): StepDefini
return [
{
- title: 'Install the PostHog SDK',
+ title: 'Install dependencies',
badge: 'required',
content: (
<>
-
- Setting up analytics starts with installing the PostHog SDK for your language. LLM analytics
- works best with our Python and Node SDKs.
-
+
+
+ See the complete
+ [Node.js](https://github.com/PostHog/posthog-js/tree/main/examples/example-ai-together-ai)
+ and
+ [Python](https://github.com/PostHog/posthog-python/tree/master/examples/example-ai-together-ai)
+ examples on GitHub. If you're using the PostHog SDK wrapper instead of OpenTelemetry, see
+ the [Node.js
+ wrapper](https://github.com/PostHog/posthog-js/tree/e08ff1be/examples/example-ai-together-ai)
+ and [Python
+ wrapper](https://github.com/PostHog/posthog-python/tree/7223c52/examples/example-ai-together-ai)
+ examples.
+
+
-
- >
- ),
- },
- {
- title: 'Install the OpenAI SDK',
- badge: 'required',
- content: (
- <>
-
- Install the OpenAI SDK. The PostHog SDK instruments your LLM calls by wrapping the OpenAI
- client. The PostHog SDK **does not** proxy your calls.
-
+ Install the OpenTelemetry SDK, the OpenAI instrumentation, and the OpenAI SDK.
- We call Together AI through the OpenAI client and generate a response. We'll use PostHog's
- OpenAI provider to capture all the details of the call. Initialize PostHog with your PostHog
- project token and host from [your project settings](https://app.posthog.com/settings/project),
- then pass the PostHog client along with the Together AI config (the base URL and API key) to our
- OpenAI wrapper.
+ Configure OpenTelemetry to auto-instrument OpenAI SDK calls and export traces to PostHog.
+ PostHog converts `gen_ai.*` spans into `$ai_generation` events automatically.
",
- host=""
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+ from opentelemetry.sdk.resources import Resource, SERVICE_NAME
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+ from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
+
+ resource = Resource(attributes={
+ SERVICE_NAME: "my-app",
+ "posthog.distinct_id": "user_123", # optional: identifies the user in PostHog
+ "foo": "bar", # custom properties are passed through
+ })
+
+ exporter = OTLPSpanExporter(
+                            endpoint="<ph_client_api_host>/i/v0/ai/otel",
+                            headers={"Authorization": "Bearer <ph_project_api_key>"},
)
- client = OpenAI(
- base_url="https://api.together.xyz/v1",
- api_key="",
- posthog_client=posthog
- )
+ provider = TracerProvider(resource=resource)
+ provider.add_span_processor(SimpleSpanProcessor(exporter))
+ trace.set_tracer_provider(provider)
+
+ OpenAIInstrumentor().instrument()
`,
},
{
language: 'typescript',
file: 'Node',
code: dedent`
- import { OpenAI } from '@posthog/ai'
- import { PostHog } from 'posthog-node'
-
- const phClient = new PostHog(
-                        '<ph_project_api_key>',
-                        { host: '<ph_client_api_host>' }
- );
-
- const openai = new OpenAI({
- baseURL: 'https://api.together.xyz/v1',
- apiKey: '',
- posthog: phClient,
- });
-
- // ... your code here ...
-
- // IMPORTANT: Shutdown the client when you're done to ensure all events are sent
- phClient.shutdown()
+ import { NodeSDK, tracing } from '@opentelemetry/sdk-node'
+ import { resourceFromAttributes } from '@opentelemetry/resources'
+ import { PostHogTraceExporter } from '@posthog/ai/otel'
+ import { OpenAIInstrumentation } from '@opentelemetry/instrumentation-openai'
+
+ const sdk = new NodeSDK({
+ resource: resourceFromAttributes({
+ 'service.name': 'my-app',
+ 'posthog.distinct_id': 'user_123', // optional: identifies the user in PostHog
+ foo: 'bar', // custom properties are passed through
+ }),
+ spanProcessors: [
+ new tracing.SimpleSpanProcessor(
+ new PostHogTraceExporter({
+                                apiKey: '<ph_project_api_key>',
+ host: '